| repo_name | ref | path | copies | content |
|---|---|---|---|---|
Chiel92/evolutionary-computing
|
refs/heads/master
|
final1/source/setup.py
|
1
|
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
setup(
ext_modules=cythonize("*.pyx",
compiler_directives={'profile': True})
)
|
doismellburning/django
|
refs/heads/master
|
django/contrib/gis/tests/geoapp/feeds.py
|
367
|
from __future__ import unicode_literals
from django.contrib.gis import feeds
from .models import City
class TestGeoRSS1(feeds.Feed):
link = '/city/'
title = 'Test GeoDjango Cities'
def items(self):
return City.objects.all()
def item_link(self, item):
return '/city/%s/' % item.pk
def item_geometry(self, item):
return item.point
class TestGeoRSS2(TestGeoRSS1):
def geometry(self, obj):
# This should attach a <georss:box> element for the extent of
# the cities in the database. This tuple came from
# calling `City.objects.extent()` -- we can't do that call here
# because `extent` is not implemented for MySQL/Oracle.
return (-123.30, -41.32, 174.78, 48.46)
def item_geometry(self, item):
# Returning a simple tuple for the geometry.
return item.point.x, item.point.y
class TestGeoAtom1(TestGeoRSS1):
feed_type = feeds.GeoAtom1Feed
class TestGeoAtom2(TestGeoRSS2):
feed_type = feeds.GeoAtom1Feed
def geometry(self, obj):
# This time we'll use a 2-tuple of coordinates for the box.
return ((-123.30, -41.32), (174.78, 48.46))
class TestW3CGeo1(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
# The following feeds are invalid, and will raise exceptions.
class TestW3CGeo2(TestGeoRSS2):
feed_type = feeds.W3CGeoFeed
class TestW3CGeo3(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
def item_geometry(self, item):
from django.contrib.gis.geos import Polygon
return Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
# The feed dictionary to use for URLs.
feed_dict = {
'rss1': TestGeoRSS1,
'rss2': TestGeoRSS2,
'atom1': TestGeoAtom1,
'atom2': TestGeoAtom2,
'w3cgeo1': TestW3CGeo1,
'w3cgeo2': TestW3CGeo2,
'w3cgeo3': TestW3CGeo3,
}
|
Tesora/tesora-tempest
|
refs/heads/master
|
tempest/common/custom_matchers.py
|
2
|
# Copyright 2013 NTT Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import six
from testtools import helpers
class ExistsAllResponseHeaders(object):
"""Specific matcher to check the existence of Swift's response headers
This matcher checks the existence of common headers for each HTTP method
or the target, which means account, container or object.
When checking the existence of 'specific' headers such as
X-Account-Meta-* or X-Object-Manifest for example, those headers must be
checked in each test code.
"""
def __init__(self, target, method, policies=None):
"""Initialization of ExistsAllResponseHeaders
param: target Account/Container/Object
param: method PUT/GET/HEAD/DELETE/COPY/POST
"""
self.target = target
self.method = method
self.policies = policies or []
def _content_length_required(self, resp):
# Verify whether given HTTP response must contain content-length.
# Take into account the exceptions defined in RFC 7230.
if resp.status in range(100, 200) or resp.status == 204:
return False
return True
def match(self, actual):
"""Check headers
param: actual HTTP response object containing headers and status
"""
# Check common headers for all HTTP methods.
#
# Please note that for 1xx and 204 responses Content-Length presence
# is intentionally not checked. According to RFC 7230 a server MUST
# NOT send the header in such responses. Thus, clients should not
# depend on this header. However, the standard does not require them
# to validate the server's behavior. We leverage that to avoid rejecting
# implementations that violate it, such as Swift [1] or some versions of
# Ceph RadosGW [2].
# [1] https://bugs.launchpad.net/swift/+bug/1537811
# [2] http://tracker.ceph.com/issues/13582
if ('content-length' not in actual and
self._content_length_required(actual)):
return NonExistentHeader('content-length')
if 'content-type' not in actual:
return NonExistentHeader('content-type')
if 'x-trans-id' not in actual:
return NonExistentHeader('x-trans-id')
if 'date' not in actual:
return NonExistentHeader('date')
# Check headers for a specific method or target
if self.method == 'GET' or self.method == 'HEAD':
if 'x-timestamp' not in actual:
return NonExistentHeader('x-timestamp')
if 'accept-ranges' not in actual:
return NonExistentHeader('accept-ranges')
if self.target == 'Account':
if 'x-account-bytes-used' not in actual:
return NonExistentHeader('x-account-bytes-used')
if 'x-account-container-count' not in actual:
return NonExistentHeader('x-account-container-count')
if 'x-account-object-count' not in actual:
return NonExistentHeader('x-account-object-count')
if int(actual['x-account-container-count']) > 0:
acct_header = "x-account-storage-policy-"
matched_policy_count = 0
# Loop through the policies and look for account
# usage data. There should be at least 1 set
for policy in self.policies:
front_header = acct_header + policy['name'].lower()
usage_policies = [
front_header + '-bytes-used',
front_header + '-object-count',
front_header + '-container-count'
]
# There should be 3 usage values for a given storage
# policy in an account: bytes used, object count, and
# container count
policy_hdrs = sum(1 for use_hdr in usage_policies
if use_hdr in actual)
# If there are fewer than 3 headers here then at least one
# is missing; let's figure out which and report it
if policy_hdrs == 3:
matched_policy_count = matched_policy_count + 1
else:
if policy_hdrs > 0 and policy_hdrs < 3:
for use_hdr in usage_policies:
if use_hdr not in actual:
return NonExistentHeader(use_hdr)
# Only flag an error if actual policies have been read and
# no usage has been found
if self.policies and matched_policy_count == 0:
return GenericError("No storage policy usage headers")
elif self.target == 'Container':
if 'x-container-bytes-used' not in actual:
return NonExistentHeader('x-container-bytes-used')
if 'x-container-object-count' not in actual:
return NonExistentHeader('x-container-object-count')
if 'x-storage-policy' not in actual:
return NonExistentHeader('x-storage-policy')
else:
policy_name = actual['x-storage-policy']
# loop through the policies and ensure that
# the value in the container header matches
# one of the storage policies
for policy in self.policies:
if policy['name'] == policy_name:
break
else:
# Ensure that there are actual policies stored
if self.policies:
return InvalidHeaderValue('x-storage-policy',
policy_name)
elif self.target == 'Object':
if 'etag' not in actual:
return NonExistentHeader('etag')
if 'last-modified' not in actual:
return NonExistentHeader('last-modified')
elif self.method == 'PUT':
if self.target == 'Object':
if 'etag' not in actual:
return NonExistentHeader('etag')
if 'last-modified' not in actual:
return NonExistentHeader('last-modified')
elif self.method == 'COPY':
if self.target == 'Object':
if 'etag' not in actual:
return NonExistentHeader('etag')
if 'last-modified' not in actual:
return NonExistentHeader('last-modified')
if 'x-copied-from' not in actual:
return NonExistentHeader('x-copied-from')
if 'x-copied-from-last-modified' not in actual:
return NonExistentHeader('x-copied-from-last-modified')
return None
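# Illustrative usage sketch (not part of the original module): these matcher
# classes follow the testtools matcher protocol, where match() returns None
# on success or a mismatch-like object exposing describe() and get_details()
# on failure. In a testtools-based test case they would typically be consumed
# via assertThat, roughly as follows (resp is assumed to be a response object
# exposing headers and status):
#
#     self.assertThat(resp, ExistsAllResponseHeaders('Account', 'GET'))
#     self.assertThat(resp, AreAllWellFormatted())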
class GenericError(object):
"""Informs an error message of a generic error during header evaluation"""
def __init__(self, body):
self.body = body
def describe(self):
return "%s" % self.body
def get_details(self):
return {}
class NonExistentHeader(object):
"""Informs an error message in the case of missing a certain header"""
def __init__(self, header):
self.header = header
def describe(self):
return "%s header does not exist" % self.header
def get_details(self):
return {}
class InvalidHeaderValue(object):
"""Informs an error message when a header contains a bad value"""
def __init__(self, header, value):
self.header = header
self.value = value
def describe(self):
return "InvalidValue (%s, %s)" % (self.header, self.value)
def get_details(self):
return {}
class AreAllWellFormatted(object):
"""Specific matcher to check the correctness of formats of values
This matcher checks the format of values of response headers.
When checking the format of values of 'specific' headers such as
X-Account-Meta-* or X-Object-Manifest for example, those values must be
checked in each test code.
"""
def match(self, actual):
for key, value in six.iteritems(actual):
if key in ('content-length', 'x-account-bytes-used',
'x-account-container-count', 'x-account-object-count',
'x-container-bytes-used', 'x-container-object-count')\
and not value.isdigit():
return InvalidFormat(key, value)
elif key in ('content-type', 'date', 'last-modified',
'x-copied-from-last-modified') and not value:
return InvalidFormat(key, value)
elif key == 'x-timestamp' and not re.match(r"^\d+\.?\d*\Z", value):
return InvalidFormat(key, value)
elif key == 'x-copied-from' and not re.match(r"\S+/\S+", value):
return InvalidFormat(key, value)
elif key == 'x-trans-id' and \
not re.match(r"^tx[0-9a-f]{21}-[0-9a-f]{10}.*", value):
return InvalidFormat(key, value)
elif key == 'accept-ranges' and not value == 'bytes':
return InvalidFormat(key, value)
elif key == 'etag' and not value.isalnum():
return InvalidFormat(key, value)
elif key == 'transfer-encoding' and not value == 'chunked':
return InvalidFormat(key, value)
return None
class InvalidFormat(object):
"""Informs an error message if a format of a certain header is invalid"""
def __init__(self, key, value):
self.key = key
self.value = value
def describe(self):
return "InvalidFormat (%s, %s)" % (self.key, self.value)
def get_details(self):
return {}
class MatchesDictExceptForKeys(object):
"""Matches two dictionaries.
Verifies all items are equal except for those identified by a list of keys
"""
def __init__(self, expected, excluded_keys=None):
self.expected = expected
self.excluded_keys = excluded_keys if excluded_keys is not None else []
def match(self, actual):
filtered_expected = helpers.dict_subtract(self.expected,
self.excluded_keys)
filtered_actual = helpers.dict_subtract(actual,
self.excluded_keys)
if filtered_actual != filtered_expected:
return DictMismatch(filtered_expected, filtered_actual)
class DictMismatch(object):
"""Mismatch between two dicts describes deltas"""
def __init__(self, expected, actual):
self.expected = expected
self.actual = actual
self.intersect = set(self.expected) & set(self.actual)
self.symmetric_diff = set(self.expected) ^ set(self.actual)
def _format_dict(self, dict_to_format):
# Ensure the error string dict is printed in a set order
# NOTE(mtreinish): needed to ensure a deterministic error msg for
# testing. Otherwise the error message will be dependent on the
# dict ordering.
dict_string = "{"
for key in sorted(dict_to_format):
dict_string += "'%s': %s, " % (key, dict_to_format[key])
dict_string = dict_string[:-2] + '}'
return dict_string
def describe(self):
msg = ""
if self.symmetric_diff:
only_expected = helpers.dict_subtract(self.expected, self.actual)
only_actual = helpers.dict_subtract(self.actual, self.expected)
if only_expected:
msg += "Only in expected:\n %s\n" % self._format_dict(
only_expected)
if only_actual:
msg += "Only in actual:\n %s\n" % self._format_dict(
only_actual)
diff_set = set(o for o in self.intersect if
self.expected[o] != self.actual[o])
if diff_set:
msg += "Differences:\n"
for o in diff_set:
msg += " %s: expected %s, actual %s\n" % (
o, self.expected[o], self.actual[o])
return msg
def get_details(self):
return {}
|
akintolga/superdesk-aap
|
refs/heads/master
|
server/aap_mm/aap_mm_datalayer.py
|
1
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import datetime
import json
import logging
import urllib
from io import BytesIO
import urllib3
from eve.io.base import DataLayer
from eve_elastic.elastic import ElasticCursor
from flask import url_for
import pytz
from superdesk.errors import SuperdeskApiError
from superdesk.io.iptc import subject_codes
from superdesk.media.media_operations import process_file_from_stream, decode_metadata
from superdesk.media.renditions import generate_renditions, delete_file_on_error
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE
from superdesk.utc import utcnow
urllib3.disable_warnings()
logger = logging.getLogger(__name__)
class AAPMMDatalayer(DataLayer):
def __set_auth_cookie(self, app):
if self._username is not None and self._password is not None:
url = app.config['AAP_MM_SEARCH_URL'] + '/Users/login'
values = {'username': self._username, 'password': self._password}
r = self._http.urlopen('POST', url, headers={'Content-Type': 'application/json'}, body=json.dumps(values))
else:
url = app.config['AAP_MM_SEARCH_URL'] + '/Users/AnonymousToken'
r = self._http.request('GET', url, redirect=False)
self._headers = {'cookie': r.getheader('set-cookie'), 'Content-Type': 'application/json'}
def set_credentials(self, username, password):
if username and username != self._username and password and password != self._password:
self._username = username
self._password = password
self.__set_auth_cookie(self._app)
def init_app(self, app):
app.config.setdefault('AAP_MM_SEARCH_URL', 'https://one-api.aap.com.au/api/v3')
app.config.setdefault('AAP_MM_CDN_URL', 'http://one-cdn.aap.com.au/Preview.mp4')
self._app = app
self._headers = None
self._username = None
self._password = None
self._http = urllib3.PoolManager()
def find(self, resource, req, lookup):
"""
Called to execute a search against the AAP Multimedia API. It attempts to translate the search request
passed in req to a suitable form for a search request against the API. It parses the response into a
suitable ElasticCursor, so the front end will never know the difference.
:param resource:
:param req:
:param lookup:
:return:
"""
if self._headers is None:
self.__set_auth_cookie(self._app)
url = self._app.config['AAP_MM_SEARCH_URL'] + '/Assets/search'
query_keywords = '*:*'
if 'query' in req['query']['filtered']:
query_keywords = req['query']['filtered']['query']['query_string']['query']
query_keywords = query_keywords.replace('slugline:', 'objectname:')
query_keywords = query_keywords.replace('description_text:', 'captionabstract:')
fields = {}
for criterion in req.get('post_filter', {}).get('and', {}):
# parse out the date range if possible
if 'range' in criterion:
start = None
end = None
daterange = None
if 'firstcreated' in criterion.get('range', {}):
if 'gte' in criterion['range']['firstcreated']:
start = criterion['range']['firstcreated']['gte'][0:10]
if 'lte' in criterion['range']['firstcreated']:
end = criterion['range']['firstcreated']['lte'][0:10]
# if there is a special start and no end it's one of the date buttons
if start and not end:
if start == 'now-24H':
daterange = {'Dates': ['[NOW/HOUR-24HOURS TO NOW/HOUR]']}
if start == 'now-1w':
daterange = {'Dates': ['[NOW/DAY-7DAYS TO NOW/DAY]']}
if start == 'now-1M':
daterange = {'Dates': ['[NOW/DAY-1MONTH TO NOW/DAY]']}
# we've got something but no daterange set above
if (start or end) and not daterange:
daterange = {'DateRange': [{'Start': start, 'End': end}], 'DateCreatedFilter': 'true'}
if daterange:
fields.update(daterange)
if 'terms' in criterion:
if 'type' in criterion.get('terms', {}):
fields.update({'MediaTypes': [self.map_types(key, False) for key in criterion['terms']['type']]})
if 'credit' in criterion.get('terms', {}):
fields.update({'Credits': criterion['terms']['credit']})
if 'anpa_category.name' in criterion.get('terms', {}):
cat_list = []
for cat in criterion['terms']['anpa_category.name']:
qcode = [key for key, value in subject_codes.items() if value == cat]
if qcode:
for code in qcode:
cat_list.append(code)
else:
cat_list.append(cat)
fields.update({'Categories': cat_list})
# restricting only to image for initial implementation.
if not fields.get('MediaTypes'):
fields.update({'MediaTypes': self._app.config['AAP_MM_SEARCH_MEDIA_TYPES']})
size = int(req.get('size', '25')) if int(req.get('size', '25')) > 0 else 25
query = {'Query': query_keywords, 'pageSize': str(size),
'pageNumber': str(int(req.get('from', '0')) // size + 1)}
r = self._http.urlopen('POST', url + '?' + urllib.parse.urlencode(query),
body=json.dumps(fields), headers=self._headers)
hits = self._parse_hits(json.loads(r.data.decode('UTF-8')))
return ElasticCursor(docs=hits['docs'], hits={'hits': hits, 'aggregations': self._parse_aggregations(hits)})
def _parse_doc(self, doc):
new_doc = {'_id': doc['AssetId'], 'guid': doc['AssetId'], 'headline': doc['Title'],
'description_text': doc['Description'], 'source': doc['Credit']}
if 'Source' in doc:
new_doc['original_source'] = doc['Credit'] + '/' + str(doc.get('Source', ''))
else:
new_doc['original_source'] = doc['Credit']
new_doc['versioncreated'] = self._datetime(doc['ModifiedDate'])
new_doc['firstcreated'] = self._datetime(doc['CreationDate'])
new_doc['pubstatus'] = 'usable'
# This must match the action
new_doc['_type'] = 'externalsource'
# entry that the client can use to identify the fetch endpoint
new_doc['fetch_endpoint'] = 'search_providers_proxy'
new_doc[ITEM_TYPE] = self.map_types(doc['AssetType'].lower())
if doc['AssetType'] == 'VIDEO':
purl = '{}?assetType=VIDEO&'.format(self._app.config['AAP_MM_CDN_URL'])
purl += 'path=/rest/aap/archives/imagearc/dossiers/{}'.format(doc['AssetId'])
purl += '/files/ipod&assetId={}&mimeType=video/mp4&dummy.mp4'.format(doc['AssetId'])
new_doc['renditions'] = {'original': {'href': purl, 'mimetype': 'video/mp4'}}
else:
new_doc['renditions'] = {
'viewImage': {'href': doc.get('Preview', doc.get('Layout'))['Href']},
'thumbnail': {'href': doc.get('Thumbnail', doc.get('Layout'))['Href']},
'original': {'href': doc.get('Preview', doc.get('Layout'))['Href']},
'baseImage': {'href': doc.get('Preview', doc.get('Layout'))['Href']},
}
new_doc['slugline'] = doc['Title']
new_doc['byline'] = doc['Byline']
new_doc['ednote'] = doc['SpecialInstructions']
doc.clear()
doc.update(new_doc)
def _parse_hits(self, hits):
hits['docs'] = hits.pop('Assets')
hits['total'] = hits.pop('Total')
for doc in hits['docs']:
self._parse_doc(doc)
return hits
def _parse_aggregation(self, aggregations, facet, aggregation, hits):
"""
Converts the "facet" to the "aggregate" based on the FacetResults in hits returns the equivalent
aggregation in aggregations
:param aggregations:
:param facet:
:param aggregation:
:param hits:
:return:
"""
if 'FacetResults' in hits and facet in hits.get('FacetResults', {}):
buckets = []
name_set = set()
for cat in hits.get('FacetResults', {}).get(facet, {}):
if cat['DisplayName'] in name_set:
buckets.append({'doc_count': cat['Count'], 'key': cat['DisplayName'] + '/' + cat['Name'],
'qcode': cat['Name']})
elif facet == "MediaTypes":
buckets.append({'doc_count': cat['Count'],
'key': self.map_types(cat['DisplayName'].lower()), 'qcode': cat['Name']})
name_set.add(cat['DisplayName'])
else:
buckets.append({'doc_count': cat['Count'], 'key': cat['DisplayName'], 'qcode': cat['Name']})
name_set.add(cat['DisplayName'])
aggregations[aggregation] = {'buckets': buckets}
def _parse_aggregation_dates(self, aggregations, hits):
"""
Extract the date related facets and convert into aggregations
:param aggregations:
:param hits:
:return:
"""
if 'FacetResults' in hits and 'Dates' in hits.get('FacetResults', {}):
for date in hits.get('FacetResults', {}).get('Dates', {}):
if date['Name'] == '[NOW/HOUR-24HOURS TO NOW/HOUR]':
aggregations['day'] = {'buckets': [{'doc_count': date['Count'], 'key': date['Name']}]}
if date['Name'] == '[NOW/DAY-7DAYS TO NOW/DAY]':
aggregations['week'] = {'buckets': [{'doc_count': date['Count'], 'key': date['Name']}]}
if date['Name'] == '[NOW/DAY-1MONTH TO NOW/DAY]':
aggregations['month'] = {'buckets': [{'doc_count': date['Count'], 'key': date['Name']}]}
def _parse_aggregations(self, hits):
"""
Given the hits returned from the AAP Multimedia API it will convert them to the same format as the
Aggregations returned from the superdesk search against Elastic
:param hits:
:return: The converted aggregations
"""
aggregations = {}
self._parse_aggregation(aggregations, 'Categories', 'category', hits)
self._parse_aggregation(aggregations, 'MediaTypes', 'type', hits)
self._parse_aggregation(aggregations, 'Credits', 'credit', hits)
self._parse_aggregation_dates(aggregations, hits)
hits.pop('FacetResults')
return aggregations
def _datetime(self, string):
try:
dt = datetime.datetime.strptime(string[0:19] + string[19:25].replace(':', ''),
'%Y-%m-%dT%H:%M:%S%z').astimezone(pytz.utc)
except:
dt = utcnow()
return dt
def _get_resolutions(self, id):
url = self._app.config['AAP_MM_SEARCH_URL'] + '/Assets/Resolutions'
values = [id]
headers = dict(self._headers)
headers['Content-Type'] = 'application/json'
r = self._http.urlopen('POST', url, headers=headers, body=json.dumps(values))
return json.loads(r.data.decode('UTF-8'))
def find_all(self, resource, max_results=1000):
raise NotImplementedError
def find_one(self, resource, req, **lookup):
raise NotImplementedError
def find_one_raw(self, resource, _id):
if self._headers is None:
self.__set_auth_cookie(self._app)
url = self._app.config['AAP_MM_SEARCH_URL'] + '/Assets/{}'.format(_id)
r = self._http.request('GET', url, headers=self._headers)
doc = json.loads(r.data.decode('UTF-8'))
self._parse_doc(doc)
if 'fetch_endpoint' in doc:
del doc['fetch_endpoint']
# Only if we have credentials can we download the original if the account has that privilege
if self._username is not None and self._password is not None:
resolutions = self._get_resolutions(_id)
if doc[ITEM_TYPE] == CONTENT_TYPE.PICTURE:
if any(i['Name'] == 'Original' for i in resolutions['Image']):
url = self._app.config['AAP_MM_SEARCH_URL'] + '/Assets/{}/Original/download'.format(_id)
mime_type = 'image/jpeg'
else:
raise FileNotFoundError
elif doc[ITEM_TYPE] == CONTENT_TYPE.VIDEO:
if any(v['Name'] == 'Ipod' for v in resolutions['Video']):
url = self._app.config['AAP_MM_SEARCH_URL'] + '/Assets/{}/Ipod/download'.format(_id)
mime_type = doc.get('renditions').get('original').get('mimetype')
else:
raise FileNotFoundError
else:
raise NotImplementedError
else:
if doc[ITEM_TYPE] == CONTENT_TYPE.VIDEO:
mime_type = doc.get('renditions').get('original').get('mimetype')
else:
mime_type = 'image/jpeg'
url = doc['renditions']['original']['href']
r = self._http.request('GET', url, headers=self._headers)
out = BytesIO(r.data)
file_name, content_type, metadata = process_file_from_stream(out, mime_type)
inserted = []
try:
logger.debug('Going to save media file with %s ' % file_name)
out.seek(0)
file_id = self._app.media.put(out, filename=file_name, content_type=content_type, metadata=None)
doc['mimetype'] = content_type
doc['filemeta'] = decode_metadata(metadata)
# set the version created to now to bring it to the top of the desk, images can be quite old
doc['versioncreated'] = utcnow()
inserted = [file_id]
file_type = content_type.split('/')[0]
rendition_spec = self._app.config['RENDITIONS']['picture']
renditions = generate_renditions(out, file_id, inserted, file_type,
content_type, rendition_spec,
self.url_for_media, insert_metadata=False)
doc['renditions'] = renditions
except Exception as io:
logger.exception(io)
for file_id in inserted:
delete_file_on_error(doc, file_id)
raise SuperdeskApiError.internalError('Generating renditions failed')
return doc
def url_for_media(self, media_id, mimetype=None):
return url_for('upload_raw.get_upload_as_data_uri', media_id=media_id,
_external=True, _schema=self._app.config['URL_PROTOCOL'])
def find_list_of_ids(self, resource, ids, client_projection=None):
raise NotImplementedError()
def insert(self, resource, docs, **kwargs):
raise NotImplementedError()
def update(self, resource, id_, updates, original):
raise NotImplementedError()
def update_all(self, resource, query, updates):
raise NotImplementedError()
def replace(self, resource, id_, document, original):
raise NotImplementedError()
def remove(self, resource, lookup=None):
raise NotImplementedError()
def is_empty(self, resource):
raise NotImplementedError()
def query_contains_field(self, query, field_name):
raise NotImplementedError()
def get_value_from_query(self, query, field_name):
raise NotImplementedError()
def combine_queries(self, query_a, query_b):
raise NotImplementedError()
def map_types(self, key, to_superdesk=True):
"""
:param key:
:param to_superdesk:
:return:
"""
aap_to_superdesk = {
'image': 'picture',
'video': 'video',
'audio': 'audio',
'graphics': 'graphic',
'interactive': 'interactive'
}
superdesk_to_aap = dict(reversed(item) for item in aap_to_superdesk.items())
return aap_to_superdesk.get(key, 'Unknown') if to_superdesk else superdesk_to_aap.get(key, 'Unknown')
|
Zhongqilong/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/chardet/chardistribution.py
|
2754
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
# we only care about 2-byte characters in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
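# Worked example with illustrative numbers (not from the original code): with
# _mTotalChars = 1000, _mFreqChars = 600 and a typical distribution ratio of
# 0.75, r = 600 / ((1000 - 600) * 0.75) = 2.0, which is clamped to SURE_YES
# (0.99); with only 300 frequent characters, r = 300 / (700 * 0.75) ~= 0.57,
# and that value is returned directly as the confidence.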
def got_enough_data(self):
# It is not necessary to receive all data to draw a conclusion.
# For charset detection, a certain amount of data is enough
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
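# Worked example with illustrative byte pairs (not from the original code):
# the Big5 pair 0xA5 0x40 maps to 157 * (0xA5 - 0xA4) + (0x40 - 0x40) = 157,
# while 0xA4 0xA1 maps to 157 * 0 + (0xA1 - 0xA1) + 63 = 63; the +63 offset
# accounts for the 63 code points of the 0x40 -- 0x7e sub-range that precede
# 0xA1 -- 0xfe within each row.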
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
# second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
|
adrianholovaty/django
|
refs/heads/master
|
django/template/loaders/app_directories.py
|
5
|
"""
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
import os
import sys
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
from django.utils.importlib import import_module
# At compile time, cache the directories to search.
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
app_template_dirs = []
for app in settings.INSTALLED_APPS:
try:
mod = import_module(app)
except ImportError as e:
raise ImproperlyConfigured('ImportError %s: %s' % (app, e.args[0]))
template_dir = os.path.join(os.path.dirname(mod.__file__), 'templates')
if os.path.isdir(template_dir):
app_template_dirs.append(template_dir.decode(fs_encoding))
# It won't change, so convert it to a tuple to save memory.
app_template_dirs = tuple(app_template_dirs)
class Loader(BaseLoader):
is_usable = True
def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = app_template_dirs
for template_dir in template_dirs:
try:
yield safe_join(template_dir, template_name)
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of template_dir.
pass
def load_template_source(self, template_name, template_dirs=None):
for filepath in self.get_template_sources(template_name, template_dirs):
try:
with open(filepath) as fp:
return (fp.read().decode(settings.FILE_CHARSET), filepath)
except IOError:
pass
raise TemplateDoesNotExist(template_name)
_loader = Loader()
|
Mohamed711/Quiz-Program
|
refs/heads/master
|
vendor/bundle/ruby/2.2.0/gems/libv8-3.16.14.7/vendor/v8/tools/testrunner/network/perfdata.py
|
100
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shelve
import threading
class PerfDataEntry(object):
def __init__(self):
self.avg = 0.0
self.count = 0
def AddResult(self, result):
kLearnRateLimiter = 99 # Greater value means slower learning.
# We use an approximation of the average of the last 100 results here:
# The existing average is weighted with kLearnRateLimiter (or less
# if there are fewer data points).
effective_count = min(self.count, kLearnRateLimiter)
self.avg = self.avg * effective_count + result
self.count = effective_count + 1
self.avg /= self.count
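# Worked example with illustrative values (not from the original code):
# starting from avg = 10.0 and count = 99 (the kLearnRateLimiter cap), a new
# result of 20.0 gives avg = (10.0 * 99 + 20.0) / 100 = 10.1 and count = 100;
# on later calls the effective count stays capped at 99, so each new result
# contributes roughly a 1% weight to the running average.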
class PerfDataStore(object):
def __init__(self, datadir, arch, mode):
filename = os.path.join(datadir, "%s.%s.perfdata" % (arch, mode))
self.database = shelve.open(filename, protocol=2)
self.closed = False
self.lock = threading.Lock()
def __del__(self):
self.close()
def close(self):
if self.closed: return
self.database.close()
self.closed = True
def GetKey(self, test):
"""Computes the key used to access data for the given testcase."""
flags = "".join(test.flags)
return str("%s.%s.%s" % (test.suitename(), test.path, flags))
def FetchPerfData(self, test):
"""Returns the observed duration for |test| as read from the store."""
key = self.GetKey(test)
if key in self.database:
return self.database[key].avg
return None
def UpdatePerfData(self, test):
"""Updates the persisted value in the store with test.duration."""
testkey = self.GetKey(test)
self.RawUpdatePerfData(testkey, test.duration)
def RawUpdatePerfData(self, testkey, duration):
with self.lock:
if testkey in self.database:
entry = self.database[testkey]
else:
entry = PerfDataEntry()
entry.AddResult(duration)
self.database[testkey] = entry
class PerfDataManager(object):
def __init__(self, datadir):
self.datadir = os.path.abspath(datadir)
if not os.path.exists(self.datadir):
os.makedirs(self.datadir)
self.stores = {} # Keyed by arch, then mode.
self.closed = False
self.lock = threading.Lock()
def __del__(self):
self.close()
def close(self):
if self.closed: return
for arch in self.stores:
modes = self.stores[arch]
for mode in modes:
store = modes[mode]
store.close()
self.closed = True
def GetStore(self, arch, mode):
with self.lock:
if not arch in self.stores:
self.stores[arch] = {}
modes = self.stores[arch]
if not mode in modes:
modes[mode] = PerfDataStore(self.datadir, arch, mode)
return modes[mode]
|
ssaeger/scikit-learn
|
refs/heads/master
|
examples/feature_selection/plot_permutation_test_for_classification.py
|
94
|
"""
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a technique
consists in repeating the classification procedure after randomizing
(permuting) the labels. The p-value is then given by the percentage of
runs for which the score obtained is greater than the classification
score obtained in the first place.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated with the target
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
|
DXCanas/kolibri
|
refs/heads/develop
|
kolibri/core/device/models.py
|
1
|
import time
from django.conf import settings
from django.core.cache import cache
from django.db import models
from .permissions import UserCanManageDevicePermissions
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
device_permissions_fields = [
'is_superuser',
'can_manage_content',
]
class DevicePermissions(models.Model):
"""
This class stores metadata about device permissions for FacilityUsers.
"""
permissions = UserCanManageDevicePermissions()
user = models.OneToOneField(
FacilityUser,
on_delete=models.CASCADE,
related_name='devicepermissions',
blank=False,
null=False,
primary_key=True,
)
is_superuser = models.BooleanField(default=False)
can_manage_content = models.BooleanField(default=False)
class DeviceSettings(models.Model):
"""
This class stores data about settings particular to this device
"""
is_provisioned = models.BooleanField(default=False)
language_id = models.CharField(max_length=15, default=settings.LANGUAGE_CODE)
default_facility = models.ForeignKey(Facility, on_delete=models.SET_NULL, blank=True, null=True)
def save(self, *args, **kwargs):
# Enforce a singleton row: every save writes to primary key 1.
self.pk = 1
super(DeviceSettings, self).save(*args, **kwargs)
CONTENT_CACHE_KEY_CACHE_KEY = 'content_cache_key'
class ContentCacheKey(models.Model):
"""
This class stores a cache key for content models that should be updated
whenever the content metadata stored on the device changes.
"""
key = models.IntegerField(default=time.time)
def save(self, *args, **kwargs):
# Enforce a singleton row, mirroring DeviceSettings above.
self.pk = 1
super(ContentCacheKey, self).save(*args, **kwargs)
@classmethod
def update_cache_key(cls):
cache_key, created = cls.objects.get_or_create()
cache_key.key = time.time()
cache_key.save()
cache.delete(CONTENT_CACHE_KEY_CACHE_KEY)
return cache_key
@classmethod
def get_cache_key(cls):
key = cache.get(CONTENT_CACHE_KEY_CACHE_KEY)
if key is None:
try:
cache_key = cls.objects.get()
except cls.DoesNotExist:
cache_key = cls.update_cache_key()
key = cache_key.key
cache.set(CONTENT_CACHE_KEY_CACHE_KEY, key, 5000)
return key
|
nlandais/ansible-modules-core
|
refs/heads/devel
|
packaging/__init__.py
|
12133432
| |
rudaoshi/neuralmachines
|
refs/heads/master
|
neural_machine/tasks/language/common/corpus/__init__.py
|
12133432
| |
TechBK/horizon-dev
|
refs/heads/master
|
openstack_dashboard/dashboards/logdashboard/detail/__init__.py
|
12133432
| |
talk-to/PjSip-Repo
|
refs/heads/master
|
tests/pjsua/scripts-pesq/201_codec_speex_16000.py
|
42
|
# $Id$
#
from inc_cfg import *
# Call with Speex/16000 codec
test_param = TestParam(
"PESQ codec Speex WB (RX side uses snd dev)",
[
InstanceParam("UA1", "--max-calls=1 --clock-rate 16000 --add-codec speex/16000 --play-file wavs/input.16.wav --null-audio"),
InstanceParam("UA2", "--max-calls=1 --clock-rate 16000 --add-codec speex/16000 --rec-file wavs/tmp.16.wav --auto-answer 200")
]
)
if (HAS_SND_DEV == 0):
test_param.skip = True
pesq_threshold = 3.7
|
jk1/intellij-community
|
refs/heads/master
|
python/helpers/pydev/_pydev_bundle/pydev_localhost.py
|
10
|
from _pydevd_bundle import pydevd_constants
from _pydev_imps._pydev_saved_modules import socket
import sys
IS_JYTHON = sys.platform.find('java') != -1
_cache = None
def get_localhost():
'''
Should return 127.0.0.1 in ipv4 and ::1 in ipv6
localhost is not used because on windows vista/windows 7, there can be issues where the resolving doesn't work
properly and takes a lot of time (had this issue on the pyunit server).
Using the IP directly solves the problem.
'''
# TODO: Needs better investigation!
global _cache
if _cache is None:
try:
for addr_info in socket.getaddrinfo("localhost", 80, 0, 0, socket.SOL_TCP):
config = addr_info[4]
if config[0] == '127.0.0.1':
_cache = '127.0.0.1'
return _cache
except:
# Ok, some versions of Python don't have getaddrinfo or SOL_TCP... Just consider it 127.0.0.1 in this case.
_cache = '127.0.0.1'
else:
_cache = 'localhost'
return _cache
def get_socket_names(n_sockets, close=False):
socket_names = []
sockets = []
for _ in range(n_sockets):
if IS_JYTHON:
# Although the option which would be pure java *should* work for Jython, the socket being returned is still 0
# (i.e.: it doesn't give the local port bound, only the original port, which was 0).
from java.net import ServerSocket
sock = ServerSocket(0)
socket_name = get_localhost(), sock.getLocalPort()
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((get_localhost(), 0))
socket_name = sock.getsockname()
sockets.append(sock)
socket_names.append(socket_name)
if close:
for s in sockets:
s.close()
return socket_names
def get_socket_name(close=False):
return get_socket_names(1, close)[0]
if __name__ == '__main__':
print(get_socket_name())
|
Lightmatter/django-inlineformfield
|
refs/heads/master
|
.tox/py27/lib/python2.7/site-packages/django/template/defaultfilters.py
|
18
|
"""Default variable filters."""
from __future__ import unicode_literals
import re
import random as random_module
from decimal import Decimal, InvalidOperation, Context, ROUND_HALF_UP
from functools import wraps
from pprint import pformat
from django.template.base import Variable, Library, VariableDoesNotExist
from django.conf import settings
from django.utils import formats
from django.utils.dateformat import format, time_format
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import (conditional_escape, escapejs, fix_ampersands,
escape, urlize as _urlize, linebreaks, strip_tags, avoid_wrapping,
remove_tags)
from django.utils.http import urlquote
from django.utils.text import Truncator, wrap, phone2numeric
from django.utils.safestring import mark_safe, SafeData, mark_for_escaping
from django.utils import six
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext, ungettext
from django.utils.text import normalize_newlines, slugify as _slugify
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_text(args[0])
if (isinstance(args[0], SafeData) and
getattr(_dec._decorated_function, 'is_safe', False)):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser, and to bear the 'is_safe' attribute
# when multiple decorators are applied).
_dec._decorated_function = getattr(func, '_decorated_function', func)
return wraps(func)(_dec)
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encodes characters for use in JavaScript strings."""
return escapejs(value)
@register.filter("fix_ampersands", is_safe=True)
@stringfilter
def fix_ampersands_filter(value):
"""Replaces ampersands with ``&`` entities."""
return fix_ampersands(value)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completely invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) // (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = force_text(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return ''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_text(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return ''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format('%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal('1.0') / (Decimal(10) ** abs(p))
try:
# Set the precision high enough to avoid an exception, see #15789.
tupl = d.as_tuple()
units = len(tupl[1]) - tupl[2]
prec = abs(p) + units + 1
# Avoid conversion to scientific notation by accessing `sign`, `digits`
# and `exponent` from `Decimal.as_tuple()` directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP,
Context(prec=prec)).as_tuple()
digits = [six.text_type(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append('0')
digits.insert(-exponent, '.')
if sign:
digits.append('-')
number = ''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
except InvalidOperation:
return input_val
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_text(iri_to_uri(value))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=None):
"""Displays text with line numbers."""
lines = value.split('\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = six.text_type(len(six.text_type(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = ("%0" + width + "d. %s") % (i + 1, escape(line))
return mark_safe('\n'.join(lines))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Converts to lowercase, removes non-word characters (alphanumerics and
underscores) and converts spaces to hyphens. Also strips leading and
trailing whitespace.
"""
return _slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
This specifier uses Python string formatting syntax, with the exception that
the leading "%" is dropped.
See http://docs.python.org/lib/typesseq-strings.html for documentation
of Python string formatting
"""
try:
return ("%" + six.text_type(arg)) % value
except (ValueError, TypeError):
return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub(r"\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
"""
Truncates a string after a certain number of characters.
Argument: Number of characters to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).chars(length)
@register.filter(is_safe=True)
@stringfilter
def truncatechars_html(value, arg):
"""
Truncates HTML after a certain number of chars.
Argument: Number of chars to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).chars(length, html=True)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
"""
Truncates a string after a certain number of words.
Argument: Number of words to truncate after.
Newlines within the string are removed.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).words(length, truncate=' ...')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncates HTML after a certain number of words.
Argument: Number of words to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=' ...')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Converts a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
"""
Escapes a value for use in a URL.
Takes an optional ``safe`` parameter used to determine the characters which
should not be escaped by Django's ``urlquote`` method. If not provided, the
default safe characters will be used (but an empty string can be provided
when *all* characters should be escaped).
"""
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return urlquote(value, **kwargs)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=None):
"""Converts URLs in plain text into clickable links."""
return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=None):
"""
Converts URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(_urlize(value, trim_url_limit=int(limit), nofollow=True,
autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Returns the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""
Wraps words at specified line length.
Argument: number of characters to wrap the text at.
"""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""
Left-aligns the value in a field of a given width.
Argument: field size.
"""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""
Right-aligns the value in a field of a given width.
Argument: field size.
"""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Centers the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
"""
Removes all values of arg from the given string.
"""
safe = isinstance(value, SafeData)
value = value.replace(arg, '')
if safe and arg != ';':
return mark_safe(value)
return value
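# Illustrative sketch, not part of the original source:
#   cut("String with spaces", " ")  ->  "Stringwithspaces"
# The result keeps its SafeData marking unless ';' was removed, since stripping
# the semicolons that terminate HTML entities could change how escaped text is
# parsed.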
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
"""
    Marks the value as a string that should be HTML-escaped before output.
"""
return mark_for_escaping(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escapes a string's HTML. This returns a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=None):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=None):
"""
Converts all newlines in a piece of plain text to HTML line breaks
(``<br />``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace('\n', '<br />'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Marks each element in the sequence,
individually, as safe, after converting them to unicode. Returns a list
with the results.
"""
return [mark_safe(force_text(obj)) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def removetags(value, tags):
"""Removes a space separated list of [X]HTML tags from the output."""
return remove_tags(value, tags)
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strips all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
@register.filter(is_safe=False)
def dictsort(value, arg):
"""
Takes a list of dicts, returns that list sorted by the property given in
the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Takes a list of dicts, returns that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve, reverse=True)
except (TypeError, VariableDoesNotExist):
return ''
@register.filter(is_safe=False)
def first(value):
"""Returns the first item in a list."""
try:
return value[0]
except IndexError:
return ''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=None):
"""
Joins a list with a string, like Python's ``str.join(list)``.
"""
value = map(force_text, value)
if autoescape:
value = [conditional_escape(v) for v in value]
try:
data = conditional_escape(arg).join(value)
except AttributeError: # fail silently but nicely
return value
return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"Returns the last item in a list"
try:
return value[-1]
except IndexError:
return ''
@register.filter(is_safe=True)
def length(value):
"""Returns the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=False)
def length_is(value, arg):
"""Returns a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
def random(value):
"""Returns a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
"""
Returns a slice of the list.
Uses the same syntax as Python's list slicing; see
http://www.diveintopython3.net/native-datatypes.html#slicinglists
for an introduction.
"""
try:
bits = []
for x in arg.split(':'):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
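# Illustrative sketch, not part of the original source: the argument is split
# on ':' and handed to slice(), mirroring Python's own syntax, e.g.
#   slice_filter(['a', 'b', 'c', 'd'], ':2')   ->  ['a', 'b']
#   slice_filter(['a', 'b', 'c', 'd'], '1:3')  ->  ['b', 'c']
# A malformed argument such as 'x:y' simply returns the value unchanged.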
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=None):
"""
Recursively takes a self-nested list and returns an HTML unordered list --
WITHOUT opening and closing <ul> tags.
The list is assumed to be in the proper format. For example, if ``var``
contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
then ``{{ var|unordered_list }}`` would return::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
escaper = conditional_escape
else:
escaper = lambda x: x
def convert_old_style_list(list_):
"""
Converts old style lists to the new easier to understand format.
The old list format looked like:
        ['Item 1', [['Item 1.1', []], ['Item 1.2', []]]]
        And it is converted to:
        ['Item 1', ['Item 1.1', 'Item 1.2']]
"""
if not isinstance(list_, (tuple, list)) or len(list_) != 2:
return list_, False
first_item, second_item = list_
if second_item == []:
return [first_item], True
try:
# see if second item is iterable
iter(second_item)
except TypeError:
return list_, False
old_style_list = True
new_second_item = []
for sublist in second_item:
item, old_style_list = convert_old_style_list(sublist)
if not old_style_list:
break
new_second_item.extend(item)
if old_style_list:
second_item = new_second_item
return [first_item, second_item], old_style_list
def _helper(list_, tabs=1):
indent = '\t' * tabs
output = []
list_length = len(list_)
i = 0
while i < list_length:
title = list_[i]
sublist = ''
sublist_item = None
if isinstance(title, (list, tuple)):
sublist_item = title
title = ''
elif i < list_length - 1:
next_item = list_[i + 1]
if next_item and isinstance(next_item, (list, tuple)):
# The next item is a sub-list.
sublist_item = next_item
# We've processed the next item now too.
i += 1
if sublist_item:
sublist = _helper(sublist_item, tabs + 1)
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (indent, sublist,
indent, indent)
output.append('%s<li>%s%s</li>' % (indent,
escaper(force_text(title)), sublist))
i += 1
return '\n'.join(output)
value, converted = convert_old_style_list(value)
return mark_safe(_helper(value))
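# Illustrative sketch, not part of the original source: convert_old_style_list()
# above turns the legacy nesting
#   ['States', [['Kansas', []], ['Illinois', []]]]
# into
#   ['States', ['Kansas', 'Illinois']]
# before _helper() renders the nested <li>/<ul> markup.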
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
"""Adds the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except Exception:
return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
"""
Given a whole number, returns the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Returns the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
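# Illustrative sketch, not part of the original source:
#   get_digit(9876, 2)   ->  7      (second digit from the right)
#   get_digit(9876, 5)   ->  0      (no such digit)
#   get_digit('abc', 2)  ->  'abc'  (non-integer input is returned unchanged)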
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Formats a date according to the given format."""
if value in (None, ''):
return ''
if arg is None:
arg = settings.DATE_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Formats a time according to the given format."""
if value in (None, ''):
return ''
if arg is None:
arg = settings.TIME_FORMAT
try:
return formats.time_format(value, arg)
except AttributeError:
try:
return time_format(value, arg)
except AttributeError:
return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return ''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return ''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return ''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Returns True if the value is devisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
"""
Given a string mapping values for true, false and (optionally) None,
returns one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
``None`` ``"yeah,no"`` ``"no"`` (converts None to False
                                        if no mapping for None is given).
========== ====================== ==================================
"""
if arg is None:
arg = ugettext('yes,no,maybe')
bits = arg.split(',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
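# Illustrative sketch, not part of the original source:
#   yesno(True, "yeah,no,maybe")  ->  "yeah"
#   yesno(None, "yeah,no")        ->  "no"    (None falls back to the false value)
#   yesno(True, "yeah")           ->  True    (fewer than two values: invalid arg)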
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes):
"""
Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc).
"""
try:
bytes = float(bytes)
except (TypeError, ValueError, UnicodeDecodeError):
value = ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
return avoid_wrapping(value)
filesize_number_format = lambda value: formats.number_format(round(value, 1), 1)
KB = 1 << 10
MB = 1 << 20
GB = 1 << 30
TB = 1 << 40
PB = 1 << 50
if bytes < KB:
value = ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
elif bytes < MB:
value = ugettext("%s KB") % filesize_number_format(bytes / KB)
elif bytes < GB:
value = ugettext("%s MB") % filesize_number_format(bytes / MB)
elif bytes < TB:
value = ugettext("%s GB") % filesize_number_format(bytes / GB)
elif bytes < PB:
value = ugettext("%s TB") % filesize_number_format(bytes / TB)
else:
value = ugettext("%s PB") % filesize_number_format(bytes / PB)
return avoid_wrapping(value)
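# Illustrative sketch, not part of the original source; the exact decimal
# rendering depends on the active locale's number formatting:
#   filesizeformat(1023)         ->  "1023 bytes"
#   filesizeformat(1024)         ->  "1.0 KB"
#   filesizeformat(1024 * 1024)  ->  "1.0 MB"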
@register.filter(is_safe=False)
def pluralize(value, arg='s'):
"""
Returns a plural suffix if the value is not 1. By default, 's' is used as
the suffix:
* If value is 0, vote{{ value|pluralize }} displays "0 votes".
* If value is 1, vote{{ value|pluralize }} displays "1 vote".
* If value is 2, vote{{ value|pluralize }} displays "2 votes".
If an argument is provided, that string is used instead:
* If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
* If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
* If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".
If the provided argument contains a comma, the text before the comma is
used for the singular case and the text after the comma is used for the
plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
"""
if ',' not in arg:
arg = ',' + arg
bits = arg.split(',')
if len(bits) > 2:
return ''
singular_suffix, plural_suffix = bits[:2]
try:
if int(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception as e:
return "Error in formatting: %s" % force_text(e, errors="replace")
|
mdaniel/intellij-community
|
refs/heads/master
|
python/helpers/tests/generator3_tests/data/FileSystemUtil/copy_skeleton_package_replaced/dst/before/foo/bar/baz/__init__.py
|
50
|
version = 1
|
oinopion/django
|
refs/heads/master
|
django/conf/locale/mn/formats.py
|
619
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'g:i A'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
SHORT_DATE_FORMAT = 'j M Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
adrianlee/rcon-cs
|
refs/heads/master
|
rconsoft/rcon/__init__.py
|
6
|
# Read LICENSE for licensing details.
|
allenlavoie/tensorflow
|
refs/heads/master
|
tensorflow/contrib/autograph/utils/testing.py
|
22
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
def fake_tf():
"""Creates a fake module that looks like TensorFlow, for testing."""
mod = imp.new_module('tensorflow')
mod_contents = dict()
mod_contents.update(math_ops.__dict__)
mod_contents.update(ops.__dict__)
mod_contents.update(mod.__dict__)
mod.__dict__.update(mod_contents)
return mod
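# Illustrative sketch, not part of the original source: the returned module
# carries the combined ops/math_ops namespaces, so code under test that expects
# a module named 'tensorflow' can resolve names from it, e.g.
#   tf = fake_tf()
#   tf.add  # resolves to math_ops.add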
|
8u1a/plaso
|
refs/heads/master
|
tools/psort_test.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the psort CLI tool."""
import os
import unittest
from plaso.cli.helpers import interface as helpers_interface
from plaso.cli.helpers import manager as helpers_manager
from plaso.lib import errors
from plaso.output import manager as output_manager
from tests import test_lib as shared_test_lib
from tests.cli import test_lib as cli_test_lib
from tests.frontend import psort as psort_test
from tools import psort
from tools import test_lib
class TestInputReader(object):
"""Test input reader."""
def __init__(self):
"""Initialize the reader."""
super(TestInputReader, self).__init__()
self.read_called = False
def Read(self):
"""Mock a read operation by user."""
self.read_called = True
return u'foobar'
class TestOutputModuleArgumentHelper(helpers_interface.ArgumentsHelper):
"""Test argument helper for the test output module."""
NAME = u'test_missing'
@classmethod
def AddArguments(cls, argument_group):
"""Mock the add argument section."""
pass
@classmethod
def ParseOptions(cls, options, output_module):
"""Provide a test parse options section."""
if not isinstance(output_module, TestOutputModuleMissingParameters):
raise errors.BadConfigObject((
u'Output module is not an instance of '
u'TestOutputModuleMissingParameters'))
missing = getattr(options, u'missing', None)
if missing:
output_module.SetMissingValue(u'missing', missing)
parameters = getattr(options, u'parameters', None)
if parameters:
output_module.SetMissingValue(u'parameters', parameters)
class TestOutputModuleMissingParameters(psort_test.TestOutputModule):
"""Test output module that is missing some parameters."""
NAME = u'test_missing'
# For test purpose assign these as class attributes.
missing = None
parameters = None
def GetMissingArguments(self):
"""Return a list of missing parameters."""
missing_parameters = []
if self.missing is None:
missing_parameters.append(u'missing')
if self.parameters is None:
missing_parameters.append(u'parameters')
return missing_parameters
@classmethod
def SetMissingValue(cls, attribute, value):
"""Set missing value."""
setattr(cls, attribute, value)
class PsortToolTest(test_lib.ToolTestCase):
"""Tests for the psort tool."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._input_reader = TestInputReader()
self._output_writer = cli_test_lib.TestOutputWriter(encoding=u'utf-8')
self._test_tool = psort.PsortTool(
input_reader=self._input_reader, output_writer=self._output_writer)
def testListOutputModules(self):
"""Test the listing of output modules."""
self._test_tool.ListOutputModules()
raw_data = self._output_writer.ReadOutput()
# Since the printed output varies depending on which output modules are
# enabled we cannot test the complete string but rather test substrings.
expected_raw_data = (
b'\n'
b'******************************** Output Modules '
b'********************************\n')
self.assertTrue(raw_data.startswith(expected_raw_data))
for name, output_class in output_manager.OutputManager.GetOutputClasses():
expected_string = b'{0:s} : {1:s}'.format(name, output_class.DESCRIPTION)
# Note that the description can be continued on the next line. Therefore
# only the words in the first 80 characters are compared.
expected_string, _, _ = expected_string[0:80].rpartition(b' ')
self.assertTrue(expected_string in raw_data)
def testProcessStorageWithMissingParameters(self):
"""Test the ProcessStorage function with half-configured output module."""
options = cli_test_lib.TestOptions()
options.storage_file = self._GetTestFilePath([u'psort_test.proto.plaso'])
options.output_format = u'test_missing'
output_manager.OutputManager.RegisterOutput(
TestOutputModuleMissingParameters)
helpers_manager.ArgumentHelperManager.RegisterHelper(
TestOutputModuleArgumentHelper)
lines = []
with shared_test_lib.TempDirectory() as temp_directory:
temp_file_name = os.path.join(temp_directory, u'output.txt')
options.write = temp_file_name
self._test_tool.ParseOptions(options)
self._test_tool.ProcessStorage()
with open(temp_file_name, 'rb') as file_object:
for line in file_object:
lines.append(line.strip())
self.assertTrue(self._input_reader.read_called)
self.assertEqual(TestOutputModuleMissingParameters.missing, u'foobar')
self.assertEqual(TestOutputModuleMissingParameters.parameters, u'foobar')
self.assertIn(u'FILE/UNKNOWN ctime OS:syslog', lines)
output_manager.OutputManager.DeregisterOutput(
TestOutputModuleMissingParameters)
helpers_manager.ArgumentHelperManager.DeregisterHelper(
TestOutputModuleArgumentHelper)
if __name__ == '__main__':
unittest.main()
|
frubar/rpi-wheatley
|
refs/heads/master
|
wheatley/admin.py
|
1
|
# -*- coding: UTF-8 -*-
from django.contrib import admin
from wheatley.models import Photo, Location, Tweet, Settings
class SettingsAdmin(admin.ModelAdmin):
list_display = ('id', 'created_at',)
list_display_links = ('id', 'created_at',)
    date_hierarchy = 'created_at'
list_filter = ('created_at',)
class PhotoAdmin(admin.ModelAdmin):
list_display = ('id', 'created_at', 'filename', 'location',)
list_display_links = ('id', 'filename')
    date_hierarchy = 'created_at'
list_filter = ('created_at', 'location',)
class LocationAdmin(admin.ModelAdmin):
list_display = ('id', 'created_at', 'title',)
list_display_links = ('id', 'title',)
    date_hierarchy = 'created_at'
list_filter = ('created_at',)
search_fields = ('title',)
class TweetAdmin(admin.ModelAdmin):
list_display = ('id', 'created_at', '__unicode__', 'location',)
list_display_links = ('id', '__unicode__')
    date_hierarchy = 'created_at'
list_filter = ('created_at', 'location',)
admin.site.register(Settings, SettingsAdmin)
admin.site.register(Photo, PhotoAdmin)
admin.site.register(Location, LocationAdmin)
admin.site.register(Tweet, TweetAdmin)
# vim: set ft=python ts=4 sw=4 :
|
kangkot/arangodb
|
refs/heads/devel
|
3rdParty/V8-4.3.61/third_party/python_26/Tools/scripts/objgraph.py
|
96
|
#! /usr/bin/env python
# objgraph
#
# Read "nm -o" input (on IRIX: "nm -Bo") of a set of libraries or modules
# and print various interesting listings, such as:
#
# - which names are used but not defined in the set (and used where),
# - which names are defined in the set (and where),
# - which modules use which other modules,
# - which modules are used by which other modules.
#
# Usage: objgraph [-cdu] [file] ...
# -c: print callers per objectfile
# -d: print callees per objectfile
# -u: print usage of undefined symbols
# If none of -cdu is specified, all are assumed.
# Use "nm -o" to generate the input (on IRIX: "nm -Bo"),
# e.g.: nm -o /lib/libc.a | objgraph
import sys
import os
import getopt
import re
# Types of symbols.
#
definitions = 'TRGDSBAEC'
externals = 'UV'
ignore = 'Nntrgdsbavuc'
# Regular expression to parse "nm -o" output.
#
matcher = re.compile('(.*):\t?........ (.) (.*)$')
# Store "item" in "dict" under "key".
# The dictionary maps keys to lists of items.
# If there is no list for the key yet, it is created.
#
def store(dict, key, item):
if dict.has_key(key):
dict[key].append(item)
else:
dict[key] = [item]
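# Illustrative sketch, not part of the original source:
#   d = {}
#   store(d, 'libc.a', 'printf')
#   store(d, 'libc.a', 'malloc')
#   # d == {'libc.a': ['printf', 'malloc']}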
# Return a flattened version of a list of strings: the concatenation
# of its elements with intervening spaces.
#
def flat(list):
s = ''
for item in list:
s = s + ' ' + item
return s[1:]
# Global variables mapping defined/undefined names to files and back.
#
file2undef = {}
def2file = {}
file2def = {}
undef2file = {}
# Read one input file and merge the data into the tables.
# Argument is an open file.
#
def readinput(fp):
while 1:
s = fp.readline()
if not s:
break
# If you get any output from this line,
# it is probably caused by an unexpected input line:
if matcher.search(s) < 0: s; continue # Shouldn't happen
(ra, rb), (r1a, r1b), (r2a, r2b), (r3a, r3b) = matcher.regs[:4]
fn, name, type = s[r1a:r1b], s[r3a:r3b], s[r2a:r2b]
if type in definitions:
store(def2file, name, fn)
store(file2def, fn, name)
elif type in externals:
store(file2undef, fn, name)
store(undef2file, name, fn)
elif not type in ignore:
print fn + ':' + name + ': unknown type ' + type
# Print all names that were undefined in some module and where they are
# defined.
#
def printcallee():
flist = file2undef.keys()
flist.sort()
for filename in flist:
print filename + ':'
elist = file2undef[filename]
elist.sort()
for ext in elist:
if len(ext) >= 8:
tabs = '\t'
else:
tabs = '\t\t'
if not def2file.has_key(ext):
print '\t' + ext + tabs + ' *undefined'
else:
print '\t' + ext + tabs + flat(def2file[ext])
# Print for each module the names of the other modules that use it.
#
def printcaller():
files = file2def.keys()
files.sort()
for filename in files:
callers = []
for label in file2def[filename]:
if undef2file.has_key(label):
callers = callers + undef2file[label]
if callers:
callers.sort()
print filename + ':'
lastfn = ''
for fn in callers:
if fn <> lastfn:
print '\t' + fn
lastfn = fn
else:
print filename + ': unused'
# Print undefined names and where they are used.
#
def printundef():
undefs = {}
for filename in file2undef.keys():
for ext in file2undef[filename]:
if not def2file.has_key(ext):
store(undefs, ext, filename)
elist = undefs.keys()
elist.sort()
for ext in elist:
print ext + ':'
flist = undefs[ext]
flist.sort()
for filename in flist:
print '\t' + filename
# Print warning messages about names defined in more than one file.
#
def warndups():
savestdout = sys.stdout
sys.stdout = sys.stderr
names = def2file.keys()
names.sort()
for name in names:
if len(def2file[name]) > 1:
print 'warning:', name, 'multiply defined:',
print flat(def2file[name])
sys.stdout = savestdout
# Main program
#
def main():
try:
optlist, args = getopt.getopt(sys.argv[1:], 'cdu')
except getopt.error:
sys.stdout = sys.stderr
print 'Usage:', os.path.basename(sys.argv[0]),
print '[-cdu] [file] ...'
print '-c: print callers per objectfile'
print '-d: print callees per objectfile'
print '-u: print usage of undefined symbols'
print 'If none of -cdu is specified, all are assumed.'
print 'Use "nm -o" to generate the input (on IRIX: "nm -Bo"),'
print 'e.g.: nm -o /lib/libc.a | objgraph'
return 1
optu = optc = optd = 0
for opt, void in optlist:
if opt == '-u':
optu = 1
elif opt == '-c':
optc = 1
elif opt == '-d':
optd = 1
if optu == optc == optd == 0:
optu = optc = optd = 1
if not args:
args = ['-']
for filename in args:
if filename == '-':
readinput(sys.stdin)
else:
readinput(open(filename, 'r'))
#
warndups()
#
more = (optu + optc + optd > 1)
if optd:
if more:
print '---------------All callees------------------'
printcallee()
if optu:
if more:
print '---------------Undefined callees------------'
printundef()
if optc:
if more:
print '---------------All Callers------------------'
printcaller()
return 0
# Call the main program.
# Use its return value as exit status.
# Catch interrupts to avoid stack trace.
#
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
|
vikas1885/test1
|
refs/heads/master
|
lms/djangoapps/instructor/features/common.py
|
63
|
"""
Define common steps for instructor dashboard acceptance tests.
"""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from __future__ import absolute_import
from lettuce import world, step
from mock import patch
from nose.tools import assert_in # pylint: disable=no-name-in-module
from courseware.tests.factories import StaffFactory, InstructorFactory
@step(u'Given I am "([^"]*)" for a very large course')
def make_staff_or_instructor_for_large_course(step, role):
make_large_course(step, role)
@patch.dict('courseware.access.settings.FEATURES', {"MAX_ENROLLMENT_INSTR_BUTTONS": 0})
def make_large_course(step, role):
i_am_staff_or_instructor(step, role)
@step(u'Given I am "([^"]*)" for a course')
def i_am_staff_or_instructor(step, role): # pylint: disable=unused-argument
## In summary: makes a test course, makes a new Staff or Instructor user
## (depending on `role`), and logs that user in to the course
# Store the role
assert_in(role, ['instructor', 'staff'])
# Clear existing courses to avoid conflicts
world.clear_courses()
# Create a new course
course = world.CourseFactory.create(
org='edx',
number='999',
display_name='Test Course'
)
world.course_key = course.id
world.role = 'instructor'
    # Log in as an instructor or staff member for the course
if role == 'instructor':
# Make & register an instructor for the course
world.instructor = InstructorFactory(course_key=world.course_key)
world.enroll_user(world.instructor, world.course_key)
world.log_in(
username=world.instructor.username,
password='test',
email=world.instructor.email,
name=world.instructor.profile.name
)
else:
world.role = 'staff'
# Make & register a staff member
world.staff = StaffFactory(course_key=world.course_key)
world.enroll_user(world.staff, world.course_key)
world.log_in(
username=world.staff.username,
password='test',
email=world.staff.email,
name=world.staff.profile.name
)
def go_to_section(section_name):
# section name should be one of
# course_info, membership, student_admin, data_download, analytics, send_email
world.visit(u'/courses/{}'.format(world.course_key))
world.css_click(u'a[href="/courses/{}/instructor"]'.format(world.course_key))
world.css_click('a[data-section="{0}"]'.format(section_name))
@step(u'I click "([^"]*)"')
def click_a_button(step, button): # pylint: disable=unused-argument
if button == "Generate Grade Report":
# Go to the data download section of the instructor dash
go_to_section("data_download")
# Click generate grade report button
world.css_click('input[name="calculate-grades-csv"]')
# Expect to see a message that grade report is being generated
expected_msg = "The grade report is being created." \
" To view the status of the report, see" \
" Pending Instructor Tasks below."
world.wait_for_visible('#report-request-response')
assert_in(
expected_msg, world.css_text('#report-request-response'),
msg="Could not find grade report generation success message."
)
elif button == "Grading Configuration":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="dump-gradeconf"]')
elif button == "List enrolled students' profile information":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="list-profiles"]')
elif button == "Download profile information as a CSV":
# Go to the data download section of the instructor dash
go_to_section("data_download")
world.css_click('input[name="list-profiles-csv"]')
else:
raise ValueError("Unrecognized button option " + button)
@step(u'I visit the "([^"]*)" tab')
def click_a_button(step, tab_name): # pylint: disable=unused-argument
# course_info, membership, student_admin, data_download, analytics, send_email
tab_name_dict = {
'Course Info': 'course_info',
'Membership': 'membership',
'Student Admin': 'student_admin',
'Data Download': 'data_download',
'Analytics': 'analytics',
'Email': 'send_email',
}
go_to_section(tab_name_dict[tab_name])
|
lidiamcfreitas/FenixScheduleMaker
|
refs/heads/master
|
oldFiles/project-env/lib/python2.7/site-packages/wheel/test/simple.dist/simpledist/__init__.py
|
12133432
| |
patrickm/chromium.src
|
refs/heads/nw
|
tools/memory_inspector/memory_inspector/backends/__init__.py
|
12133432
| |
Zahajamaan/Fudulbank
|
refs/heads/master
|
mailer/migrations/__init__.py
|
12133432
| |
open-switch/ops-cli
|
refs/heads/master
|
ops-tests/component/test_vtysh_ct_bgp_router_cli.py
|
1
|
# -*- coding: utf-8 -*-
# (C) Copyright 2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
##########################################################################
"""
OpenSwitch Test for BGP router CLI related configurations.
"""
TOPOLOGY = """
# +-------+
# | ops1 |
# +-------+
# Nodes
[type=openswitch name="OpenSwitch 1"] ops1
"""
def verify_bgp_router_table(dut, step):
step("Test to verify BGP router table")
out = dut("show ip bgp summary")
assert "No bgp router configured." in out
def configure_bgp_router_flags(dut, step):
step("Test to configure BGP router flags")
fast_ext_failover_str = "bgp fast-external-failover"
fast_ext_failover_flag = False
log_neighbor_changes_str = "bgp log-neighbor-changes"
log_neighbor_changes_flag = False
dut("configure terminal")
dut("router bgp 100")
dut(fast_ext_failover_str)
dut(log_neighbor_changes_str)
dut("end")
dump = dut("show running-config")
lines = dump.splitlines()
for line in lines:
if fast_ext_failover_str in line:
fast_ext_failover_flag = True
elif log_neighbor_changes_str in line:
log_neighbor_changes_flag = True
if fast_ext_failover_flag is False:
print("### BGP fast-external-failover flag not set ###")
elif log_neighbor_changes_flag is False:
print("### BGP log-neighbor-changes flag not set ###")
if (
fast_ext_failover_flag is False or log_neighbor_changes_flag is False
):
print("### Test to set BGP Router flags-FAILED! ###")
def unconfigure_bgp_router_flags(dut, step):
step("Test to unconfigure BGP router flags")
fast_ext_failover_str = "bgp fast-external-failover"
no_fast_ext_failover_str = "no bgp fast-external-failover"
fast_ext_failover_flag = False
log_neighbor_changes_str = "bgp log-neighbor-changes"
no_log_neighbor_changes_str = "no bgp log-neighbor-changes"
log_neighbor_changes_flag = False
dut("configure terminal")
dut("router bgp 100")
dut(no_fast_ext_failover_str)
dut(no_log_neighbor_changes_str)
dut("end")
dump = dut("show running-config")
lines = dump.splitlines()
for line in lines:
if fast_ext_failover_str in line:
fast_ext_failover_flag = True
elif log_neighbor_changes_str in line:
log_neighbor_changes_flag = True
if fast_ext_failover_flag is True:
print("### BGP fast-external-failover flag is set ###")
elif log_neighbor_changes_flag is True:
print("### BGP log-neighbor-changes flag is set ###")
if (
fast_ext_failover_flag is True or log_neighbor_changes_flag is True
):
print("### Test to unconfigure BGP Router flags-FAILED! ###")
def configure_bgp_network(dut, step):
step("Test to configure BGP network")
network_str = "network 3001::/32"
network_str_flag = False
dut("configure terminal")
dut("router bgp 100")
dut("network 3001::1/32")
dut("end")
dump = dut("show running-config")
lines = dump.splitlines()
for line in lines:
if network_str in line:
network_str_flag = True
assert network_str_flag is True
def unconfigure_bgp_network(dut, step):
step("Test to unconfigure BGP network")
network_str = "network 3001::/32"
network_str_flag = False
dut("configure terminal")
dut("router bgp 100")
dut("no network 3001::1/32")
dut("end")
dump = dut("show running-config")
lines = dump.splitlines()
for line in lines:
if network_str in line:
network_str_flag = True
assert network_str_flag is False
def configure_routemap_match(dut, step):
step("Test to configure Route-Map Match commands")
match_ipv6_prefix_list_str = "match ipv6 address prefix-list 5"
match_ipv6_prefix_list_flag = False
match_community_str = "match community 100"
match_community_str_flag = False
match_extcommunity_str = "match extcommunity e1"
match_extcommunity_str_flag = False
dut("configure terminal")
dut("route-map r1 permit 10")
dut(match_ipv6_prefix_list_str)
dut(match_community_str)
dut(match_extcommunity_str)
dut("end")
dump = dut("show running-config")
lines = dump.splitlines()
for line in lines:
if match_ipv6_prefix_list_str in line:
match_ipv6_prefix_list_flag = True
elif match_community_str in line:
match_community_str_flag = True
elif match_extcommunity_str in line:
match_extcommunity_str_flag = True
if match_ipv6_prefix_list_flag is False:
print("### Error configuring 'match ipv6 address prefix-list' ###")
elif match_community_str_flag is False:
print("### Error configuring 'match community' ###\n")
elif match_extcommunity_str_flag is False:
print("### Error configuring 'match extcommunity' ###\n")
if match_ipv6_prefix_list_flag is False or \
match_community_str_flag is False or \
match_extcommunity_str_flag is False:
print("### Test to configure Route-Map match commands FAILED! ###")
def unconfigure_routemap_match(dut, step):
step("Test to unconfigure Route-Map Match commands")
match_ipv6_prefix_list_str = "match ipv6 address prefix-list 5"
no_match_ipv6_prefix_list_str = "no match ipv6 address prefix-list 5"
match_ipv6_prefix_list_flag = False
match_community_str = "match community 100"
no_match_community_str = "no match community 100"
match_community_str_flag = False
match_extcommunity_str = "match extcommunity e1"
no_match_extcommunity_str = "no match extcommunity e1"
match_extcommunity_str_flag = False
dut("configure terminal")
dut("route-map r1 permit 10")
dut(no_match_ipv6_prefix_list_str)
dut(no_match_community_str)
dut(no_match_extcommunity_str)
dut("end")
dump = dut("show running-config")
lines = dump.split('\n')
for line in lines:
if match_ipv6_prefix_list_str in line:
match_ipv6_prefix_list_flag = True
elif match_community_str in line:
match_community_str_flag = True
elif match_extcommunity_str in line:
match_extcommunity_str_flag = True
if match_ipv6_prefix_list_flag is True:
print("### Error unconfiguring 'match ipv6 address prefix-list' ###")
elif match_community_str_flag is True:
print("### Error unconfiguring 'match community' ###")
elif match_extcommunity_str_flag is True:
print("### Error unconfiguring 'match extcommunity' ###")
if (
match_ipv6_prefix_list_flag is True or
match_community_str_flag is True or
match_extcommunity_str_flag is True
):
print("### Test to unconfigure Route-Map match commands FAILED! ###")
def test_vtysh_ct_bgp_router_cli(topology, step):
ops1 = topology.get("ops1")
assert ops1 is not None
verify_bgp_router_table(ops1, step)
configure_bgp_router_flags(ops1, step)
unconfigure_bgp_router_flags(ops1, step)
configure_bgp_network(ops1, step)
unconfigure_bgp_network(ops1, step)
configure_routemap_match(ops1, step)
unconfigure_routemap_match(ops1, step)
|
kanagasabapathi/python-for-android
|
refs/heads/master
|
python-build/python-libs/python-twitter/setup.py
|
90
|
#!/usr/bin/python2.4
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''The setup and build script for the python-twitter library.'''
__author__ = 'dewitt@google.com'
__version__ = '0.7-devel'
# The base package metadata to be used by both distutils and setuptools
METADATA = dict(
name = "python-twitter",
version = __version__,
py_modules = ['twitter'],
author='DeWitt Clinton',
author_email='dewitt@google.com',
description='A python wrapper around the Twitter API',
license='Apache License 2.0',
url='http://code.google.com/p/python-twitter/',
keywords='twitter api',
)
# Extra package metadata to be used only if setuptools is installed
SETUPTOOLS_METADATA = dict(
install_requires = ['setuptools', 'simplejson'],
include_package_data = True,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
],
test_suite = 'twitter_test.suite',
)
def Read(file):
return open(file).read()
def BuildLongDescription():
return '\n'.join([Read('README'), Read('CHANGES')])
def Main():
# Build the long_description from the README and CHANGES
METADATA['long_description'] = BuildLongDescription()
# Use setuptools if available, otherwise fallback and use distutils
try:
import setuptools
METADATA.update(SETUPTOOLS_METADATA)
setuptools.setup(**METADATA)
except ImportError:
import distutils.core
distutils.core.setup(**METADATA)
if __name__ == '__main__':
Main()
|
shyamalschandra/scikit-learn
|
refs/heads/master
|
examples/covariance/plot_robust_vs_empirical_covariance.py
|
73
|
r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of American
Statistical Ass., 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
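# Illustrative sketch, not part of the original example: on a mildly
# contaminated sample the two estimators can be compared directly, e.g.
#   X = np.random.RandomState(0).randn(100, 5)
#   X[:10] += 10.
#   MinCovDet().fit(X).covariance_            # barely affected by the outliers
#   EmpiricalCovariance().fit(X).covariance_  # inflated by the outliers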
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
|
mgorny/PyGithub
|
refs/heads/master
|
tests/IssueEvent.py
|
2
|
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import datetime
from . import Framework
class IssueEvent(Framework.TestCase):
def setUp(self):
super().setUp()
repo = self.g.get_repo("PyGithub/PyGithub", lazy=True)
# From Issue #30
self.event_subscribed = repo.get_issues_event(16347479)
self.event_assigned = repo.get_issues_event(16347480)
self.event_referenced = repo.get_issues_event(16348656)
self.event_closed = repo.get_issues_event(16351220)
self.event_labeled = repo.get_issues_event(98136337)
# From Issue 538
self.event_mentioned = repo.get_issues_event(1009034767)
self.event_merged = repo.get_issues_event(1015402964)
self.event_review_requested = repo.get_issues_event(1011101309)
# From Issue 857
self.event_reopened = repo.get_issues_event(1782463023)
self.event_unassigned = repo.get_issues_event(1782463379)
self.event_unlabeled = repo.get_issues_event(1782463917)
self.event_renamed = repo.get_issues_event(1782472556)
self.event_base_ref_changed = repo.get_issues_event(1782915693)
self.event_head_ref_deleted = repo.get_issues_event(1782917185)
self.event_head_ref_restored = repo.get_issues_event(1782917299)
self.event_milestoned = repo.get_issues_event(1783596418)
self.event_demilestoned = repo.get_issues_event(1783596452)
self.event_locked = repo.get_issues_event(1783596743)
self.event_unlocked = repo.get_issues_event(1783596818)
self.event_review_dismissed = repo.get_issues_event(1783605084)
self.event_review_request_removed = repo.get_issues_event(1783779835)
self.event_marked_as_duplicate = repo.get_issues_event(1783779725)
self.event_unmarked_as_duplicate = repo.get_issues_event(1789228962)
self.event_added_to_project = repo.get_issues_event(1791766828)
self.event_moved_columns_in_project = repo.get_issues_event(1791767766)
self.event_removed_from_project = repo.get_issues_event(1791768212)
# From Issue 866
self.event_converted_note_to_issue = repo.get_issues_event(1791769149)
def testEvent_subscribed_Attributes(self):
self.assertEqual(self.event_subscribed.actor.login, "jacquev6")
self.assertEqual(self.event_subscribed.commit_id, None)
self.assertEqual(
self.event_subscribed.created_at, datetime.datetime(2012, 5, 27, 5, 40, 15)
)
self.assertEqual(self.event_subscribed.event, "subscribed")
self.assertEqual(self.event_subscribed.id, 16347479)
self.assertEqual(self.event_subscribed.issue.number, 30)
self.assertEqual(
self.event_subscribed.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/16347479",
)
self.assertEqual(
self.event_subscribed.node_id, "MDE1OlN1YnNjcmliZWRFdmVudDE2MzQ3NDc5"
)
self.assertEqual(self.event_subscribed.commit_url, None)
self.assertEqual(self.event_subscribed.label, None)
self.assertEqual(self.event_subscribed.assignee, None)
self.assertEqual(self.event_subscribed.assigner, None)
self.assertEqual(self.event_subscribed.review_requester, None)
self.assertEqual(self.event_subscribed.requested_reviewer, None)
self.assertEqual(self.event_subscribed.milestone, None)
self.assertEqual(self.event_subscribed.rename, None)
self.assertEqual(self.event_subscribed.dismissed_review, None)
self.assertEqual(self.event_subscribed.lock_reason, None)
        # test __repr__() based on these attributes
self.assertEqual(self.event_subscribed.__repr__(), "IssueEvent(id=16347479)")
def testEvent_assigned_Attributes(self):
self.assertEqual(self.event_assigned.actor.login, "jacquev6")
self.assertEqual(self.event_assigned.commit_id, None)
self.assertEqual(
self.event_assigned.created_at, datetime.datetime(2012, 5, 27, 5, 40, 15)
)
self.assertEqual(self.event_assigned.event, "assigned")
self.assertEqual(self.event_assigned.id, 16347480)
self.assertEqual(self.event_assigned.issue.number, 30)
self.assertEqual(
self.event_assigned.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/16347480",
)
self.assertEqual(
self.event_assigned.node_id, "MDEzOkFzc2lnbmVkRXZlbnQxNjM0NzQ4MA=="
)
self.assertEqual(self.event_assigned.commit_url, None)
self.assertEqual(self.event_assigned.label, None)
self.assertEqual(self.event_assigned.assignee.login, "jacquev6")
self.assertEqual(self.event_assigned.assigner.login, "ghost")
self.assertEqual(self.event_assigned.review_requester, None)
self.assertEqual(self.event_assigned.requested_reviewer, None)
self.assertEqual(self.event_assigned.milestone, None)
self.assertEqual(self.event_assigned.rename, None)
self.assertEqual(self.event_assigned.dismissed_review, None)
self.assertEqual(self.event_assigned.lock_reason, None)
        # test __repr__() based on these attributes
self.assertEqual(self.event_assigned.__repr__(), "IssueEvent(id=16347480)")
def testEvent_referenced_Attributes(self):
self.assertEqual(self.event_referenced.actor.login, "jacquev6")
self.assertEqual(
self.event_referenced.commit_id, "ed866fc43833802ab553e5ff8581c81bb00dd433"
)
self.assertEqual(
self.event_referenced.created_at, datetime.datetime(2012, 5, 27, 7, 29, 25)
)
self.assertEqual(self.event_referenced.event, "referenced")
self.assertEqual(self.event_referenced.id, 16348656)
self.assertEqual(self.event_referenced.issue.number, 30)
self.assertEqual(
self.event_referenced.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/16348656",
)
self.assertEqual(
self.event_referenced.node_id, "MDE1OlJlZmVyZW5jZWRFdmVudDE2MzQ4NjU2"
)
self.assertEqual(
self.event_referenced.commit_url,
"https://api.github.com/repos/PyGithub/PyGithub/commits/ed866fc43833802ab553e5ff8581c81bb00dd433",
)
self.assertEqual(self.event_referenced.label, None)
self.assertEqual(self.event_referenced.assignee, None)
self.assertEqual(self.event_referenced.assigner, None)
self.assertEqual(self.event_referenced.review_requester, None)
self.assertEqual(self.event_referenced.requested_reviewer, None)
self.assertEqual(self.event_referenced.milestone, None)
self.assertEqual(self.event_referenced.rename, None)
self.assertEqual(self.event_referenced.dismissed_review, None)
self.assertEqual(self.event_referenced.lock_reason, None)
        # test __repr__() based on these attributes
self.assertEqual(self.event_referenced.__repr__(), "IssueEvent(id=16348656)")
def testEvent_closed_Attributes(self):
self.assertEqual(self.event_closed.actor.login, "jacquev6")
self.assertEqual(self.event_closed.commit_id, None)
self.assertEqual(
self.event_closed.created_at, datetime.datetime(2012, 5, 27, 11, 4, 25)
)
self.assertEqual(self.event_closed.event, "closed")
self.assertEqual(self.event_closed.id, 16351220)
self.assertEqual(self.event_closed.issue.number, 30)
self.assertEqual(
self.event_closed.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/16351220",
)
self.assertEqual(self.event_closed.node_id, "MDExOkNsb3NlZEV2ZW50MTYzNTEyMjA=")
self.assertEqual(self.event_closed.commit_url, None)
self.assertEqual(self.event_closed.label, None)
self.assertEqual(self.event_closed.assignee, None)
self.assertEqual(self.event_closed.assigner, None)
self.assertEqual(self.event_closed.review_requester, None)
self.assertEqual(self.event_closed.requested_reviewer, None)
self.assertEqual(self.event_closed.milestone, None)
self.assertEqual(self.event_closed.rename, None)
self.assertEqual(self.event_closed.dismissed_review, None)
self.assertEqual(self.event_closed.lock_reason, None)
        # test __repr__() based on these attributes
self.assertEqual(self.event_closed.__repr__(), "IssueEvent(id=16351220)")
def testEvent_labeled_Attributes(self):
self.assertEqual(self.event_labeled.actor.login, "jacquev6")
self.assertEqual(self.event_labeled.commit_id, None)
self.assertEqual(
self.event_labeled.created_at, datetime.datetime(2014, 3, 2, 18, 55, 10)
)
self.assertEqual(self.event_labeled.event, "labeled")
self.assertEqual(self.event_labeled.id, 98136337)
self.assertEqual(self.event_labeled.issue.number, 30)
self.assertEqual(
self.event_labeled.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/98136337",
)
self.assertEqual(self.event_labeled.node_id, "MDEyOkxhYmVsZWRFdmVudDk4MTM2MzM3")
self.assertEqual(self.event_labeled.commit_url, None)
self.assertEqual(self.event_labeled.label.name, "v1")
self.assertEqual(self.event_labeled.assignee, None)
self.assertEqual(self.event_labeled.assigner, None)
self.assertEqual(self.event_labeled.review_requester, None)
self.assertEqual(self.event_labeled.requested_reviewer, None)
self.assertEqual(self.event_labeled.milestone, None)
self.assertEqual(self.event_labeled.rename, None)
self.assertEqual(self.event_labeled.dismissed_review, None)
self.assertEqual(self.event_labeled.lock_reason, None)
        # test __repr__() based on these attributes
self.assertEqual(self.event_labeled.__repr__(), "IssueEvent(id=98136337)")
def testEvent_mentioned_Attributes(self):
self.assertEqual(self.event_mentioned.actor.login, "jzelinskie")
self.assertEqual(self.event_mentioned.commit_id, None)
self.assertEqual(
self.event_mentioned.created_at, datetime.datetime(2017, 3, 21, 17, 30, 14)
)
self.assertEqual(self.event_mentioned.event, "mentioned")
self.assertEqual(self.event_mentioned.id, 1009034767)
self.assertEqual(self.event_mentioned.issue.number, 538)
self.assertEqual(
self.event_mentioned.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1009034767",
)
self.assertEqual(
self.event_mentioned.node_id, "MDE0Ok1lbnRpb25lZEV2ZW50MTAwOTAzNDc2Nw=="
)
self.assertEqual(self.event_mentioned.commit_url, None)
self.assertEqual(self.event_mentioned.label, None)
self.assertEqual(self.event_mentioned.assignee, None)
self.assertEqual(self.event_mentioned.assigner, None)
self.assertEqual(self.event_mentioned.review_requester, None)
self.assertEqual(self.event_mentioned.requested_reviewer, None)
self.assertEqual(self.event_mentioned.milestone, None)
self.assertEqual(self.event_mentioned.rename, None)
self.assertEqual(self.event_mentioned.dismissed_review, None)
self.assertEqual(self.event_mentioned.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(self.event_mentioned.__repr__(), "IssueEvent(id=1009034767)")
def testEvent_merged_Attributes(self):
self.assertEqual(self.event_merged.actor.login, "jzelinskie")
self.assertEqual(
self.event_merged.commit_id, "2525515b094d7425f7018eb5b66171e21c5fbc10"
)
self.assertEqual(
self.event_merged.created_at, datetime.datetime(2017, 3, 25, 16, 52, 49)
)
self.assertEqual(self.event_merged.event, "merged")
self.assertEqual(self.event_merged.id, 1015402964)
self.assertEqual(self.event_merged.issue.number, 538)
self.assertEqual(
self.event_merged.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1015402964",
)
self.assertEqual(
self.event_merged.node_id, "MDExOk1lcmdlZEV2ZW50MTAxNTQwMjk2NA=="
)
self.assertEqual(
self.event_merged.commit_url,
"https://api.github.com/repos/PyGithub/PyGithub/commits/2525515b094d7425f7018eb5b66171e21c5fbc10",
)
self.assertEqual(self.event_merged.label, None)
self.assertEqual(self.event_merged.assignee, None)
self.assertEqual(self.event_merged.assigner, None)
self.assertEqual(self.event_merged.review_requester, None)
self.assertEqual(self.event_merged.requested_reviewer, None)
self.assertEqual(self.event_merged.milestone, None)
self.assertEqual(self.event_merged.rename, None)
self.assertEqual(self.event_merged.dismissed_review, None)
self.assertEqual(self.event_merged.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(self.event_merged.__repr__(), "IssueEvent(id=1015402964)")
def testEvent_review_requested_Attributes(self):
self.assertEqual(self.event_review_requested.actor.login, "jzelinskie")
self.assertEqual(self.event_review_requested.commit_id, None)
self.assertEqual(
self.event_review_requested.created_at,
datetime.datetime(2017, 3, 22, 19, 6, 44),
)
self.assertEqual(self.event_review_requested.event, "review_requested")
self.assertEqual(self.event_review_requested.id, 1011101309)
self.assertEqual(self.event_review_requested.issue.number, 538)
self.assertEqual(
self.event_review_requested.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1011101309",
)
self.assertEqual(
self.event_review_requested.node_id,
"MDIwOlJldmlld1JlcXVlc3RlZEV2ZW50MTAxMTEwMTMwOQ==",
)
self.assertEqual(self.event_review_requested.commit_url, None)
self.assertEqual(self.event_review_requested.label, None)
self.assertEqual(self.event_review_requested.assignee, None)
self.assertEqual(self.event_review_requested.assigner, None)
self.assertEqual(
self.event_review_requested.review_requester.login, "jzelinskie"
)
self.assertEqual(
self.event_review_requested.requested_reviewer.login, "jzelinskie"
)
self.assertEqual(self.event_review_requested.milestone, None)
self.assertEqual(self.event_review_requested.rename, None)
self.assertEqual(self.event_review_requested.dismissed_review, None)
self.assertEqual(self.event_review_requested.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_review_requested.__repr__(), "IssueEvent(id=1011101309)"
)
def testEvent_reopened_Attributes(self):
self.assertEqual(self.event_reopened.actor.login, "sfdye")
self.assertEqual(self.event_reopened.commit_id, None)
self.assertEqual(
self.event_reopened.created_at, datetime.datetime(2018, 8, 10, 13, 10, 9)
)
self.assertEqual(self.event_reopened.event, "reopened")
self.assertEqual(self.event_reopened.id, 1782463023)
self.assertEqual(self.event_reopened.issue.number, 857)
self.assertEqual(
self.event_reopened.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782463023",
)
self.assertEqual(
self.event_reopened.node_id, "MDEzOlJlb3BlbmVkRXZlbnQxNzgyNDYzMDIz"
)
self.assertEqual(self.event_reopened.commit_url, None)
self.assertEqual(self.event_reopened.label, None)
self.assertEqual(self.event_reopened.assignee, None)
self.assertEqual(self.event_reopened.assigner, None)
self.assertEqual(self.event_reopened.review_requester, None)
self.assertEqual(self.event_reopened.requested_reviewer, None)
self.assertEqual(self.event_reopened.milestone, None)
self.assertEqual(self.event_reopened.rename, None)
self.assertEqual(self.event_reopened.dismissed_review, None)
self.assertEqual(self.event_reopened.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(self.event_reopened.__repr__(), "IssueEvent(id=1782463023)")
def testEvent_unassigned_Attributes(self):
self.assertEqual(self.event_unassigned.actor.login, "sfdye")
self.assertEqual(self.event_unassigned.commit_id, None)
self.assertEqual(
self.event_unassigned.created_at, datetime.datetime(2018, 8, 10, 13, 10, 21)
)
self.assertEqual(self.event_unassigned.event, "unassigned")
self.assertEqual(self.event_unassigned.id, 1782463379)
self.assertEqual(self.event_unassigned.issue.number, 857)
self.assertEqual(
self.event_unassigned.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782463379",
)
self.assertEqual(
self.event_unassigned.node_id, "MDE1OlVuYXNzaWduZWRFdmVudDE3ODI0NjMzNzk="
)
self.assertEqual(self.event_unassigned.commit_url, None)
self.assertEqual(self.event_unassigned.label, None)
        self.assertEqual(self.event_unassigned.assignee.login, "sfdye")
        self.assertEqual(self.event_unassigned.assigner.login, "sfdye")
self.assertEqual(self.event_unassigned.review_requester, None)
self.assertEqual(self.event_unassigned.requested_reviewer, None)
self.assertEqual(self.event_unassigned.milestone, None)
self.assertEqual(self.event_unassigned.rename, None)
self.assertEqual(self.event_unassigned.dismissed_review, None)
self.assertEqual(self.event_unassigned.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(self.event_unassigned.__repr__(), "IssueEvent(id=1782463379)")
def testEvent_unlabeled_Attributes(self):
self.assertEqual(self.event_unlabeled.actor.login, "sfdye")
self.assertEqual(self.event_unlabeled.commit_id, None)
self.assertEqual(
self.event_unlabeled.created_at, datetime.datetime(2018, 8, 10, 13, 10, 38)
)
self.assertEqual(self.event_unlabeled.event, "unlabeled")
self.assertEqual(self.event_unlabeled.id, 1782463917)
self.assertEqual(self.event_unlabeled.issue.number, 857)
self.assertEqual(
self.event_unlabeled.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782463917",
)
self.assertEqual(
self.event_unlabeled.node_id, "MDE0OlVubGFiZWxlZEV2ZW50MTc4MjQ2MzkxNw=="
)
self.assertEqual(self.event_unlabeled.commit_url, None)
self.assertEqual(self.event_unlabeled.label.name, "improvement")
self.assertEqual(self.event_unlabeled.assignee, None)
self.assertEqual(self.event_unlabeled.assigner, None)
self.assertEqual(self.event_unlabeled.review_requester, None)
self.assertEqual(self.event_unlabeled.requested_reviewer, None)
self.assertEqual(self.event_unlabeled.milestone, None)
self.assertEqual(self.event_unlabeled.rename, None)
self.assertEqual(self.event_unlabeled.dismissed_review, None)
self.assertEqual(self.event_unlabeled.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(self.event_unlabeled.__repr__(), "IssueEvent(id=1782463917)")
def testEvent_renamed_Attributes(self):
self.assertEqual(self.event_renamed.actor.login, "sfdye")
self.assertEqual(self.event_renamed.commit_id, None)
self.assertEqual(
self.event_renamed.created_at, datetime.datetime(2018, 8, 10, 13, 15, 18)
)
self.assertEqual(self.event_renamed.event, "renamed")
self.assertEqual(self.event_renamed.id, 1782472556)
self.assertEqual(self.event_renamed.issue.number, 857)
self.assertEqual(
self.event_renamed.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782472556",
)
self.assertEqual(
self.event_renamed.node_id, "MDE3OlJlbmFtZWRUaXRsZUV2ZW50MTc4MjQ3MjU1Ng=="
)
self.assertEqual(self.event_renamed.commit_url, None)
self.assertEqual(self.event_renamed.label, None)
self.assertEqual(self.event_renamed.assignee, None)
self.assertEqual(self.event_renamed.assigner, None)
self.assertEqual(self.event_renamed.review_requester, None)
self.assertEqual(self.event_renamed.requested_reviewer, None)
self.assertEqual(self.event_renamed.milestone, None)
self.assertEqual(
self.event_renamed.rename,
{
u"to": u"Adding new attributes to IssueEvent",
u"from": u"Adding new attributes to IssueEvent Object (DO NOT MERGE - SEE NOTES)",
},
)
self.assertEqual(self.event_renamed.dismissed_review, None)
self.assertEqual(self.event_renamed.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(self.event_renamed.__repr__(), "IssueEvent(id=1782472556)")
def testEvent_base_ref_changed_Attributes(self):
self.assertEqual(self.event_base_ref_changed.actor.login, "allevin")
self.assertEqual(self.event_base_ref_changed.commit_id, None)
self.assertEqual(
self.event_base_ref_changed.created_at,
datetime.datetime(2018, 8, 10, 16, 38, 22),
)
self.assertEqual(self.event_base_ref_changed.event, "base_ref_changed")
self.assertEqual(self.event_base_ref_changed.id, 1782915693)
self.assertEqual(self.event_base_ref_changed.issue.number, 857)
self.assertEqual(
self.event_base_ref_changed.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782915693",
)
self.assertEqual(
self.event_base_ref_changed.node_id,
"MDE5OkJhc2VSZWZDaGFuZ2VkRXZlbnQxNzgyOTE1Njkz",
)
self.assertEqual(self.event_base_ref_changed.commit_url, None)
self.assertEqual(self.event_base_ref_changed.label, None)
self.assertEqual(self.event_base_ref_changed.assignee, None)
self.assertEqual(self.event_base_ref_changed.assigner, None)
self.assertEqual(self.event_base_ref_changed.review_requester, None)
self.assertEqual(self.event_base_ref_changed.requested_reviewer, None)
self.assertEqual(self.event_base_ref_changed.milestone, None)
        self.assertEqual(self.event_base_ref_changed.rename, None)
self.assertEqual(self.event_base_ref_changed.dismissed_review, None)
self.assertEqual(self.event_base_ref_changed.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_base_ref_changed.__repr__(), "IssueEvent(id=1782915693)"
)
def testEvent_head_ref_deleted_Attributes(self):
self.assertEqual(self.event_head_ref_deleted.actor.login, "allevin")
self.assertEqual(self.event_head_ref_deleted.commit_id, None)
self.assertEqual(
self.event_head_ref_deleted.created_at,
datetime.datetime(2018, 8, 10, 16, 39, 20),
)
self.assertEqual(self.event_head_ref_deleted.event, "head_ref_deleted")
self.assertEqual(self.event_head_ref_deleted.id, 1782917185)
self.assertEqual(self.event_head_ref_deleted.issue.number, 857)
self.assertEqual(
self.event_head_ref_deleted.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782917185",
)
self.assertEqual(
self.event_head_ref_deleted.node_id,
"MDE5OkhlYWRSZWZEZWxldGVkRXZlbnQxNzgyOTE3MTg1",
)
self.assertEqual(self.event_head_ref_deleted.commit_url, None)
self.assertEqual(self.event_head_ref_deleted.label, None)
self.assertEqual(self.event_head_ref_deleted.assignee, None)
self.assertEqual(self.event_head_ref_deleted.assigner, None)
self.assertEqual(self.event_head_ref_deleted.review_requester, None)
self.assertEqual(self.event_head_ref_deleted.requested_reviewer, None)
self.assertEqual(self.event_head_ref_deleted.milestone, None)
self.assertEqual(self.event_head_ref_deleted.rename, None)
self.assertEqual(self.event_head_ref_deleted.dismissed_review, None)
self.assertEqual(self.event_head_ref_deleted.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_head_ref_deleted.__repr__(), "IssueEvent(id=1782917185)"
)
def testEvent_head_ref_restored_Attributes(self):
self.assertEqual(self.event_head_ref_restored.actor.login, "allevin")
self.assertEqual(self.event_head_ref_restored.commit_id, None)
self.assertEqual(
self.event_head_ref_restored.created_at,
datetime.datetime(2018, 8, 10, 16, 39, 23),
)
self.assertEqual(self.event_head_ref_restored.event, "head_ref_restored")
self.assertEqual(self.event_head_ref_restored.id, 1782917299)
self.assertEqual(self.event_head_ref_restored.issue.number, 857)
self.assertEqual(
self.event_head_ref_restored.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782917299",
)
self.assertEqual(
self.event_head_ref_restored.node_id,
"MDIwOkhlYWRSZWZSZXN0b3JlZEV2ZW50MTc4MjkxNzI5OQ==",
)
self.assertEqual(self.event_head_ref_restored.commit_url, None)
self.assertEqual(self.event_head_ref_restored.label, None)
self.assertEqual(self.event_head_ref_restored.assignee, None)
self.assertEqual(self.event_head_ref_restored.assigner, None)
self.assertEqual(self.event_head_ref_restored.review_requester, None)
self.assertEqual(self.event_head_ref_restored.requested_reviewer, None)
self.assertEqual(self.event_head_ref_restored.milestone, None)
        self.assertEqual(self.event_head_ref_restored.rename, None)
self.assertEqual(self.event_head_ref_restored.dismissed_review, None)
        self.assertEqual(self.event_head_ref_restored.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_head_ref_restored.__repr__(), "IssueEvent(id=1782917299)"
)
def testEvent_milestoned_Attributes(self):
self.assertEqual(self.event_milestoned.actor.login, "sfdye")
self.assertEqual(self.event_milestoned.commit_id, None)
self.assertEqual(
self.event_milestoned.created_at, datetime.datetime(2018, 8, 11, 0, 46, 19)
)
self.assertEqual(self.event_milestoned.event, "milestoned")
self.assertEqual(self.event_milestoned.id, 1783596418)
self.assertEqual(self.event_milestoned.issue.number, 857)
self.assertEqual(
self.event_milestoned.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783596418",
)
self.assertEqual(
self.event_milestoned.node_id, "MDE1Ok1pbGVzdG9uZWRFdmVudDE3ODM1OTY0MTg="
)
self.assertEqual(self.event_milestoned.commit_url, None)
self.assertEqual(self.event_milestoned.label, None)
self.assertEqual(self.event_milestoned.assignee, None)
self.assertEqual(self.event_milestoned.assigner, None)
self.assertEqual(self.event_milestoned.review_requester, None)
self.assertEqual(self.event_milestoned.requested_reviewer, None)
self.assertEqual(self.event_milestoned.milestone.title, "Version 1.25.0")
self.assertEqual(self.event_milestoned.rename, None)
self.assertEqual(self.event_milestoned.dismissed_review, None)
self.assertEqual(self.event_milestoned.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(self.event_milestoned.__repr__(), "IssueEvent(id=1783596418)")
def testEvent_demilestoned_Attributes(self):
self.assertEqual(self.event_demilestoned.actor.login, "sfdye")
self.assertEqual(self.event_demilestoned.commit_id, None)
self.assertEqual(
self.event_demilestoned.created_at,
datetime.datetime(2018, 8, 11, 0, 46, 22),
)
self.assertEqual(self.event_demilestoned.event, "demilestoned")
self.assertEqual(self.event_demilestoned.id, 1783596452)
self.assertEqual(self.event_demilestoned.issue.number, 857)
self.assertEqual(
self.event_demilestoned.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783596452",
)
self.assertEqual(
self.event_demilestoned.node_id,
"MDE3OkRlbWlsZXN0b25lZEV2ZW50MTc4MzU5NjQ1Mg==",
)
self.assertEqual(self.event_demilestoned.commit_url, None)
self.assertEqual(self.event_demilestoned.label, None)
self.assertEqual(self.event_demilestoned.assignee, None)
self.assertEqual(self.event_demilestoned.assigner, None)
self.assertEqual(self.event_demilestoned.review_requester, None)
self.assertEqual(self.event_demilestoned.requested_reviewer, None)
self.assertEqual(self.event_demilestoned.milestone.title, "Version 1.25.0")
self.assertEqual(self.event_demilestoned.rename, None)
self.assertEqual(self.event_demilestoned.dismissed_review, None)
self.assertEqual(self.event_demilestoned.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_demilestoned.__repr__(), "IssueEvent(id=1783596452)"
)
def testEvent_locked_Attributes(self):
self.assertEqual(self.event_locked.actor.login, "PyGithub")
self.assertEqual(self.event_locked.commit_id, None)
self.assertEqual(
self.event_locked.created_at, datetime.datetime(2018, 8, 11, 0, 46, 56)
)
self.assertEqual(self.event_locked.event, "locked")
self.assertEqual(self.event_locked.id, 1783596743)
self.assertEqual(self.event_locked.issue.number, 857)
self.assertEqual(
self.event_locked.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783596743",
)
self.assertEqual(
self.event_locked.node_id, "MDExOkxvY2tlZEV2ZW50MTc4MzU5Njc0Mw=="
)
self.assertEqual(self.event_locked.commit_url, None)
self.assertEqual(self.event_locked.label, None)
self.assertEqual(self.event_locked.assignee, None)
self.assertEqual(self.event_locked.assigner, None)
self.assertEqual(self.event_locked.review_requester, None)
self.assertEqual(self.event_locked.requested_reviewer, None)
self.assertEqual(self.event_locked.milestone, None)
self.assertEqual(self.event_locked.rename, None)
self.assertEqual(self.event_locked.dismissed_review, None)
self.assertEqual(self.event_locked.lock_reason, "too heated")
        # test __repr__() based on this attributes
self.assertEqual(self.event_locked.__repr__(), "IssueEvent(id=1783596743)")
def testEvent_unlocked_Attributes(self):
self.assertEqual(self.event_unlocked.actor.login, "PyGithub")
self.assertEqual(self.event_unlocked.commit_id, None)
self.assertEqual(
self.event_unlocked.created_at, datetime.datetime(2018, 8, 11, 0, 47, 7)
)
self.assertEqual(self.event_unlocked.event, "unlocked")
self.assertEqual(self.event_unlocked.id, 1783596818)
self.assertEqual(self.event_unlocked.issue.number, 857)
self.assertEqual(
self.event_unlocked.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783596818",
)
self.assertEqual(
self.event_unlocked.node_id, "MDEzOlVubG9ja2VkRXZlbnQxNzgzNTk2ODE4"
)
self.assertEqual(self.event_unlocked.commit_url, None)
self.assertEqual(self.event_unlocked.label, None)
self.assertEqual(self.event_unlocked.assignee, None)
self.assertEqual(self.event_unlocked.assigner, None)
self.assertEqual(self.event_unlocked.review_requester, None)
self.assertEqual(self.event_unlocked.requested_reviewer, None)
self.assertEqual(self.event_unlocked.milestone, None)
self.assertEqual(self.event_unlocked.rename, None)
self.assertEqual(self.event_unlocked.dismissed_review, None)
self.assertEqual(self.event_unlocked.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(self.event_unlocked.__repr__(), "IssueEvent(id=1783596818)")
def testEvent_review_dismissed_Attributes(self):
self.assertEqual(self.event_review_dismissed.actor.login, "sfdye")
self.assertEqual(self.event_review_dismissed.commit_id, None)
self.assertEqual(
self.event_review_dismissed.created_at,
datetime.datetime(2018, 8, 11, 1, 7, 10),
)
self.assertEqual(self.event_review_dismissed.event, "review_dismissed")
self.assertEqual(self.event_review_dismissed.id, 1783605084)
self.assertEqual(self.event_review_dismissed.issue.number, 857)
self.assertEqual(
self.event_review_dismissed.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783605084",
)
self.assertEqual(
self.event_review_dismissed.node_id,
"MDIwOlJldmlld0Rpc21pc3NlZEV2ZW50MTc4MzYwNTA4NA==",
)
self.assertEqual(self.event_review_dismissed.commit_url, None)
self.assertEqual(self.event_review_dismissed.label, None)
self.assertEqual(self.event_review_dismissed.assignee, None)
self.assertEqual(self.event_review_dismissed.assigner, None)
self.assertEqual(self.event_review_dismissed.review_requester, None)
self.assertEqual(self.event_review_dismissed.requested_reviewer, None)
self.assertEqual(self.event_review_dismissed.milestone, None)
self.assertEqual(self.event_review_dismissed.rename, None)
self.assertEqual(
self.event_review_dismissed.dismissed_review,
{
u"dismissal_message": u"dismiss",
u"state": u"changes_requested",
u"review_id": 145431295,
},
)
self.assertEqual(self.event_review_dismissed.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_review_dismissed.__repr__(), "IssueEvent(id=1783605084)"
)
def testEvent_review_request_removed_Attributes(self):
self.assertEqual(self.event_review_request_removed.actor.login, "sfdye")
self.assertEqual(self.event_review_request_removed.commit_id, None)
self.assertEqual(
self.event_review_request_removed.created_at,
datetime.datetime(2018, 8, 11, 12, 32, 59),
)
self.assertEqual(
self.event_review_request_removed.event, "review_request_removed"
)
self.assertEqual(self.event_review_request_removed.id, 1783779835)
self.assertEqual(self.event_review_request_removed.issue.number, 857)
self.assertEqual(
self.event_review_request_removed.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783779835",
)
self.assertEqual(
self.event_review_request_removed.node_id,
"MDI1OlJldmlld1JlcXVlc3RSZW1vdmVkRXZlbnQxNzgzNzc5ODM1",
)
self.assertEqual(self.event_review_request_removed.commit_url, None)
self.assertEqual(self.event_review_request_removed.label, None)
self.assertEqual(self.event_review_request_removed.assignee, None)
self.assertEqual(self.event_review_request_removed.assigner, None)
self.assertEqual(
self.event_review_request_removed.review_requester.login, "sfdye"
)
self.assertEqual(
self.event_review_request_removed.requested_reviewer.login, "jasonwhite"
)
self.assertEqual(self.event_review_request_removed.milestone, None)
self.assertEqual(self.event_review_request_removed.rename, None)
self.assertEqual(self.event_review_request_removed.dismissed_review, None)
self.assertEqual(self.event_review_request_removed.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_review_request_removed.__repr__(), "IssueEvent(id=1783779835)"
)
def testEvent_marked_as_duplicate_Attributes(self):
self.assertEqual(self.event_marked_as_duplicate.actor.login, "sfdye")
self.assertEqual(self.event_marked_as_duplicate.commit_id, None)
self.assertEqual(
self.event_marked_as_duplicate.created_at,
datetime.datetime(2018, 8, 11, 12, 32, 35),
)
self.assertEqual(self.event_marked_as_duplicate.event, "marked_as_duplicate")
self.assertEqual(self.event_marked_as_duplicate.id, 1783779725)
self.assertEqual(self.event_marked_as_duplicate.issue.number, 857)
self.assertEqual(
self.event_marked_as_duplicate.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783779725",
)
self.assertEqual(
self.event_marked_as_duplicate.node_id,
"MDIyOk1hcmtlZEFzRHVwbGljYXRlRXZlbnQxNzgzNzc5NzI1",
)
self.assertEqual(self.event_marked_as_duplicate.commit_url, None)
self.assertEqual(self.event_marked_as_duplicate.label, None)
self.assertEqual(self.event_marked_as_duplicate.assignee, None)
self.assertEqual(self.event_marked_as_duplicate.assigner, None)
self.assertEqual(self.event_marked_as_duplicate.review_requester, None)
self.assertEqual(self.event_marked_as_duplicate.requested_reviewer, None)
self.assertEqual(self.event_marked_as_duplicate.milestone, None)
self.assertEqual(self.event_marked_as_duplicate.rename, None)
self.assertEqual(self.event_marked_as_duplicate.dismissed_review, None)
self.assertEqual(self.event_marked_as_duplicate.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_marked_as_duplicate.__repr__(), "IssueEvent(id=1783779725)"
)
def testEvent_unmarked_as_duplicate_Attributes(self):
self.assertEqual(self.event_unmarked_as_duplicate.actor.login, "sfdye")
self.assertEqual(self.event_unmarked_as_duplicate.commit_id, None)
self.assertEqual(
self.event_unmarked_as_duplicate.created_at,
datetime.datetime(2018, 8, 15, 2, 57, 46),
)
self.assertEqual(
self.event_unmarked_as_duplicate.event, "unmarked_as_duplicate"
)
self.assertEqual(self.event_unmarked_as_duplicate.id, 1789228962)
self.assertEqual(self.event_unmarked_as_duplicate.issue.number, 857)
self.assertEqual(
self.event_unmarked_as_duplicate.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1789228962",
)
self.assertEqual(
self.event_unmarked_as_duplicate.node_id,
"MDI0OlVubWFya2VkQXNEdXBsaWNhdGVFdmVudDE3ODkyMjg5NjI=",
)
self.assertEqual(self.event_unmarked_as_duplicate.commit_url, None)
self.assertEqual(self.event_unmarked_as_duplicate.label, None)
self.assertEqual(self.event_unmarked_as_duplicate.assignee, None)
self.assertEqual(self.event_unmarked_as_duplicate.assigner, None)
self.assertEqual(self.event_unmarked_as_duplicate.review_requester, None)
self.assertEqual(self.event_unmarked_as_duplicate.requested_reviewer, None)
self.assertEqual(self.event_unmarked_as_duplicate.milestone, None)
self.assertEqual(self.event_unmarked_as_duplicate.rename, None)
self.assertEqual(self.event_unmarked_as_duplicate.dismissed_review, None)
self.assertEqual(self.event_unmarked_as_duplicate.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_unmarked_as_duplicate.__repr__(), "IssueEvent(id=1789228962)"
)
def testEvent_added_to_project_Attributes(self):
self.assertEqual(self.event_added_to_project.actor.login, "sfdye")
self.assertEqual(self.event_added_to_project.commit_id, None)
self.assertEqual(
self.event_added_to_project.created_at,
datetime.datetime(2018, 8, 16, 8, 13, 24),
)
self.assertEqual(self.event_added_to_project.event, "added_to_project")
self.assertEqual(self.event_added_to_project.id, 1791766828)
self.assertEqual(self.event_added_to_project.issue.number, 857)
self.assertEqual(
self.event_added_to_project.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1791766828",
)
self.assertEqual(
self.event_added_to_project.node_id,
"MDE5OkFkZGVkVG9Qcm9qZWN0RXZlbnQxNzkxNzY2ODI4",
)
self.assertEqual(self.event_added_to_project.commit_url, None)
self.assertEqual(self.event_added_to_project.label, None)
self.assertEqual(self.event_added_to_project.assignee, None)
self.assertEqual(self.event_added_to_project.assigner, None)
self.assertEqual(self.event_added_to_project.review_requester, None)
self.assertEqual(self.event_added_to_project.requested_reviewer, None)
self.assertEqual(self.event_added_to_project.milestone, None)
self.assertEqual(self.event_added_to_project.rename, None)
self.assertEqual(self.event_added_to_project.dismissed_review, None)
self.assertEqual(self.event_added_to_project.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_added_to_project.__repr__(), "IssueEvent(id=1791766828)"
)
def testEvent_moved_columns_in_project_Attributes(self):
self.assertEqual(self.event_moved_columns_in_project.actor.login, "sfdye")
self.assertEqual(self.event_moved_columns_in_project.commit_id, None)
self.assertEqual(
self.event_moved_columns_in_project.created_at,
datetime.datetime(2018, 8, 16, 8, 13, 55),
)
self.assertEqual(
self.event_moved_columns_in_project.event, "moved_columns_in_project"
)
self.assertEqual(self.event_moved_columns_in_project.id, 1791767766)
self.assertEqual(self.event_moved_columns_in_project.issue.number, 857)
self.assertEqual(
self.event_moved_columns_in_project.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1791767766",
)
self.assertEqual(
self.event_moved_columns_in_project.node_id,
"MDI2Ok1vdmVkQ29sdW1uc0luUHJvamVjdEV2ZW50MTc5MTc2Nzc2Ng==",
)
self.assertEqual(self.event_moved_columns_in_project.commit_url, None)
self.assertEqual(self.event_moved_columns_in_project.label, None)
self.assertEqual(self.event_moved_columns_in_project.assignee, None)
self.assertEqual(self.event_moved_columns_in_project.assigner, None)
self.assertEqual(self.event_moved_columns_in_project.review_requester, None)
self.assertEqual(self.event_moved_columns_in_project.requested_reviewer, None)
self.assertEqual(self.event_moved_columns_in_project.milestone, None)
self.assertEqual(self.event_moved_columns_in_project.rename, None)
self.assertEqual(self.event_moved_columns_in_project.dismissed_review, None)
self.assertEqual(self.event_moved_columns_in_project.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_moved_columns_in_project.__repr__(), "IssueEvent(id=1791767766)"
)
def testEvent_removed_from_project_Attributes(self):
self.assertEqual(self.event_removed_from_project.actor.login, "sfdye")
self.assertEqual(self.event_removed_from_project.commit_id, None)
self.assertEqual(
self.event_removed_from_project.created_at,
datetime.datetime(2018, 8, 16, 8, 14, 8),
)
self.assertEqual(self.event_removed_from_project.event, "removed_from_project")
self.assertEqual(self.event_removed_from_project.id, 1791768212)
self.assertEqual(self.event_removed_from_project.issue.number, 857)
self.assertEqual(
self.event_removed_from_project.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1791768212",
)
self.assertEqual(
self.event_removed_from_project.node_id,
"MDIzOlJlbW92ZWRGcm9tUHJvamVjdEV2ZW50MTc5MTc2ODIxMg==",
)
self.assertEqual(self.event_removed_from_project.commit_url, None)
self.assertEqual(self.event_removed_from_project.label, None)
self.assertEqual(self.event_removed_from_project.assignee, None)
self.assertEqual(self.event_removed_from_project.assigner, None)
self.assertEqual(self.event_removed_from_project.review_requester, None)
self.assertEqual(self.event_removed_from_project.requested_reviewer, None)
self.assertEqual(self.event_removed_from_project.milestone, None)
self.assertEqual(self.event_removed_from_project.rename, None)
self.assertEqual(self.event_removed_from_project.dismissed_review, None)
self.assertEqual(self.event_removed_from_project.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_removed_from_project.__repr__(), "IssueEvent(id=1791768212)"
)
def testEvent_converted_note_to_issue_Attributes(self):
self.assertEqual(self.event_converted_note_to_issue.actor.login, "sfdye")
self.assertEqual(self.event_converted_note_to_issue.commit_id, None)
self.assertEqual(
self.event_converted_note_to_issue.created_at,
datetime.datetime(2018, 8, 16, 8, 14, 34),
)
self.assertEqual(
self.event_converted_note_to_issue.event, "converted_note_to_issue"
)
self.assertEqual(self.event_converted_note_to_issue.id, 1791769149)
self.assertEqual(self.event_converted_note_to_issue.issue.number, 866)
self.assertEqual(
self.event_converted_note_to_issue.url,
"https://api.github.com/repos/PyGithub/PyGithub/issues/events/1791769149",
)
self.assertEqual(
self.event_converted_note_to_issue.node_id,
"MDI1OkNvbnZlcnRlZE5vdGVUb0lzc3VlRXZlbnQxNzkxNzY5MTQ5",
)
self.assertEqual(self.event_converted_note_to_issue.commit_url, None)
self.assertEqual(self.event_converted_note_to_issue.label, None)
self.assertEqual(self.event_converted_note_to_issue.assignee, None)
self.assertEqual(self.event_converted_note_to_issue.assigner, None)
self.assertEqual(self.event_converted_note_to_issue.review_requester, None)
self.assertEqual(self.event_converted_note_to_issue.requested_reviewer, None)
self.assertEqual(self.event_converted_note_to_issue.milestone, None)
self.assertEqual(self.event_converted_note_to_issue.rename, None)
self.assertEqual(self.event_converted_note_to_issue.dismissed_review, None)
self.assertEqual(self.event_converted_note_to_issue.lock_reason, None)
# test __repr__() based on this attributes
self.assertEqual(
self.event_converted_note_to_issue.__repr__(), "IssueEvent(id=1791769149)"
)
|
meteorcloudy/tensorflow
|
refs/heads/master
|
tensorflow/contrib/autograph/utils/py_func.py
|
27
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pyfunc creation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import script_ops
class MatchDType(namedtuple('MatchDType', ('arg_number',))):
"""Allows matching the dtype of an argument.
Used in conjunction with function calls. For example, MatchDType(0) will
match the DType of the first argument.
"""
pass
def wrap_py_func(f, return_dtypes, args, kwargs=None, use_dummy_return=False):
"""Helper that wraps a callable to py_func.
The helper passes tensor arguments through the py_func interface. Non-tensor
arguments are allowed, and will be passed to f directly. Note that non-tensor
  arguments captured by f will not update every time the wrapper is
called (this is consistent with its argument list, which only includes
the tensor arguments). In general, it's safest not to reuse this wrapper.
Args:
f: Callable
    return_dtypes: None, an individual DType or MatchDType, or a tuple/list of
        DType or MatchDType, giving the data type for each of f's return
        value(s). Set to None if f has no return values or use_dummy_return is
        True. Use MatchDType to define a dtype identical to that of the `i`th
        argument (argument 0 is the first); an argument must be of Tensor type
        if it is to be used with MatchDType.
args: Positional arguments for f, as list or tuple.
kwargs: Keyword arguments for f, as dict with string keys. May be None.
use_dummy_return: If True, the function will return a dummy value of 1
and discard its actual return value.
Returns:
The return values of f converted to tensor.
Raises:
ValueError: if any of the arguments are incorrect.
"""
if return_dtypes and use_dummy_return:
raise ValueError('if use_dummy_return is True, return_dtypes must be empty')
tensor_args = []
tensor_args_idx = {}
# Of the positional arguments, only grab the tensor ones to be passed through
# the py_func.
n_args = len(args)
arg_is_tensor = tuple(map(tensor_util.is_tensor, args))
for i in range(n_args):
if arg_is_tensor[i]:
tensor_args_idx[i] = len(tensor_args)
tensor_args.append(args[i])
# We essentially take the tensor kwargs, if any, and add them to the list of
# positional arguments. The kwargs are then reconstructed inside the py_func.
#
# For example, if
#
# args = [Tensor(1), 'foo']
# kwargs = {'a': Tensor(2), 'b': 'bar'}
#
# Then
#
# tensor_args = (Tensor(1), Tensor(2))
# kwarg_keys = ('a', 'b')
if kwargs:
kwarg_keys = tuple(kwargs.keys())
kwarg_is_tensor = {k: tensor_util.is_tensor(kwargs[k]) for k in kwarg_keys}
for k in kwarg_keys:
if kwarg_is_tensor[k]:
tensor_args_idx[k] = len(tensor_args)
tensor_args.append(kwargs[k])
else:
kwarg_keys = ()
# Set up return dtypes.
def match_arg_dtype(arg_number):
arg = args[arg_number]
if not arg_is_tensor[arg_number]:
raise ValueError(
'argument %d was used with MatchDType and must be a tf.Tensor, but '
'was %s instead' % (arg_number, type(arg)))
return arg.dtype
if return_dtypes:
if isinstance(return_dtypes, MatchDType):
return_dtypes = match_arg_dtype(return_dtypes.arg_number)
elif isinstance(return_dtypes, (list, tuple)):
return_dtypes = tuple(
match_arg_dtype(a.arg_number) if isinstance(a, MatchDType) else a
for a in return_dtypes)
else:
assert isinstance(return_dtypes, dtypes.DType)
def f_wrapper(*tensor_args):
f_args = tuple(tensor_args[tensor_args_idx[i]] if arg_is_tensor[i] else a
for i, a in enumerate(args))
f_kwargs = {
k: tensor_args[tensor_args_idx[k]] if kwarg_is_tensor[k] else kwargs[k]
for i, k in enumerate(kwarg_keys)
}
retval = f(*f_args, **f_kwargs)
return 1 if use_dummy_return else retval
return script_ops.py_func(f_wrapper, tensor_args, dtypes.int64
if use_dummy_return else return_dtypes)
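# Editor's note: the sketch below is an illustrative addition, not part of the
# original module. It assumes graph-mode tf.constant inputs and only shows how
# the wrap_py_func/MatchDType contract documented above fits together; nothing
# here runs at import time.
def _example_wrap_py_func_usage():  # pragma: no cover
  import tensorflow as tf

  def log_and_add(x, y, msg):
    print(msg)  # plain-Python side effect executed through the py_func
    return x + y

  a = tf.constant(1, dtype=tf.int64)
  b = tf.constant(2, dtype=tf.int64)
  # MatchDType(0) makes the returned tensor's dtype follow argument 0; the
  # string 'adding' is a non-tensor argument and is captured by the wrapper.
  return wrap_py_func(log_and_add, MatchDType(0), [a, b, 'adding'])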
|
blooparksystems/odoo
|
refs/heads/9.0
|
addons/payment_paypal/tests/test_paypal.py
|
25
|
# -*- coding: utf-8 -*-
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_paypal.controllers.main import PaypalController
from openerp.tools import mute_logger
from lxml import objectify
import urlparse
class PaypalCommon(PaymentAcquirerCommon):
def setUp(self):
super(PaypalCommon, self).setUp()
cr, uid = self.cr, self.uid
self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
# get the paypal account
model, self.paypal_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_paypal', 'payment_acquirer_paypal')
# tde+seller@openerp.com - tde+buyer@openerp.com - tde+buyer-it@openerp.com
# some CC
self.amex = (('378282246310005', '123'), ('371449635398431', '123'))
self.amex_corporate = (('378734493671000', '123'))
        self.australian_bankcard = (('5610591081018250', '123'))
self.dinersclub = (('30569309025904', '123'), ('38520000023237', '123'))
self.discover = (('6011111111111117', '123'), ('6011000990139424', '123'))
self.jcb = (('3530111333300000', '123'), ('3566002020360505', '123'))
self.mastercard = (('5555555555554444', '123'), ('5105105105105100', '123'))
self.visa = (('4111111111111111', '123'), ('4012888888881881', '123'), ('4222222222222', '123'))
self.dankord_pbs = (('76009244561', '123'), ('5019717010103742', '123'))
self.switch_polo = (('6331101999990016', '123'))
class PaypalForm(PaypalCommon):
def test_10_paypal_form_render(self):
cr, uid, context = self.cr, self.uid, {}
# be sure not to do stupid things
self.payment_acquirer.write(cr, uid, self.paypal_id, {'paypal_email_account': 'tde+paypal-facilitator@openerp.com', 'fees_active': False}, context)
paypal = self.payment_acquirer.browse(cr, uid, self.paypal_id, context)
self.assertEqual(paypal.environment, 'test', 'test without test environment')
# ----------------------------------------
# Test: button direct rendering
# ----------------------------------------
# render the button
res = self.payment_acquirer.render(
cr, uid, self.paypal_id,
'test_ref0', 0.01, self.currency_euro_id,
values=self.buyer_values,
context=context)
form_values = {
'cmd': '_xclick',
'business': 'tde+paypal-facilitator@openerp.com',
'item_name': 'YourCompany: test_ref0',
'item_number': 'test_ref0',
'first_name': 'Norbert',
'last_name': 'Buyer',
'amount': '0.01',
'currency_code': 'EUR',
'address1': 'Huge Street 2/543',
'city': 'Sin City',
'zip': '1000',
'country': 'Belgium',
'email': 'norbert.buyer@example.com',
'return': '%s' % urlparse.urljoin(self.base_url, PaypalController._return_url),
'notify_url': '%s' % urlparse.urljoin(self.base_url, PaypalController._notify_url),
'cancel_return': '%s' % urlparse.urljoin(self.base_url, PaypalController._cancel_url),
}
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://www.sandbox.paypal.com/cgi-bin/webscr', 'paypal: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'paypal: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
def test_11_paypal_form_with_fees(self):
cr, uid, context = self.cr, self.uid, {}
# be sure not to do stupid things
paypal = self.payment_acquirer.browse(self.cr, self.uid, self.paypal_id, None)
self.assertEqual(paypal.environment, 'test', 'test without test environment')
# update acquirer: compute fees
self.payment_acquirer.write(cr, uid, self.paypal_id, {
'fees_active': True,
'fees_dom_fixed': 1.0,
'fees_dom_var': 0.35,
'fees_int_fixed': 1.5,
'fees_int_var': 0.50,
}, context)
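        # Editor's note (added for clarity, not in the original test): with a
        # 12.50 EUR amount and a non-domestic buyer, the international fee
        # settings above are expected to yield roughly
        # (12.50 * 0.50 / 100 + 1.5) / (1 - 0.50 / 100) ~= 1.57,
        # which matches the 'handling' value asserted further down; the exact
        # formula lives in the payment_acquirer model, this is only a hedged
        # summary of the arithmetic.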
# render the button
res = self.payment_acquirer.render(
cr, uid, self.paypal_id,
'test_ref0', 12.50, self.currency_euro_id,
values=self.buyer_values,
context=context)
# check form result
handling_found = False
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://www.sandbox.paypal.com/cgi-bin/webscr', 'paypal: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['handling']:
handling_found = True
self.assertEqual(form_input.get('value'), '1.57', 'paypal: wrong computed fees')
self.assertTrue(handling_found, 'paypal: fees_active did not add handling input in rendered form')
@mute_logger('openerp.addons.payment_paypal.models.paypal', 'ValidationError')
def test_20_paypal_form_management(self):
cr, uid, context = self.cr, self.uid, {}
# be sure not to do stupid things
paypal = self.payment_acquirer.browse(cr, uid, self.paypal_id, context)
self.assertEqual(paypal.environment, 'test', 'test without test environment')
# typical data posted by paypal after client has successfully paid
paypal_post_data = {
'protection_eligibility': u'Ineligible',
'last_name': u'Poilu',
'txn_id': u'08D73520KX778924N',
'receiver_email': u'dummy',
'payment_status': u'Pending',
'payment_gross': u'',
'tax': u'0.00',
'residence_country': u'FR',
'address_state': u'Alsace',
'payer_status': u'verified',
'txn_type': u'web_accept',
'address_street': u'Av. de la Pelouse, 87648672 Mayet',
'handling_amount': u'0.00',
'payment_date': u'03:21:19 Nov 18, 2013 PST',
'first_name': u'Norbert',
'item_name': u'test_ref_2',
'address_country': u'France',
'charset': u'windows-1252',
'custom': u'',
'notify_version': u'3.7',
'address_name': u'Norbert Poilu',
'pending_reason': u'multi_currency',
'item_number': u'test_ref_2',
'receiver_id': u'dummy',
'transaction_subject': u'',
'business': u'dummy',
'test_ipn': u'1',
'payer_id': u'VTDKRZQSAHYPS',
'verify_sign': u'An5ns1Kso7MWUdW4ErQKJJJ4qi4-AVoiUf-3478q3vrSmqh08IouiYpM',
'address_zip': u'75002',
'address_country_code': u'FR',
'address_city': u'Paris',
'address_status': u'unconfirmed',
'mc_currency': u'EUR',
'shipping': u'0.00',
'payer_email': u'tde+buyer@openerp.com',
'payment_type': u'instant',
'mc_gross': u'1.95',
'ipn_track_id': u'866df2ccd444b',
'quantity': u'1'
}
# should raise error about unknown tx
with self.assertRaises(ValidationError):
self.payment_transaction.form_feedback(cr, uid, paypal_post_data, 'paypal', context=context)
# create tx
tx_id = self.payment_transaction.create(
cr, uid, {
'amount': 1.95,
'acquirer_id': self.paypal_id,
'currency_id': self.currency_euro_id,
'reference': 'test_ref_2',
'partner_name': 'Norbert Buyer',
'partner_country_id': self.country_france_id,
}, context=context
)
# validate it
self.payment_transaction.form_feedback(cr, uid, paypal_post_data, 'paypal', context=context)
# check
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
self.assertEqual(tx.state, 'pending', 'paypal: wrong state after receiving a valid pending notification')
self.assertEqual(tx.state_message, 'multi_currency', 'paypal: wrong state message after receiving a valid pending notification')
self.assertEqual(tx.acquirer_reference, '08D73520KX778924N', 'paypal: wrong txn_id after receiving a valid pending notification')
        self.assertFalse(tx.date_validate, 'paypal: validation date should not be updated when receiving pending notification')
# update tx
self.payment_transaction.write(cr, uid, [tx_id], {
'state': 'draft',
'acquirer_reference': False,
}, context=context)
# update notification from paypal
paypal_post_data['payment_status'] = 'Completed'
# validate it
self.payment_transaction.form_feedback(cr, uid, paypal_post_data, 'paypal', context=context)
# check
tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertEqual(tx.state, 'done', 'paypal: wrong state after receiving a valid completed notification')
        self.assertEqual(tx.acquirer_reference, '08D73520KX778924N', 'paypal: wrong txn_id after receiving a valid completed notification')
self.assertEqual(tx.date_validate, '2013-11-18 03:21:19', 'paypal: wrong validation date')
|
phammin1/QaManagement
|
refs/heads/master
|
QaManagement/env/Lib/site-packages/setuptools/tests/test_upload_docs.py
|
151
|
import os
import zipfile
import contextlib
import pytest
from setuptools.command.upload_docs import upload_docs
from setuptools.dist import Distribution
from .textwrap import DALS
from . import contexts
SETUP_PY = DALS(
"""
from setuptools import setup
setup(name='foo')
""")
@pytest.fixture
def sample_project(tmpdir_cwd):
# setup.py
with open('setup.py', 'wt') as f:
f.write(SETUP_PY)
os.mkdir('build')
# A test document.
with open('build/index.html', 'w') as f:
f.write("Hello world.")
# An empty folder.
os.mkdir('build/empty')
@pytest.mark.usefixtures('sample_project')
@pytest.mark.usefixtures('user_override')
class TestUploadDocsTest:
def test_create_zipfile(self):
"""
Ensure zipfile creation handles common cases, including a folder
containing an empty folder.
"""
dist = Distribution()
cmd = upload_docs(dist)
cmd.target_dir = cmd.upload_dir = 'build'
with contexts.tempdir() as tmp_dir:
tmp_file = os.path.join(tmp_dir, 'foo.zip')
zip_file = cmd.create_zipfile(tmp_file)
assert zipfile.is_zipfile(tmp_file)
with contextlib.closing(zipfile.ZipFile(tmp_file)) as zip_file:
assert zip_file.namelist() == ['index.html']
|
mmoya/ansible
|
refs/heads/devel
|
plugins/inventory/gce.py
|
18
|
#!/usr/bin/env python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
GCE external inventory script
=================================
Generates inventory that Ansible can understand by making API requests
Google Compute Engine via the libcloud library. Full install/configuration
instructions for the gce* modules can be found in the comments of
ansible/test/gce_tests.py.
When run against a specific host, this script returns the following variables
based on the data obtained from the libcloud Node object:
- gce_uuid
- gce_id
- gce_image
- gce_machine_type
- gce_private_ip
- gce_public_ip
- gce_name
- gce_description
- gce_status
- gce_zone
- gce_tags
- gce_metadata
- gce_network
When run in --list mode, instances are grouped by the following categories:
- zone:
zone group name examples are us-central1-b, europe-west1-a, etc.
- instance tags:
An entry is created for each tag. For example, if you have two instances
with a common tag called 'foo', they will both be grouped together under
the 'tag_foo' name.
- network name:
the name of the network is appended to 'network_' (e.g. the 'default'
network will result in a group named 'network_default')
- machine type
types follow a pattern like n1-standard-4, g1-small, etc.
- running status:
group name prefixed with 'status_' (e.g. status_running, status_stopped,..)
- image:
when using an ephemeral/scratch disk, this will be set to the image name
used when creating the instance (e.g. debian-7-wheezy-v20130816). when
your instance was created with a root persistent disk it will be set to
'persistent_disk' since there is no current way to determine the image.
Examples:
Execute uname on all instances in the us-central1-a zone
$ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
Use the GCE inventory script to print out instance specific information
$ plugins/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
Version: 0.0.1
'''
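# Editor's note: an illustrative sketch (not part of the original script) of the
# grouping produced in --list mode, with made-up instance names; the exact keys
# come from group_instances()/node_to_dict() further down.
#
#   {
#     "us-central1-a": ["web-1", "db-1"],
#     "tag_foo": ["web-1"],
#     "network_default": ["web-1", "db-1"],
#     "n1-standard-1": ["web-1"],
#     "status_running": ["web-1", "db-1"],
#     "persistent_disk": ["db-1"],
#     "_meta": {"hostvars": {"web-1": {"gce_zone": "us-central1-a", "...": "..."}}}
#   }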
__requires__ = ['pycrypto>=2.6']
try:
import pkg_resources
except ImportError:
# Use pkg_resources to find the correct versions of libraries and set
# sys.path appropriately when there are multiversion installs. We don't
# fail here as there is code that better expresses the errors where the
# library is used.
pass
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v1"
import sys
import os
import argparse
import ConfigParser
try:
import json
except ImportError:
import simplejson as json
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except:
print("GCE inventory script requires libcloud >= 0.13")
sys.exit(1)
class GceInventory(object):
def __init__(self):
# Read settings and parse CLI arguments
self.parse_cli_args()
self.driver = self.get_gce_driver()
# Just display data for specific host
if self.args.host:
print self.json_format_dict(self.node_to_dict(
self.get_instance(self.args.host)),
pretty=self.args.pretty)
sys.exit(0)
# Otherwise, assume user wants all instances grouped
print(self.json_format_dict(self.group_instances(),
pretty=self.args.pretty))
sys.exit(0)
def get_gce_driver(self):
"""Determine the GCE authorization settings and return a
libcloud driver.
"""
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
# to work.
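        # Editor's note: a hedged example of what such a gce.ini could contain,
        # using only the keys defaulted below (values are placeholders, not
        # real credentials):
        #
        #   [gce]
        #   gce_service_account_email_address = 1234@developer.gserviceaccount.com
        #   gce_service_account_pem_file_path = /path/to/key.pem
        #   gce_project_id = my-project
        #   libcloud_secrets =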
config = ConfigParser.SafeConfigParser(defaults={
'gce_service_account_email_address': '',
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'libcloud_secrets': '',
})
if 'gce' not in config.sections():
config.add_section('gce')
config.read(gce_ini_path)
# Attempt to get GCE params from a configuration file, if one
# exists.
secrets_path = config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
args = list(getattr(secrets, 'GCE_PARAMS', []))
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
secrets_found = True
except:
pass
if not secrets_found and secrets_path:
if not secrets_path.endswith('secrets.py'):
err = "Must specify libcloud secrets file as "
err += "/absolute/path/to/secrets.py"
print(err)
sys.exit(1)
sys.path.append(os.path.dirname(secrets_path))
try:
import secrets
args = list(getattr(secrets, 'GCE_PARAMS', []))
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
secrets_found = True
except:
pass
if not secrets_found:
args = [
config.get('gce','gce_service_account_email_address'),
config.get('gce','gce_service_account_pem_file_path')
]
kwargs = {'project': config.get('gce', 'gce_project_id')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
args[0] = os.environ.get('GCE_EMAIL', args[0])
args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
# Retrieve and return the GCE driver.
gce = get_driver(Provider.GCE)(*args, **kwargs)
gce.connection.user_agent_append(
'%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
)
return gce
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on GCE')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
self.args = parser.parse_args()
def node_to_dict(self, inst):
md = {}
if inst is None:
return {}
if inst.extra['metadata'].has_key('items'):
for entry in inst.extra['metadata']['items']:
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
return {
'gce_uuid': inst.uuid,
'gce_id': inst.id,
'gce_image': inst.image,
'gce_machine_type': inst.size,
'gce_private_ip': inst.private_ips[0],
'gce_public_ip': inst.public_ips[0],
'gce_name': inst.name,
'gce_description': inst.extra['description'],
'gce_status': inst.extra['status'],
'gce_zone': inst.extra['zone'].name,
'gce_tags': inst.extra['tags'],
'gce_metadata': md,
'gce_network': net,
# Hosts don't have a public name, so we add an IP
'ansible_ssh_host': inst.public_ips[0]
}
def get_instance(self, instance_name):
'''Gets details about a specific instance '''
try:
return self.driver.ex_get_node(instance_name)
except Exception, e:
return None
def group_instances(self):
'''Group all instances'''
groups = {}
meta = {}
meta["hostvars"] = {}
for node in self.driver.list_nodes():
name = node.name
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.extra['zone'].name
if groups.has_key(zone): groups[zone].append(name)
else: groups[zone] = [name]
tags = node.extra['tags']
for t in tags:
tag = 'tag_%s' % t
if groups.has_key(tag): groups[tag].append(name)
else: groups[tag] = [name]
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
net = 'network_%s' % net
if groups.has_key(net): groups[net].append(name)
else: groups[net] = [name]
machine_type = node.size
if groups.has_key(machine_type): groups[machine_type].append(name)
else: groups[machine_type] = [name]
image = node.image and node.image or 'persistent_disk'
if groups.has_key(image): groups[image].append(name)
else: groups[image] = [name]
status = node.extra['status']
stat = 'status_%s' % status.lower()
if groups.has_key(stat): groups[stat].append(name)
else: groups[stat] = [name]
groups["_meta"] = meta
return groups
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
GceInventory()
|
ddico/odoo
|
refs/heads/master
|
addons/website_crm_partner_assign/tests/__init__.py
|
87
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import test_partner_assign
|
bretlowery/snakr
|
refs/heads/master
|
lib/django/utils/deconstruct.py
|
502
|
from importlib import import_module
from django.utils.version import get_docs_version
def deconstructible(*args, **kwargs):
"""
    Class decorator that allows the decorated class to be serialized
by the migrations subsystem.
Accepts an optional kwarg `path` to specify the import path.
"""
path = kwargs.pop('path', None)
def decorator(klass):
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
obj = super(klass, cls).__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
def deconstruct(obj):
"""
Returns a 3-tuple of class import path, positional arguments,
and keyword arguments.
"""
# Python 2/fallback version
if path:
module_name, _, name = path.rpartition('.')
else:
module_name = obj.__module__
name = obj.__class__.__name__
# Make sure it's actually there and not an inner class
module = import_module(module_name)
if not hasattr(module, name):
raise ValueError(
"Could not find object %s in %s.\n"
"Please note that you cannot serialize things like inner "
"classes. Please move the object into the main module "
"body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
% (name, module_name, get_docs_version()))
return (
path or '%s.%s' % (obj.__class__.__module__, name),
obj._constructor_args[0],
obj._constructor_args[1],
)
klass.__new__ = staticmethod(__new__)
klass.deconstruct = deconstruct
return klass
if not args:
return decorator
return decorator(*args, **kwargs)
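# Editor's note: an illustrative sketch (not part of Django itself) of how a
# decorated class round-trips through deconstruct(); the class and module path
# are hypothetical.
#
#   @deconstructible(path='myapp.validators.RangeValidator')
#   class RangeValidator(object):
#       def __init__(self, low, high=10):
#           self.low, self.high = low, high
#
#   RangeValidator(1, high=5).deconstruct()
#   # -> ('myapp.validators.RangeValidator', (1,), {'high': 5})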
|
aroche/django
|
refs/heads/master
|
tests/m2m_through_regress/tests.py
|
182
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core import management
from django.test import TestCase
from django.utils.six import StringIO
from .models import (
Car, CarDriver, Driver, Group, Membership, Person, UserMembership,
)
class M2MThroughTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.jim = Person.objects.create(name="Jim")
cls.rock = Group.objects.create(name="Rock")
cls.roll = Group.objects.create(name="Roll")
cls.frank = User.objects.create_user("frank", "frank@example.com", "password")
cls.jane = User.objects.create_user("jane", "jane@example.com", "password")
# normal intermediate model
cls.bob_rock = Membership.objects.create(person=cls.bob, group=cls.rock)
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll, price=50)
cls.jim_rock = Membership.objects.create(person=cls.jim, group=cls.rock, price=50)
# intermediate model with custom id column
cls.frank_rock = UserMembership.objects.create(user=cls.frank, group=cls.rock)
cls.frank_roll = UserMembership.objects.create(user=cls.frank, group=cls.roll)
cls.jane_rock = UserMembership.objects.create(user=cls.jane, group=cls.rock)
def test_retrieve_reverse_m2m_items(self):
self.assertQuerysetEqual(
self.bob.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items(self):
self.assertQuerysetEqual(
self.roll.members.all(), [
"<Person: Bob>",
]
)
def test_cannot_use_setattr_on_reverse_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, setattr, self.bob, "group_set", [])
def test_cannot_use_setattr_on_forward_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, setattr, self.roll, "members", [])
def test_cannot_use_create_on_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, self.rock.members.create, name="Anne")
def test_cannot_use_create_on_reverse_m2m_with_intermediary_model(self):
self.assertRaises(AttributeError, self.bob.group_set.create, name="Funk")
def test_retrieve_reverse_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.frank.group_set.all(), [
"<Group: Rock>",
"<Group: Roll>",
],
ordered=False
)
def test_retrieve_forward_m2m_items_via_custom_id_intermediary(self):
self.assertQuerysetEqual(
self.roll.user_members.all(), [
"<User: frank>",
]
)
def test_join_trimming_forwards(self):
"Check that we don't involve too many copies of the intermediate table when doing a join. Refs #8046, #8254"
self.assertQuerysetEqual(
self.rock.members.filter(membership__price=50), [
"<Person: Jim>",
]
)
def test_join_trimming_reverse(self):
self.assertQuerysetEqual(
self.bob.group_set.filter(membership__price=50), [
"<Group: Roll>",
]
)
class M2MThroughSerializationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.roll = Group.objects.create(name="Roll")
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll)
def test_serialization(self):
"m2m-through models aren't serialized as m2m fields. Refs #8134"
pks = {"p_pk": self.bob.pk, "g_pk": self.roll.pk, "m_pk": self.bob_roll.pk}
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(out.getvalue().strip(), """[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", "fields": {"person": %(p_pk)s, "price": 100, "group": %(g_pk)s}}, {"pk": %(p_pk)s, "model": "m2m_through_regress.person", "fields": {"name": "Bob"}}, {"pk": %(g_pk)s, "model": "m2m_through_regress.group", "fields": {"name": "Roll"}}]""" % pks)
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="xml",
indent=2, stdout=out)
self.assertXMLEqual(out.getvalue().strip(), """
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="%(m_pk)s" model="m2m_through_regress.membership">
<field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
<field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
<field type="IntegerField" name="price">100</field>
</object>
<object pk="%(p_pk)s" model="m2m_through_regress.person">
<field type="CharField" name="name">Bob</field>
</object>
<object pk="%(g_pk)s" model="m2m_through_regress.group">
<field type="CharField" name="name">Roll</field>
</object>
</django-objects>
""".strip() % pks)
class ToFieldThroughTests(TestCase):
def setUp(self):
self.car = Car.objects.create(make="Toyota")
self.driver = Driver.objects.create(name="Ryan Briscoe")
CarDriver.objects.create(car=self.car, driver=self.driver)
# We are testing if wrong objects get deleted due to using wrong
# field value in m2m queries. So, it is essential that the pk
# numberings do not match.
# Create one intentionally unused driver to mix up the autonumbering
self.unused_driver = Driver.objects.create(name="Barney Gumble")
# And two intentionally unused cars.
self.unused_car1 = Car.objects.create(make="Trabant")
self.unused_car2 = Car.objects.create(make="Wartburg")
def test_to_field(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
def test_to_field_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
def test_to_field_clear_reverse(self):
self.driver.car_set.clear()
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
def test_to_field_clear(self):
self.car.drivers.clear()
self.assertQuerysetEqual(
self.car.drivers.all(), [])
# Low level tests for _add_items and _remove_items. We test these methods
# because .add/.remove aren't available for m2m fields with through, but
# through is the only way to set to_field currently. We do want to make
# sure these methods are ready if the ability to use .add or .remove with
# to_field relations is added some day.
def test_add(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
# Yikes - barney is going to drive...
self.car.drivers._add_items('car', 'driver', self.unused_driver)
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Barney Gumble>", "<Driver: Ryan Briscoe>"]
)
def test_add_null(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
nullcar.drivers._add_items('car', 'driver', self.unused_driver)
def test_add_related_null(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
self.car.drivers._add_items('car', 'driver', nulldriver)
def test_add_reverse(self):
car2 = Car.objects.create(make="Honda")
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._add_items('driver', 'car', car2)
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>", "<Car: Honda>"],
ordered=False
)
def test_add_null_reverse(self):
nullcar = Car.objects.create(make=None)
with self.assertRaises(ValueError):
self.driver.car_set._add_items('driver', 'car', nullcar)
def test_add_null_reverse_related(self):
nulldriver = Driver.objects.create(name=None)
with self.assertRaises(ValueError):
nulldriver.car_set._add_items('driver', 'car', self.car)
def test_remove(self):
self.assertQuerysetEqual(
self.car.drivers.all(),
["<Driver: Ryan Briscoe>"]
)
self.car.drivers._remove_items('car', 'driver', self.driver)
self.assertQuerysetEqual(
self.car.drivers.all(), [])
def test_remove_reverse(self):
self.assertQuerysetEqual(
self.driver.car_set.all(),
["<Car: Toyota>"]
)
self.driver.car_set._remove_items('driver', 'car', self.car)
self.assertQuerysetEqual(
self.driver.car_set.all(), [])
class ThroughLoadDataTestCase(TestCase):
fixtures = ["m2m_through"]
def test_sequence_creation(self):
"Check that sequences on an m2m_through are created for the through model, not a phantom auto-generated m2m table. Refs #11107"
out = StringIO()
management.call_command("dumpdata", "m2m_through_regress", format="json", stdout=out)
self.assertJSONEqual(out.getvalue().strip(), """[{"pk": 1, "model": "m2m_through_regress.usermembership", "fields": {"price": 100, "group": 1, "user": 1}}, {"pk": 1, "model": "m2m_through_regress.person", "fields": {"name": "Guido"}}, {"pk": 1, "model": "m2m_through_regress.group", "fields": {"name": "Python Core Group"}}]""")
|
buguelos/odoo
|
refs/heads/master
|
yowsup/ConnectionIO/bintreenode.py
|
4
|
'''
Copyright (c) <2012> Tarek Galal <tare2.galal@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from ..Common.debugger import Debugger
from ..Common.datastructures import ByteArray
from ..Common.constants import Constants
from .protocoltreenode import ProtocolTreeNode
from .ioexceptions import InvalidReadException
class BinTreeNodeReader():
def __init__(self,inputstream):
Debugger.attach(self)
self.inputKey = None
self._d('Reader init');
self.tokenMap = Constants.dictionary;
self.rawIn = inputstream;
self.inn = ByteArray();
self.buf = []#bytearray(1024);
self.bufSize = 0;
self.readSize = 1;
def readStanza(self):
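        # Header layout: the top 4 bits of the first byte are flags (bit 3 set
        # means the payload is encrypted); the following two bytes are read as
        # the stanza size.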
num = self.readInt8(self.rawIn)
stanzaSize = self.readInt16(self.rawIn,1);
header = (num << 16) + stanzaSize#self.readInt24(self.rawIn)
flags = (header >> 20);
#stanzaSize = ((header & 0xF0000) >> 16) | ((header & 0xFF00) >> 8) | (header & 0xFF);
isEncrypted = ((flags & 8) != 0)
self.fillBuffer(stanzaSize);
if self.inputKey is not None and isEncrypted:
#self.inn.buf = bytearray(self.inn.buf)
self.inn.buf = self.inputKey.decodeMessage(self.inn.buf, 0, 4, len(self.inn.buf)-4)[4:]
def streamStart(self):
self.readStanza();
tag = self.inn.read();
size = self.readListSize(tag);
tag = self.inn.read();
if tag != 1:
raise Exception("expecting STREAM_START in streamStart");
attribCount = (size - 2 + size % 2) / 2;
self.readAttributes(attribCount);
def readInt8(self,i):
return i.read();
def readInt16(self,i,socketOnly=0):
intTop = i.read(socketOnly);
intBot = i.read(socketOnly);
#Utilities.debug(str(intTop)+"------------"+str(intBot));
value = (intTop << 8) + intBot;
if value is not None:
return value;
else:
return "";
def readInt24(self,i):
int1 = i.read();
int2 = i.read();
int3 = i.read();
value = (int1 << 16) + (int2 << 8) + (int3 << 0);
return value;
def readListSize(self,token):
size = 0;
if token == 0:
size = 0;
else:
if token == 248:
size = self.readInt8(self.inn);
else:
if token == 249:
size = self.readInt16(self.inn);
else:
#size = self.readInt8(self.inn);
raise Exception("invalid list size in readListSize: token " + str(token));
return size;
def readAttributes(self,attribCount):
attribs = {};
for i in range(0, int(attribCount)):
key = self.readString(self.inn.read());
value = self.readString(self.inn.read());
attribs[key]=value;
return attribs;
def getToken(self,token):
if (token >= 0 and token < len(self.tokenMap)):
ret = self.tokenMap[token];
else:
raise Exception("invalid token/length in getToken %i "%token);
return ret;
def readString(self,token):
if token == -1:
raise Exception("-1 token in readString");
if token > 4 and token < 245:
return self.getToken(token);
if token == 0:
return None;
if token == 252:
size8 = self.readInt8(self.inn);
buf8 = [0] * size8;
self.fillArray(buf8,len(buf8),self.inn);
#print self.inn.buf;
return "".join(map(chr, buf8));
#return size8;
if token == 253:
size24 = self.readInt24(self.inn);
buf24 = [0] * size24;
self.fillArray(buf24,len(buf24),self.inn);
return "".join(map(chr, buf24));
if token == 254:
token = self.inn.read();
return self.getToken(245+token);
if token == 250:
user = self.readString(self.inn.read());
server = self.readString(self.inn.read());
if user is not None and server is not None:
return user + "@" + server;
if server is not None:
return server;
raise Exception("readString couldn't reconstruct jid");
raise Exception("readString couldn't match token "+str(token));
def nextTree(self):
self.inn.buf = [];
self.readStanza();
ret = self.nextTreeInternal();
self._d("Incoming")
if ret is not None:
if '<picture type="' in ret.toString():
self._d("<Picture!!!>");
else:
self._d("\n%s"%ret.toString());
return ret;
def fillBuffer(self,stanzaSize):
#if len(self.buf) < stanzaSize:
# newsize = stanzaSize#max(len(self.buf)*3/2,stanzaSize);
self.buf = [0 for i in range(0,stanzaSize)]
self.bufSize = stanzaSize;
self.fillArray(self.buf, stanzaSize, self.rawIn);
self.inn = ByteArray();
self.inn.write(self.buf);
#this.in = new ByteArrayInputStream(this.buf, 0, stanzaSize);
#self.inn.setReadSize(stanzaSize);
#Utilities.debug(str(len(self.buf))+":::"+str(stanzaSize));
def fillArray(self, buf,length,inputstream):
count = 0;
while count < length:
count+=inputstream.read2(buf,count,length-count);
def nextTreeInternal(self):
b = self.inn.read();
size = self.readListSize(b);
b = self.inn.read();
if b == 2:
return None;
tag = self.readString(b);
if size == 0 or tag is None:
raise InvalidReadException("nextTree sees 0 list or null tag");
attribCount = (size - 2 + size%2)/2;
attribs = self.readAttributes(attribCount);
if size % 2 ==1:
return ProtocolTreeNode(tag,attribs);
b = self.inn.read();
if self.isListTag(b):
return ProtocolTreeNode(tag,attribs,self.readList(b));
return ProtocolTreeNode(tag,attribs,None,self.readString(b));
def readList(self,token):
size = self.readListSize(token);
listx = []
for i in range(0,size):
listx.append(self.nextTreeInternal());
return listx;
def isListTag(self,b):
return (b == 248) or (b == 0) or (b == 249);
class BinTreeNodeWriter():
STREAM_START = 1;
STREAM_END = 2;
LIST_EMPTY = 0;
LIST_8 = 248;
LIST_16 = 249;
JID_PAIR = 250;
BINARY_8 = 252;
BINARY_24 = 253;
TOKEN_8 = 254;
#socket out; #FunXMPP.WAByteArrayOutputStream
#socket realOut;
tokenMap={}
def __init__(self,o):
Debugger.attach(self)
self.outputKey = None
dictionary = Constants.dictionary
self.realOut = o;
#self.out = o;
self.tokenMap = {}
self.out = ByteArray();
#this.tokenMap = new Hashtable(dictionary.length);
for i in range(0,len(dictionary)):
if dictionary[i] is not None:
self.tokenMap[dictionary[i]]=i
#Utilities.debug(self.tokenMap);
'''
for (int i = 0; i < dictionary.length; i++)
if (dictionary[i] != null)
this.tokenMap.put(dictionary[i], new Integer(i));
'''
def streamStart(self,domain,resource):
self.realOut.write(87);
self.realOut.write(65);
self.realOut.write(1);
self.realOut.write(2);
streamOpenAttributes = {"to":domain,"resource":resource};
self.writeListStart(len(streamOpenAttributes )*2+1);
self.out.write(1);
self.writeAttributes(streamOpenAttributes);
self.flushBuffer(False);
def write(self, node,needsFlush = 0):
if node is None:
self.out.write(0);
else:
self._d("Outgoing");
self._d("\n %s" % node.toString());
self.writeInternal(node);
self.flushBuffer(needsFlush);
self.out.buf = [];
def processBuffer(self):
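        # Prepend a 3-byte header to the buffered stanza: the top 4 bits carry
        # the encryption flag and the low 20 bits carry the payload length.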
buf = self.out.getBuffer()
prep = [0,0,0]
prep.extend(buf)
length1 = len(self.out.buf)
num = 0
if self.outputKey is not None:
num = 1
prep.extend([0,0,0,0])
length1 += 4
#prep = bytearray(prep)
res = self.outputKey.encodeMessage(prep, len(prep) - 4 , 3, len(prep)-4-3)
res[0] = ((num << 4) | (length1 & 16711680) >> 16) % 256
res[1] = ((length1 & 65280) >> 8) % 256
res[2] = (length1 & 255) % 256
self.out.buf = res
return
else:
prep[0] = ((num << 4) | (length1 & 16711680) >> 16) % 256
prep[1] = ((length1 & 65280) >> 8) % 256
prep[2] = (length1 & 255) % 256
self.out.buf = prep
def flushBuffer(self, flushNetwork):
'''define flush buffer here '''
self.processBuffer()
size = len(self.out.getBuffer());
if (size & 0xFFFFF) != size:
raise Exception("Buffer too large: "+str(size));
#self.realOut.write(0)
#self.writeInt16(size,self.realOut);
self.realOut.write(self.out.getBuffer());
self.out.reset();
if flushNetwork:
self.realOut.flush();
def writeInternal(self,node):
'''define write internal here'''
x = 1 + (0 if node.attributes is None else len(node.attributes) * 2) + (0 if node.children is None else 1) + (0 if node.data is None else 1);
self.writeListStart(1 + (0 if node.attributes is None else len(node.attributes) * 2) + (0 if node.children is None else 1) + (0 if node.data is None else 1));
self.writeString(node.tag);
self.writeAttributes(node.attributes);
if node.data is not None:
self.writeBytes(node.data)
'''if type(node.data) == bytearray:
self.writeBytes(node.data);
else:
self.writeBytes(bytearray(node.data));
'''
if node.children is not None:
self.writeListStart(len(node.children));
for c in node.children:
self.writeInternal(c);
def writeAttributes(self,attributes):
if attributes is not None:
for key, value in attributes.items():
self.writeString(key);
self.writeString(value);
def writeBytes(self,bytes):
length = len(bytes);
if length >= 256:
self.out.write(253);
self.writeInt24(length);
else:
self.out.write(252);
self.writeInt8(length);
for b in bytes:
self.out.write(b);
def writeInt8(self,v):
self.out.write(v & 0xFF);
def writeInt16(self,v, o = None):
if o is None:
o = self.out;
o.write((v & 0xFF00) >> 8);
o.write((v & 0xFF) >> 0);
def writeInt24(self,v):
self.out.write((v & 0xFF0000) >> 16);
self.out.write((v & 0xFF00) >> 8);
self.out.write((v & 0xFF) >> 0);
def writeListStart(self,i):
#Utilities.debug("list start "+str(i));
if i == 0:
self.out.write(0)
elif i < 256:
self.out.write(248);
self.writeInt8(i);#f
else:
self.out.write(249);
#write(i >> 8 & 0xFF);
self.writeInt16(i); #write(i >> 8 & 0xFF);
def writeToken(self, intValue):
if intValue < 245:
self.out.write(intValue)
elif intValue <=500:
self.out.write(254)
self.out.write(intValue - 245);
def writeString(self,tag):
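        # Prefer a dictionary token; otherwise try to split the tag into a
        # user@server JID pair; finally fall back to writing the raw bytes.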
try:
key = self.tokenMap[tag];
self.writeToken(key);
except KeyError:
try:
at = '@'.encode() if type(tag) == bytes else '@'
atIndex = tag.index(at);
if atIndex < 1:
raise ValueError("atIndex < 1");
else:
server = tag[atIndex+1:];
user = tag[0:atIndex];
#Utilities.debug("GOT "+user+"@"+server);
self.writeJid(user, server);
except ValueError:
self.writeBytes(self.encodeString(tag));
def encodeString(self, string):
res = [];
if type(string) == bytes:
for char in string:
res.append(char)
else:
for char in string:
res.append(ord(char))
return res;
def writeJid(self,user,server):
self.out.write(250);
if user is not None:
self.writeString(user);
else:
self.writeToken(0);
self.writeString(server);
def getChild(self,string):
if self.children is None:
return None
for c in self.children:
if string == c.tag:
return c;
return None;
def getAttributeValue(self,string):
if self.attributes is None:
return None;
try:
val = self.attributes[string]
return val;
except KeyError:
return None;
|
40223211/2015cd_midterm2
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/xml/dom/minicompat.py
|
781
|
"""Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
import xml.dom
StringTypes = (str,)
class NodeList(list):
__slots__ = ()
def item(self, index):
if 0 <= index < len(self):
return self[index]
def _get_length(self):
return len(self)
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def __getstate__(self):
return list(self)
def __setstate__(self, state):
self[:] = state
class EmptyNodeList(tuple):
__slots__ = ()
def __add__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def __radd__(self, other):
NL = NodeList()
NL.extend(other)
return NL
def item(self, index):
return None
def _get_length(self):
return 0
def _set_length(self, value):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute 'length'")
length = property(_get_length, _set_length,
doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
get = getattr(klass, ("_get_" + name))
def set(self, value, name=name):
raise xml.dom.NoModificationAllowedErr(
"attempt to modify read-only attribute " + repr(name))
assert not hasattr(klass, "_set_" + name), \
"expected not to find _set_" + name
prop = property(get, set, doc=doc)
setattr(klass, name, prop)
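# Illustrative sketch (not part of minidom): defproperty exposes a
# _get_<name> method as a read-only property, so assignment raises
# xml.dom.NoModificationAllowedErr. The `Sample` class here is hypothetical.
if __name__ == '__main__':
    class Sample(object):
        def _get_flavor(self):
            return "vanilla"
    defproperty(Sample, "flavor", doc="Read-only flavor attribute.")
    sample = Sample()
    print(sample.flavor)  # -> vanilla
    try:
        sample.flavor = "chocolate"
    except xml.dom.NoModificationAllowedErr as exc:
        print("write blocked:", exc)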
|
heitorlessa/serverless-encryption-workshop
|
refs/heads/master
|
lab4/compiled_lib/Crypto/SelfTest/Random/test__UserFriendlyRNG.py
|
103
|
# -*- coding: utf-8 -*-
# Self-tests for the user-friendly Crypto.Random interface
#
# Written in 2013 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for generic Crypto.Random stuff """
from __future__ import nested_scopes
__revision__ = "$Id$"
import binascii
import pprint
import unittest
import os
import time
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *
try:
import multiprocessing
except ImportError:
multiprocessing = None
import Crypto.Random._UserFriendlyRNG
import Crypto.Random.random
class RNGForkTest(unittest.TestCase):
def _get_reseed_count(self):
"""
Get `FortunaAccumulator.reseed_count`, the global count of the
number of times that the PRNG has been reseeded.
"""
rng_singleton = Crypto.Random._UserFriendlyRNG._get_singleton()
rng_singleton._lock.acquire()
try:
return rng_singleton._fa.reseed_count
finally:
rng_singleton._lock.release()
def runTest(self):
# Regression test for CVE-2013-1445. We had a bug where, under the
# right conditions, two processes might see the same random sequence.
if sys.platform.startswith('win'): # windows can't fork
assert not hasattr(os, 'fork') # ... right?
return
# Wait 150 ms so that we don't trigger the rate-limit prematurely.
time.sleep(0.15)
reseed_count_before = self._get_reseed_count()
# One or both of these calls together should trigger a reseed right here.
Crypto.Random._UserFriendlyRNG._get_singleton().reinit()
Crypto.Random.get_random_bytes(1)
reseed_count_after = self._get_reseed_count()
self.assertNotEqual(reseed_count_before, reseed_count_after) # sanity check: test should reseed parent before forking
rfiles = []
for i in range(10):
rfd, wfd = os.pipe()
if os.fork() == 0:
# child
os.close(rfd)
f = os.fdopen(wfd, "wb")
Crypto.Random.atfork()
data = Crypto.Random.get_random_bytes(16)
f.write(data)
f.close()
os._exit(0)
# parent
os.close(wfd)
rfiles.append(os.fdopen(rfd, "rb"))
results = []
results_dict = {}
for f in rfiles:
data = binascii.hexlify(f.read())
results.append(data)
results_dict[data] = 1
f.close()
if len(results) != len(results_dict.keys()):
raise AssertionError("RNG output duplicated across fork():\n%s" %
(pprint.pformat(results)))
# For RNGMultiprocessingForkTest
def _task_main(q):
a = Crypto.Random.get_random_bytes(16)
time.sleep(0.1) # wait 100 ms
b = Crypto.Random.get_random_bytes(16)
q.put(binascii.b2a_hex(a))
q.put(binascii.b2a_hex(b))
q.put(None) # Wait for acknowledgment
class RNGMultiprocessingForkTest(unittest.TestCase):
def runTest(self):
# Another regression test for CVE-2013-1445. This is basically the
# same as RNGForkTest, but less compatible with old versions of Python,
# and a little easier to read.
n_procs = 5
manager = multiprocessing.Manager()
queues = [manager.Queue(1) for i in range(n_procs)]
# Reseed the pool
time.sleep(0.15)
Crypto.Random._UserFriendlyRNG._get_singleton().reinit()
Crypto.Random.get_random_bytes(1)
# Start the child processes
pool = multiprocessing.Pool(processes=n_procs, initializer=Crypto.Random.atfork)
map_result = pool.map_async(_task_main, queues)
# Get the results, ensuring that no pool processes are reused.
aa = [queues[i].get(30) for i in range(n_procs)]
bb = [queues[i].get(30) for i in range(n_procs)]
res = list(zip(aa, bb))
# Shut down the pool
map_result.get(30)
pool.close()
pool.join()
# Check that the results are unique
if len(set(aa)) != len(aa) or len(set(res)) != len(res):
raise AssertionError("RNG output duplicated across fork():\n%s" %
(pprint.pformat(res),))
def get_tests(config={}):
tests = []
tests += [RNGForkTest()]
if multiprocessing is not None:
tests += [RNGMultiprocessingForkTest()]
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
saumishr/django
|
refs/heads/master
|
tests/modeltests/proxy_model_inheritance/app1/__init__.py
|
12133432
| |
michal-ruzicka/archivematica
|
refs/heads/cesnet-ltp-pilot/1.4.x
|
src/dashboard/src/components/ingest/__init__.py
|
12133432
| |
adamjmcgrath/glancydesign
|
refs/heads/master
|
src/django-nonrel/tests/regressiontests/db_typecasts/models.py
|
12133432
| |
emakis/erpnext
|
refs/heads/develop
|
erpnext/docs/user/manual/en/stock/accounting-of-inventory-stock/__init__.py
|
12133432
| |
vizual54/MissionPlanner
|
refs/heads/master
|
Lib/unittest/main.py
|
53
|
"""Unittest main program"""
import sys
import os
import types
from . import loader, runner
from .signals import installHandler
__unittest = True
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s module.TestClass - run tests from module.TestClass
%(progName)s module.Class.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = USAGE_FROM_MODULE
# defaults for testing
failfast = catchbreak = buffer = progName = None
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=None, testLoader=loader.defaultTestLoader,
exit=True, verbosity=1, failfast=None, catchbreak=None,
buffer=None):
if isinstance(module, basestring):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.failfast = failfast
self.catchbreak = catchbreak
self.verbosity = verbosity
self.buffer = buffer
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
if msg:
print msg
usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
'buffer': ''}
if self.failfast != False:
usage['failfast'] = FAILFAST
if self.catchbreak != False:
usage['catchbreak'] = CATCHBREAK
if self.buffer != False:
usage['buffer'] = BUFFEROUTPUT
print self.USAGE % usage
sys.exit(2)
def parseArgs(self, argv):
if len(argv) > 1 and argv[1].lower() == 'discover':
self._do_discovery(argv[2:])
return
import getopt
long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer']
try:
options, args = getopt.getopt(argv[1:], 'hHvqfcb', long_opts)
for opt, value in options:
if opt in ('-h','-H','--help'):
self.usageExit()
if opt in ('-q','--quiet'):
self.verbosity = 0
if opt in ('-v','--verbose'):
self.verbosity = 2
if opt in ('-f','--failfast'):
if self.failfast is None:
self.failfast = True
# Should this raise an exception if -f is not valid?
if opt in ('-c','--catch'):
if self.catchbreak is None:
self.catchbreak = True
# Should this raise an exception if -c is not valid?
if opt in ('-b','--buffer'):
if self.buffer is None:
self.buffer = True
# Should this raise an exception if -b is not valid?
if len(args) == 0 and self.defaultTest is None:
# createTests will load tests from self.module
self.testNames = None
elif len(args) > 0:
self.testNames = args
if __name__ == '__main__':
# to support python -m unittest ...
self.module = None
else:
self.testNames = (self.defaultTest,)
self.createTests()
except getopt.error, msg:
self.usageExit(msg)
def createTests(self):
if self.testNames is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def _do_discovery(self, argv, Loader=loader.TestLoader):
# handle command line args for test discovery
self.progName = '%s discover' % self.progName
import optparse
parser = optparse.OptionParser()
parser.prog = self.progName
parser.add_option('-v', '--verbose', dest='verbose', default=False,
help='Verbose output', action='store_true')
if self.failfast != False:
parser.add_option('-f', '--failfast', dest='failfast', default=False,
help='Stop on first fail or error',
action='store_true')
if self.catchbreak != False:
parser.add_option('-c', '--catch', dest='catchbreak', default=False,
help='Catch ctrl-C and display results so far',
action='store_true')
if self.buffer != False:
parser.add_option('-b', '--buffer', dest='buffer', default=False,
help='Buffer stdout and stderr during tests',
action='store_true')
parser.add_option('-s', '--start-directory', dest='start', default='.',
help="Directory to start discovery ('.' default)")
parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
help="Pattern to match tests ('test*.py' default)")
parser.add_option('-t', '--top-level-directory', dest='top', default=None,
help='Top level directory of project (defaults to start directory)')
options, args = parser.parse_args(argv)
if len(args) > 3:
self.usageExit()
for name, value in zip(('start', 'pattern', 'top'), args):
setattr(options, name, value)
# only set options from the parsing here
# if they weren't set explicitly in the constructor
if self.failfast is None:
self.failfast = options.failfast
if self.catchbreak is None:
self.catchbreak = options.catchbreak
if self.buffer is None:
self.buffer = options.buffer
if options.verbose:
self.verbosity = 2
start_dir = options.start
pattern = options.pattern
top_level_dir = options.top
loader = Loader()
self.test = loader.discover(start_dir, pattern, top_level_dir)
def runTests(self):
if self.catchbreak:
installHandler()
if self.testRunner is None:
self.testRunner = runner.TextTestRunner
if isinstance(self.testRunner, (type, types.ClassType)):
try:
testRunner = self.testRunner(verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer)
except TypeError:
# didn't accept the verbosity, buffer or failfast arguments
testRunner = self.testRunner()
else:
# it is assumed to be a TestRunner instance
testRunner = self.testRunner
self.result = testRunner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
main = TestProgram
|
BT-ojossen/bank-statement-reconcile
|
refs/heads/8.0
|
__unported__/account_statement_ofx_import/statement.py
|
15
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Pedro Manuel Baeza Romero
# Copyright 2013 Servicios Tecnológicos Avanzados
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import orm
class AccountStatementProfil(orm.Model):
_inherit = "account.statement.profile"
def _get_import_type_selection(self, cr, uid, context=None):
"""Inherited from parent to add parser."""
selection = super(AccountStatementProfil, self
)._get_import_type_selection(cr, uid,
context=context)
selection.append(('ofx_so', _('OFX - Open Financial Exchange')))
return selection
|
foobarbazblarg/stayclean
|
refs/heads/master
|
stayclean-2015-september/relapse.py
|
13
|
#!/usr/bin/python
import sys
import participantCollection
names = sys.argv[1::]
participantCollection = participantCollection.ParticipantCollection()
for name in names:
if participantCollection.hasParticipantNamed(name):
participant = participantCollection.participantNamed(name)
if participant.isStillIn:
participant.relapseNowIfNotAlready()
print "just relapsed " + name
else:
print name + " has already relapsed. Skipping."
else:
print "*** WARNING: " + name + " is not present in participants.txt"
participantCollection.save()
|
gtko/CouchPotatoServer
|
refs/heads/develop
|
couchpotato/core/media/_base/providers/nzb/binnewz/nzbindex.py
|
23
|
from bs4 import BeautifulSoup
from nzbdownloader import NZBDownloader, NZBGetURLSearchResult
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
from dateutil.parser import parse
import urllib
import time
log = CPLog(__name__)
class NZBIndex(NZBDownloader,NZBProvider, RSS):
urls = {
'download': 'https://www.nzbindex.nl/download/',
'search': 'http://www.nzbindex.com/rss/?%s',
}
http_time_between_calls = 5 # Seconds
def search(self, filename, minSize, newsgroup=None):
q = filename
arguments = tryUrlencode({
'q': q,
'age': Env.setting('retention', 'nzb'),
'sort': 'agedesc',
'minsize': minSize,
'rating': 1,
'max': 250,
'more': 1,
'complete': 1,
})
nzbs = self.getRSSData(self.urls['search'] % arguments)
nzbid = None
for nzb in nzbs:
enclosure = self.getElement(nzb, 'enclosure').attrib
nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4])
nzbid = nzbindex_id
age = self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple())))
sizeInMegs = tryInt(enclosure['length']) / 1024 / 1024
downloadUrl = enclosure['url']
detailURL = enclosure['url'].replace('/download/', '/release/')
if nzbid:
return NZBGetURLSearchResult(self, downloadUrl, sizeInMegs, detailURL, age, nzbid)
|
jiajiax/crosswalk-test-suite
|
refs/heads/master
|
webapi/tct-xmlhttprequest-w3c-tests/inst.xpk.py
|
456
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    # No need to handle a timeout in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
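    # pkgcmd must run as the target user with the session DBus address
    # exported, so wrap such commands in a login shell for that user.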
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user)
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex + 1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t xpk -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
if not doRemoteCopy(
os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0:
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str(
userid)
else:
print "[Error] cmd commands error : %s" % str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
weolar/miniblink49
|
refs/heads/master
|
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/wpt/wpt/tools/wptserve/wptserve/logger.py
|
489
|
class NoOpLogger(object):
def critical(self, msg):
pass
def error(self, msg):
pass
def info(self, msg):
pass
def warning(self, msg):
pass
def debug(self, msg):
pass
logger = NoOpLogger()
_set_logger = False
def set_logger(new_logger):
global _set_logger
if _set_logger:
raise Exception("Logger must be set at most once")
global logger
logger = new_logger
_set_logger = True
def get_logger():
return logger
|
kutenai/django
|
refs/heads/master
|
django/conf/locale/bn/formats.py
|
575
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F, Y'
TIME_FORMAT = 'g:i A'
# DATETIME_FORMAT =
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M, Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
# NUMBER_GROUPING =
|
feroda/odoo
|
refs/heads/pos-multicurrency
|
addons/project_timesheet/project_timesheet.py
|
237
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
class project_project(osv.osv):
_inherit = 'project.project'
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
res = super(project_project, self).onchange_partner_id(cr, uid, ids, part, context)
if part and res and ('value' in res):
# set Invoice Task Work to 100%
data_obj = self.pool.get('ir.model.data')
data_id = data_obj._get_id(cr, uid, 'hr_timesheet_invoice', 'timesheet_invoice_factor1')
if data_id:
factor_id = data_obj.browse(cr, uid, data_id).res_id
res['value'].update({'to_invoice': factor_id})
return res
_defaults = {
'use_timesheets': True,
}
def open_timesheets(self, cr, uid, ids, context=None):
""" open Timesheets view """
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
project = self.browse(cr, uid, ids[0], context)
view_context = {
'search_default_account_id': [project.analytic_account_id.id],
'default_account_id': project.analytic_account_id.id,
}
help = _("""<p class="oe_view_nocontent_create">Record your timesheets for the project '%s'.</p>""") % (project.name,)
try:
if project.to_invoice and project.partner_id:
help+= _("""<p>Timesheets on this project may be invoiced to %s, according to the terms defined in the contract.</p>""" ) % (project.partner_id.name,)
except:
            # if the user does not have access rights on the partner
pass
res = mod_obj.get_object_reference(cr, uid, 'hr_timesheet', 'act_hr_timesheet_line_evry1_all_form')
id = res and res[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
result['name'] = _('Timesheets')
result['context'] = view_context
result['help'] = help
return result
class project_work(osv.osv):
_inherit = "project.task.work"
def get_user_related_details(self, cr, uid, user_id):
res = {}
emp_obj = self.pool.get('hr.employee')
emp_id = emp_obj.search(cr, uid, [('user_id', '=', user_id)])
if not emp_id:
user_name = self.pool.get('res.users').read(cr, uid, [user_id], ['name'])[0]['name']
raise osv.except_osv(_('Bad Configuration!'),
_('Please define employee for user "%s". You must create one.')% (user_name,))
emp = emp_obj.browse(cr, uid, emp_id[0])
if not emp.product_id:
raise osv.except_osv(_('Bad Configuration!'),
_('Please define product and product category property account on the related employee.\nFill in the HR Settings tab of the employee form.'))
if not emp.journal_id:
raise osv.except_osv(_('Bad Configuration!'),
_('Please define journal on the related employee.\nFill in the HR Settings tab of the employee form.'))
acc_id = emp.product_id.property_account_expense.id
if not acc_id:
acc_id = emp.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Bad Configuration!'),
_('Please define product and product category property account on the related employee.\nFill in the HR Settings of the employee form.'))
res['product_id'] = emp.product_id.id
res['journal_id'] = emp.journal_id.id
res['general_account_id'] = acc_id
res['product_uom_id'] = emp.product_id.uom_id.id
return res
def _create_analytic_entries(self, cr, uid, vals, context):
"""Create the hr analytic timesheet from project task work"""
timesheet_obj = self.pool['hr.analytic.timesheet']
task_obj = self.pool['project.task']
vals_line = {}
timeline_id = False
acc_id = False
task_obj = task_obj.browse(cr, uid, vals['task_id'], context=context)
result = self.get_user_related_details(cr, uid, vals.get('user_id', uid))
vals_line['name'] = '%s: %s' % (tools.ustr(task_obj.name), tools.ustr(vals['name'] or '/'))
vals_line['user_id'] = vals['user_id']
vals_line['product_id'] = result['product_id']
if vals.get('date'):
if len(vals['date']) > 10:
timestamp = datetime.datetime.strptime(vals['date'], tools.DEFAULT_SERVER_DATETIME_FORMAT)
ts = fields.datetime.context_timestamp(cr, uid, timestamp, context)
vals_line['date'] = ts.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
else:
vals_line['date'] = vals['date']
# Calculate quantity based on employee's product's uom
vals_line['unit_amount'] = vals['hours']
default_uom = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.project_time_mode_id.id
if result['product_uom_id'] != default_uom:
vals_line['unit_amount'] = self.pool['product.uom']._compute_qty(cr, uid, default_uom, vals['hours'], result['product_uom_id'])
acc_id = task_obj.project_id and task_obj.project_id.analytic_account_id.id or acc_id
if acc_id:
vals_line['account_id'] = acc_id
res = timesheet_obj.on_change_account_id(cr, uid, False, acc_id)
if res.get('value'):
vals_line.update(res['value'])
vals_line['general_account_id'] = result['general_account_id']
vals_line['journal_id'] = result['journal_id']
vals_line['amount'] = 0.0
vals_line['product_uom_id'] = result['product_uom_id']
amount = vals_line['unit_amount']
prod_id = vals_line['product_id']
unit = False
timeline_id = timesheet_obj.create(cr, uid, vals=vals_line, context=context)
# Compute based on pricetype
amount_unit = timesheet_obj.on_change_unit_amount(cr, uid, timeline_id,
prod_id, amount, False, unit, vals_line['journal_id'], context=context)
if amount_unit and 'amount' in amount_unit.get('value',{}):
updv = { 'amount': amount_unit['value']['amount'] }
timesheet_obj.write(cr, uid, [timeline_id], updv, context=context)
return timeline_id
def create(self, cr, uid, vals, *args, **kwargs):
context = kwargs.get('context', {})
if not context.get('no_analytic_entry',False):
vals['hr_analytic_timesheet_id'] = self._create_analytic_entries(cr, uid, vals, context=context)
return super(project_work,self).create(cr, uid, vals, *args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
"""
When a project task work gets updated, handle its hr analytic timesheet.
"""
if context is None:
context = {}
timesheet_obj = self.pool.get('hr.analytic.timesheet')
uom_obj = self.pool.get('product.uom')
result = {}
if isinstance(ids, (long, int)):
ids = [ids]
for task in self.browse(cr, uid, ids, context=context):
line_id = task.hr_analytic_timesheet_id
if not line_id:
# if a record is deleted from timesheet, the line_id will become
# null because of the foreign key on-delete=set null
continue
vals_line = {}
if 'name' in vals:
vals_line['name'] = '%s: %s' % (tools.ustr(task.task_id.name), tools.ustr(vals['name'] or '/'))
if 'user_id' in vals:
vals_line['user_id'] = vals['user_id']
if 'date' in vals:
vals_line['date'] = vals['date'][:10]
if 'hours' in vals:
vals_line['unit_amount'] = vals['hours']
prod_id = vals_line.get('product_id', line_id.product_id.id) # False may be set
# Put user related details in analytic timesheet values
details = self.get_user_related_details(cr, uid, vals.get('user_id', task.user_id.id))
for field in ('product_id', 'general_account_id', 'journal_id', 'product_uom_id'):
if details.get(field, False):
vals_line[field] = details[field]
# Check if user's default UOM differs from product's UOM
user_default_uom_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.project_time_mode_id.id
if details.get('product_uom_id', False) and details['product_uom_id'] != user_default_uom_id:
vals_line['unit_amount'] = uom_obj._compute_qty(cr, uid, user_default_uom_id, vals['hours'], details['product_uom_id'])
# Compute based on pricetype
amount_unit = timesheet_obj.on_change_unit_amount(cr, uid, line_id.id,
prod_id=prod_id, company_id=False,
unit_amount=vals_line['unit_amount'], unit=False, journal_id=vals_line['journal_id'], context=context)
if amount_unit and 'amount' in amount_unit.get('value',{}):
vals_line['amount'] = amount_unit['value']['amount']
if vals_line:
self.pool.get('hr.analytic.timesheet').write(cr, uid, [line_id.id], vals_line, context=context)
return super(project_work,self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, *args, **kwargs):
hat_obj = self.pool.get('hr.analytic.timesheet')
hat_ids = []
for task in self.browse(cr, uid, ids):
if task.hr_analytic_timesheet_id:
hat_ids.append(task.hr_analytic_timesheet_id.id)
# Delete entry from timesheet too while deleting entry to task.
if hat_ids:
hat_obj.unlink(cr, uid, hat_ids, *args, **kwargs)
return super(project_work,self).unlink(cr, uid, ids, *args, **kwargs)
_columns={
'hr_analytic_timesheet_id':fields.many2one('hr.analytic.timesheet','Related Timeline Id', ondelete='set null'),
}
class task(osv.osv):
_inherit = "project.task"
def unlink(self, cr, uid, ids, *args, **kwargs):
for task_obj in self.browse(cr, uid, ids, *args, **kwargs):
if task_obj.work_ids:
work_ids = [x.id for x in task_obj.work_ids]
self.pool.get('project.task.work').unlink(cr, uid, work_ids, *args, **kwargs)
return super(task,self).unlink(cr, uid, ids, *args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
task_work_obj = self.pool['project.task.work']
acc_id = False
missing_analytic_entries = {}
if vals.get('project_id',False) or vals.get('name',False):
vals_line = {}
hr_anlytic_timesheet = self.pool.get('hr.analytic.timesheet')
if vals.get('project_id',False):
project_obj = self.pool.get('project.project').browse(cr, uid, vals['project_id'], context=context)
acc_id = project_obj.analytic_account_id.id
for task_obj in self.browse(cr, uid, ids, context=context):
if len(task_obj.work_ids):
for task_work in task_obj.work_ids:
if not task_work.hr_analytic_timesheet_id:
if acc_id :
# missing timesheet activities to generate
missing_analytic_entries[task_work.id] = {
'name' : task_work.name,
'user_id' : task_work.user_id.id,
'date' : task_work.date,
'account_id': acc_id,
'hours' : task_work.hours,
'task_id' : task_obj.id
}
continue
line_id = task_work.hr_analytic_timesheet_id.id
if vals.get('project_id',False):
vals_line['account_id'] = acc_id
if vals.get('name',False):
vals_line['name'] = '%s: %s' % (tools.ustr(vals['name']), tools.ustr(task_work.name) or '/')
hr_anlytic_timesheet.write(cr, uid, [line_id], vals_line, {})
res = super(task,self).write(cr, uid, ids, vals, context)
for task_work_id, analytic_entry in missing_analytic_entries.items():
timeline_id = task_work_obj._create_analytic_entries(cr, uid, analytic_entry, context=context)
task_work_obj.write(cr, uid, task_work_id, {'hr_analytic_timesheet_id' : timeline_id}, context=context)
return res
class res_partner(osv.osv):
_inherit = 'res.partner'
def unlink(self, cursor, user, ids, context=None):
        project_ids = self.pool.get('project.project').search(cursor, user, [('partner_id', 'in', ids)])
        if project_ids:
            raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a partner which is assigned to a project, but you can uncheck the active box.'))
return super(res_partner,self).unlink(cursor, user, ids,
context=context)
class account_analytic_line(osv.osv):
_inherit = "account.analytic.line"
def get_product(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
emp_ids = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
if emp_ids:
employee = emp_obj.browse(cr, uid, emp_ids, context=context)[0]
if employee.product_id:return employee.product_id.id
return False
_defaults = {'product_id': get_product,}
def on_change_account_id(self, cr, uid, ids, account_id):
res = {}
if not account_id:
return res
res.setdefault('value',{})
acc = self.pool.get('account.analytic.account').browse(cr, uid, account_id)
st = acc.to_invoice.id
res['value']['to_invoice'] = st or False
if acc.state == 'close' or acc.state == 'cancelled':
            raise osv.except_osv(_('Invalid Analytic Account!'), _('You cannot select an Analytic Account which is in the Closed or Cancelled state.'))
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
lantz/nox-tutorial
|
refs/heads/tutorial-destiny
|
src/nox/netapps/spanning_tree/spanning_tree.py
|
5
|
# ----------------------------------------------------------------------
# Spanning tree -- software based
# Authors: Glen Gibb <grg@stanford.edu>
# Date: 08/08/08
#
# Changes:
#
# Notes: This won't work correctly if there are more than 2 switches on
# any one "link". ie. if we were on a broadcast network or there was an
# extra switch in the middle
# ----------------------------------------------------------------------
import array
import struct
import time
from nox.coreapps.pyrt.pycomponent import CONTINUE, STOP
from nox.netapps.bindings_storage.pybindings_storage import pybindings_storage
from nox.lib.core import *
from nox.lib.util import *
from nox.lib.packet.packet_utils import longlong_to_octstr
from nox.lib.packet.ethernet import ethernet, ETHER_ANY, ETHER_BROADCAST
from nox.lib.netinet import *
import nox.lib.openflow as openflow
# Needed by update_lldp_send_period() below, which adjusts discovery's LLDP send rate.
import nox.netapps.discovery.discovery
import logging
from nox.coreapps.messenger.pyjsonmsgevent import JSONMsg_event
import simplejson as json
# How often should we rebuild the flood ports?
FLOOD_PORT_UPDATE_INTERVAL = 5
# Hold time before allowing floods out a switch
FLOOD_WAIT_TIME = 10
# Minimum LLDP packet send period
MIN_LLDP_SEND_PERIOD = 0.05
log = logging.getLogger('spanning_tree')
class Spanning_Tree(Component):
def __init__(self, ctxt):
Component.__init__(self, ctxt)
self.datapaths = {}
self.debug = True
self.ip_bypass = set()
self.mac_bypass = set()
self.roots = set()
self.port_count = 0
# dict {dp:[stp_ports]} holding the current ST state (ports, root)
# check against this every time the spanning tree is updated, to see if it changed
self.current_stp_ports = {}
def getInterface(self):
return str(Spanning_Tree)
def debugPrint(self, text):
if (self.debug):
log.debug(text)
def install(self):
# Ensure LLDP queries occur more frequently by default.
self.update_lldp_send_period()
# Register to learn about datapath join and leave events
self.register_for_datapath_join ( self.dp_join )
self.register_for_datapath_leave( self.dp_leave )
self.register_for_port_status( self.handle_port_status )
self.register_for_packet_in( self.handle_packet_in)
self.bindings = self.resolve(pybindings_storage)
self.post_callback(1, self.update_spanning_tree)
self.debugPrint("Spanning tree installed\n")
# Register for json messages from the gui
self.register_handler( JSONMsg_event.static_get_name(), \
lambda event: self.handle_jsonmsg_event(event))
# Subscribers for json messages
# (e.g. self.subscribers["stp_ports"] = [guistream])
self.subscribers = {}
def dp_join(self, dp, stats):
self.debugPrint("Datapath join: "+longlong_to_octstr(dp)[6:])
if (not self.datapaths.has_key(dp)):
# Process the port information returned by the switch
# Build a list of ports
now = time.time()
ports = {}
for port in stats['ports']:
ports[port[core.PORT_NO]] = port
if port[core.PORT_NO] <= openflow.OFPP_MAX:
port['enable_time'] = now + FLOOD_WAIT_TIME
port['flood'] = False
hw_addr = "\0\0" + port[core.HW_ADDR]
hw_addr = struct.unpack("!q", hw_addr)[0]
self.ctxt.send_port_mod(dp, port[core.PORT_NO], ethernetaddr(hw_addr),
openflow.OFPPC_NO_FLOOD, openflow.OFPPC_NO_FLOOD)
# Record the datapath
self.datapaths[dp] = ports
self.port_count += len(ports)
# Update the LLDP send period
self.update_lldp_send_period()
return CONTINUE
def dp_leave(self, dp):
self.debugPrint("Datapath leave, "+longlong_to_octstr(dp)[6:])
if (self.datapaths.has_key(dp)):
# Decrement port count by # of ports in datapath that is leaving
self.port_count -= len(self.datapaths[dp])
del self.datapaths[dp]
return CONTINUE
def update_spanning_tree(self):
'''Get the links to update the spanning tree
'''
self.bindings.get_all_links(self.update_spanning_tree_callback)
self.post_callback(FLOOD_PORT_UPDATE_INTERVAL, self.update_spanning_tree)
def update_spanning_tree_callback(self, links):
'''Callback called by get_all_links to process the set of links.
Currently:
- updates the flood ports to build a spanning tree
Note: each link probably appears twice (once for each direction)
As a temporary hack to deal with the fact that we don't have
spanning tree support in NOX we build a set of "flood-ports". Each
datapath id representing a switch has a set of ports associated
which represent links that don't contain other OpenFlow
switches. This set of paths can be used safely for flooding to
ensure that we don't circulate broadcast packets.
@param links list link tuples (src_dpid, src_port, dst_dpid, dst_port)
'''
# Walk through the datapaths and mark all ports
# that could potentially be enabled
now = time.time()
for dp in self.datapaths.iterkeys():
for port_no, port in self.datapaths[dp].iteritems():
if port_no > openflow.OFPP_MAX or now > port['enable_time']:
port['enable'] = True
else:
port['enable'] = False
port['keep'] = False
# Walk through the links and create a dict based on source port
my_links = self.build_link_dict(links)
self.verify_bidir_links(my_links)
# Now try to build the spanning tree
seen = set()
roots = set()
# Get all sources in reversed sorted order
srcs = self.datapaths.keys()
srcs.sort()
srcs = srcs[::-1]
#kyr
if len(srcs):
self.root = srcs[len(srcs)-1]
# Process all sources
while len(srcs) > 0:
src_dpid = srcs.pop()
# Add the dpid to the list of roots if we haven't yet seen it
# (it must then be the root of a tree)
if src_dpid not in seen:
roots.add(src_dpid)
# Record that we've seen this node
seen.add(src_dpid)
# Make sure we know the src_dpid
# This is necessary occasionally during start-up
if not my_links.has_key(src_dpid):
self.debugPrint("Warning: cannot find src_dpid %s in my_links"%longlong_to_octstr(src_dpid)[6:])
continue
# Walk through all dests
dsts = my_links[src_dpid].keys()
dsts.sort()
next_dpids = []
for dst_dpid in dsts:
if dst_dpid not in seen:
# Attempt to find the fastest link to the other switch
best_speed = -1
best_pair = (-1, -1)
for (src_port, dst_port) in my_links[src_dpid][dst_dpid]:
try:
speed = self.datapaths[src_dpid][src_port]['speed']
if speed > best_speed:
best_speed = speed
best_pair = (src_port, dst_port)
except KeyError:
pass
# Disable all links but the fastest
for (src_port, dst_port) in my_links[src_dpid][dst_dpid]:
try:
if (src_port, dst_port) != best_pair:
self.datapaths[dst_dpid][dst_port]['enable'] = False
else:
self.datapaths[src_dpid][src_port]['keep'] = True
self.datapaths[dst_dpid][dst_port]['keep'] = True
except KeyError:
pass
# Record that we've seen the dpid
seen.add(dst_dpid)
next_dpids.append(dst_dpid)
# Already-seen DPIDs
else:
# Disable the link to the already-seen DPIDs
if src_dpid <= dst_dpid:
(local_src_dpid, local_dst_dpid) = (src_dpid, dst_dpid)
else:
(local_src_dpid, local_dst_dpid) = (dst_dpid, src_dpid)
for (src_port, dst_port) in my_links[local_src_dpid][local_dst_dpid]:
# If the src/dst dpids are the same, sort the ports
if local_src_dpid == local_dst_dpid:
if (src_port > dst_port):
(src_port, dst_port) = (dst_port, src_port)
# Disable the ports
try:
if not self.datapaths[local_dst_dpid][dst_port]['keep']:
self.datapaths[local_dst_dpid][dst_port]['enable'] = False
if not self.datapaths[local_src_dpid][src_port]['keep']:
self.datapaths[local_src_dpid][src_port]['enable'] = False
except KeyError:
pass
# Once we've processed all links from this source, update the
# list of sources so that the DPIDs we've just linked to will
# be processed next. This is achieved by placing them at the
# end of the list.
next_dpids = next_dpids[::-1]
for dpid in next_dpids:
try:
srcs.remove(dpid)
except ValueError:
pass
srcs.extend(next_dpids)
# Update the list of roots
self.roots = roots
# Build dictionary to send to gui
# Format { dp: [stp_ports] }
stp_ports = {}
# Walk through links and enable/disable as appropriate
for dp in self.datapaths.iterkeys():
floodports = []
nonfloodports = []
for port_no, port in self.datapaths[dp].iteritems():
if port_no <= openflow.OFPP_MAX:
if port['enable'] != port['flood']:
if port['flood']:
port['flood'] = False
msg = 'Disabling'
config = openflow.OFPPC_NO_FLOOD
else:
port['flood'] = True
msg = 'Enabling'
config = 0
self.debugPrint("%s port: %s--%d"%(msg, longlong_to_octstr(dp)[6:], port_no))
hw_addr = "\0\0" + port[core.HW_ADDR]
hw_addr = struct.unpack("!q", hw_addr)[0]
self.ctxt.send_port_mod(dp, port[core.PORT_NO], ethernetaddr(hw_addr),
openflow.OFPPC_NO_FLOOD, config)
if port['flood']:
floodports.append(port_no)
else:
nonfloodports.append(port_no)
self.debugPrint("Ports for %s: Flood: %s Non-flood: %s"%(longlong_to_octstr(dp)[6:], floodports, nonfloodports))
dp = str(hex(dp))
dp = dp[2:len(dp)-1]
while len(dp)<12:
dp = "0"+dp
stp_ports[dp] = floodports#, nonfloodports)
# If ST has changed, update and send new enabled ports to GUI
if cmp(self.current_stp_ports, stp_ports) != 0:
self.current_stp_ports = stp_ports
root = str(self.root)
while len(root)<12:
root = "0"+root
stp_ports['root'] = root
self.send_to_gui("stp_ports", self.current_stp_ports)
else:
self.debugPrint("SP has not changed")
def build_link_dict(self, links):
'''Build a dictionary of links based on source dpid
Dict is:
{src_dpid: {dst_dpid: [(src_port, dst_port), ...]}}
'''
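# Illustrative sketch only (hypothetical dpids/ports, not taken from a live
# run): two switches 0x1 and 0x2 joined by a single cable between port 3 and
# port 7 would typically come back as
#   {0x1: {0x2: set([(3, 7)])}, 0x2: {0x1: set([(7, 3)])}}
# since get_all_links usually reports each link once per direction.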
my_links = {}
for (src_dpid, src_port, dst_dpid, dst_port) in links:
# Track the link
try:
if self.datapaths[src_dpid][src_port]['enable'] and \
self.datapaths[dst_dpid][dst_port]['enable']:
if my_links.has_key(src_dpid):
if (my_links[src_dpid].has_key(dst_dpid)):
my_links[src_dpid][dst_dpid].add((src_port, dst_port))
else:
my_links[src_dpid][dst_dpid] = set()
my_links[src_dpid][dst_dpid].add((src_port, dst_port))
else:
my_links[src_dpid] = {dst_dpid:set()}
my_links[src_dpid][dst_dpid].add((src_port, dst_port))
except KeyError:
pass
return my_links
def verify_bidir_links(self, links):
'''Verify that all links are bi-directional
Delete unidirectional links and disable ports
'''
srcs_to_delete = []
for src_dpid in links.keys():
dsts_to_delete = []
for dst_dpid in links[src_dpid].keys():
# Work out which ports need deleting
ports_to_delete = []
for (src_port, dst_port) in links[src_dpid][dst_dpid]:
ok = True
try:
if (dst_port, src_port) not in links[dst_dpid][src_dpid]:
ok = False
except KeyError:
ok = False
if not ok:
self.debugPrint("WARNING: Unidirectional link detected between %s -- %d <--> %s -- %d"%
(longlong_to_octstr(src_dpid)[6:], src_port,
longlong_to_octstr(dst_dpid)[6:], dst_port))
ports_to_delete.append((src_port, dst_port))
try:
if (src_dpid <= dst_dpid):
self.datapaths[dst_dpid][dst_port]['enable'] = False
else:
self.datapaths[src_dpid][src_port]['enable'] = False
except KeyError:
self.datapaths[src_dpid][src_port]['enable'] = False
# Delete the ports and work out if we need to delete the dst_dpid
for ports in ports_to_delete:
links[src_dpid][dst_dpid].discard(ports)
if len(links[src_dpid][dst_dpid]) == 0:
dsts_to_delete.append(dst_dpid)
# Delete the dst_dpids and identify whether to delete the src_dpid
for dst_dpid in dsts_to_delete:
del links[src_dpid][dst_dpid]
if len(links[src_dpid]) == 0:
srcs_to_delete.append(src_dpid)
# Delete the src_dpids
for src_dpid in srcs_to_delete:
del links[src_dpid]
def handle_port_status(self, dpid, reason, port):
'''Port_status_event handler
Handles port status events, such as adding and deleting ports
dpid - Datapath ID of port
reason - what event occurred
port - port
'''
# Work out what sort of event we're processing
if reason == openflow.OFPPR_ADD:
if port['port_no'] <= openflow.OFPP_MAX:
port['enable_time'] = time.time() + FLOOD_WAIT_TIME
port['flood'] = False
hw_addr = "\0\0" + port[core.HW_ADDR]
hw_addr = struct.unpack("!q", hw_addr)[0]
self.ctxt.send_port_mod(dpid, port[core.PORT_NO], ethernetaddr(hw_addr),
openflow.OFPPC_NO_FLOOD, openflow.OFPPC_NO_FLOOD)
self.datapaths[dpid][port['port_no']] = port
self.port_count += 1
elif reason == openflow.OFPPR_DELETE:
if self.datapaths[dpid].has_key(port['port_no']):
self.port_count -= 1
del self.datapaths[dpid][port['port_no']]
return CONTINUE
def handle_packet_in(self, dpid, inport, reason, len, bufid, packet):
'''Packet in callback function
Allow packets to be processed by other modules only if
the port is a flood port or it's an LLDP packet
dpid - DPID of switch
inport - input port
reason -
len - length
bufid - buffer ID of packet
packet - received packet
'''
if not packet.parsed:
log.debug('Ignoring incomplete packet')
# Allow LLDP messages to be processed
if packet.type == ethernet.LLDP_TYPE:
return CONTINUE
# Check if it is a destination we know about
try:
# Check dest mac
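# (bytes 0-5 of the frame are the Ethernet destination MAC; the two
# unpacks below just join them into a single 48-bit integer)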
dst_mac = (struct.unpack('!I', packet.arr[0:4])[0] << 16) + struct.unpack('!H', packet.arr[4:6])[0]
if dst_mac in self.mac_bypass:
return CONTINUE
# Check dest IP
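# (bytes 12-14 hold the EtherType, 0x800 for IPv4; byte 14 packs the IP
# version/IHL, 0x45 meaning IPv4 with a 20-byte header, which places the
# destination IP at bytes 30-34 of the frame)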
type = struct.unpack('!H', packet.arr[12:14])[0]
ipver = struct.unpack('!b', packet.arr[14:15])[0]
if type == 0x800 and ipver == 0x45:
dst_ip = struct.unpack('!I', packet.arr[30:34])[0]
if dst_ip in self.ip_bypass:
return CONTINUE
except:
pass
# Check if the port is a flood port
log.warn("%s : %s" %(dpid, packet))
try:
if self.datapaths[dpid][inport]['flood']:
return CONTINUE
else:
log.warn("STOP")
return STOP
except KeyError:
return STOP
def update_lldp_send_period(self):
'''Update the LLDP send period'''
if self.port_count == 0:
nox.netapps.discovery.discovery.LLDP_SEND_PERIOD = MIN_LLDP_SEND_PERIOD
else:
nox.netapps.discovery.discovery.LLDP_SEND_PERIOD = min(
MIN_LLDP_SEND_PERIOD,
(FLOOD_WAIT_TIME * 1.0) / 2 / self.port_count)
def add_ip_bypass(self, ip):
'''Add a bypass IP address
Bypass IP addresses should be ignored when considering the datapath'''
self.ip_bypass.add(ip)
def del_ip_bypass(self, ip):
'''Delete a bypass IP address'''
self.ip_bypass.discard(ip)
def add_mac_bypass(self, mac):
'''Add a bypass MAC address
Bypass MAC addresses should be ignored when considering the datapath'''
self.mac_bypass.add(mac)
def del_mac_bypass(self, mac):
'''Delete a bypass MAC address'''
self.mac_bypass.discard(mac)
def reset_bypass(self):
'''Reset all bypass IP addresses'''
self.ip_bypass = set()
self.mac_bypass = set()
def get_roots(self):
'''Get a list of all spanning tree roots'''
return self.roots
"""Communication with the GUI"""
def handle_jsonmsg_event(self, e):
''' Handle incoming json messages '''
msg = json.loads(e.jsonstring)
if msg["type"] != "spanning_tree" :
return CONTINUE
if not "command" in msg:
log.debug("Received message with no command field")
return CONTINUE
if msg["command"] == "subscribe":
# Add stream to interested entities for this msg_type
if not msg["msg_type"] in self.subscribers:
self.subscribers[msg["msg_type"]] = []
self.subscribers[msg["msg_type"]].append(e)
# Immediately send the current stp ports
self.send_to_gui("stp_ports", self.current_stp_ports)
return CONTINUE
def send_to_gui(self, msg_type, data):
# Construct message header
msg = {}
msg["type"] = "spanning_tree"
# Add msg_type-specific payload
if msg_type=="stp_ports":
msg["msg_type"] = "stp_ports"
msg["ports"] = data
if "stp_ports" in self.subscribers:
for stream in self.subscribers["stp_ports"]:
stream.reply(json.dumps(msg))
def getFactory():
class Factory:
def instance(self, ctxt):
return Spanning_Tree(ctxt)
return Factory()
|
LiangfengD/code-for-blog
|
refs/heads/master
|
2012/plugins_python/htmlize/__init__.py
|
12133432
| |
farhaadila/django-cms
|
refs/heads/develop
|
cms/management/commands/subcommands/__init__.py
|
12133432
| |
brian-l/django-1.4.10
|
refs/heads/master
|
tests/modeltests/get_latest/__init__.py
|
12133432
| |
akosel/servo
|
refs/heads/master
|
tests/wpt/css-tests/css21_dev/html4/support/fonts/makegsubfonts.py
|
820
|
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag);
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData()
|
nakato/AuthKit
|
refs/heads/master
|
authkit/authorize/__init__.py
|
3
|
"""\
Please update your code to use authkit.authorize.wsgi_adaptors instead of this
module.
"""
from authkit.authorize.wsgi_adaptors import *
|
caterinaurban/Lyra
|
refs/heads/master
|
src/lyra/unittests/numerical/interval/forward/indexing3/filterX.py
|
1
|
X: List[int] = [0, 5, 10]
y: int = int(input())
z: int = int(input())
if y < 2 or y > 10 or z < 3 or z > 5:
raise ValueError
print("")
# STATE: X -> 0@[0, 0], 1@[5, 5], 2@[10, 10], _@⊥; len(X) -> [3, 3]; y -> [2, 10]; z -> [3, 5]
if X[1] - y - z >= 0:
# STATE: X -> 0@[0, 0], 1@[5, 5], 2@[10, 10], _@⊥; len(X) -> [3, 3]; y -> [2, 2]; z -> [3, 3]
print("OK!")
|
fisele/slimta-abusix
|
refs/heads/develop
|
slimta/relay/http.py
|
1
|
# Copyright (c) 2013 Ian C. Good
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""This module contains a |Relay| class that delivers mail using the HTTP or
HTTPS protocols. This is done in the same way that
:mod:`slimta.edge.wsgi` receives mail, making the two compatible
for exchanging mail.
"""
from __future__ import absolute_import
import re
from socket import getfqdn
from base64 import b64encode
import gevent
from slimta import logging
from slimta.smtp.reply import Reply
from slimta.http import get_connection
from slimta.util import validate_tls
from slimta.util.pycompat import urlparse
from . import PermanentRelayError, TransientRelayError
from .pool import RelayPool, RelayPoolClient
from .smtp import SmtpRelayError
__all__ = ['HttpRelay']
log = logging.getHttpLogger(__name__)
class HttpRelayClient(RelayPoolClient):
reply_code_pattern = re.compile(r'^\s*(\d\d\d)\s*;')
reply_param_pattern = re.compile(r'\s(\w+)\s*=\s*"(.*?)"')
def __init__(self, relay):
super(HttpRelayClient, self).__init__(relay.queue, relay.idle_timeout)
self.conn = None
self.ehlo_as = None
self.url = relay.url
self.relay = relay
def _wait_for_request(self):
result, envelope = self.poll()
if result and envelope:
self.idle = False
self._handle_request(result, envelope)
else:
if self.conn:
self.conn.close()
self.conn = None
def _b64encode(self, what):
return b64encode(what.encode('utf-8')).decode('ascii')
def _build_headers(self, envelope, msg_headers, msg_body):
content_length = str(len(msg_headers) + len(msg_body))
headers = [('Content-Length', content_length),
('Content-Type', 'message/rfc822'),
(self.relay.ehlo_header, self.ehlo_as),
(self.relay.sender_header,
self._b64encode(envelope.sender))]
for rcpt in envelope.recipients:
headers.append((self.relay.recipient_header,
self._b64encode(rcpt)))
return headers
def _new_conn(self):
self.conn = get_connection(self.url, self.relay.tls)
try:
self.ehlo_as = self.relay.ehlo_as()
except TypeError:
self.ehlo_as = self.relay.ehlo_as
def _handle_request(self, result, envelope):
method = self.relay.http_verb
if not self.conn:
self._new_conn()
with gevent.Timeout(self.relay.timeout):
msg_headers, msg_body = envelope.flatten()
headers = self._build_headers(envelope, msg_headers, msg_body)
log.request(self.conn, method, self.url.path, headers)
self.conn.putrequest(method, self.url.path)
for name, value in headers:
self.conn.putheader(name.encode('iso-8859-1'),
value.encode('iso-8859-1'))
self.conn.endheaders(msg_headers)
self.conn.send(msg_body)
self._process_response(self.conn.getresponse(), result)
def _parse_smtp_reply_header(self, http_res):
raw_reply = http_res.getheader('X-Smtp-Reply', '')
match = re.match(self.reply_code_pattern, raw_reply)
if not match:
return None
code = match.group(1)
message = ''
command = None
for match in re.finditer(self.reply_param_pattern, raw_reply):
if match.group(1).lower() == 'message':
message = match.group(2)
elif match.group(1).lower() == 'command':
command = match.group(2)
return Reply(code, message, command)
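# Rough illustration (hedged; the header format is documented on the
# HttpRelay class below): a response carrying
#   X-Smtp-Reply: 550; message="5.0.0 Some error message"
# should come back from this method as roughly
# Reply('550', '5.0.0 Some error message'), while a missing or malformed
# header makes it return None.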
def _process_response(self, http_res, result):
status = '{0!s} {1}'.format(http_res.status, http_res.reason)
smtp_reply = self._parse_smtp_reply_header(http_res)
log.response(self.conn, status, http_res.getheaders())
if status.startswith('2'):
result.set(smtp_reply)
else:
if smtp_reply:
exc = SmtpRelayError.factory(smtp_reply)
elif status.startswith('4'):
exc = PermanentRelayError(http_res.reason)
else:
exc = TransientRelayError(http_res.reason)
result.set_exception(exc)
def _run(self):
try:
while True:
self._wait_for_request()
if not self.relay.idle_timeout:
break
except gevent.Timeout:
pass
finally:
if self.conn:
self.conn.close()
class HttpRelay(RelayPool):
"""Implements a |Relay| that attempts to deliver mail with an HTTP or HTTPS
request. This request contains all the information that would usually go
through an SMTP session as headers: the EHLO string, envelope sender and
recipients.
A ``200 OK`` (or similar) response from the server will inform the caller
that the message was successfully delivered. In other cases, the class
makes its best guess about whether to raise a
:class:`~slimta.relay.PermanentRelayError` or
:class:`~slimta.relay.TransientRelayError`. If the server's response
includes a ``X-Smtp-Reply`` header, it will be used. This header looks
like::
X-Smtp-Reply: 550; message="5.0.0 Some error message"
:param url: URL string to make requests against. This string is parsed with
:py:func:`urlparse.urlsplit` with ``'http'`` as the default
scheme.
:param pool_size: At most this many simultaneous connections will be open
to the destination. If this limit is reached and no
connections are idle, new attempts will block.
:param tls: Dictionary of TLS settings passed directly as keyword arguments
to :class:`gevent.ssl.SSLSocket`. This parameter is optional
unless ``https:`` is given in ``url``.
:param ehlo_as: The string to send as the EHLO string in a header. Defaults
to the FQDN of the system. This may also be given as a
function that will be executed with no arguments at the
beginning of each connection.
:param timeout: This is the maximum time in seconds to wait for the entire
session: connection, request, and response. If ``None``,
there is no timeout.
:param idle_timeout: Timeout in seconds that a connection is held open
waiting for another delivery request to process. By
default, connections are closed immediately and not
reused.
"""
#: The HTTP verb to use with the requests.
http_verb = 'POST'
#: The header name used to send the base64-encoded sender address.
sender_header = 'X-Envelope-Sender'
#: The header name used to send each base64-encoded recipient address.
recipient_header = 'X-Envelope-Recipient'
#: The header name used to send the EHLO string.
ehlo_header = 'X-Ehlo'
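# A minimal usage sketch (the URL, pool size and timeout below are made-up
# placeholders, not values from this module):
#   relay = HttpRelay('http://127.0.0.1:8025/messages', pool_size=4,
#                     idle_timeout=10)
# The relay instance is then typically handed to a slimta queue; the class
# docstring above describes how HTTP responses map onto relay errors.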
def __init__(self, url, pool_size=None, tls=None, ehlo_as=None,
timeout=None, idle_timeout=None):
super(HttpRelay, self).__init__(pool_size)
self.url = urlparse.urlsplit(url, 'http')
self.tls = validate_tls(tls)
self.ehlo_as = ehlo_as or getfqdn()
self.timeout = timeout
self.idle_timeout = idle_timeout
def add_client(self):
return HttpRelayClient(self)
# vim:et:fdm=marker:sts=4:sw=4:ts=4
|
lokirius/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/unittest/result.py
|
50
|
"""Test result object"""
import os
import io
import sys
import traceback
from . import util
from functools import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_testRunEntered = False
_moduleSetUpFailed = False
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def printErrors(self):
"Called by TestRunner after test run"
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
self._setupStdout()
def _setupStdout(self):
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = io.StringIO()
self._stdout_buffer = io.StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
self._restoreStdout()
self._mirrorOutput = False
def _restoreStdout(self):
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return len(self.failures) == len(self.errors) == 0
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return ("<%s run=%i errors=%i failures=%i>" %
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures)))
|
quattor/aquilon
|
refs/heads/upstream
|
lib/aquilon/worker/commands/show_city_all.py
|
2
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013,2014 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq show city --all`."""
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.show_location_type import CommandShowLocationType
class CommandShowCityAll(CommandShowLocationType):
required_parameters = []
def render(self, session, **arguments):
return CommandShowLocationType.render(self, session=session,
type='city', name=None,
**arguments)
|
houzhenggang/hiwifi-openwrt-HC5661-HC5761
|
refs/heads/master
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/encodings/charmap.py
|
860
|
""" Generic Python Character Mapping Codec.
Use this codec directly rather than through the automatic
conversion mechanisms supplied by unicode() and .encode().
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.charmap_encode
decode = codecs.charmap_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict', mapping=None):
codecs.IncrementalEncoder.__init__(self, errors)
self.mapping = mapping
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, self.mapping)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict', mapping=None):
codecs.IncrementalDecoder.__init__(self, errors)
self.mapping = mapping
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, self.mapping)[0]
class StreamWriter(Codec,codecs.StreamWriter):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamWriter.__init__(self,stream,errors)
self.mapping = mapping
def encode(self,input,errors='strict'):
return Codec.encode(input,errors,self.mapping)
class StreamReader(Codec,codecs.StreamReader):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamReader.__init__(self,stream,errors)
self.mapping = mapping
def decode(self,input,errors='strict'):
return Codec.decode(input,errors,self.mapping)
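# Quick illustration (Python 2, hedged): with a decoding table that maps byte
# values to characters, e.g. decoding_map = u'ab', the call
#   codecs.charmap_decode('\x00\x01', 'strict', decoding_map)
# returns (u'ab', 2); the classes above simply carry such a mapping alongside
# the errors setting.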
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='charmap',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
|
Ms2ger/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/py/testing/io_/__init__.py
|
9480
|
#
|
eseidel/native_client_patches
|
refs/heads/master
|
tools/modular-build/treemappers.py
|
1
|
# Copyright 2010 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import os
import hashlib
import sys
import types
# Do not add any more imports here! This could lead to undeclared
# dependencies, changes to which fail to trigger rebuilds.
from dirtree import (FileSnapshot, FileSnapshotInMemory, SymlinkSnapshot,
LazyDict)
def UnionIntoDict(input_tree, dest_dict, context=""):
for key, value in sorted(input_tree.iteritems()):
new_context = os.path.join(context, key)
if isinstance(value, FileSnapshot):
if key in dest_dict:
# TODO(mseaborn): Ideally we should pass in a log stream
# explicitly instead of using sys.stderr.
sys.stderr.write("Warning: %r is being overwritten\n" % new_context)
dest_dict[key] = value
else:
dest_subdir = dest_dict.setdefault(key, {})
if isinstance(dest_subdir, FileSnapshot):
raise Exception("Cannot overwrite directory %r with file"
% new_context)
UnionIntoDict(value, dest_subdir, new_context)
def DeepCopy(tree):
"""
This function copies a read-only directory tree to return a mutable
tree that can be modified using in-place operations.
"""
if isinstance(tree, FileSnapshot):
return tree
else:
return dict((key, DeepCopy(value)) for key, value in tree.iteritems())
def UnionDir(*trees):
dest = {}
for tree in trees:
UnionIntoDict(tree, dest)
return dest
def GetPath(tree, path):
for element in path.split("/"):
tree = tree[element]
return tree
def SetPath(tree, path, value):
elements = path.split("/")
for element in elements[:-1]:
tree = tree.setdefault(element, {})
tree[elements[-1]] = value
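# Small sketch of the two helpers above (plain dicts, purely illustrative):
#   tree = {}; SetPath(tree, "a/b/c", 1)  ->  tree == {"a": {"b": {"c": 1}}}
#   GetPath(tree, "a/b/c")                ->  1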
VERSION_CONTROL_DIRS = (".svn", ".git")
def RemoveVersionControlDirs(tree):
if isinstance(tree, FileSnapshot):
return tree
else:
return LazyDict(lambda: dict((key, RemoveVersionControlDirs(value))
for key, value in tree.iteritems()
if key not in VERSION_CONTROL_DIRS))
# The tree mapper functions below -- AddHeadersToNewlib() and
# InstallLinkerScripts() -- are ad-hoc, in the sense that they support
# specific components that could not be treated uniformly. They have
# TODOs elsewhere.
#
# If we need more ad-hoc tree mappers, it would make sense to split
# this file up and use a different method for tracking when these
# functions' definitions have changed. But for now, since there is
# only a small number of them, it is simpler to keep them in one place
# and use the file-based identity tracking method discussed below.
def NaClSourceSubset(input_tree):
# We copy a subset of the NaCl source tree so that we can account
# for (almost) all of the inputs to the Scons build.
#
# There are two reasons for using a subset:
#
# 1) It lets us ignore parts of the tree that the Scons build does
# not use, such as the tarballs in third_party.
#
# 2) modular-build does not know how to distinguish checked-in
# source files from non-source files. We could query the SVN
# working copy, but the user could be using Git instead. Also
# modular-build does not know about DEPS.
#
# This means we need to avoid large, non-source directories in
# the NaCl working tree, which include:
# native_client/scons-out
# native_client/toolchain
# native_client/tools/modular-build/out
paths_to_copy = [
# From NaCl SVN
"native_client/SConstruct",
"native_client/common",
"native_client/installer",
"native_client/scons",
"native_client/site_scons",
"native_client/src",
"native_client/tests",
"native_client/tools/command_tester.py",
"native_client/tools/diff.py",
"native_client/tools/srpcgen.py",
"native_client/tools/test_lib.py",
"native_client/tools/tests",
"third_party/npapi",
"third_party/scons",
"third_party/sdl",
# Pulled in from SVN repos via DEPS
"base",
"build",
"gpu",
"ppapi",
"testing/gtest",
]
result = {}
for filename in paths_to_copy:
SetPath(result, filename, GetPath(input_tree, filename))
return result
# This is primarily a workaround for the multilib layout that newlib
# produces from "make install". It outputs to "lib/32", whereas
# everyone else outputs to "lib32", and gcc's search path has "lib32"
# not "lib/32".
# This is done instead of the lib32 -> lib/32 symlink that Makefile uses.
# TODO(mseaborn): Fix newlib to not output using this odd layout.
def MungeMultilibDir(tree, arch, bits):
libdir = tree.get(arch, {}).get("lib", {}).get(bits)
if isinstance(libdir, SymlinkSnapshot):
# Avoid clobbering the symlink created by Lib32Symlink().
return
if libdir is not None:
assert "lib" + bits not in tree[arch]
del tree[arch]["lib"][bits]
tree[arch]["lib" + bits] = libdir
# This is a workaround for gcc, which installs default-subarch
# libraries (such as libgcc_s.so.1) into "nacl/lib32" (for
# --target=nacl) or "nacl/lib64" (for --target=nacl64) rather than to
# "nacl/lib". However, the Scons build expects libraries to be in
# "lib", so rename "lib32" or "lib64" to "lib".
def RenameLibXXToLib(tree, arch, bits):
libdir = tree.get(arch, {}).get("lib" + bits)
if libdir is not None:
UnionIntoDict(libdir, tree[arch].get("lib", {}))
del tree[arch]["lib" + bits]
def Lib32Symlink(arch):
# This is necessary for full-gcc to link libgcc_s.so, because it
# passes "-B" and thereby ignores the "nacl64/lib32" path. The
# full-gcc build will only search "nacl64/lib/32" for libc.so, which
# libgcc_s.so is linked against.
return {arch: {"lib": {"32": SymlinkSnapshot("../lib32")}}}
def LibraryPathVar():
if sys.platform == "darwin":
return "DYLD_LIBRARY_PATH"
else:
return "LD_LIBRARY_PATH"
def AddEnvVarWrapperScripts(tree):
# Move the real executables from "bin" to "original-bin" and create
# wrapper scripts in "bin" that set LD_LIBRARY_PATH.
if "bin" in tree:
assert "original-bin" not in tree
tree["original-bin"] = tree["bin"]
tree["bin"] = {}
for script_name in tree["original-bin"].iterkeys():
template = """\
#!/bin/bash
export %(path_var)s="${0%%/*}/../lib${%(path_var)s+:$%(path_var)s}"
exec ${0%%/*}/../original-bin/%(script_name)s "$@"
"""
script = template % {"path_var": LibraryPathVar(),
"script_name": script_name}
tree["bin"][script_name] = FileSnapshotInMemory(script, executable=True)
def CombineInstallTrees(*trees):
trees = map(DeepCopy, trees)
for tree in trees:
MungeMultilibDir(tree, "nacl64", "32")
MungeMultilibDir(tree, "nacl", "64")
RenameLibXXToLib(tree, "nacl64", "64")
RenameLibXXToLib(tree, "nacl", "32")
combined = UnionDir(*trees)
AddEnvVarWrapperScripts(combined)
return combined
def AddHeadersToNewlib(newlib_source, nacl_headers):
dest = DeepCopy(newlib_source)
UnionIntoDict(nacl_headers, dest["newlib"]["libc"]["sys"]["nacl"])
return dest
def InstallLinkerScripts(glibc_tree, arch):
return {arch: {"lib": glibc_tree["nacl"]["dyn-link"]}}
def InstallKernelHeaders(include_dir_parent, arch):
return {arch: include_dir_parent}
def SubsetNaClHeaders(input_headers, arch):
# We install only a subset of the NaCl headers from
# service_runtime/include. We don't want the headers for POSIX
# interfaces that are provided by glibc, e.g. sys/mman.h.
result = {}
headers = [
"sys/nacl_imc_api.h",
"bits/nacl_imc_api.h",
"sys/nacl_syscalls.h",
"bits/nacl_syscalls.h",
"sys/audio_video.h",
"machine/_types.h"]
for filename in headers:
SetPath(result, filename, GetPath(input_headers, filename))
# TODO(mseaborn): Use from file:
# src/untrusted/include/machine/_default_types.h
default_types_h = """
/*
* This is a workaround for service_runtime/include/machine/_types.h,
* which wants to define dev_t, ino_t and others, but these are normally
* defined by glibc. However, we need machine/_types.h for its
* definition of nacl_abi_size_t.
*
* To resolve this, we #include glibc headers that cause __*_t_defined
* macros to be defined, which tells machine/_types.h not to attempt to
* define these types again.
*
* TODO(mseaborn): We should remove _default_types.h because it is a
* newlibism. However, this is tricky because of the magic rewriting of
* the "NACL_ABI_" and "nacl_abi_" prefixes that the NaCl headers use.
*/
/* This gives us __dev_t_defined, __ino_t_defined and others */
#include <sys/types.h>
/* This gives us __time_t_defined */
#include <time.h>
"""
SetPath(result, "machine/_default_types.h",
FileSnapshotInMemory(default_types_h))
return {arch: {"include": result}}
def CreateAlias(new_name, old_name):
template = """\
#!/bin/sh
exec %s "$@"
"""
return {"bin": {new_name: FileSnapshotInMemory(template % old_name,
executable=True)}}
def DummyLibrary(arch, name):
# This text file works as a dummy (empty) library because ld treats
# it as a linker script.
dummy_lib = FileSnapshotInMemory("/* Intentionally empty */\n")
lib_dir = {"%s.a" % name: dummy_lib}
return {arch: {"lib": lib_dir, "lib32": lib_dir}}
# When gcc is built, it checks whether libc provides <limits.h> in
# order to determine whether gcc's own <limits.h> should use libc's
# version via #include_next. Oddly, gcc looks in "sys-include" rather
# than "include". We work around this by creating "sys-include" as an
# alias. See http://code.google.com/p/nativeclient/issues/detail?id=854
def SysIncludeAlias(tree, arch):
return {arch: {"sys-include": tree[arch]["include"]}}
# The functions above are fairly cheap, so we could run them each
# time, but they do require scanning their input directory trees, so
# it would be better to avoid that if the function has not changed.
#
# To do that, we need a way to serialise the identities of the
# functions. We do that just by recording the hash of this file.
#
# More complex approaches could be:
# * Hashing a function's Python bytecode. This captures line numbers
# for debugging so it would still change on trivial changes.
# * Hashing a function's AST.
def MarkFunctionsWithIdentity(module):
filename = __file__
if filename.endswith(".pyc"):
# Change ".pyc" extension to ".py".
filename = filename[:-1]
fh = open(filename, "r")
module_identity = hashlib.sha1(fh.read()).hexdigest()
fh.close()
for value in module.itervalues():
if isinstance(value, types.FunctionType):
value.function_identity = (module_identity, __name__)
MarkFunctionsWithIdentity(globals())
|
mzdaniel/oh-mainline
|
refs/heads/master
|
vendor/packages/celery/funtests/suite/test_basic.py
|
18
|
import operator
import os
import sys
import time
# funtest config
sys.path.insert(0, os.getcwd())
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
import suite
from celery.tests.utils import unittest
from celery.tests.functional import tasks
from celery.tests.functional.case import WorkerCase
from celery.task.control import broadcast
class test_basic(WorkerCase):
def test_started(self):
self.assertWorkerAlive()
def test_roundtrip_simple_task(self):
publisher = tasks.add.get_publisher()
results = [(tasks.add.apply_async(i, publisher=publisher), i)
for i in zip(xrange(100), xrange(100))]
for result, i in results:
self.assertEqual(result.get(timeout=10), operator.add(*i))
def test_dump_active(self, sleep=1):
r1 = tasks.sleeptask.delay(sleep)
r2 = tasks.sleeptask.delay(sleep)
self.ensure_accepted(r1.task_id)
active = self.inspect().active(safe=True)
self.assertTrue(active)
active = active[self.worker.hostname]
self.assertEqual(len(active), 2)
self.assertEqual(active[0]["name"], tasks.sleeptask.name)
self.assertEqual(active[0]["args"], [sleep])
def test_dump_reserved(self, sleep=1):
r1 = tasks.sleeptask.delay(sleep)
r2 = tasks.sleeptask.delay(sleep)
r3 = tasks.sleeptask.delay(sleep)
r4 = tasks.sleeptask.delay(sleep)
self.ensure_accepted(r1.task_id)
reserved = self.inspect().reserved(safe=True)
self.assertTrue(reserved)
reserved = reserved[self.worker.hostname]
self.assertEqual(reserved[0]["name"], tasks.sleeptask.name)
self.assertEqual(reserved[0]["args"], [sleep])
def test_dump_schedule(self, countdown=1):
r1 = tasks.add.apply_async((2, 2), countdown=countdown)
r2 = tasks.add.apply_async((2, 2), countdown=countdown)
self.ensure_scheduled(r1.task_id, interval=0.1)
schedule = self.inspect().scheduled(safe=True)
self.assertTrue(schedule)
schedule = schedule[self.worker.hostname]
self.assertEqual(len(schedule), 2)
self.assertEqual(schedule[0]["request"]["name"], tasks.add.name)
self.assertEqual(schedule[0]["request"]["args"], [2, 2])
if __name__ == "__main__":
unittest.main()
|
knuu/competitive-programming
|
refs/heads/master
|
atcoder/corp/mujin2018_a.py
|
1
|
S = input()
print("Yes" if S.startswith("MUJIN") else "No")
|
Samweli/inasafe
|
refs/heads/develop
|
safe/messaging/item/line_break.py
|
14
|
"""
InaSAFE Disaster risk assessment tool developed by AusAid - **Paragraph.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'marco@opengis.ch'
__revision__ = '$Format:%H$'
__date__ = '28/05/2013'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
from text import Text
# FIXME (MB) remove when all to_* methods are implemented
# pylint: disable=W0223
class LineBreak(Text):
"""A class to model line breaks in text the messaging system """
def to_html(self, **kwargs):
"""Render as html
Args:
None
Returns:
Str the html representation
Raises:
Errors are propagated
We pass the kwargs on to the base class so an exception is raised
if invalid keywords were passed. See:
http://stackoverflow.com/questions/13124961/
how-to-pass-arguments-efficiently-kwargs-in-python
"""
super(LineBreak, self).__init__(**kwargs)
return '<br%s/>\n' % self.html_attributes()
def to_text(self):
"""Render as plain text
Args:
None
Returns:
Str the plain text representation
Raises:
Errors are propagated
"""
return '\n'
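# Editor's note, a minimal usage sketch (not part of the original module).
# The HTML output shown assumes no extra html attributes are set:
#
# line_break = LineBreak()
# line_break.to_text()  # -> '\n'
# line_break.to_html()  # -> roughly '<br/>\n'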
|
nabilt/Like-List
|
refs/heads/master
|
embedly/client.py
|
1
|
"""
Client
======
The embedly object that interacts with the service
"""
import re
import urllib
import httplib2
try:
import json
except ImportError:
import simplejson as json
from models import Url
USER_AGENT = 'Mozilla/5.0 (compatible; embedly-python/0.3;)'
class Embedly(object):
"""
Client
"""
def __init__(self, key=None, user_agent=USER_AGENT):
"""
Initialize the Embedly client
:param user_agent: User Agent passed to Embedly
:type user_agent: str
:param key: Embedly Pro key
:type key: str
:returns: None
"""
self.user_agent = user_agent
self.key = key
self.services = []
self._regex = None
def get_services(self):
"""
        get_services makes a call to the services endpoint of api.embed.ly to
        fetch the list of supported providers and their regexes.
"""
if self.services: return self.services
url = 'http://api.embed.ly/1/services/python'
http = httplib2.Http()
headers = {'User-Agent' : self.user_agent}
resp, content = http.request(url, headers=headers)
if resp['status'] == '200':
resp_data = json.loads(content)
self.services = resp_data
#build the regex that we can use later.
_regex = []
for each in self.get_services():
_regex.append('|'.join(each.get('regex',[])))
self._regex = re.compile('|'.join(_regex))
return self.services
def is_supported(self, url):
"""
``is_supported`` is a shortcut for client.regex.match(url)
"""
return self.regex.match(url) is not None
@property
def regex(self):
"""
``regex`` property just so we can call get_services if the _regex is
not yet filled.
"""
if not self._regex:
self.get_services()
return self._regex
def _get(self, version, method, url_or_urls, **kwargs):
"""
_get makes the actual call to api.embed.ly
"""
if not url_or_urls:
raise ValueError('%s requires a url or a list of urls given: %s' %
(method.title(), url_or_urls))
#A flag we can use instead of calling isinstance all the time.
multi = isinstance(url_or_urls, list)
query = ''
key = kwargs.get('key', self.key)
#make sure that a key was set on the client or passed in.
if not key:
raise ValueError('Requires a key. None given: %s' % (key))
kwargs['key'] = key
query += urllib.urlencode(kwargs)
if multi:
query += '&urls=%s&' % ','.join([urllib.quote(url) for url in url_or_urls])
else:
query += '&url=%s' % urllib.quote(url_or_urls)
url = 'http://api.embed.ly/%s/%s?%s' % (version, method, query)
http = httplib2.Http()
headers = {'User-Agent' : self.user_agent}
resp, content = http.request(url, headers=headers)
if resp['status'] == '200':
data = json.loads(content)
if kwargs.get('raw', False):
data['raw'] = content
else:
data = {'type' : 'error',
'error' : True,
'error_code' : int(resp['status'])}
if multi:
return map(lambda url, data: Url(data, method, url),
url_or_urls, data)
return Url(data, method, url_or_urls)
def oembed(self, url_or_urls, **kwargs):
"""
oembed
"""
return self._get(1, 'oembed', url_or_urls, **kwargs)
def preview(self, url_or_urls, **kwargs):
"""
        preview
"""
return self._get(1, 'preview', url_or_urls, **kwargs)
def objectify(self, url_or_urls, **kwargs):
"""
        objectify
"""
return self._get(2, 'objectify', url_or_urls, **kwargs)
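# Editor's sketch (not part of the original module): typical use of the client.
# The key below is a placeholder, and the call performs a live HTTP request to
# api.embed.ly, so this is illustrative only.
#
# client = Embedly(key='xxxxxxxxxxxx')
# url = 'http://www.youtube.com/watch?v=dQw4w9WgXcQ'
# if client.is_supported(url):
#     oembed = client.oembed(url)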
|
jiangwei1221/django-virtualenv-demo
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/conf/locale/de_CH/__init__.py
|
12133432
| |
gablg1/PerfKitBenchmarker
|
refs/heads/master
|
perfkitbenchmarker/packages/openblas.py
|
4
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing OpenBLAS installation and cleanup functions."""
from perfkitbenchmarker import vm_util
OPENBLAS_DIR = '%s/OpenBLAS' % vm_util.VM_TMP_DIR
GIT_REPO = 'git://github.com/xianyi/OpenBLAS'
GIT_TAG = 'v0.2.11'
def _Install(vm):
"""Installs the OpenBLAS package on the VM."""
vm.Install('build_tools')
vm.Install('fortran')
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, OPENBLAS_DIR))
vm.RemoteCommand('cd {0} && git checkout {1}'.format(OPENBLAS_DIR, GIT_TAG))
vm.RemoteCommand('cd {0} && make'.format(OPENBLAS_DIR))
def YumInstall(vm):
"""Installs the OpenBLAS package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the OpenBLAS package on the VM."""
_Install(vm)
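# Editor's sketch (not part of the original module): a benchmark would normally
# pull this package in by name from its Prepare phase. The exact call pattern
# below is an assumption based on how other PerfKitBenchmarker packages are used.
#
# def Prepare(benchmark_spec):
#     vm = benchmark_spec.vms[0]
#     vm.Install('openblas')  # dispatches to YumInstall/AptInstall above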
|
2ndy/RaspIM
|
refs/heads/master
|
usr/lib/python2.7/encodings/zlib_codec.py
|
533
|
""" Python 'zlib_codec' Codec - zlib compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.compress(input)
return (output, len(input))
def zlib_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = zlib.decompress(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return zlib_encode(input, errors)
def decode(self, input, errors='strict'):
return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.compressobj = zlib.compressobj()
def encode(self, input, final=False):
if final:
c = self.compressobj.compress(input)
return c + self.compressobj.flush()
else:
return self.compressobj.compress(input)
def reset(self):
self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.decompressobj = zlib.decompressobj()
def decode(self, input, final=False):
if final:
c = self.decompressobj.decompress(input)
return c + self.decompressobj.flush()
else:
return self.decompressobj.decompress(input)
def reset(self):
self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='zlib',
encode=zlib_encode,
decode=zlib_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
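# Editor's sketch (not part of the original module): a simple round trip through
# the module-level helpers. Both directions operate on byte strings.
#
# data = 'hello world' * 10
# compressed, consumed = zlib_encode(data)
# restored, _ = zlib_decode(compressed)
# assert restored == data and consumed == len(data)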
|
varunkamra/kuma
|
refs/heads/master
|
vendor/packages/translate/storage/placeables/test_lisa.py
|
26
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2006-2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
from lxml import etree
from translate.storage.placeables import StringElem, lisa
from translate.storage.placeables.xliff import Bx, Ex, G, UnknownXML, X
def test_xml_to_strelem():
source = etree.fromstring(u'<source>a</source>')
elem = lisa.xml_to_strelem(source)
assert elem == StringElem(u'a')
source = etree.fromstring(u'<source>a<x id="foo[1]/bar[1]/baz[1]"/></source>')
elem = lisa.xml_to_strelem(source)
assert elem.sub == [StringElem(u'a'), X(id=u'foo[1]/bar[1]/baz[1]')]
source = etree.fromstring(u'<source>a<x id="foo[1]/bar[1]/baz[1]"/>é</source>')
elem = lisa.xml_to_strelem(source)
assert elem.sub == [StringElem(u'a'), X(id=u'foo[1]/bar[1]/baz[1]'), StringElem(u'é')]
source = etree.fromstring(u'<source>a<g id="foo[2]/bar[2]/baz[2]">b<x id="foo[1]/bar[1]/baz[1]"/>c</g>é</source>')
elem = lisa.xml_to_strelem(source)
assert elem.sub == [StringElem(u'a'), G(id=u'foo[2]/bar[2]/baz[2]', sub=[StringElem(u'b'), X(id=u'foo[1]/bar[1]/baz[1]'), StringElem(u'c')]), StringElem(u'é')]
def test_xml_space():
source = etree.fromstring(u'<source xml:space="default"> a <x id="foo[1]/bar[1]/baz[1]"/> </source>')
elem = lisa.xml_to_strelem(source)
print(elem.sub)
assert elem.sub == [StringElem(u'a '), X(id=u'foo[1]/bar[1]/baz[1]'), StringElem(u' ')]
def test_chunk_list():
left = StringElem([u'a', G(id='foo[2]/bar[2]/baz[2]', sub=[u'b', X(id='foo[1]/bar[1]/baz[1]'), u'c']), u'é'])
right = StringElem([u'a', G(id='foo[2]/bar[2]/baz[2]', sub=[u'b', X(id='foo[1]/bar[1]/baz[1]'), u'c']), u'é'])
assert left == right
def test_set_strelem_to_xml():
source = etree.Element(u'source')
lisa.strelem_to_xml(source, StringElem(u'a'))
assert etree.tostring(source, encoding='UTF-8') == '<source>a</source>'
source = etree.Element(u'source')
lisa.strelem_to_xml(source, StringElem([u'a', u'é']))
assert etree.tostring(source, encoding='UTF-8') == '<source>aé</source>'
source = etree.Element(u'source')
lisa.strelem_to_xml(source, StringElem(X(id='foo[1]/bar[1]/baz[1]')))
assert etree.tostring(source, encoding='UTF-8') == '<source><x id="foo[1]/bar[1]/baz[1]"/></source>'
source = etree.Element(u'source')
lisa.strelem_to_xml(source, StringElem([u'a', X(id='foo[1]/bar[1]/baz[1]')]))
assert etree.tostring(source, encoding='UTF-8') == '<source>a<x id="foo[1]/bar[1]/baz[1]"/></source>'
source = etree.Element(u'source')
lisa.strelem_to_xml(source, StringElem([u'a', X(id='foo[1]/bar[1]/baz[1]'), u'é']))
assert etree.tostring(source, encoding='UTF-8') == '<source>a<x id="foo[1]/bar[1]/baz[1]"/>é</source>'
source = etree.Element(u'source')
lisa.strelem_to_xml(source, StringElem([u'a', G(id='foo[2]/bar[2]/baz[2]', sub=[u'b', X(id='foo[1]/bar[1]/baz[1]'), u'c']), u'é']))
assert etree.tostring(source, encoding='UTF-8') == '<source>a<g id="foo[2]/bar[2]/baz[2]">b<x id="foo[1]/bar[1]/baz[1]"/>c</g>é</source>'
def test_unknown_xml_placeable():
# The XML below is (modified) from the official XLIFF example file Sample_AlmostEverything_1.2_strict.xlf
source = etree.fromstring(u"""<source xml:lang="en-us">Text <g id="_1_ski_040">g</g>TEXT<bpt id="_1_ski_139">bpt<sub>sub</sub>
</bpt>TEXT<ept id="_1_ski_238">ept</ept>TEXT<ph id="_1_ski_337"/>TEXT<it id="_1_ski_436" pos="open">it</it>TEXT<mrk mtype="x-test">mrk</mrk>
<x id="_1_ski_535"/>TEXT<bx id="_1_ski_634"/>TEXT<ex id="_1_ski_733"/>TEXT.</source>""")
elem = lisa.xml_to_strelem(source)
from copy import copy
custom = StringElem([
StringElem(u'Text '),
G(u'g', id='_1_ski_040'),
StringElem(u'TEXT'),
UnknownXML(
[
StringElem(u'bpt'),
UnknownXML(u'sub', xml_node=copy(source[1][0])),
StringElem(u'\n '),
],
id='_1_ski_139',
xml_node=copy(source[3])),
StringElem(u'TEXT'),
UnknownXML(u'ept', id=u'_1_ski_238', xml_node=copy(source[2])),
StringElem(u'TEXT'),
UnknownXML(id='_1_ski_337', xml_node=copy(source[3])), # ph-tag
StringElem(u'TEXT'),
UnknownXML(u'it', id='_1_ski_436', xml_node=copy(source[4])),
StringElem(u'TEXT'),
UnknownXML(u'mrk', xml_node=copy(source[5])),
StringElem(u'\n '),
X(id='_1_ski_535'),
StringElem(u'TEXT'),
Bx(id='_1_ski_634'),
StringElem(u'TEXT'),
Ex(id='_1_ski_733'),
StringElem(u'TEXT.')
])
assert elem == custom
xml = copy(source)
for i in range(len(xml)):
del xml[0]
xml.text = None
xml.tail = None
lisa.strelem_to_xml(xml, elem)
assert etree.tostring(xml) == etree.tostring(source)
if __name__ == '__main__':
test_chunk_list()
test_xml_to_strelem()
test_set_strelem_to_xml()
test_unknown_xml_placeable()
|
MaheshIBM/keystone
|
refs/heads/master
|
keystone/common/cache/backends/mongo.py
|
2
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
from dogpile.cache import api
from dogpile.cache import util as dp_util
from oslo.utils import importutils
from oslo.utils import timeutils
import six
from keystone import exception
from keystone.i18n import _, _LW
from keystone.openstack.common import log
NO_VALUE = api.NO_VALUE
LOG = log.getLogger(__name__)
class MongoCacheBackend(api.CacheBackend):
"""A MongoDB based caching backend implementing dogpile backend APIs.
Arguments accepted in the arguments dictionary:
:param db_hosts: string (required), hostname or IP address of the
MongoDB server instance. This can be a single MongoDB connection URI,
or a list of MongoDB connection URIs.
:param db_name: string (required), the name of the database to be used.
:param cache_collection: string (required), the name of collection to store
cached data.
        *Note:* A different collection name can be provided if there is a need
        to create a separate container (i.e. collection) for cache data. So
        region configuration is done per collection.
Following are optional parameters for MongoDB backend configuration,
:param username: string, the name of the user to authenticate.
:param password: string, the password of the user to authenticate.
:param max_pool_size: integer, the maximum number of connections that the
pool will open simultaneously. By default the pool size is 10.
:param w: integer, write acknowledgement for MongoDB client
If not provided, then no default is set on MongoDB and then write
acknowledgement behavior occurs as per MongoDB default. This parameter
        name is the same as what is used in the MongoDB docs. This value is
        specified at the collection level so it is applicable to
        `cache_collection` db write operations.
If this is a replica set, write operations will block until they have
been replicated to the specified number or tagged set of servers.
Setting w=0 disables write acknowledgement and all other write concern
options.
:param read_preference: string, the read preference mode for MongoDB client
Expected value is ``primary``, ``primaryPreferred``, ``secondary``,
``secondaryPreferred``, or ``nearest``. This read_preference is
        specified at the collection level so it is applicable to
        `cache_collection` db read operations.
:param use_replica: boolean, flag to indicate if replica client to be
used. Default is `False`. `replicaset_name` value is required if
`True`.
:param replicaset_name: string, name of replica set.
Becomes required if `use_replica` is `True`
:param son_manipulator: string, name of class with module name which
implements MongoDB SONManipulator.
Default manipulator used is :class:`.BaseTransform`.
This manipulator is added per database. In multiple cache
configurations, the manipulator name should be same if same
database name ``db_name`` is used in those configurations.
SONManipulator is used to manipulate custom data types as they are
saved or retrieved from MongoDB. Custom impl is only needed if cached
data is custom class and needs transformations when saving or reading
from db. If dogpile cached value contains built-in data types, then
BaseTransform class is sufficient as it already handles dogpile
CachedValue class transformation.
:param mongo_ttl_seconds: integer, interval in seconds to indicate maximum
time-to-live value.
        If the value is greater than 0, then it is assumed that cache_collection
        needs to be a TTL type (has an index on the 'doc_date' field).
        By default, the value is -1 and TTL is disabled.
Reference: <http://docs.mongodb.org/manual/tutorial/expire-data/>
.. NOTE::
This parameter is different from Dogpile own
expiration_time, which is the number of seconds after which Dogpile
will consider the value to be expired. When Dogpile considers a
value to be expired, it continues to use the value until generation
of a new value is complete, when using CacheRegion.get_or_create().
Therefore, if you are setting `mongo_ttl_seconds`, you will want to
make sure it is greater than expiration_time by at least enough
seconds for new values to be generated, else the value would not
be available during a regeneration, forcing all threads to wait for
a regeneration each time a value expires.
:param ssl: boolean, If True, create the connection to the server
        using SSL. Default is `False`. Client SSL connection parameters depend
on server side SSL setup. For further reference on SSL configuration:
<http://docs.mongodb.org/manual/tutorial/configure-ssl/>
:param ssl_keyfile: string, the private keyfile used to identify the
local connection against mongod. If included with the certfile then
only the `ssl_certfile` is needed. Used only when `ssl` is `True`.
:param ssl_certfile: string, the certificate file used to identify the
local connection against mongod. Used only when `ssl` is `True`.
:param ssl_ca_certs: string, the ca_certs file contains a set of
concatenated 'certification authority' certificates, which are used to
validate certificates passed from the other end of the connection.
Used only when `ssl` is `True`.
:param ssl_cert_reqs: string, the parameter cert_reqs specifies whether
a certificate is required from the other side of the connection, and
whether it will be validated if provided. It must be one of the three
values ``ssl.CERT_NONE`` (certificates ignored), ``ssl.CERT_OPTIONAL``
(not required, but validated if provided), or
``ssl.CERT_REQUIRED`` (required and validated). If the value of this
parameter is not ``ssl.CERT_NONE``, then the ssl_ca_certs parameter
must point to a file of CA certificates. Used only when `ssl`
is `True`.
    The rest of the arguments are passed to the mongo calls for read, write and
    remove, so related options can be specified to pass to these operations.
    Further details of the various supported arguments can be found at
    <http://api.mongodb.org/python/current/api/pymongo/>
"""
def __init__(self, arguments):
self.api = MongoApi(arguments)
@dp_util.memoized_property
def client(self):
"""Initializes MongoDB connection and collection defaults.
This initialization is done only once and performed as part of lazy
inclusion of MongoDB dependency i.e. add imports only if related
backend is used.
:return: :class:`.MongoApi` instance
"""
self.api.get_cache_collection()
return self.api
def get(self, key):
value = self.client.get(key)
if value is None:
return NO_VALUE
else:
return value
def get_multi(self, keys):
values = self.client.get_multi(keys)
return [
NO_VALUE if key not in values
else values[key] for key in keys
]
def set(self, key, value):
self.client.set(key, value)
def set_multi(self, mapping):
self.client.set_multi(mapping)
def delete(self, key):
self.client.delete(key)
def delete_multi(self, keys):
self.client.delete_multi(keys)
class MongoApi(object):
"""Class handling MongoDB specific functionality.
This class uses PyMongo APIs internally to create database connection
with configured pool size, ensures unique index on key, does database
authentication and ensure TTL collection index if configured so.
This class also serves as handle to cache collection for dogpile cache
APIs.
    In a single deployment, multiple cache configurations can be defined. In
    that case of multiple cache collection usage, the db client connection pool
    is shared when the cache collections are within the same database.
"""
# class level attributes for re-use of db client connection and collection
_DB = {} # dict of db_name: db connection reference
_MONGO_COLLS = {} # dict of cache_collection : db collection reference
def __init__(self, arguments):
self._init_args(arguments)
self._data_manipulator = None
def _init_args(self, arguments):
"""Helper logic for collecting and parsing MongoDB specific arguments.
        The arguments passed in are separated out into connection specific
        settings, and the rest of the arguments are passed to the
        create/update/delete db operations.
"""
self.conn_kwargs = {} # connection specific arguments
self.hosts = arguments.pop('db_hosts', None)
if self.hosts is None:
msg = _('db_hosts value is required')
raise exception.ValidationError(message=msg)
self.db_name = arguments.pop('db_name', None)
if self.db_name is None:
msg = _('database db_name is required')
raise exception.ValidationError(message=msg)
self.cache_collection = arguments.pop('cache_collection', None)
if self.cache_collection is None:
msg = _('cache_collection name is required')
raise exception.ValidationError(message=msg)
self.username = arguments.pop('username', None)
self.password = arguments.pop('password', None)
self.max_pool_size = arguments.pop('max_pool_size', 10)
self.w = arguments.pop('w', -1)
try:
self.w = int(self.w)
except ValueError:
msg = _('integer value expected for w (write concern attribute)')
raise exception.ValidationError(message=msg)
self.read_preference = arguments.pop('read_preference', None)
self.use_replica = arguments.pop('use_replica', False)
if self.use_replica:
if arguments.get('replicaset_name') is None:
msg = _('replicaset_name required when use_replica is True')
raise exception.ValidationError(message=msg)
self.replicaset_name = arguments.get('replicaset_name')
self.son_manipulator = arguments.pop('son_manipulator', None)
# set if mongo collection needs to be TTL type.
# This needs to be max ttl for any cache entry.
# By default, -1 means don't use TTL collection.
# With ttl set, it creates related index and have doc_date field with
# needed expiration interval
self.ttl_seconds = arguments.pop('mongo_ttl_seconds', -1)
try:
self.ttl_seconds = int(self.ttl_seconds)
except ValueError:
msg = _('integer value expected for mongo_ttl_seconds')
raise exception.ValidationError(message=msg)
self.conn_kwargs['ssl'] = arguments.pop('ssl', False)
if self.conn_kwargs['ssl']:
ssl_keyfile = arguments.pop('ssl_keyfile', None)
ssl_certfile = arguments.pop('ssl_certfile', None)
ssl_ca_certs = arguments.pop('ssl_ca_certs', None)
ssl_cert_reqs = arguments.pop('ssl_cert_reqs', None)
if ssl_keyfile:
self.conn_kwargs['ssl_keyfile'] = ssl_keyfile
if ssl_certfile:
self.conn_kwargs['ssl_certfile'] = ssl_certfile
if ssl_ca_certs:
self.conn_kwargs['ssl_ca_certs'] = ssl_ca_certs
if ssl_cert_reqs:
self.conn_kwargs['ssl_cert_reqs'] = (
self._ssl_cert_req_type(ssl_cert_reqs))
# rest of arguments are passed to mongo crud calls
self.meth_kwargs = arguments
def _ssl_cert_req_type(self, req_type):
try:
import ssl
except ImportError:
raise exception.ValidationError(_('no ssl support available'))
req_type = req_type.upper()
try:
return {
'NONE': ssl.CERT_NONE,
'OPTIONAL': ssl.CERT_OPTIONAL,
'REQUIRED': ssl.CERT_REQUIRED
}[req_type]
except KeyError:
msg = _('Invalid ssl_cert_reqs value of %s, must be one of '
'"NONE", "OPTIONAL", "REQUIRED"') % (req_type)
raise exception.ValidationError(message=msg)
def _get_db(self):
# defer imports until backend is used
global pymongo
import pymongo
if self.use_replica:
connection = pymongo.MongoReplicaSetClient(
host=self.hosts, replicaSet=self.replicaset_name,
max_pool_size=self.max_pool_size, **self.conn_kwargs)
else: # used for standalone node or mongos in sharded setup
connection = pymongo.MongoClient(
host=self.hosts, max_pool_size=self.max_pool_size,
**self.conn_kwargs)
database = getattr(connection, self.db_name)
        self._assign_data_manipulator()
database.add_son_manipulator(self._data_manipulator)
if self.username and self.password:
database.authenticate(self.username, self.password)
return database
    def _assign_data_manipulator(self):
if self._data_manipulator is None:
if self.son_manipulator:
self._data_manipulator = importutils.import_object(
self.son_manipulator)
else:
self._data_manipulator = BaseTransform()
def _get_doc_date(self):
if self.ttl_seconds > 0:
expire_delta = datetime.timedelta(seconds=self.ttl_seconds)
doc_date = timeutils.utcnow() + expire_delta
else:
doc_date = timeutils.utcnow()
return doc_date
def get_cache_collection(self):
if self.cache_collection not in self._MONGO_COLLS:
global pymongo
import pymongo
# re-use db client connection if already defined as part of
# earlier dogpile cache configuration
if self.db_name not in self._DB:
self._DB[self.db_name] = self._get_db()
coll = getattr(self._DB[self.db_name], self.cache_collection)
            self._assign_data_manipulator()
if self.read_preference:
self.read_preference = pymongo.read_preferences.mongos_enum(
self.read_preference)
coll.read_preference = self.read_preference
if self.w > -1:
coll.write_concern['w'] = self.w
if self.ttl_seconds > 0:
kwargs = {'expireAfterSeconds': self.ttl_seconds}
coll.ensure_index('doc_date', cache_for=5, **kwargs)
else:
self._validate_ttl_index(coll, self.cache_collection,
self.ttl_seconds)
self._MONGO_COLLS[self.cache_collection] = coll
return self._MONGO_COLLS[self.cache_collection]
def _get_cache_entry(self, key, value, meta, doc_date):
"""MongoDB cache data representation.
        The cache key is stored as the ``_id`` field, as MongoDB by default
        creates a unique index on this field, so there is no need to create a
        separate field and index for storing the cache key. Cache data has an
        additional ``doc_date`` field for MongoDB TTL collection support.
"""
return dict(_id=key, value=value, meta=meta, doc_date=doc_date)
def _validate_ttl_index(self, collection, coll_name, ttl_seconds):
"""Checks if existing TTL index is removed on a collection.
This logs warning when existing collection has TTL index defined and
new cache configuration tries to disable index with
``mongo_ttl_seconds < 0``. In that case, existing index needs
to be addressed first to make new configuration effective.
Refer to MongoDB documentation around TTL index for further details.
"""
indexes = collection.index_information()
for indx_name, index_data in six.iteritems(indexes):
if all(k in index_data for k in ('key', 'expireAfterSeconds')):
existing_value = index_data['expireAfterSeconds']
fld_present = 'doc_date' in index_data['key'][0]
if fld_present and existing_value > -1 and ttl_seconds < 1:
msg = _LW('TTL index already exists on db collection '
'<%(c_name)s>, remove index <%(indx_name)s> '
'first to make updated mongo_ttl_seconds value '
'to be effective')
LOG.warn(msg, {'c_name': coll_name,
'indx_name': indx_name})
def get(self, key):
        criteria = {'_id': key}
        result = self.get_cache_collection().find_one(spec_or_id=criteria,
                                                      **self.meth_kwargs)
if result:
return result['value']
else:
return None
def get_multi(self, keys):
db_results = self._get_results_as_dict(keys)
return dict((doc['_id'], doc['value']) for doc in
six.itervalues(db_results))
def _get_results_as_dict(self, keys):
        criteria = {'_id': {'$in': keys}}
        db_results = self.get_cache_collection().find(spec=criteria,
                                                      **self.meth_kwargs)
return dict((doc['_id'], doc) for doc in db_results)
def set(self, key, value):
doc_date = self._get_doc_date()
ref = self._get_cache_entry(key, value.payload, value.metadata,
doc_date)
spec = {'_id': key}
# find and modify does not have manipulator support
# so need to do conversion as part of input document
ref = self._data_manipulator.transform_incoming(ref, self)
self.get_cache_collection().find_and_modify(spec, ref, upsert=True,
**self.meth_kwargs)
def set_multi(self, mapping):
"""Insert multiple documents specified as key, value pairs.
        In this case, multiple documents can be added via insert provided they
        do not exist.
        Updates of multiple existing documents are done one by one.
"""
doc_date = self._get_doc_date()
insert_refs = []
update_refs = []
existing_docs = self._get_results_as_dict(mapping.keys())
for key, value in mapping.items():
ref = self._get_cache_entry(key, value.payload, value.metadata,
doc_date)
if key in existing_docs:
ref['_id'] = existing_docs[key]['_id']
update_refs.append(ref)
else:
insert_refs.append(ref)
if insert_refs:
self.get_cache_collection().insert(insert_refs, manipulate=True,
**self.meth_kwargs)
for upd_doc in update_refs:
self.get_cache_collection().save(upd_doc, manipulate=True,
**self.meth_kwargs)
def delete(self, key):
        criteria = {'_id': key}
        self.get_cache_collection().remove(spec_or_id=criteria,
                                           **self.meth_kwargs)
def delete_multi(self, keys):
        criteria = {'_id': {'$in': keys}}
        self.get_cache_collection().remove(spec_or_id=criteria,
                                           **self.meth_kwargs)
@six.add_metaclass(abc.ABCMeta)
class AbstractManipulator(object):
"""Abstract class with methods which need to be implemented for custom
manipulation.
Adding this as a base class for :class:`.BaseTransform` instead of adding
import dependency of pymongo specific class i.e.
`pymongo.son_manipulator.SONManipulator` and using that as base class.
This is done to avoid pymongo dependency if MongoDB backend is not used.
"""
@abc.abstractmethod
def transform_incoming(self, son, collection):
"""Used while saving data to MongoDB.
:param son: the SON object to be inserted into the database
:param collection: the collection the object is being inserted into
:returns: transformed SON object
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def transform_outgoing(self, son, collection):
"""Used while reading data from MongoDB.
:param son: the SON object being retrieved from the database
:param collection: the collection this object was stored in
:returns: transformed SON object
"""
raise exception.NotImplemented() # pragma: no cover
def will_copy(self):
"""Will this SON manipulator make a copy of the incoming document?
Derived classes that do need to make a copy should override this
method, returning `True` instead of `False`.
:returns: boolean
"""
return False
class BaseTransform(AbstractManipulator):
"""Base transformation class to store and read dogpile cached data
from MongoDB.
This is needed as dogpile internally stores data as a custom class
i.e. dogpile.cache.api.CachedValue
    Note: A custom manipulator always needs to override the
    ``transform_incoming`` and ``transform_outgoing`` methods. MongoDB's
    manipulator logic specifically checks that the overridden method in the
    instance and its super class are different.
"""
def transform_incoming(self, son, collection):
"""Used while saving data to MongoDB."""
for (key, value) in son.items():
if isinstance(value, api.CachedValue):
son[key] = value.payload # key is 'value' field here
son['meta'] = value.metadata
elif isinstance(value, dict): # Make sure we recurse into sub-docs
son[key] = self.transform_incoming(value, collection)
return son
def transform_outgoing(self, son, collection):
"""Used while reading data from MongoDB."""
metadata = None
        # make sure it's a top level dictionary with all expected field names
        # present
if isinstance(son, dict) and all(k in son for k in
('_id', 'value', 'meta', 'doc_date')):
payload = son.pop('value', None)
metadata = son.pop('meta', None)
for (key, value) in son.items():
if isinstance(value, dict):
son[key] = self.transform_outgoing(value, collection)
if metadata is not None:
son['value'] = api.CachedValue(payload, metadata)
return son
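# Editor's sketch (not part of the original module): wiring this backend into a
# dogpile.cache region. The registered backend name 'keystone.cache.mongo' and
# the option values below are assumptions for illustration only.
#
# from dogpile.cache import make_region
#
# caching_region = make_region().configure(
#     'keystone.cache.mongo',
#     arguments={
#         'db_hosts': 'localhost:27017',
#         'db_name': 'ks_cache',
#         'cache_collection': 'cache',
#         'mongo_ttl_seconds': 300,
#     })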
|
shirou/ansible
|
refs/heads/devel
|
lib/ansible/errors.py
|
46
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class AnsibleError(Exception):
''' The base Ansible exception from which all others should subclass '''
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class AnsibleFileNotFound(AnsibleError):
pass
class AnsibleConnectionFailed(AnsibleError):
pass
class AnsibleYAMLValidationFailed(AnsibleError):
pass
class AnsibleUndefinedVariable(AnsibleError):
pass
class AnsibleFilterError(AnsibleError):
pass
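# Editor's sketch (not part of the original module): callers typically raise the
# most specific subclass and catch AnsibleError at the boundary.
#
# try:
#     raise AnsibleFileNotFound("unable to locate 'site.yml'")
# except AnsibleError as detail:
#     print(detail)  # -> unable to locate 'site.yml'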
|
pk-sam/crosswalk-test-suite
|
refs/heads/master
|
webapi/tct-fonts-css3-tests/css3-fonts-app/testscripts/steps/steps.py
|
226
|
from atip.common.steps import *
from atip.web.steps import *
|
woylaski/notebook
|
refs/heads/master
|
graphic/kivy-master/kivy/tests/test_uix_boxlayout.py
|
78
|
'''
Box layout unit test
====================
Order matters.
On the screen, most examples must have the red->blue->green order.
'''
from kivy.tests.common import GraphicUnitTest
class UIXBoxLayoutTestcase(GraphicUnitTest):
def box(self, r, g, b):
from kivy.uix.widget import Widget
from kivy.graphics import Color, Rectangle
wid = Widget()
with wid.canvas:
Color(r, g, b)
r = Rectangle(pos=wid.pos, size=wid.size)
def linksp(instance, *largs):
r.pos = instance.pos
r.size = instance.size
wid.bind(pos=linksp, size=linksp)
return wid
def test_boxlayout_orientation(self):
from kivy.uix.boxlayout import BoxLayout
r = self.render
b = self.box
layout = BoxLayout()
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
layout = BoxLayout(orientation='vertical')
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
def test_boxlayout_spacing(self):
from kivy.uix.boxlayout import BoxLayout
r = self.render
b = self.box
layout = BoxLayout(spacing=20)
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
layout = BoxLayout(spacing=20, orientation='vertical')
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
def test_boxlayout_padding(self):
from kivy.uix.boxlayout import BoxLayout
r = self.render
b = self.box
layout = BoxLayout(padding=20)
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
layout = BoxLayout(padding=20, orientation='vertical')
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
def test_boxlayout_padding_spacing(self):
from kivy.uix.boxlayout import BoxLayout
r = self.render
b = self.box
layout = BoxLayout(spacing=20, padding=20)
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
layout = BoxLayout(spacing=20, padding=20, orientation='vertical')
layout.add_widget(b(1, 0, 0))
layout.add_widget(b(0, 1, 0))
layout.add_widget(b(0, 0, 1))
r(layout)
|
j-marjanovic/myhdl
|
refs/heads/master
|
myhdl/_misc.py
|
2
|
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" MyHDL miscellaneous public objects.
This module provides the following public myhdl objects:
instances -- function that returns instances in a generator function
downrange -- function that returns a downward range
"""
from __future__ import absolute_import
import sys
import inspect
from myhdl._Cosimulation import Cosimulation
from myhdl._instance import _Instantiator
def _isGenSeq(obj):
if isinstance(obj, (Cosimulation, _Instantiator)):
return True
if not isinstance(obj, (list, tuple, set)):
return False
## if not obj:
## return False
for e in obj:
if not _isGenSeq(e):
return False
return True
def instances():
f = inspect.currentframe()
d = inspect.getouterframes(f)[1][0].f_locals
l = []
for v in d.values():
if _isGenSeq(v):
l.append(v)
return l
def downrange(start, stop=0, step=1):
""" Return a downward range. """
return range(start-1, stop-1, -step)
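# Editor's sketch (not part of the original module): downrange(start, stop)
# counts down from start-1 through stop, mirroring reversed(range(stop, start)),
# which is handy for descending bit indices.
#
# downrange(4)     # -> [3, 2, 1, 0]
# downrange(8, 4)  # -> [7, 6, 5, 4]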
|
tastynoodle/django
|
refs/heads/master
|
django/contrib/gis/management/__init__.py
|
12133432
| |
campbe13/openhatch
|
refs/heads/master
|
vendor/packages/Django/django/contrib/gis/db/backends/spatialite/__init__.py
|
12133432
| |
korealerts1/sentry
|
refs/heads/master
|
src/sentry/plugins/base/__init__.py
|
32
|
"""
sentry.plugins.base
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from sentry.plugins.base.manager import PluginManager
from sentry.plugins.base.notifier import * # NOQA
from sentry.plugins.base.response import * # NOQA
from sentry.plugins.base.structs import * # NOQA
from sentry.plugins.base.v1 import * # NOQA
from sentry.plugins.base.v2 import * # NOQA
plugins = PluginManager()
register = plugins.register
unregister = plugins.unregister
|
rhuss/bazel
|
refs/heads/master
|
third_party/py/gflags/tests/gflags_helpxml_test.py
|
139
|
#!/usr/bin/env python
# Copyright (c) 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the XML-format help generated by the gflags.py module."""
__author__ = 'salcianu@google.com (Alex Salcianu)'
import string
import StringIO
import sys
import xml.dom.minidom
import xml.sax.saxutils
import gflags_googletest as googletest
import gflags
from flags_modules_for_testing import module_bar
class _MakeXMLSafeTest(googletest.TestCase):
def _Check(self, s, expected_output):
self.assertEqual(gflags._MakeXMLSafe(s), expected_output)
def testMakeXMLSafe(self):
self._Check('plain text', 'plain text')
self._Check('(x < y) && (a >= b)',
                '(x &lt; y) &amp;&amp; (a &gt;= b)')
# Some characters with ASCII code < 32 are illegal in XML 1.0 and
# are removed by us. However, '\n', '\t', and '\r' are legal.
self._Check('\x09\x0btext \x02 with\x0dsome \x08 good & bad chars',
                '\ttext with\rsome good &amp; bad chars')
def _ListSeparatorsInXMLFormat(separators, indent=''):
"""Generates XML encoding of a list of list separators.
Args:
separators: A list of list separators. Usually, this should be a
string whose characters are the valid list separators, e.g., ','
means that both comma (',') and space (' ') are valid list
separators.
indent: A string that is added at the beginning of each generated
XML element.
Returns:
A string.
"""
result = ''
separators = list(separators)
separators.sort()
for sep_char in separators:
result += ('%s<list_separator>%s</list_separator>\n' %
(indent, repr(sep_char)))
return result
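# Editor's note (not part of the original test): for separators ',' and ' ' and
# an empty indent, the helper above produces, in sorted order (space sorts
# before comma):
#   <list_separator>' '</list_separator>
#   <list_separator>','</list_separator>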
class WriteFlagHelpInXMLFormatTest(googletest.TestCase):
"""Test the XML-format help for a single flag at a time.
There is one test* method for each kind of DEFINE_* declaration.
"""
def setUp(self):
# self.fv is a FlagValues object, just like gflags.FLAGS. Each
# test registers one flag with this FlagValues.
self.fv = gflags.FlagValues()
def _CheckFlagHelpInXML(self, flag_name, module_name,
expected_output, is_key=False):
# StringIO.StringIO is a file object that writes into a memory string.
sio = StringIO.StringIO()
flag_obj = self.fv[flag_name]
flag_obj.WriteInfoInXMLFormat(sio, module_name, is_key=is_key, indent=' ')
self.assertMultiLineEqual(sio.getvalue(), expected_output)
sio.close()
def testFlagHelpInXML_Int(self):
gflags.DEFINE_integer('index', 17, 'An integer flag', flag_values=self.fv)
expected_output_pattern = (
' <flag>\n'
' <file>module.name</file>\n'
' <name>index</name>\n'
' <meaning>An integer flag</meaning>\n'
' <default>17</default>\n'
' <current>%d</current>\n'
' <type>int</type>\n'
' </flag>\n')
self._CheckFlagHelpInXML('index', 'module.name',
expected_output_pattern % 17)
# Check that the output is correct even when the current value of
# a flag is different from the default one.
self.fv['index'].value = 20
self._CheckFlagHelpInXML('index', 'module.name',
expected_output_pattern % 20)
def testFlagHelpInXML_IntWithBounds(self):
gflags.DEFINE_integer('nb_iters', 17, 'An integer flag',
lower_bound=5, upper_bound=27,
flag_values=self.fv)
expected_output = (
' <flag>\n'
' <key>yes</key>\n'
' <file>module.name</file>\n'
' <name>nb_iters</name>\n'
' <meaning>An integer flag</meaning>\n'
' <default>17</default>\n'
' <current>17</current>\n'
' <type>int</type>\n'
' <lower_bound>5</lower_bound>\n'
' <upper_bound>27</upper_bound>\n'
' </flag>\n')
self._CheckFlagHelpInXML('nb_iters', 'module.name',
expected_output, is_key=True)
def testFlagHelpInXML_String(self):
gflags.DEFINE_string('file_path', '/path/to/my/dir', 'A test string flag.',
flag_values=self.fv)
expected_output = (
' <flag>\n'
' <file>simple_module</file>\n'
' <name>file_path</name>\n'
' <meaning>A test string flag.</meaning>\n'
' <default>/path/to/my/dir</default>\n'
' <current>/path/to/my/dir</current>\n'
' <type>string</type>\n'
' </flag>\n')
self._CheckFlagHelpInXML('file_path', 'simple_module',
expected_output)
def testFlagHelpInXML_StringWithXMLIllegalChars(self):
gflags.DEFINE_string('file_path', '/path/to/\x08my/dir',
'A test string flag.', flag_values=self.fv)
# '\x08' is not a legal character in XML 1.0 documents. Our
# current code purges such characters from the generated XML.
expected_output = (
' <flag>\n'
' <file>simple_module</file>\n'
' <name>file_path</name>\n'
' <meaning>A test string flag.</meaning>\n'
' <default>/path/to/my/dir</default>\n'
' <current>/path/to/my/dir</current>\n'
' <type>string</type>\n'
' </flag>\n')
self._CheckFlagHelpInXML('file_path', 'simple_module',
expected_output)
def testFlagHelpInXML_Boolean(self):
gflags.DEFINE_boolean('use_hack', False, 'Use performance hack',
flag_values=self.fv)
expected_output = (
' <flag>\n'
' <key>yes</key>\n'
' <file>a_module</file>\n'
' <name>use_hack</name>\n'
' <meaning>Use performance hack</meaning>\n'
' <default>false</default>\n'
' <current>false</current>\n'
' <type>bool</type>\n'
' </flag>\n')
self._CheckFlagHelpInXML('use_hack', 'a_module',
expected_output, is_key=True)
def testFlagHelpInXML_Enum(self):
gflags.DEFINE_enum('cc_version', 'stable', ['stable', 'experimental'],
'Compiler version to use.', flag_values=self.fv)
expected_output = (
' <flag>\n'
' <file>tool</file>\n'
' <name>cc_version</name>\n'
        '    <meaning>&lt;stable|experimental&gt;: '
'Compiler version to use.</meaning>\n'
' <default>stable</default>\n'
' <current>stable</current>\n'
' <type>string enum</type>\n'
' <enum_value>stable</enum_value>\n'
' <enum_value>experimental</enum_value>\n'
' </flag>\n')
self._CheckFlagHelpInXML('cc_version', 'tool', expected_output)
def testFlagHelpInXML_CommaSeparatedList(self):
gflags.DEFINE_list('files', 'a.cc,a.h,archive/old.zip',
'Files to process.', flag_values=self.fv)
expected_output = (
' <flag>\n'
' <file>tool</file>\n'
' <name>files</name>\n'
' <meaning>Files to process.</meaning>\n'
' <default>a.cc,a.h,archive/old.zip</default>\n'
' <current>[\'a.cc\', \'a.h\', \'archive/old.zip\']</current>\n'
' <type>comma separated list of strings</type>\n'
' <list_separator>\',\'</list_separator>\n'
' </flag>\n')
self._CheckFlagHelpInXML('files', 'tool', expected_output)
def testListAsDefaultArgument_CommaSeparatedList(self):
gflags.DEFINE_list('allow_users', ['alice', 'bob'],
'Users with access.', flag_values=self.fv)
expected_output = (
' <flag>\n'
' <file>tool</file>\n'
' <name>allow_users</name>\n'
' <meaning>Users with access.</meaning>\n'
' <default>alice,bob</default>\n'
' <current>[\'alice\', \'bob\']</current>\n'
' <type>comma separated list of strings</type>\n'
' <list_separator>\',\'</list_separator>\n'
' </flag>\n')
self._CheckFlagHelpInXML('allow_users', 'tool', expected_output)
def testFlagHelpInXML_SpaceSeparatedList(self):
gflags.DEFINE_spaceseplist('dirs', 'src libs bin',
'Directories to search.', flag_values=self.fv)
expected_output = (
' <flag>\n'
' <file>tool</file>\n'
' <name>dirs</name>\n'
' <meaning>Directories to search.</meaning>\n'
' <default>src libs bin</default>\n'
' <current>[\'src\', \'libs\', \'bin\']</current>\n'
' <type>whitespace separated list of strings</type>\n'
'LIST_SEPARATORS'
' </flag>\n').replace('LIST_SEPARATORS',
_ListSeparatorsInXMLFormat(string.whitespace,
indent=' '))
self._CheckFlagHelpInXML('dirs', 'tool', expected_output)
def testFlagHelpInXML_MultiString(self):
gflags.DEFINE_multistring('to_delete', ['a.cc', 'b.h'],
'Files to delete', flag_values=self.fv)
expected_output = (
' <flag>\n'
' <file>tool</file>\n'
' <name>to_delete</name>\n'
' <meaning>Files to delete;\n '
'repeat this option to specify a list of values</meaning>\n'
' <default>[\'a.cc\', \'b.h\']</default>\n'
' <current>[\'a.cc\', \'b.h\']</current>\n'
' <type>multi string</type>\n'
' </flag>\n')
self._CheckFlagHelpInXML('to_delete', 'tool', expected_output)
def testFlagHelpInXML_MultiInt(self):
gflags.DEFINE_multi_int('cols', [5, 7, 23],
'Columns to select', flag_values=self.fv)
expected_output = (
' <flag>\n'
' <file>tool</file>\n'
' <name>cols</name>\n'
' <meaning>Columns to select;\n '
'repeat this option to specify a list of values</meaning>\n'
' <default>[5, 7, 23]</default>\n'
' <current>[5, 7, 23]</current>\n'
' <type>multi int</type>\n'
' </flag>\n')
self._CheckFlagHelpInXML('cols', 'tool', expected_output)
# The next EXPECTED_HELP_XML_* constants are parts of a template for
# the expected XML output from WriteHelpInXMLFormatTest below. When
# we assemble these parts into a single big string, we'll take into
# account the ordering between the name of the main module and the
# name of module_bar. Next, we'll fill in the docstring for this
# module (%(usage_doc)s), the name of the main module
# (%(main_module_name)s) and the name of the module module_bar
# (%(module_bar_name)s). See WriteHelpInXMLFormatTest below.
#
# NOTE: given the current implementation of _GetMainModule(), we
# already know the ordering between the main module and module_bar.
# However, there is no guarantee that _GetMainModule will never be
# changed in the future (especially since it's far from perfect).
EXPECTED_HELP_XML_START = """\
<?xml version="1.0"?>
<AllFlags>
<program>gflags_helpxml_test.py</program>
<usage>%(usage_doc)s</usage>
"""
EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE = """\
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>allow_users</name>
<meaning>Users with access.</meaning>
<default>alice,bob</default>
<current>['alice', 'bob']</current>
<type>comma separated list of strings</type>
<list_separator>','</list_separator>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>cc_version</name>
    <meaning>&lt;stable|experimental&gt;: Compiler version to use.</meaning>
<default>stable</default>
<current>stable</current>
<type>string enum</type>
<enum_value>stable</enum_value>
<enum_value>experimental</enum_value>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>cols</name>
<meaning>Columns to select;
repeat this option to specify a list of values</meaning>
<default>[5, 7, 23]</default>
<current>[5, 7, 23]</current>
<type>multi int</type>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>dirs</name>
<meaning>Directories to create.</meaning>
<default>src libs bins</default>
<current>['src', 'libs', 'bins']</current>
<type>whitespace separated list of strings</type>
%(whitespace_separators)s </flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>file_path</name>
<meaning>A test string flag.</meaning>
<default>/path/to/my/dir</default>
<current>/path/to/my/dir</current>
<type>string</type>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>files</name>
<meaning>Files to process.</meaning>
<default>a.cc,a.h,archive/old.zip</default>
<current>['a.cc', 'a.h', 'archive/old.zip']</current>
<type>comma separated list of strings</type>
<list_separator>\',\'</list_separator>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>index</name>
<meaning>An integer flag</meaning>
<default>17</default>
<current>17</current>
<type>int</type>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>nb_iters</name>
<meaning>An integer flag</meaning>
<default>17</default>
<current>17</current>
<type>int</type>
<lower_bound>5</lower_bound>
<upper_bound>27</upper_bound>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>to_delete</name>
<meaning>Files to delete;
repeat this option to specify a list of values</meaning>
<default>['a.cc', 'b.h']</default>
<current>['a.cc', 'b.h']</current>
<type>multi string</type>
</flag>
<flag>
<key>yes</key>
<file>%(main_module_name)s</file>
<name>use_hack</name>
<meaning>Use performance hack</meaning>
<default>false</default>
<current>false</current>
<type>bool</type>
</flag>
"""
EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR = """\
<flag>
<file>%(module_bar_name)s</file>
<name>tmod_bar_t</name>
<meaning>Sample int flag.</meaning>
<default>4</default>
<current>4</current>
<type>int</type>
</flag>
<flag>
<key>yes</key>
<file>%(module_bar_name)s</file>
<name>tmod_bar_u</name>
<meaning>Sample int flag.</meaning>
<default>5</default>
<current>5</current>
<type>int</type>
</flag>
<flag>
<file>%(module_bar_name)s</file>
<name>tmod_bar_v</name>
<meaning>Sample int flag.</meaning>
<default>6</default>
<current>6</current>
<type>int</type>
</flag>
<flag>
<file>%(module_bar_name)s</file>
<name>tmod_bar_x</name>
<meaning>Boolean flag.</meaning>
<default>true</default>
<current>true</current>
<type>bool</type>
</flag>
<flag>
<file>%(module_bar_name)s</file>
<name>tmod_bar_y</name>
<meaning>String flag.</meaning>
<default>default</default>
<current>default</current>
<type>string</type>
</flag>
<flag>
<key>yes</key>
<file>%(module_bar_name)s</file>
<name>tmod_bar_z</name>
<meaning>Another boolean flag from module bar.</meaning>
<default>false</default>
<current>false</current>
<type>bool</type>
</flag>
"""
EXPECTED_HELP_XML_END = """\
</AllFlags>
"""
class WriteHelpInXMLFormatTest(googletest.TestCase):
"""Big test of FlagValues.WriteHelpInXMLFormat, with several flags."""
def testWriteHelpInXMLFormat(self):
fv = gflags.FlagValues()
# Since these flags are defined by the top module, they are all key.
gflags.DEFINE_integer('index', 17, 'An integer flag', flag_values=fv)
gflags.DEFINE_integer('nb_iters', 17, 'An integer flag',
lower_bound=5, upper_bound=27, flag_values=fv)
gflags.DEFINE_string('file_path', '/path/to/my/dir', 'A test string flag.',
flag_values=fv)
gflags.DEFINE_boolean('use_hack', False, 'Use performance hack',
flag_values=fv)
gflags.DEFINE_enum('cc_version', 'stable', ['stable', 'experimental'],
'Compiler version to use.', flag_values=fv)
gflags.DEFINE_list('files', 'a.cc,a.h,archive/old.zip',
'Files to process.', flag_values=fv)
gflags.DEFINE_list('allow_users', ['alice', 'bob'],
'Users with access.', flag_values=fv)
gflags.DEFINE_spaceseplist('dirs', 'src libs bins',
'Directories to create.', flag_values=fv)
gflags.DEFINE_multistring('to_delete', ['a.cc', 'b.h'],
'Files to delete', flag_values=fv)
gflags.DEFINE_multi_int('cols', [5, 7, 23],
'Columns to select', flag_values=fv)
# Define a few flags in a different module.
module_bar.DefineFlags(flag_values=fv)
# And declare only a few of them to be key. This way, we have
# different kinds of flags, defined in different modules, and not
# all of them are key flags.
gflags.DECLARE_key_flag('tmod_bar_z', flag_values=fv)
gflags.DECLARE_key_flag('tmod_bar_u', flag_values=fv)
# Generate flag help in XML format in the StringIO sio.
sio = StringIO.StringIO()
fv.WriteHelpInXMLFormat(sio)
# Check that we got the expected result.
expected_output_template = EXPECTED_HELP_XML_START
main_module_name = gflags._GetMainModule()
module_bar_name = module_bar.__name__
if main_module_name < module_bar_name:
expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE
expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR
else:
expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR
expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE
expected_output_template += EXPECTED_HELP_XML_END
# XML representation of the whitespace list separators.
whitespace_separators = _ListSeparatorsInXMLFormat(string.whitespace,
indent=' ')
expected_output = (
expected_output_template %
{'usage_doc': sys.modules['__main__'].__doc__,
'main_module_name': main_module_name,
'module_bar_name': module_bar_name,
'whitespace_separators': whitespace_separators})
actual_output = sio.getvalue()
self.assertMultiLineEqual(actual_output, expected_output)
# Also check that our result is valid XML. minidom.parseString
# throws an xml.parsers.expat.ExpatError in case of an error.
xml.dom.minidom.parseString(actual_output)
if __name__ == '__main__':
googletest.main()
|
philippe89/compassion-modules
|
refs/heads/master
|
logging_compassion/wizards/__init__.py
|
2
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Maxime Beck <mbcompte@gmail.com>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from . import subscribe_logs
|
oe-alliance/oe-alliance-enigma2
|
refs/heads/master
|
lib/python/Components/Sources/OnlineUpdate.py
|
66
|
from Source import Source
from Components.Element import cached
from Components.OnlineUpdateCheck import versioncheck
from enigma import eTimer
class OnlineUpdateStableCheck(Source):
def __init__(self):
Source.__init__(self)
self.check_timer = eTimer()
self.check_timer.callback.append(self.poll)
self.check_timer.start(60000)
@cached
def getBoolean(self):
return versioncheck.getStableUpdateAvailable()
boolean = property(getBoolean)
def poll(self):
self.changed((self.CHANGED_POLL,))
def doSuspend(self, suspended):
if suspended:
self.check_timer.stop()
else:
self.check_timer.start(3600000)
self.poll()
def destroy(self):
self.check_timer.callback.remove(self.poll)
Source.destroy(self)
class OnlineUpdateUnstableCheck(Source):
def __init__(self):
Source.__init__(self)
self.check_timer = eTimer()
self.check_timer.callback.append(self.poll)
self.check_timer.start(60000)
@cached
def getBoolean(self):
return versioncheck.getUnstableUpdateAvailable()
boolean = property(getBoolean)
def poll(self):
self.changed((self.CHANGED_POLL,))
def doSuspend(self, suspended):
if suspended:
self.check_timer.stop()
else:
self.check_timer.start(3600000)
self.poll()
def destroy(self):
self.check_timer.callback.remove(self.poll)
Source.destroy(self)
|
theflofly/tensorflow
|
refs/heads/master
|
tensorflow/python/keras/layers/convolutional_recurrent_test.py
|
13
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolutional recurrent layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class ConvLSTMTest(keras_parameterized.TestCase):
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(
data_format=['channels_first', 'channels_last'],
return_sequences=[True, False]))
def test_conv_lstm(self, data_format, return_sequences):
num_row = 3
num_col = 3
filters = 2
num_samples = 1
input_channel = 2
input_num_row = 5
input_num_col = 5
sequence_len = 2
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, sequence_len,
input_channel,
input_num_row, input_num_col)
else:
inputs = np.random.rand(num_samples, sequence_len,
input_num_row, input_num_col,
input_channel)
# test for return state:
x = keras.Input(batch_shape=inputs.shape)
kwargs = {'data_format': data_format,
'return_sequences': return_sequences,
'return_state': True,
'stateful': True,
'filters': filters,
'kernel_size': (num_row, num_col),
'padding': 'valid'}
layer = keras.layers.ConvLSTM2D(**kwargs)
layer.build(inputs.shape)
outputs = layer(x)
_, states = outputs[0], outputs[1:]
self.assertEqual(len(states), 2)
model = keras.models.Model(x, states[0])
state = model.predict(inputs)
self.assertAllClose(
keras.backend.eval(layer.states[0]), state, atol=1e-4)
# test for output shape:
testing_utils.layer_test(
keras.layers.ConvLSTM2D,
kwargs={'data_format': data_format,
'return_sequences': return_sequences,
'filters': filters,
'kernel_size': (num_row, num_col),
'padding': 'valid'},
input_shape=inputs.shape)
def test_conv_lstm_statefulness(self):
# Tests for statefulness
num_row = 3
num_col = 3
filters = 2
num_samples = 1
input_channel = 2
input_num_row = 5
input_num_col = 5
sequence_len = 2
inputs = np.random.rand(num_samples, sequence_len,
input_num_row, input_num_col,
input_channel)
with self.cached_session():
model = keras.models.Sequential()
kwargs = {'data_format': 'channels_last',
'return_sequences': False,
'filters': filters,
'kernel_size': (num_row, num_col),
'stateful': True,
'batch_input_shape': inputs.shape,
'padding': 'same'}
layer = keras.layers.ConvLSTM2D(**kwargs)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones_like(inputs))
# train once so that the states change
model.train_on_batch(np.ones_like(inputs),
np.random.random(out1.shape))
out2 = model.predict(np.ones_like(inputs))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones_like(inputs))
self.assertNotEqual(out3.max(), out2.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones_like(inputs))
self.assertAllClose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones_like(inputs))
self.assertNotEqual(out4.max(), out5.max())
def test_conv_lstm_regularizers(self):
# check regularizers
num_row = 3
num_col = 3
filters = 2
num_samples = 1
input_channel = 2
input_num_row = 5
input_num_col = 5
sequence_len = 2
inputs = np.random.rand(num_samples, sequence_len,
input_num_row, input_num_col,
input_channel)
with self.cached_session():
kwargs = {'data_format': 'channels_last',
'return_sequences': False,
'kernel_size': (num_row, num_col),
'stateful': True,
'filters': filters,
'batch_input_shape': inputs.shape,
'kernel_regularizer': keras.regularizers.L1L2(l1=0.01),
'recurrent_regularizer': keras.regularizers.L1L2(l1=0.01),
'activity_regularizer': 'l2',
'bias_regularizer': 'l2',
'kernel_constraint': 'max_norm',
'recurrent_constraint': 'max_norm',
'bias_constraint': 'max_norm',
'padding': 'same'}
layer = keras.layers.ConvLSTM2D(**kwargs)
layer.build(inputs.shape)
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones(inputs.shape)))
self.assertEqual(len(layer.losses), 4)
def test_conv_lstm_dropout(self):
# check dropout
with self.cached_session():
testing_utils.layer_test(
keras.layers.ConvLSTM2D,
kwargs={'data_format': 'channels_last',
'return_sequences': False,
'filters': 2,
'kernel_size': (3, 3),
'padding': 'same',
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(1, 2, 5, 5, 2))
def test_conv_lstm_cloning(self):
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.ConvLSTM2D(5, 3, input_shape=(None, 5, 5, 3)))
test_inputs = np.random.random((2, 4, 5, 5, 3))
reference_outputs = model.predict(test_inputs)
weights = model.get_weights()
# Use a new graph to clone the model
with self.cached_session():
clone = keras.models.clone_model(model)
clone.set_weights(weights)
outputs = clone.predict(test_inputs)
self.assertAllClose(reference_outputs, outputs, atol=1e-5)
if __name__ == '__main__':
test.main()
|
DANCEcollaborative/forum-xblock
|
refs/heads/master
|
XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/django/conf/locale/fy_NL/formats.py
|
1293
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# DATE_FORMAT =
# TIME_FORMAT =
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
# SHORT_DATE_FORMAT =
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
Gabotero/GNURadioNext
|
refs/heads/master
|
gr-utils/python/modtool/gr-newmod/python/__init__.py
|
29
|
#
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio HOWTO module. Place your Python package
description here (python/__init__.py).
'''
# ----------------------------------------------------------------
# Temporary workaround for ticket:181 (swig+python problem)
import sys
_RTLD_GLOBAL = 0
try:
from dl import RTLD_GLOBAL as _RTLD_GLOBAL
except ImportError:
try:
from DLFCN import RTLD_GLOBAL as _RTLD_GLOBAL
except ImportError:
pass
if _RTLD_GLOBAL != 0:
_dlopenflags = sys.getdlopenflags()
sys.setdlopenflags(_dlopenflags|_RTLD_GLOBAL)
# ----------------------------------------------------------------
# import swig generated symbols into the howto namespace
from howto_swig import *
# import any pure python here
#
# ----------------------------------------------------------------
# Tail of workaround
if _RTLD_GLOBAL != 0:
sys.setdlopenflags(_dlopenflags) # Restore original flags
# ----------------------------------------------------------------
|
hgl888/web-testing-service
|
refs/heads/master
|
wts/tests/csp/csp_media-src_asterisk_audio_allowed_int.py
|
25
|
def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
response.headers.set(
"Content-Security-Policy",
"media-src *; script-src 'self' 'unsafe-inline'")
response.headers.set(
"X-Content-Security-Policy",
"media-src *; script-src 'self' 'unsafe-inline'")
response.headers.set(
"X-WebKit-CSP",
"media-src *; script-src 'self' 'unsafe-inline'")
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Zhang, Zhiqiang <zhiqiang.zhang@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_media-src_asterisk_audio_allowed_int</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#media-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="media-src *; script-src 'self' 'unsafe-inline'"/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
</head>
<body>
<div id="log"></div>
<audio id="m"></audio>
<script>
var t = async_test(document.title);
var m = document.getElementById("m");
m.src = "support/khronos/red-green.theora.ogv";
window.setTimeout(function() {
t.step(function() {
assert_false(m.currentSrc == "",
"audio.currentSrc should not be empty after setting src attribute");
});
t.done();
}, 0);
</script>
</body>
</html> """
|
abhiatgithub/shogun-toolbox
|
refs/heads/master
|
examples/undocumented/python_modular/modelselection_grid_search_libsvr_modular.py
|
17
|
#!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (W) 2012 Heiko Strathmann
# Copyright (C) 2012 Berlin Institute of Technology and Max-Planck-Society
#
from numpy import array
from numpy.random import seed, rand
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat,2.1,1,1e-5,1e-2], \
[traindat,testdat,label_traindat,2.1,1,1e-5,1e-2]]
def modelselection_grid_search_libsvr_modular (fm_train=traindat,fm_test=testdat,label_train=label_traindat,\
width=2.1,C=1,epsilon=1e-5,tube_epsilon=1e-2):
from modshogun import CrossValidation, CrossValidationResult
from modshogun import MeanSquaredError
from modshogun import CrossValidationSplitting
from modshogun import RegressionLabels
from modshogun import RealFeatures
from modshogun import GaussianKernel
from modshogun import LibSVR
from modshogun import GridSearchModelSelection
from modshogun import ModelSelectionParameters, R_EXP
from modshogun import ParameterCombination
# training data
features_train=RealFeatures(fm_train)
labels=RegressionLabels(label_train)
# kernel
kernel=GaussianKernel(features_train, features_train, width)
# print all parameters available for model selection
# Don't worry if yours is not included, but write to the mailing list
#kernel.print_modsel_params()
labels=RegressionLabels(label_train)
# predictor
predictor=LibSVR(C, tube_epsilon, kernel, labels)
predictor.set_epsilon(epsilon)
# splitting strategy for 5-fold cross-validation (for classification it's better
# to use "StratifiedCrossValidationSplitting", but the standard
# "CrossValidationSplitting" is also available)
splitting_strategy=CrossValidationSplitting(labels, 5)
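# Sketch of the stratified alternative mentioned above (assumes classification
# labels such as BinaryLabels rather than the RegressionLabels used here):
#   splitting_strategy=StratifiedCrossValidationSplitting(class_labels, 5)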
# evaluation method
evaluation_criterium=MeanSquaredError()
# cross-validation instance
cross_validation=CrossValidation(predictor, features_train, labels,
splitting_strategy, evaluation_criterium)
# (optional) repeat x-val (set larger to get better estimates, at least two
# for confidence intervals)
cross_validation.set_num_runs(2)
# (optional) request 95% confidence intervals for results (not actually
# needed for this toy example)
cross_validation.set_conf_int_alpha(0.05)
# print all parameters available for model selection
# Don't worry if yours is not included, but write to the mailing list
#predictor.print_modsel_params()
# build parameter tree to select C1 and C2
param_tree_root=ModelSelectionParameters()
c1=ModelSelectionParameters("C1");
param_tree_root.append_child(c1)
c1.build_values(-1.0, 0.0, R_EXP);
c2=ModelSelectionParameters("C2");
param_tree_root.append_child(c2);
c2.build_values(-1.0, 0.0, R_EXP);
# model selection instance
model_selection=GridSearchModelSelection(cross_validation, param_tree_root)
# perform model selection with selected methods
#print "performing model selection of"
#print "parameter tree"
#param_tree_root.print_tree()
#print "starting model selection"
# print the current parameter combination, if no parameter nothing is printed
print_state=False
# lock data beforehand since model selection will not change the kernel matrix
# (use with care). This avoids recomputing the kernel matrix in every
# iteration of the model search
predictor.data_lock(labels, features_train)
best_parameters=model_selection.select_model(print_state)
# print best parameters
#print "best parameters:"
#best_parameters.print_tree()
# apply them and print result
best_parameters.apply_to_machine(predictor)
result=cross_validation.evaluate()
#print "mean:", result.mean
#if result.has_conf_int:
# print "[", result.conf_int_low, ",", result.conf_int_up, "] with alpha=", result.conf_int_alpha
if __name__=='__main__':
print('ModelselectionGridSearchLibSVR')
modelselection_grid_search_libsvr_modular(*parameter_list[0])
|
zbqf109/goodo
|
refs/heads/master
|
openerp/addons/payment_ogone/models/ogone.py
|
2
|
# -*- coding: utf-'8' "-*-"
import datetime
from hashlib import sha1
import logging
from lxml import etree, objectify
from openerp.tools.translate import _
from pprint import pformat
import time
from urllib import urlencode
import urllib2
import urlparse
from openerp import SUPERUSER_ID
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_ogone.controllers.main import OgoneController
from openerp.addons.payment_ogone.data import ogone
from openerp.osv import osv, fields
from openerp.tools import float_round, DEFAULT_SERVER_DATE_FORMAT
from openerp.tools.float_utils import float_compare, float_repr
from openerp.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class PaymentAcquirerOgone(osv.Model):
_inherit = 'payment.acquirer'
def _get_ogone_urls(self, cr, uid, environment, context=None):
""" Ogone URLS:
- standard order: POST address for form-based
@TDETODO: complete me
"""
return {
'ogone_standard_order_url': 'https://secure.ogone.com/ncol/%s/orderstandard_utf8.asp' % (environment,),
'ogone_direct_order_url': 'https://secure.ogone.com/ncol/%s/orderdirect_utf8.asp' % (environment,),
'ogone_direct_query_url': 'https://secure.ogone.com/ncol/%s/querydirect_utf8.asp' % (environment,),
'ogone_afu_agree_url': 'https://secure.ogone.com/ncol/%s/AFU_agree.asp' % (environment,),
}
def _get_providers(self, cr, uid, context=None):
providers = super(PaymentAcquirerOgone, self)._get_providers(cr, uid, context=context)
providers.append(['ogone', 'Ogone'])
return providers
_columns = {
'ogone_pspid': fields.char('PSPID', required_if_provider='ogone'),
'ogone_userid': fields.char('API User ID', required_if_provider='ogone'),
'ogone_password': fields.char('API User Password', required_if_provider='ogone'),
'ogone_shakey_in': fields.char('SHA Key IN', size=32, required_if_provider='ogone'),
'ogone_shakey_out': fields.char('SHA Key OUT', size=32, required_if_provider='ogone'),
'ogone_alias_usage': fields.char('Alias Usage', help="""If you want to use Ogone Aliases,
this default Alias Usage will be presented to
the customer as the reason you want to
keep his payment data""")
}
def _ogone_generate_shasign(self, acquirer, inout, values):
""" Generate the shasign for incoming or outgoing communications.
:param browse acquirer: the payment.acquirer browse record. It should
                                       have a shakey_in and a shakey_out
:param string inout: 'in' (openerp contacting ogone) or 'out' (ogone
contacting openerp). In this last case only some
fields should be contained (see e-Commerce basic)
:param dict values: transaction values
:return string: shasign
"""
assert inout in ('in', 'out')
assert acquirer.provider == 'ogone'
key = getattr(acquirer, 'ogone_shakey_' + inout)
def filter_key(key):
if inout == 'in':
return True
else:
# SHA-OUT keys
# source https://viveum.v-psp.com/Ncol/Viveum_e-Com-BAS_EN.pdf
keys = [
'AAVADDRESS',
'AAVCHECK',
'AAVMAIL',
'AAVNAME',
'AAVPHONE',
'AAVZIP',
'ACCEPTANCE',
'ALIAS',
'AMOUNT',
'BIC',
'BIN',
'BRAND',
'CARDNO',
'CCCTY',
'CN',
'COMPLUS',
'CREATION_STATUS',
'CURRENCY',
'CVCCHECK',
'DCC_COMMPERCENTAGE',
'DCC_CONVAMOUNT',
'DCC_CONVCCY',
'DCC_EXCHRATE',
'DCC_EXCHRATESOURCE',
'DCC_EXCHRATETS',
'DCC_INDICATOR',
'DCC_MARGINPERCENTAGE',
'DCC_VALIDHOURS',
'DIGESTCARDNO',
'ECI',
'ED',
'ENCCARDNO',
'FXAMOUNT',
'FXCURRENCY',
'IBAN',
'IP',
'IPCTY',
'NBREMAILUSAGE',
'NBRIPUSAGE',
'NBRIPUSAGE_ALLTX',
'NBRUSAGE',
'NCERROR',
'NCERRORCARDNO',
'NCERRORCN',
'NCERRORCVC',
'NCERRORED',
'ORDERID',
'PAYID',
'PM',
'SCO_CATEGORY',
'SCORING',
'STATUS',
'SUBBRAND',
'SUBSCRIPTION_ID',
'TRXDATE',
'VC'
]
return key.upper() in keys
items = sorted((k.upper(), v) for k, v in values.items())
sign = ''.join('%s=%s%s' % (k, v, key) for k, v in items if v and filter_key(k))
sign = sign.encode("utf-8")
shasign = sha1(sign).hexdigest()
return shasign
def ogone_form_generate_values(self, cr, uid, id, values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
ogone_tx_values = dict(values)
temp_ogone_tx_values = {
'PSPID': acquirer.ogone_pspid,
'ORDERID': values['reference'],
'AMOUNT': float_repr(float_round(values['amount'], 2) * 100, 0),
'CURRENCY': values['currency'] and values['currency'].name or '',
'LANGUAGE': values.get('partner_lang'),
'CN': values.get('partner_name'),
'EMAIL': values.get('partner_email'),
'OWNERZIP': values.get('partner_zip'),
'OWNERADDRESS': values.get('partner_address'),
'OWNERTOWN': values.get('partner_city'),
'OWNERCTY': values.get('partner_country') and values.get('partner_country').code or '',
'OWNERTELNO': values.get('partner_phone'),
'ACCEPTURL': '%s' % urlparse.urljoin(base_url, OgoneController._accept_url),
'DECLINEURL': '%s' % urlparse.urljoin(base_url, OgoneController._decline_url),
'EXCEPTIONURL': '%s' % urlparse.urljoin(base_url, OgoneController._exception_url),
'CANCELURL': '%s' % urlparse.urljoin(base_url, OgoneController._cancel_url),
'PARAMPLUS': 'return_url=%s' % ogone_tx_values.pop('return_url') if ogone_tx_values.get('return_url') else False,
}
if values.get('type') == 'form_save':
temp_ogone_tx_values.update({
'ALIAS': 'ODOO-NEW-ALIAS-%s' % time.time(), # something unique,
'ALIASUSAGE': values.get('alias_usage') or acquirer.ogone_alias_usage,
})
shasign = self._ogone_generate_shasign(acquirer, 'in', temp_ogone_tx_values)
temp_ogone_tx_values['SHASIGN'] = shasign
ogone_tx_values.update(temp_ogone_tx_values)
return ogone_tx_values
def ogone_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_ogone_urls(cr, uid, acquirer.environment, context=context)['ogone_standard_order_url']
def ogone_s2s_form_validate(self, cr, uid, id, data, context=None):
error = dict()
error_message = []
mandatory_fields = ["cc_number", "cc_cvc", "cc_holder_name", "cc_expiry", "cc_brand"]
# Validation
for field_name in mandatory_fields:
if not data.get(field_name):
error[field_name] = 'missing'
return False if error else True
def ogone_s2s_form_process(self, cr, uid, data, context=None):
values = {
'cc_number': data.get('cc_number'),
'cc_cvc': int(data.get('cc_cvc')),
'cc_holder_name': data.get('cc_holder_name'),
'cc_expiry': data.get('cc_expiry'),
'cc_brand': data.get('cc_brand'),
'acquirer_id': int(data.get('acquirer_id')),
'partner_id': int(data.get('partner_id'))
}
pm_id = self.pool['payment.method'].create(cr, SUPERUSER_ID, values, context=context)
return pm_id
class PaymentTxOgone(osv.Model):
_inherit = 'payment.transaction'
# ogone status
_ogone_valid_tx_status = [5, 9]
_ogone_wait_tx_status = [41, 50, 51, 52, 55, 56, 91, 92, 99]
_ogone_pending_tx_status = [46] # 3DS HTML response
_ogone_cancel_tx_status = [1]
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _ogone_form_get_tx_from_data(self, cr, uid, data, context=None):
""" Given a data dict coming from ogone, verify it and find the related
transaction record. Create a payment method if an alias is returned."""
reference, pay_id, shasign, alias = data.get('orderID'), data.get('PAYID'), data.get('SHASIGN'), data.get('ALIAS')
if not reference or not pay_id or not shasign:
error_msg = _('Ogone: received data with missing reference (%s) or pay_id (%s) or shasign (%s)') % (reference, pay_id, shasign)
_logger.info(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use paytid ?
tx_ids = self.search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = _('Ogone: received data for reference %s') % (reference)
if not tx_ids:
error_msg += _('; no order found')
else:
error_msg += _('; multiple orders found')
_logger.info(error_msg)
raise ValidationError(error_msg)
tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
# verify shasign
shasign_check = self.pool['payment.acquirer']._ogone_generate_shasign(tx.acquirer_id, 'out', data)
if shasign_check.upper() != shasign.upper():
error_msg = _('Ogone: invalid shasign, received %s, computed %s, for data %s') % (shasign, shasign_check, data)
_logger.info(error_msg)
raise ValidationError(error_msg)
if not tx.acquirer_reference:
tx.acquirer_reference = pay_id
# alias was created on ogone server, store it
if alias:
method_obj = self.pool['payment.method']
domain = [('acquirer_ref', '=', alias)]
cardholder = data.get('CN')
if not method_obj.search_count(cr, uid, domain, context=context):
_logger.info('Ogone: saving alias %s for partner %s' % (data.get('CARDNO'), tx.partner_id))
ref = method_obj.create(cr, uid, {'name': data.get('CARDNO') + (' - ' + cardholder if cardholder else ''),
'partner_id': tx.partner_id.id,
'acquirer_id': tx.acquirer_id.id,
'acquirer_ref': alias
})
tx.partner_reference = alias
return tx
def _ogone_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
# TODO: txn_id: should be false at draft, set afterwards, and verified with txn details
if tx.acquirer_reference and data.get('PAYID') != tx.acquirer_reference:
invalid_parameters.append(('PAYID', data.get('PAYID'), tx.acquirer_reference))
# check what is bought
if float_compare(float(data.get('amount', '0.0')), tx.amount, 2) != 0:
invalid_parameters.append(('amount', data.get('amount'), '%.2f' % tx.amount))
if data.get('currency') != tx.currency_id.name:
invalid_parameters.append(('currency', data.get('currency'), tx.currency_id.name))
return invalid_parameters
def _ogone_form_validate(self, cr, uid, tx, data, context=None):
if tx.state == 'done':
_logger.info('Ogone: trying to validate an already validated tx (ref %s)', tx.reference)
return True
status = int(data.get('STATUS', '0'))
if status in self._ogone_valid_tx_status:
vals = {
'state': 'done',
'date_validate': datetime.datetime.strptime(data['TRXDATE'], '%m/%d/%y').strftime(DEFAULT_SERVER_DATE_FORMAT),
'acquirer_reference': data['PAYID'],
}
if data.get('ALIAS') and tx.partner_id and tx.type == 'form_save':
pm_id = self.pool['payment.method'].create(cr, uid, {
'partner_id': tx.partner_id.id,
'acquirer_id': tx.acquirer_id.id,
'acquirer_ref': data.get('ALIAS'),
'name': '%s - %s' % (data.get('CARDNO'), data.get('CN'))
}, context=context)
vals.update(payment_method_id=pm_id)
tx.write(vals)
if tx.callback_eval:
safe_eval(tx.callback_eval, {'self': tx})
return True
elif status in self._ogone_cancel_tx_status:
tx.write({
'state': 'cancel',
'acquirer_reference': data.get('PAYID'),
})
elif status in self._ogone_pending_tx_status:
tx.write({
'state': 'pending',
'acquirer_reference': data.get('PAYID'),
})
else:
error = 'Ogone: feedback error: %(error_str)s\n\n%(error_code)s: %(error_msg)s' % {
'error_str': data.get('NCERRORPLUS'),
'error_code': data.get('NCERROR'),
'error_msg': ogone.OGONE_ERROR_MAP.get(data.get('NCERROR')),
}
_logger.info(error)
tx.write({
'state': 'error',
'state_message': error,
'acquirer_reference': data.get('PAYID'),
})
return False
# --------------------------------------------------
# S2S RELATED METHODS
# --------------------------------------------------
def ogone_s2s_do_transaction(self, cr, uid, id, context=None, **kwargs):
# TODO: create tx with s2s type
tx = self.browse(cr, uid, id, context=context)
account = tx.acquirer_id
reference = tx.reference or "ODOO-%s-%s" % (datetime.datetime.now().strftime('%y%m%d_%H%M%S'), tx.partner_id.id)
data = {
'PSPID': account.ogone_pspid,
'USERID': account.ogone_userid,
'PSWD': account.ogone_password,
'ORDERID': reference,
'AMOUNT': long(tx.amount * 100),
'CURRENCY': tx.currency_id.name,
'OPERATION': 'SAL',
'ECI': 2, # Recurring (from MOTO)
'ALIAS': tx.payment_method_id.acquirer_ref,
'RTIMEOUT': 30,
}
if kwargs.get('3d_secure'):
data.update({
'FLAG3D': 'Y',
'LANGUAGE': tx.partner_id.lang or 'en_US',
})
for url in 'accept decline exception'.split():
key = '{0}_url'.format(url)
val = kwargs.pop(key, None)
if val:
key = '{0}URL'.format(url).upper()
data[key] = val
data['SHASIGN'] = self.pool['payment.acquirer']._ogone_generate_shasign(tx.acquirer_id, 'in', data)
direct_order_url = 'https://secure.ogone.com/ncol/%s/orderdirect.asp' % (tx.acquirer_id.environment)
_logger.debug("Ogone data %s", pformat(data))
request = urllib2.Request(direct_order_url, urlencode(data))
result = urllib2.urlopen(request).read()
_logger.debug('Ogone response = %s', result)
try:
tree = objectify.fromstring(result)
except etree.XMLSyntaxError:
# invalid response from ogone
_logger.exception('Invalid xml response from ogone')
raise
return self._ogone_s2s_validate_tree(tx, tree)
def _ogone_s2s_validate(self, tx):
tree = self._ogone_s2s_get_tx_status(tx)
return self._ogone_s2s_validate_tree(tx, tree)
def _ogone_s2s_validate_tree(self, tx, tree, tries=2):
if tx.state not in ('draft', 'pending'):
_logger.info('Ogone: trying to validate an already validated tx (ref %s)', tx.reference)
return True
status = int(tree.get('STATUS') or 0)
if status in self._ogone_valid_tx_status:
tx.write({
'state': 'done',
'date_validate': datetime.date.today().strftime(DEFAULT_SERVER_DATE_FORMAT),
'acquirer_reference': tree.get('PAYID'),
})
if tx.callback_eval:
safe_eval(tx.callback_eval, {'self': tx})
return True
elif status in self._ogone_cancel_tx_status:
tx.write({
'state': 'cancel',
'acquirer_reference': tree.get('PAYID'),
})
elif status in self._ogone_pending_tx_status:
tx.write({
'state': 'pending',
'acquirer_reference': tree.get('PAYID'),
'html_3ds': str(tree.HTML_ANSWER).decode('base64')
})
elif (not status or status in self._ogone_wait_tx_status) and tries > 0:
time.sleep(0.5)  # short pause before polling the transaction status again
tx.write({'acquirer_reference': tree.get('PAYID')})
tree = self._ogone_s2s_get_tx_status(tx)
return self._ogone_s2s_validate_tree(tx, tree, tries - 1)
else:
error = 'Ogone: feedback error: %(error_str)s\n\n%(error_code)s: %(error_msg)s' % {
'error_str': tree.get('NCERRORPLUS'),
'error_code': tree.get('NCERROR'),
'error_msg': ogone.OGONE_ERROR_MAP.get(tree.get('NCERROR')),
}
_logger.info(error)
tx.write({
'state': 'error',
'state_message': error,
'acquirer_reference': tree.get('PAYID'),
})
return False
def _ogone_s2s_get_tx_status(self, tx):
account = tx.acquirer_id
#reference = tx.reference or "ODOO-%s-%s" % (datetime.datetime.now().strftime('%Y%m%d_%H%M%S'), tx.partner_id.id)
data = {
'PAYID': tx.acquirer_reference,
'PSPID': account.ogone_pspid,
'USERID': account.ogone_userid,
'PSWD': account.ogone_password,
}
query_direct_url = 'https://secure.ogone.com/ncol/%s/querydirect.asp' % (tx.acquirer_id.environment)
_logger.debug("Ogone data %s", pformat(data))
request = urllib2.Request(query_direct_url, urlencode(data))
result = urllib2.urlopen(request).read()
_logger.debug('Ogone response = %s', result)
try:
tree = objectify.fromstring(result)
except etree.XMLSyntaxError:
# invalid response from ogone
_logger.exception('Invalid xml response from ogone')
raise
return tree
class PaymentMethod(osv.Model):
_inherit = 'payment.method'
def ogone_create(self, cr, uid, values, context=None):
if values.get('cc_number'):
# create a alias via batch
values['cc_number'] = values['cc_number'].replace(' ', '')
acquirer = self.pool['payment.acquirer'].browse(cr, uid, values['acquirer_id'])
alias = 'ODOO-NEW-ALIAS-%s' % time.time()
expiry = str(values['cc_expiry'][:2]) + str(values['cc_expiry'][-2:])
line = 'ADDALIAS;%(alias)s;%(cc_holder_name)s;%(cc_number)s;%(expiry)s;%(cc_brand)s;%(pspid)s'
line = line % dict(values, alias=alias, expiry=expiry, pspid=acquirer.ogone_pspid)
data = {
'FILE_REFERENCE': alias,
'TRANSACTION_CODE': 'ATR',
'OPERATION': 'SAL',
'NB_PAYMENTS': 1, # even if we do not actually have any payment, Ogone wants it to not be 0
'FILE': line,
'REPLY_TYPE': 'XML',
'PSPID': acquirer.ogone_pspid,
'USERID': acquirer.ogone_userid,
'PSWD': acquirer.ogone_password,
'PROCESS_MODE': 'CHECKANDPROCESS',
}
url = 'https://secure.ogone.com/ncol/%s/AFU_agree.asp' % (acquirer.environment,)
request = urllib2.Request(url, urlencode(data))
result = urllib2.urlopen(request).read()
try:
tree = objectify.fromstring(result)
except etree.XMLSyntaxError:
_logger.exception('Invalid xml response from ogone')
return None
error_code = error_str = None
if hasattr(tree, 'PARAMS_ERROR'):
error_code = tree.NCERROR.text
error_str = 'PARAMS ERROR: %s' % (tree.PARAMS_ERROR.text or '',)
else:
node = tree.FORMAT_CHECK
error_node = getattr(node, 'FORMAT_CHECK_ERROR', None)
if error_node is not None:
error_code = error_node.NCERROR.text
error_str = 'CHECK ERROR: %s' % (error_node.ERROR.text or '',)
if error_code:
error_msg = tree.get(error_code)
error = '%s\n\n%s: %s' % (error_str, error_code, error_msg)
_logger.error(error)
raise Exception(error)
return {
'acquirer_ref': alias,
'name': 'XXXXXXXXXXXX%s - %s' % (values['cc_number'][-4:], values['cc_holder_name'])
}
return {}
|
scorphus/django
|
refs/heads/master
|
tests/timezones/tests.py
|
165
|
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from unittest import SkipTest, skipIf
from xml.dom.minidom import parseString
from django.contrib.auth.models import User
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import connection, connections
from django.db.models import Max, Min
from django.http import HttpRequest
from django.template import (
Context, RequestContext, Template, TemplateSyntaxError, context_processors,
)
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, override_settings,
skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import six, timezone
from .forms import (
EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp,
)
try:
import pytz
except ImportError:
pytz = None
requires_pytz = skipIf(pytz is None, "this test requires pytz")
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# who don't have Daylight Saving Time, so we can represent them easily
# with FixedOffset, and use them directly as tzinfo in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
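# get_fixed_timezone() takes an offset in minutes, so EAT is UTC+03:00 and
# ICT is UTC+07:00, matching the 13:20:30 EAT / 10:20:30 UTC / 17:20:30 ICT
# example above.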
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination actually never happens.
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipIfDBFeature('supports_timezones')
def test_aware_datetime_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with self.assertRaises(ValueError):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 4, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
def test_cursor_execute_accepts_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_cursor_execute_returns_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt])
self.assertEqual(cursor.fetchall()[0][0], dt)
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class NewDatabaseTests(TestCase):
@requires_tz_support
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
def test_datetime_from_date(self):
dt = datetime.date(2011, 9, 1)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))
@requires_tz_support
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT))
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_auto_now_and_auto_now_add(self):
now = timezone.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
@requires_pytz
def test_query_filter_with_pytz_timezones(self):
tz = pytz.timezone('Europe/Paris')
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
Event.objects.create(dt=dt)
next = dt + datetime.timedelta(seconds=3)
prev = dt - datetime.timedelta(seconds=3)
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1)
self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1)
@requires_tz_support
def test_query_filter_with_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
dt = dt.replace(tzinfo=None)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
# naive datetimes are interpreted in local time
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
self.assertEqual(len(recorded), 3)
for warning in recorded:
msg = str(warning.message)
self.assertTrue(msg.startswith("DateTimeField Event.dt "
"received a naive datetime"))
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
# These two dates fall in the same day in EAT, but in different days,
# years and months in UTC.
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
@skipUnlessDBFeature('supports_timezones')
def test_cursor_execute_accepts_aware_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_timezones')
def test_cursor_execute_accepts_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
utc_naive_dt = timezone.make_naive(dt, timezone.utc)
with connection.cursor() as cursor:
cursor.execute('INSERT INTO timezones_event (dt) VALUES (%s)', [utc_naive_dt])
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_timezones')
def test_cursor_execute_returns_aware_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [dt])
self.assertEqual(cursor.fetchall()[0][0], dt)
@skipIfDBFeature('supports_timezones')
def test_cursor_execute_returns_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
utc_naive_dt = timezone.make_naive(dt, timezone.utc)
Event.objects.create(dt=dt)
with connection.cursor() as cursor:
cursor.execute('SELECT dt FROM timezones_event WHERE dt = %s', [utc_naive_dt])
self.assertEqual(cursor.fetchall()[0][0], utc_naive_dt)
@requires_tz_support
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())
def test_null_datetime(self):
# Regression test for #17294
e = MaybeEvent.objects.create()
self.assertEqual(e.dt, None)
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class ForcedTimeZoneDatabaseTests(TransactionTestCase):
"""
Test the TIME_ZONE database configuration parameter.
Since this involves reading and writing to the same database through two
connections, this is a TransactionTestCase.
"""
available_apps = ['timezones']
@classmethod
def setUpClass(cls):
# @skipIfDBFeature and @skipUnlessDBFeature cannot be chained. The
# outermost takes precedence. Handle skipping manually instead.
if connection.features.supports_timezones:
raise SkipTest("Database has feature(s) supports_timezones")
if not connection.features.test_db_allows_multiple_connections:
raise SkipTest("Database doesn't support feature(s): test_db_allows_multiple_connections")
super(ForcedTimeZoneDatabaseTests, cls).setUpClass()
connections.databases['tz'] = connections.databases['default'].copy()
connections.databases['tz']['TIME_ZONE'] = 'Asia/Bangkok'
@classmethod
def tearDownClass(cls):
connections['tz'].close()
del connections['tz']
del connections.databases['tz']
super(ForcedTimeZoneDatabaseTests, cls).tearDownClass()
def test_read_datetime(self):
fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
Event.objects.create(dt=fake_dt)
event = Event.objects.using('tz').get()
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
self.assertEqual(event.dt, dt)
def test_write_datetime(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.using('tz').create(dt=dt)
event = Event.objects.get()
fake_dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=UTC)
self.assertEqual(event.dt, fake_dt)
@skipUnlessDBFeature('supports_timezones')
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class UnsupportedTimeZoneDatabaseTests(TestCase):
def test_time_zone_parameter_not_supported_if_database_supports_timezone(self):
connections.databases['tz'] = connections.databases['default'].copy()
connections.databases['tz']['TIME_ZONE'] = 'Asia/Bangkok'
tz_conn = connections['tz']
try:
with self.assertRaises(ImproperlyConfigured):
tz_conn.cursor()
finally:
connections['tz'].close() # in case the test fails
del connections['tz']
del connections.databases['tz']
@override_settings(TIME_ZONE='Africa/Nairobi')
class SerializationTests(SimpleTestCase):
# Backend-specific notes:
    # - JSON supports only milliseconds; microseconds will be truncated.
    # - PyYAML dumps the UTC offset correctly for timezone-aware datetimes,
    #   but when it loads this representation, it subtracts the offset and
    #   returns a naive datetime object in UTC (http://pyyaml.org/ticket/202).
# Tests are adapted to take these quirks into account.
def assert_python_contains_datetime(self, objects, dt):
self.assertEqual(objects[0]['fields']['dt'], dt)
def assert_json_contains_datetime(self, json, dt):
self.assertIn('"fields": {"dt": "%s"}' % dt, json)
def assert_xml_contains_datetime(self, xml, dt):
field = parseString(xml).getElementsByTagName('field')[0]
self.assertXMLEqual(field.childNodes[0].wholeText, dt)
def assert_yaml_contains_datetime(self, yaml, dt):
# Depending on the yaml dumper, '!timestamp' might be absent
six.assertRegex(self, yaml,
r"\n fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_aware_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class TemplateTests(TestCase):
@requires_tz_support
def test_localtime_templatetag_and_filters(self):
"""
Test the {% localtime %} templatetag and related filters.
"""
datetimes = {
'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
}
templates = {
'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"),
'noarg': Template("{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'on': Template("{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'off': Template("{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
}
# Transform a list of keys in 'datetimes' to the expected template
# output. This makes the definition of 'results' more readable.
def t(*result):
return '|'.join(datetimes[key].isoformat() for key in result)
# Results for USE_TZ = True
results = {
'utc': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('utc', 'eat', 'utc', 'ict'),
},
'eat': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('eat', 'eat', 'utc', 'ict'),
},
'ict': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('ict', 'eat', 'utc', 'ict'),
},
'naive': {
'notag': t('naive', 'eat', 'utc', 'ict'),
'noarg': t('naive', 'eat', 'utc', 'ict'),
'on': t('naive', 'eat', 'utc', 'ict'),
'off': t('naive', 'eat', 'utc', 'ict'),
}
}
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
# Changes for USE_TZ = False
results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict')
results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict')
with self.settings(USE_TZ=False):
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
@requires_pytz
def test_localtime_filters_with_pytz(self):
"""
Test the |localtime, |utc, and |timezone filters with pytz.
"""
# Use a pytz timezone as local time
tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})
with self.settings(TIME_ZONE='Europe/Paris'):
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00")
# Use a pytz timezone as argument
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_localtime_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()
def test_localtime_filters_do_not_raise_exceptions(self):
"""
Test the |localtime, |utc, and |timezone filters on bad inputs.
"""
tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}")
with self.settings(USE_TZ=True):
# bad datetime value
ctx = Context({'dt': None, 'tz': ICT})
self.assertEqual(tpl.render(ctx), "None|||")
ctx = Context({'dt': 'not a date', 'tz': ICT})
self.assertEqual(tpl.render(ctx), "not a date|||")
# bad timezone value
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None})
self.assertEqual(tpl.render(ctx), "")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'})
self.assertEqual(tpl.render(ctx), "")
@requires_tz_support
def test_timezone_templatetag(self):
"""
Test the {% timezone %} templatetag.
"""
tpl = Template(
"{% load tz %}"
"{{ dt }}|"
"{% timezone tz1 %}"
"{{ dt }}|"
"{% timezone tz2 %}"
"{{ dt }}"
"{% endtimezone %}"
"{% endtimezone %}"
)
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'tz1': ICT, 'tz2': None})
self.assertEqual(tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00")
@requires_pytz
def test_timezone_templatetag_with_pytz(self):
"""
Test the {% timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")
# Use a pytz timezone as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': 'Europe/Paris'})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError):
Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'}))
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_get_current_timezone_templatetag(self):
"""
Test the {% get_current_timezone %} templatetag.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context()), "UTC")
tpl = Template("{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
@requires_pytz
def test_get_current_timezone_templatetag_with_pytz(self):
"""
Test the {% get_current_timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
with timezone.override(pytz.timezone('Europe/Paris')):
self.assertEqual(tpl.render(Context()), "Europe/Paris")
tpl = Template("{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Europe/Paris")
def test_get_current_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% get_current_timezone %}").render()
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_tz_template_context_processor(self):
"""
Test the django.template.context_processors.tz template context processor.
"""
tpl = Template("{{ TIME_ZONE }}")
context = Context()
self.assertEqual(tpl.render(context), "")
request_context = RequestContext(HttpRequest(), processors=[context_processors.tz])
self.assertEqual(tpl.render(request_context), "Africa/Nairobi" if pytz else "EAT")
@requires_tz_support
def test_date_and_time_template_filters(self):
tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")
def test_date_and_time_template_filters_honor_localtime(self):
tpl = Template("{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}{% endlocaltime %}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
def test_localtime_with_time_zone_setting_set_to_none(self):
# Regression for #17274
tpl = Template("{% load tz %}{{ dt }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)})
with self.settings(TIME_ZONE=None):
# the actual value depends on the system time zone of the host
self.assertTrue(tpl.render(ctx).startswith("2011"))
@requires_tz_support
def test_now_template_tag_uses_current_time_zone(self):
# Regression for #17343
tpl = Template("{% now \"O\" %}")
self.assertEqual(tpl.render(Context({})), "+0300")
with timezone.override(ICT):
self.assertEqual(tpl.render(Context({})), "+0700")
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False)
class LegacyFormsTests(TestCase):
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
@requires_pytz
def test_form_with_non_existent_time(self):
form = EventForm({'dt': '2011-03-27 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0))
@requires_pytz
def test_form_with_ambiguous_time(self):
form = EventForm({'dt': '2011-10-30 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0))
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class NewFormsTests(TestCase):
@requires_tz_support
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_other_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30'})
with timezone.override(ICT):
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_explicit_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30+07:00'})
        # Datetime input formats don't allow providing a time zone.
self.assertFalse(form.is_valid())
@requires_pytz
def test_form_with_non_existent_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-03-27 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-03-27 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@requires_pytz
def test_form_with_ambiguous_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-10-30 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-10-30 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@requires_tz_support
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_form(self):
form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)})
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@requires_tz_support
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_model_form(self):
form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='timezones.urls')
class AdminTests(TestCase):
@classmethod
def setUpTestData(cls):
# password = "secret"
cls.u1 = User.objects.create(
id=100, password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158',
last_login=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
is_superuser=True, username='super', first_name='Super', last_name='User',
email='super@example.com', is_staff=True, is_active=True,
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10, tzinfo=UTC),
)
def setUp(self):
self.client.login(username='super', password='secret')
@requires_tz_support
def test_changelist(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin_tz:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(EAT).isoformat())
def test_changelist_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin_tz:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(ICT).isoformat())
@requires_tz_support
def test_change_editable(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())
def test_change_editable_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin_tz:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())
@requires_tz_support
def test_change_readonly(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(EAT).isoformat())
def test_change_readonly_in_other_timezone(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
with timezone.override(ICT):
response = self.client.get(reverse('admin_tz:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(ICT).isoformat())
|
CingHu/neutron-ustack
|
refs/heads/master
|
neutron/services/loadbalancer/drivers/haproxy/templates/__init__.py
|
12133432
| |
bb111189/Arky2
|
refs/heads/master
|
boilerplate/external/PIL/ImageTk.py
|
39
|
#
# The Python Imaging Library.
# $Id$
#
# a Tk display interface
#
# History:
# 96-04-08 fl Created
# 96-09-06 fl Added getimage method
# 96-11-01 fl Rewritten, removed image attribute and crop method
# 97-05-09 fl Use PyImagingPaste method instead of image type
# 97-05-12 fl Minor tweaks to match the IFUNC95 interface
# 97-05-17 fl Support the "pilbitmap" booster patch
# 97-06-05 fl Added file= and data= argument to image constructors
# 98-03-09 fl Added width and height methods to Image classes
# 98-07-02 fl Use default mode for "P" images without palette attribute
# 98-07-02 fl Explicitly destroy Tkinter image objects
# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch)
# 99-07-26 fl Automatically hook into Tkinter (if possible)
# 99-08-15 fl Hook uses _imagingtk instead of _imaging
#
# Copyright (c) 1997-1999 by Secret Labs AB
# Copyright (c) 1996-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Tkinter, Image
##
# The <b>ImageTk</b> module contains support to create and modify
# Tkinter <b>BitmapImage</b> and <b>PhotoImage</b> objects.
# <p>
# For examples, see the demo programs in the <i>Scripts</i>
# directory.
##
# --------------------------------------------------------------------
# Check for Tkinter interface hooks
_pilbitmap_ok = None
def _pilbitmap_check():
global _pilbitmap_ok
if _pilbitmap_ok is None:
try:
im = Image.new("1", (1,1))
Tkinter.BitmapImage(data="PIL:%d" % im.im.id)
_pilbitmap_ok = 1
except Tkinter.TclError:
_pilbitmap_ok = 0
return _pilbitmap_ok
# --------------------------------------------------------------------
# PhotoImage
##
# Creates a Tkinter-compatible photo image. This can be used
# everywhere Tkinter expects an image object. If the image is an RGBA
# image, pixels having alpha 0 are treated as transparent.
class PhotoImage:
##
# Create a photo image object. The constructor takes either
# a PIL image, or a mode and a size. Alternatively, you can
# use the <b>file</b> or <b>data</b> options to initialize
# the photo image object.
# <p>
# @def __init__(image=None, size=None, **options)
# @param image Either a PIL image, or a mode string. If a
# mode string is used, a size must also be given.
# @param size If the first argument is a mode string, this
# defines the size of the image.
# @keyparam file A filename to load the image from (using
# Image.open(file)).
# @keyparam data An 8-bit string containing image data (as
# loaded from an image file).
def __init__(self, image=None, size=None, **kw):
# Tk compatibility: file or data
if image is None:
if kw.has_key("file"):
image = Image.open(kw["file"])
del kw["file"]
elif kw.has_key("data"):
from StringIO import StringIO
image = Image.open(StringIO(kw["data"]))
del kw["data"]
if hasattr(image, "mode") and hasattr(image, "size"):
# got an image instead of a mode
mode = image.mode
if mode == "P":
# palette mapped data
image.load()
try:
mode = image.palette.mode
except AttributeError:
mode = "RGB" # default
size = image.size
kw["width"], kw["height"] = size
else:
mode = image
image = None
if mode not in ["1", "L", "RGB", "RGBA"]:
mode = Image.getmodebase(mode)
self.__mode = mode
self.__size = size
self.__photo = apply(Tkinter.PhotoImage, (), kw)
self.tk = self.__photo.tk
if image:
self.paste(image)
def __del__(self):
name = self.__photo.name
self.__photo.name = None
try:
self.__photo.tk.call("image", "delete", name)
except:
pass # ignore internal errors
##
# Get the Tkinter photo image identifier. This method is
# automatically called by Tkinter whenever a PhotoImage object is
# passed to a Tkinter method.
#
# @return A Tkinter photo image identifier (a string).
def __str__(self):
return str(self.__photo)
##
# Get the width of the image.
#
# @return The width, in pixels.
def width(self):
return self.__size[0]
##
# Get the height of the image.
#
# @return The height, in pixels.
def height(self):
return self.__size[1]
##
# Paste a PIL image into the photo image. Note that this can
# be very slow if the photo image is displayed.
#
# @param im A PIL image. The size must match the target region.
# If the mode does not match, the image is converted to the
# mode of the bitmap image.
# @param box A 4-tuple defining the left, upper, right, and
# lower pixel coordinate. If None is given instead of a
# tuple, all of the image is assumed.
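    #
    # A minimal usage sketch (added for illustration, not part of the
    # original module); assumes "photo" is a PhotoImage and "im" is a PIL
    # image whose size matches the target region:
    #
    #     photo.paste(im)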
def paste(self, im, box=None):
# convert to blittable
im.load()
image = im.im
if image.isblock() and im.mode == self.__mode:
block = image
else:
block = image.new_block(self.__mode, im.size)
image.convert2(block, image) # convert directly between buffers
tk = self.__photo.tk
try:
tk.call("PyImagingPhoto", self.__photo, block.id)
except Tkinter.TclError, v:
# activate Tkinter hook
try:
import _imagingtk
try:
_imagingtk.tkinit(tk.interpaddr(), 1)
except AttributeError:
_imagingtk.tkinit(id(tk), 0)
tk.call("PyImagingPhoto", self.__photo, block.id)
except (ImportError, AttributeError, Tkinter.TclError):
raise # configuration problem; cannot attach to Tkinter
# --------------------------------------------------------------------
# BitmapImage
##
# Create a Tkinter-compatible bitmap image. This can be used
# everywhere Tkinter expects an image object.
class BitmapImage:
##
# Create a Tkinter-compatible bitmap image.
# <p>
# The given image must have mode "1". Pixels having value 0 are
# treated as transparent. Options, if any, are passed on to
# Tkinter. The most commonly used option is <b>foreground</b>,
# which is used to specify the colour for the non-transparent
# parts. See the Tkinter documentation for information on how to
# specify colours.
#
# @def __init__(image=None, **options)
# @param image A PIL image.
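    #
    # A minimal usage sketch (added for illustration, not part of the
    # original module); assumes "im" is a PIL image:
    #
    #     mask = BitmapImage(im.convert("1"), foreground="red")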
def __init__(self, image=None, **kw):
# Tk compatibility: file or data
if image is None:
if kw.has_key("file"):
image = Image.open(kw["file"])
del kw["file"]
elif kw.has_key("data"):
from StringIO import StringIO
image = Image.open(StringIO(kw["data"]))
del kw["data"]
self.__mode = image.mode
self.__size = image.size
if _pilbitmap_check():
# fast way (requires the pilbitmap booster patch)
image.load()
kw["data"] = "PIL:%d" % image.im.id
self.__im = image # must keep a reference
else:
# slow but safe way
kw["data"] = image.tobitmap()
self.__photo = apply(Tkinter.BitmapImage, (), kw)
def __del__(self):
name = self.__photo.name
self.__photo.name = None
try:
self.__photo.tk.call("image", "delete", name)
except:
pass # ignore internal errors
##
# Get the width of the image.
#
# @return The width, in pixels.
def width(self):
return self.__size[0]
##
# Get the height of the image.
#
# @return The height, in pixels.
def height(self):
return self.__size[1]
##
# Get the Tkinter bitmap image identifier. This method is
# automatically called by Tkinter whenever a BitmapImage object
# is passed to a Tkinter method.
#
# @return A Tkinter bitmap image identifier (a string).
def __str__(self):
return str(self.__photo)
##
# Copies the contents of a PhotoImage to a PIL image memory.
def getimage(photo):
photo.tk.call("PyImagingPhotoGet", photo)
# --------------------------------------------------------------------
# Helper for the Image.show method.
def _show(image, title):
class UI(Tkinter.Label):
def __init__(self, master, im):
if im.mode == "1":
self.image = BitmapImage(im, foreground="white", master=master)
else:
self.image = PhotoImage(im, master=master)
Tkinter.Label.__init__(self, master, image=self.image,
bg="black", bd=0)
if not Tkinter._default_root:
raise IOError, "tkinter not initialized"
top = Tkinter.Toplevel()
if title:
top.title(title)
UI(top, image).pack()
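# --------------------------------------------------------------------
# Minimal command-line demo (an added sketch, not part of the original
# module): open the image named on the command line and show it in a Tk
# window.  Assumes a working display and a Tk installation.
if __name__ == "__main__":
    import sys
    root = Tkinter.Tk()
    im = Image.open(sys.argv[1])
    if im.mode == "1":
        photo = BitmapImage(im, foreground="white", master=root)
    else:
        photo = PhotoImage(im, master=root)
    Tkinter.Label(root, image=photo).pack()
    root.mainloop()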
|
nuncjo/odoo
|
refs/heads/8.0
|
openerp/addons/base/tests/test_view_validation.py
|
396
|
# This test can be run stand-alone with something like:
# > PYTHONPATH=. python2 openerp/tests/test_view_validation.py
from lxml import etree
from StringIO import StringIO
import unittest2
from openerp.tools.view_validation import (valid_page_in_book, valid_att_in_form, valid_type_in_colspan,
valid_type_in_col, valid_att_in_field, valid_att_in_label,
valid_field_in_graph, valid_field_in_tree
)
invalid_form = etree.parse(StringIO('''\
<form>
<label></label>
<group>
<div>
<page></page>
<label colspan="True"></label>
<field></field>
</div>
</group>
<notebook>
<page>
<group col="Two">
<div>
<label></label>
<field colspan="Five"> </field>
</div>
</group>
</page>
</notebook>
</form>
''')).getroot()
valid_form = etree.parse(StringIO('''\
<form string="">
<field name=""></field>
<field name=""></field>
<notebook>
<page>
<field name=""></field>
<label string=""></label>
<field name=""></field>
</page>
<page>
<group colspan="5" col="2">
<label for=""></label>
<label string="" colspan="5"></label>
</group>
</page>
</notebook>
</form>
''')).getroot()
invalid_graph = etree.parse(StringIO('''\
<graph>
<label/>
<group>
<div>
<field></field>
<field></field>
</div>
</group>
</graph>
''')).getroot()
valid_graph = etree.parse(StringIO('''\
<graph string="">
<field name=""></field>
<field name=""></field>
</graph>
''')).getroot()
invalid_tree = etree.parse(StringIO('''\
<tree>
<group>
<div>
<field></field>
<field></field>
</div>
</group>
</tree>
''')).getroot()
valid_tree = etree.parse(StringIO('''\
<tree string="">
<field name=""></field>
<field name=""></field>
<button/>
<field name=""></field>
</tree>
''')).getroot()
class test_view_validation(unittest2.TestCase):
""" Test the view validation code (but not the views themselves). """
def test_page_validation(self):
assert not valid_page_in_book(invalid_form)
assert valid_page_in_book(valid_form)
def test_all_field_validation(self):
assert not valid_att_in_field(invalid_form)
assert valid_att_in_field(valid_form)
def test_all_label_validation(self):
assert not valid_att_in_label(invalid_form)
assert valid_att_in_label(valid_form)
def test_form_string_validation(self):
assert valid_att_in_form(valid_form)
def test_graph_validation(self):
assert not valid_field_in_graph(invalid_graph)
assert valid_field_in_graph(valid_graph)
def test_tree_validation(self):
assert not valid_field_in_tree(invalid_tree)
assert valid_field_in_tree(valid_tree)
def test_colspan_datatype_validation(self):
assert not valid_type_in_colspan(invalid_form)
assert valid_type_in_colspan(valid_form)
def test_col_datatype_validation(self):
assert not valid_type_in_col(invalid_form)
assert valid_type_in_col(valid_form)
if __name__ == '__main__':
unittest2.main()
|
ryanmockabee/golfr
|
refs/heads/master
|
flask/lib/python3.6/site-packages/sqlalchemy/sql/schema.py
|
14
|
# sql/schema.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The schema module provides the building blocks for database metadata.
Each element within this module describes a database entity which can be
created and dropped, or is otherwise part of such an entity. Examples include
tables, columns, sequences, and indexes.
All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as
defined in this module they are intended to be agnostic of any vendor-specific
constructs.
A collection of entities are grouped into a unit called
:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of
schema elements, and can also be associated with an actual database connection
such that operations involving the contained elements can contact the database
as needed.
Two of the elements here also build upon their "syntactic" counterparts, which
are defined in :class:`~sqlalchemy.sql.expression.`, specifically
:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`.
Since these objects are part of the SQL expression language, they are usable
as components in SQL expressions.
"""
from __future__ import absolute_import
from .. import exc, util, event, inspection
from .base import SchemaEventTarget, DialectKWArgs
import operator
from . import visitors
from . import type_api
from .base import _bind_or_error, ColumnCollection
from .elements import ClauseElement, ColumnClause, \
_as_truncated, TextClause, _literal_as_text,\
ColumnElement, quoted_name
from .selectable import TableClause
import collections
import sqlalchemy
from . import ddl
RETAIN_SCHEMA = util.symbol('retain_schema')
BLANK_SCHEMA = util.symbol(
'blank_schema',
"""Symbol indicating that a :class:`.Table` or :class:`.Sequence`
should have 'None' for its schema, even if the parent
:class:`.MetaData` has specified a schema.
.. versionadded:: 1.0.14
"""
)
def _get_table_key(name, schema):
if schema is None:
return name
else:
return schema + "." + name
@inspection._self_inspects
class SchemaItem(SchemaEventTarget, visitors.Visitable):
"""Base class for items that define a database schema."""
__visit_name__ = 'schema_item'
def _init_items(self, *args):
"""Initialize the list of child items for this SchemaItem."""
for item in args:
if item is not None:
item._set_parent_with_dispatch(self)
def get_children(self, **kwargs):
"""used to allow SchemaVisitor access"""
return []
def __repr__(self):
return util.generic_repr(self, omit_kwarg=['info'])
@property
@util.deprecated('0.9', 'Use ``<obj>.name.quote``')
def quote(self):
"""Return the value of the ``quote`` flag passed
to this schema object, for those schema items which
have a ``name`` field.
"""
return self.name.quote
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.SchemaItem`.
The dictionary is automatically generated when first accessed.
It can also be specified in the constructor of some objects,
such as :class:`.Table` and :class:`.Column`.
"""
return {}
def _schema_item_copy(self, schema_item):
if 'info' in self.__dict__:
schema_item.info = self.info.copy()
schema_item.dispatch._update(self.dispatch)
return schema_item
def _translate_schema(self, effective_schema, map_):
return map_.get(effective_schema, effective_schema)
class Table(DialectKWArgs, SchemaItem, TableClause):
r"""Represent a table in a database.
e.g.::
mytable = Table("mytable", metadata,
Column('mytable_id', Integer, primary_key=True),
Column('value', String(50))
)
The :class:`.Table` object constructs a unique instance of itself based
on its name and optional schema name within the given
:class:`.MetaData` object. Calling the :class:`.Table`
constructor with the same name and same :class:`.MetaData` argument
a second time will return the *same* :class:`.Table` object - in this way
the :class:`.Table` constructor acts as a registry function.
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
Constructor arguments are as follows:
:param name: The name of this table as represented in the database.
The table name, along with the value of the ``schema`` parameter,
forms a key which uniquely identifies this :class:`.Table` within
the owning :class:`.MetaData` collection.
Additional calls to :class:`.Table` with the same name, metadata,
and schema name will return the same :class:`.Table` object.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word or contain special characters.
A name with any number of upper case characters is considered
to be case sensitive, and will be sent as quoted.
To enable unconditional quoting for the table name, specify the flag
``quote=True`` to the constructor, or use the :class:`.quoted_name`
construct to specify the name.
:param metadata: a :class:`.MetaData` object which will contain this
table. The metadata is used as a point of association of this table
with other tables which are referenced via foreign key. It also
may be used to associate this table with a particular
:class:`.Connectable`.
:param \*args: Additional positional arguments are used primarily
to add the list of :class:`.Column` objects contained within this
table. Similar to the style of a CREATE TABLE statement, other
:class:`.SchemaItem` constructs may be added here, including
:class:`.PrimaryKeyConstraint`, and :class:`.ForeignKeyConstraint`.
:param autoload: Defaults to False, unless :paramref:`.Table.autoload_with`
is set in which case it defaults to True; :class:`.Column` objects
for this table should be reflected from the database, possibly
augmenting or replacing existing :class:`.Column` objects that were
explicitly specified.
.. versionchanged:: 1.0.0 setting the :paramref:`.Table.autoload_with`
parameter implies that :paramref:`.Table.autoload` will default
to True.
.. seealso::
:ref:`metadata_reflection_toplevel`
:param autoload_replace: Defaults to ``True``; when using
:paramref:`.Table.autoload`
in conjunction with :paramref:`.Table.extend_existing`, indicates
that :class:`.Column` objects present in the already-existing
:class:`.Table` object should be replaced with columns of the same
name retrieved from the autoload process. When ``False``, columns
already present under existing names will be omitted from the
reflection process.
Note that this setting does not impact :class:`.Column` objects
specified programmatically within the call to :class:`.Table` that
also is autoloading; those :class:`.Column` objects will always
replace existing columns of the same name when
:paramref:`.Table.extend_existing` is ``True``.
.. versionadded:: 0.7.5
.. seealso::
:paramref:`.Table.autoload`
:paramref:`.Table.extend_existing`
:param autoload_with: An :class:`.Engine` or :class:`.Connection` object
with which this :class:`.Table` object will be reflected; when
set to a non-None value, it implies that :paramref:`.Table.autoload`
is ``True``. If left unset, but :paramref:`.Table.autoload` is
explicitly set to ``True``, an autoload operation will attempt to
proceed by locating an :class:`.Engine` or :class:`.Connection` bound
to the underlying :class:`.MetaData` object.
.. seealso::
:paramref:`.Table.autoload`
:param extend_existing: When ``True``, indicates that if this
:class:`.Table` is already present in the given :class:`.MetaData`,
apply further arguments within the constructor to the existing
:class:`.Table`.
If :paramref:`.Table.extend_existing` or
:paramref:`.Table.keep_existing` are not set, and the given name
of the new :class:`.Table` refers to a :class:`.Table` that is
already present in the target :class:`.MetaData` collection, and
this :class:`.Table` specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a :class:`.Table`
is specified that matches an existing :class:`.Table`, yet specifies
additional constructs.
:paramref:`.Table.extend_existing` will also work in conjunction
with :paramref:`.Table.autoload` to run a new reflection
operation against the database, even if a :class:`.Table`
of the same name is already present in the target
:class:`.MetaData`; newly reflected :class:`.Column` objects
and other options will be added into the state of the
:class:`.Table`, potentially overwriting existing columns
and options of the same name.
.. versionchanged:: 0.7.4 :paramref:`.Table.extend_existing` will
invoke a new reflection operation when combined with
:paramref:`.Table.autoload` set to True.
As is always the case with :paramref:`.Table.autoload`,
:class:`.Column` objects can be specified in the same :class:`.Table`
constructor, which will take precedence. Below, the existing
table ``mytable`` will be augmented with :class:`.Column` objects
both reflected from the database, as well as the given :class:`.Column`
named "y"::
Table("mytable", metadata,
Column('y', Integer),
extend_existing=True,
autoload=True,
autoload_with=engine
)
.. seealso::
:paramref:`.Table.autoload`
:paramref:`.Table.autoload_replace`
:paramref:`.Table.keep_existing`
:param implicit_returning: True by default - indicates that
RETURNING can be used by default to fetch newly inserted primary key
values, for backends which support this. Note that
create_engine() also provides an implicit_returning flag.
:param include_columns: A list of strings indicating a subset of
columns to be loaded via the ``autoload`` operation; table columns who
aren't present in this list will not be represented on the resulting
``Table`` object. Defaults to ``None`` which indicates all columns
should be reflected.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param keep_existing: When ``True``, indicates that if this Table
is already present in the given :class:`.MetaData`, ignore
further arguments within the constructor to the existing
:class:`.Table`, and return the :class:`.Table` object as
originally created. This is to allow a function that wishes
to define a new :class:`.Table` on first call, but on
subsequent calls will return the same :class:`.Table`,
without any of the declarations (particularly constraints)
being applied a second time.
If :paramref:`.Table.extend_existing` or
:paramref:`.Table.keep_existing` are not set, and the given name
of the new :class:`.Table` refers to a :class:`.Table` that is
already present in the target :class:`.MetaData` collection, and
this :class:`.Table` specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a :class:`.Table`
is specified that matches an existing :class:`.Table`, yet specifies
additional constructs.
.. seealso::
:paramref:`.Table.extend_existing`
:param listeners: A list of tuples of the form ``(<eventname>, <fn>)``
which will be passed to :func:`.event.listen` upon construction.
This alternate hook to :func:`.event.listen` allows the establishment
of a listener function specific to this :class:`.Table` before
the "autoload" process begins. Particularly useful for
the :meth:`.DDLEvents.column_reflect` event::
def listen_for_reflect(table, column_info):
"handle the column reflection event"
# ...
t = Table(
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
:param mustexist: When ``True``, indicates that this Table must already
be present in the given :class:`.MetaData` collection, else
an exception is raised.
:param prefixes:
A list of strings to insert after CREATE in the CREATE TABLE
statement. They will be separated by spaces.
:param quote: Force quoting of this table's name on or off, corresponding
to ``True`` or ``False``. When left at its default of ``None``,
the column identifier will be quoted according to whether the name is
case sensitive (identifiers with at least one upper case character are
treated as case sensitive), or if it's a reserved word. This flag
is only needed to force quoting of a reserved word which is not known
by the SQLAlchemy dialect.
:param quote_schema: same as 'quote' but applies to the schema identifier.
:param schema: The schema name for this table, which is required if
the table resides in a schema other than the default selected schema
for the engine's database connection. Defaults to ``None``.
If the owning :class:`.MetaData` of this :class:`.Table` specifies
its own :paramref:`.MetaData.schema` parameter, then that schema
name will be applied to this :class:`.Table` if the schema parameter
here is set to ``None``. To set a blank schema name on a :class:`.Table`
that would otherwise use the schema set on the owning :class:`.MetaData`,
specify the special symbol :attr:`.BLANK_SCHEMA`.
.. versionadded:: 1.0.14 Added the :attr:`.BLANK_SCHEMA` symbol to
allow a :class:`.Table` to have a blank schema name even when the
parent :class:`.MetaData` specifies :paramref:`.MetaData.schema`.
The quoting rules for the schema name are the same as those for the
``name`` parameter, in that quoting is applied for reserved words or
case-sensitive names; to enable unconditional quoting for the
schema name, specify the flag
``quote_schema=True`` to the constructor, or use the
:class:`.quoted_name` construct to specify the name.
:param useexisting: Deprecated. Use :paramref:`.Table.extend_existing`.
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form ``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
__visit_name__ = 'table'
def __new__(cls, *args, **kw):
if not args:
# python3k pickle seems to call this
return object.__new__(cls)
try:
name, metadata, args = args[0], args[1], args[2:]
except IndexError:
raise TypeError("Table() takes at least two arguments")
schema = kw.get('schema', None)
if schema is None:
schema = metadata.schema
elif schema is BLANK_SCHEMA:
schema = None
keep_existing = kw.pop('keep_existing', False)
extend_existing = kw.pop('extend_existing', False)
if 'useexisting' in kw:
msg = "useexisting is deprecated. Use extend_existing."
util.warn_deprecated(msg)
if extend_existing:
msg = "useexisting is synonymous with extend_existing."
raise exc.ArgumentError(msg)
extend_existing = kw.pop('useexisting', False)
if keep_existing and extend_existing:
msg = "keep_existing and extend_existing are mutually exclusive."
raise exc.ArgumentError(msg)
mustexist = kw.pop('mustexist', False)
key = _get_table_key(name, schema)
if key in metadata.tables:
if not keep_existing and not extend_existing and bool(args):
raise exc.InvalidRequestError(
"Table '%s' is already defined for this MetaData "
"instance. Specify 'extend_existing=True' "
"to redefine "
"options and columns on an "
"existing Table object." % key)
table = metadata.tables[key]
if extend_existing:
table._init_existing(*args, **kw)
return table
else:
if mustexist:
raise exc.InvalidRequestError(
"Table '%s' not defined" % (key))
table = object.__new__(cls)
table.dispatch.before_parent_attach(table, metadata)
metadata._add_table(name, schema, table)
try:
table._init(name, metadata, *args, **kw)
table.dispatch.after_parent_attach(table, metadata)
return table
except:
with util.safe_reraise():
metadata._remove_table(name, schema)
@property
@util.deprecated('0.9', 'Use ``table.schema.quote``')
def quote_schema(self):
"""Return the value of the ``quote_schema`` flag passed
to this :class:`.Table`.
"""
return self.schema.quote
def __init__(self, *args, **kw):
"""Constructor for :class:`~.schema.Table`.
This method is a no-op. See the top-level
documentation for :class:`~.schema.Table`
for constructor arguments.
"""
# __init__ is overridden to prevent __new__ from
# calling the superclass constructor.
def _init(self, name, metadata, *args, **kwargs):
super(Table, self).__init__(
quoted_name(name, kwargs.pop('quote', None)))
self.metadata = metadata
self.schema = kwargs.pop('schema', None)
if self.schema is None:
self.schema = metadata.schema
elif self.schema is BLANK_SCHEMA:
self.schema = None
else:
quote_schema = kwargs.pop('quote_schema', None)
self.schema = quoted_name(self.schema, quote_schema)
self.indexes = set()
self.constraints = set()
self._columns = ColumnCollection()
PrimaryKeyConstraint(_implicit_generated=True).\
_set_parent_with_dispatch(self)
self.foreign_keys = set()
self._extra_dependencies = set()
if self.schema is not None:
self.fullname = "%s.%s" % (self.schema, self.name)
else:
self.fullname = self.name
autoload_with = kwargs.pop('autoload_with', None)
autoload = kwargs.pop('autoload', autoload_with is not None)
# this argument is only used with _init_existing()
kwargs.pop('autoload_replace', True)
_extend_on = kwargs.pop("_extend_on", None)
include_columns = kwargs.pop('include_columns', None)
self.implicit_returning = kwargs.pop('implicit_returning', True)
if 'info' in kwargs:
self.info = kwargs.pop('info')
if 'listeners' in kwargs:
listeners = kwargs.pop('listeners')
for evt, fn in listeners:
event.listen(self, evt, fn)
self._prefixes = kwargs.pop('prefixes', [])
self._extra_kwargs(**kwargs)
# load column definitions from the database if 'autoload' is defined
# we do it after the table is in the singleton dictionary to support
# circular foreign keys
if autoload:
self._autoload(
metadata, autoload_with,
include_columns, _extend_on=_extend_on)
# initialize all the column, etc. objects. done after reflection to
# allow user-overrides
self._init_items(*args)
def _autoload(self, metadata, autoload_with, include_columns,
exclude_columns=(), _extend_on=None):
if autoload_with:
autoload_with.run_callable(
autoload_with.dialect.reflecttable,
self, include_columns, exclude_columns,
_extend_on=_extend_on
)
else:
bind = _bind_or_error(
metadata,
msg="No engine is bound to this Table's MetaData. "
"Pass an engine to the Table via "
"autoload_with=<someengine>, "
"or associate the MetaData with an engine via "
"metadata.bind=<someengine>")
bind.run_callable(
bind.dialect.reflecttable,
self, include_columns, exclude_columns,
_extend_on=_extend_on
)
@property
def _sorted_constraints(self):
"""Return the set of constraints as a list, sorted by creation
order.
"""
return sorted(self.constraints, key=lambda c: c._creation_order)
@property
def foreign_key_constraints(self):
""":class:`.ForeignKeyConstraint` objects referred to by this
:class:`.Table`.
This list is produced from the collection of :class:`.ForeignKey`
objects currently associated.
.. versionadded:: 1.0.0
"""
return set(fkc.constraint for fkc in self.foreign_keys)
def _init_existing(self, *args, **kwargs):
autoload_with = kwargs.pop('autoload_with', None)
autoload = kwargs.pop('autoload', autoload_with is not None)
autoload_replace = kwargs.pop('autoload_replace', True)
schema = kwargs.pop('schema', None)
_extend_on = kwargs.pop('_extend_on', None)
if schema and schema != self.schema:
            raise exc.ArgumentError(
                "Can't change schema of existing table from '%s' to '%s'" %
                (self.schema, schema))
include_columns = kwargs.pop('include_columns', None)
if include_columns is not None:
for c in self.c:
if c.name not in include_columns:
self._columns.remove(c)
for key in ('quote', 'quote_schema'):
if key in kwargs:
raise exc.ArgumentError(
"Can't redefine 'quote' or 'quote_schema' arguments")
if 'info' in kwargs:
self.info = kwargs.pop('info')
if autoload:
if not autoload_replace:
# don't replace columns already present.
# we'd like to do this for constraints also however we don't
# have simple de-duping for unnamed constraints.
exclude_columns = [c.name for c in self.c]
else:
exclude_columns = ()
self._autoload(
self.metadata, autoload_with,
include_columns, exclude_columns, _extend_on=_extend_on)
self._extra_kwargs(**kwargs)
self._init_items(*args)
def _extra_kwargs(self, **kwargs):
self._validate_dialect_kwargs(kwargs)
def _init_collections(self):
pass
def _reset_exported(self):
pass
@property
def _autoincrement_column(self):
return self.primary_key._autoincrement_column
@property
def key(self):
"""Return the 'key' for this :class:`.Table`.
This value is used as the dictionary key within the
:attr:`.MetaData.tables` collection. It is typically the same
as that of :attr:`.Table.name` for a table with no
:attr:`.Table.schema` set; otherwise it is typically of the form
``schemaname.tablename``.
"""
return _get_table_key(self.name, self.schema)
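# Illustrative sketch (editor's addition, not part of the library source):
# how .key differs for schema-qualified tables; assumes the public
# constructors from the sqlalchemy package.
#
#     from sqlalchemy import MetaData, Table, Column, Integer
#
#     meta = MetaData()
#     plain = Table('user', meta, Column('id', Integer, primary_key=True))
#     scoped = Table('user', meta, Column('id', Integer, primary_key=True),
#                    schema='audit')
#     plain.key    # 'user'
#     scoped.key   # 'audit.user' -- also its key within meta.tables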
def __repr__(self):
return "Table(%s)" % ', '.join(
[repr(self.name)] + [repr(self.metadata)] +
[repr(x) for x in self.columns] +
["%s=%s" % (k, repr(getattr(self, k))) for k in ['schema']])
def __str__(self):
return _get_table_key(self.description, self.schema)
@property
def bind(self):
"""Return the connectable associated with this Table."""
return self.metadata and self.metadata.bind or None
def add_is_dependent_on(self, table):
"""Add a 'dependency' for this Table.
This is another Table object which must be created
first before this one can, or dropped after this one.
Usually, dependencies between tables are determined via
ForeignKey objects. However, for other situations that
create dependencies outside of foreign keys (rules, inheriting),
this method can manually establish such a link.
"""
self._extra_dependencies.add(table)
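# Illustrative sketch (editor's addition): declaring a creation-order
# dependency that no ForeignKey expresses, e.g. a table populated by a
# trigger defined on another table; the table names are hypothetical.
#
#     account = Table('account', meta, Column('id', Integer, primary_key=True))
#     audit_log = Table('audit_log', meta, Column('id', Integer, primary_key=True))
#     audit_log.add_is_dependent_on(account)
#     # MetaData.create_all() now emits CREATE TABLE for 'account' before
#     # 'audit_log', and drop_all() drops them in the reverse order.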
def append_column(self, column):
"""Append a :class:`~.schema.Column` to this :class:`~.schema.Table`.
The "key" of the newly added :class:`~.schema.Column`, i.e. the
value of its ``.key`` attribute, will then be available
in the ``.c`` collection of this :class:`~.schema.Table`, and the
column definition will be included in any CREATE TABLE, SELECT,
UPDATE, etc. statements generated from this :class:`~.schema.Table`
construct.
Note that this does **not** change the definition of the table
as it exists within any underlying database, assuming that
table has already been created in the database. Relational
databases support the addition of columns to existing tables
using the SQL ALTER command, which would need to be
emitted for an already-existing table that doesn't contain
the newly added column.
"""
column._set_parent_with_dispatch(self)
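# Illustrative sketch (editor's addition): appending a column after
# construction; 'user' and 'meta' are hypothetical names.
#
#     from sqlalchemy import String
#
#     user = Table('user', meta, Column('id', Integer, primary_key=True))
#     user.append_column(Column('email', String(100)))
#     'email' in user.c   # True -- present in future CREATE TABLE / SELECT
#                         # statements, but no ALTER TABLE is emitted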
def append_constraint(self, constraint):
"""Append a :class:`~.schema.Constraint` to this
:class:`~.schema.Table`.
This has the effect of the constraint being included in any
future CREATE TABLE statement, assuming specific DDL creation
events have not been associated with the given
:class:`~.schema.Constraint` object.
Note that this does **not** produce the constraint within the
relational database automatically, for a table that already exists
in the database. To add a constraint to an
existing relational database table, the SQL ALTER command must
be used. SQLAlchemy also provides the
:class:`.AddConstraint` construct which can produce this SQL when
invoked as an executable clause.
"""
constraint._set_parent_with_dispatch(self)
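# Illustrative sketch (editor's addition): attaching a constraint to an
# already-constructed Table; UniqueConstraint is assumed to be imported
# from sqlalchemy.
#
#     from sqlalchemy import UniqueConstraint
#
#     user.append_constraint(UniqueConstraint('email', name='uq_user_email'))
#     # rendered inline in subsequent CREATE TABLE statements; for a table
#     # that already exists in the database, use AddConstraint / ALTER TABLE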
def append_ddl_listener(self, event_name, listener):
"""Append a DDL event listener to this ``Table``.
.. deprecated:: 0.7
See :class:`.DDLEvents`.
"""
def adapt_listener(target, connection, **kw):
listener(event_name, target, connection)
event.listen(self, "" + event_name.replace('-', '_'), adapt_listener)
def _set_parent(self, metadata):
metadata._add_table(self.name, self.schema, self)
self.metadata = metadata
def get_children(self, column_collections=True,
schema_visitor=False, **kw):
if not schema_visitor:
return TableClause.get_children(
self, column_collections=column_collections, **kw)
else:
if column_collections:
return list(self.columns)
else:
return []
def exists(self, bind=None):
"""Return True if this table exists."""
if bind is None:
bind = _bind_or_error(self)
return bind.run_callable(bind.dialect.has_table,
self.name, schema=self.schema)
def create(self, bind=None, checkfirst=False):
"""Issue a ``CREATE`` statement for this
:class:`.Table`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`.MetaData.create_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator,
self,
checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
"""Issue a ``DROP`` statement for this
:class:`.Table`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`.MetaData.drop_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper,
self,
checkfirst=checkfirst)
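# Illustrative sketch (editor's addition): emitting CREATE / DROP for a
# single Table; the in-memory SQLite URL is only an example target.
#
#     from sqlalchemy import create_engine
#
#     engine = create_engine('sqlite://')
#     user.create(engine, checkfirst=True)   # CREATE TABLE, skipped if present
#     user.exists(engine)                    # True once created
#     user.drop(engine, checkfirst=True)     # DROP TABLE, skipped if absent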
def tometadata(self, metadata, schema=RETAIN_SCHEMA,
referred_schema_fn=None, name=None):
"""Return a copy of this :class:`.Table` associated with a different
:class:`.MetaData`.
E.g.::
m1 = MetaData()
user = Table('user', m1, Column('id', Integer, primary_key=True))
m2 = MetaData()
user_copy = user.tometadata(m2)
:param metadata: Target :class:`.MetaData` object, into which the
new :class:`.Table` object will be created.
:param schema: optional string name indicating the target schema.
Defaults to the special symbol :attr:`.RETAIN_SCHEMA` which indicates
that no change to the schema name should be made in the new
:class:`.Table`. If set to a string name, the new :class:`.Table`
will have this new name as the ``.schema``. If set to ``None``, the
schema will be set to that of the schema set on the target
:class:`.MetaData`, which is typically ``None`` as well, unless
set explicitly::
m2 = MetaData(schema='newschema')
# user_copy_one will have "newschema" as the schema name
user_copy_one = user.tometadata(m2, schema=None)
m3 = MetaData() # schema defaults to None
# user_copy_two will have None as the schema name
user_copy_two = user.tometadata(m3, schema=None)
:param referred_schema_fn: optional callable which can be supplied
in order to provide for the schema name that should be assigned
to the referenced table of a :class:`.ForeignKeyConstraint`.
The callable accepts this parent :class:`.Table`, the
target schema that we are changing to, the
:class:`.ForeignKeyConstraint` object, and the existing
"target schema" of that constraint. The function should return the
string schema name that should be applied.
E.g.::
def referred_schema_fn(table, to_schema,
constraint, referred_schema):
if referred_schema == 'base_tables':
return referred_schema
else:
return to_schema
new_table = table.tometadata(m2, schema="alt_schema",
referred_schema_fn=referred_schema_fn)
.. versionadded:: 0.9.2
:param name: optional string name indicating the target table name.
If not specified or None, the table name is retained. This allows
a :class:`.Table` to be copied to the same :class:`.MetaData` target
with a new name.
.. versionadded:: 1.0.0
"""
if name is None:
name = self.name
if schema is RETAIN_SCHEMA:
schema = self.schema
elif schema is None:
schema = metadata.schema
key = _get_table_key(name, schema)
if key in metadata.tables:
util.warn("Table '%s' already exists within the given "
"MetaData - not copying." % self.description)
return metadata.tables[key]
args = []
for c in self.columns:
args.append(c.copy(schema=schema))
table = Table(
name, metadata, schema=schema,
*args, **self.kwargs
)
for c in self.constraints:
if isinstance(c, ForeignKeyConstraint):
referred_schema = c._referred_schema
if referred_schema_fn:
fk_constraint_schema = referred_schema_fn(
self, schema, c, referred_schema)
else:
fk_constraint_schema = (
schema if referred_schema == self.schema else None)
table.append_constraint(
c.copy(schema=fk_constraint_schema, target_table=table))
elif not c._type_bound:
# skip unique constraints that would be generated
# by the 'unique' flag on Column
if isinstance(c, UniqueConstraint) and \
len(c.columns) == 1 and \
list(c.columns)[0].unique:
continue
table.append_constraint(
c.copy(schema=schema, target_table=table))
for index in self.indexes:
# skip indexes that would be generated
# by the 'index' flag on Column
if len(index.columns) == 1 and \
list(index.columns)[0].index:
continue
Index(index.name,
unique=index.unique,
*[table.c[col] for col in index.columns.keys()],
**index.kwargs)
return self._schema_item_copy(table)
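# Illustrative sketch (editor's addition): copying a Table into a second
# MetaData while relocating it to another schema, per the parameters
# documented above; names are hypothetical.
#
#     m1 = MetaData()
#     user = Table('user', m1, Column('id', Integer, primary_key=True))
#     m2 = MetaData()
#     user_reporting = user.tometadata(m2, schema='reporting')
#     user_reporting.schema   # 'reporting'
#     user_reporting.key      # 'reporting.user' within m2.tables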
class Column(SchemaItem, ColumnClause):
"""Represents a column in a database table."""
__visit_name__ = 'column'
def __init__(self, *args, **kwargs):
r"""
Construct a new ``Column`` object.
:param name: The name of this column as represented in the database.
This argument may be the first positional argument, or specified
via keyword.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word. Names with any number of upper
case characters will be quoted and sent exactly. Note that this
behavior applies even for databases which standardize upper
case names as case insensitive such as Oracle.
The name field may be omitted at construction time and applied
later, at any time before the Column is associated with a
:class:`.Table`. This is to support convenient
usage within the :mod:`~sqlalchemy.ext.declarative` extension.
:param type\_: The column's type, indicated using an instance which
subclasses :class:`~sqlalchemy.types.TypeEngine`. If no arguments
are required for the type, the class of the type can be sent
as well, e.g.::
# use a type with arguments
Column('data', String(50))
# use no arguments
Column('level', Integer)
The ``type`` argument may be the second positional argument
or specified by keyword.
If the ``type`` is ``None`` or is omitted, it will first default to
the special type :class:`.NullType`. If and when this
:class:`.Column` is made to refer to another column using
:class:`.ForeignKey` and/or :class:`.ForeignKeyConstraint`, the type
of the remote-referenced column will be copied to this column as
well, at the moment that the foreign key is resolved against that
remote :class:`.Column` object.
.. versionchanged:: 0.9.0
Support for propagation of type to a :class:`.Column` from its
:class:`.ForeignKey` object has been improved and should be
more reliable and timely.
:param \*args: Additional positional arguments include various
:class:`.SchemaItem` derived constructs which will be applied
as options to the column. These include instances of
:class:`.Constraint`, :class:`.ForeignKey`, :class:`.ColumnDefault`,
and :class:`.Sequence`. In some cases an equivalent keyword
argument is available such as ``server_default``, ``default``
and ``unique``.
:param autoincrement: Set up "auto increment" semantics for an integer
primary key column. The default value is the string ``"auto"``
which indicates that a single-column primary key that is of
an INTEGER type with no stated client-side or python-side defaults
should receive auto increment semantics automatically;
all other varieties of primary key columns will not. This
includes that :term:`DDL` such as PostgreSQL SERIAL or MySQL
AUTO_INCREMENT will be emitted for this column during a table
create, as well as that the column is assumed to generate new
integer primary key values when an INSERT statement is invoked, and
that these values will be retrieved by the dialect.
The flag may be set to ``True`` to indicate that a column which
is part of a composite (e.g. multi-column) primary key should
have autoincrement semantics, though note that only one column
within a primary key may have this setting. It can also
be set to ``True`` to indicate autoincrement semantics on a
column that has a client-side or server-side default configured,
however note that not all dialects can accommodate all styles
of default as an "autoincrement". It can also be
set to ``False`` on a single-column primary key that has a
datatype of INTEGER in order to disable auto increment semantics
for that column.
.. versionchanged:: 1.1 The autoincrement flag now defaults to
``"auto"`` which indicates autoincrement semantics by default
for single-column integer primary keys only; for composite
(multi-column) primary keys, autoincrement is never implicitly
enabled; as always, ``autoincrement=True`` will allow for
at most one of those columns to be an "autoincrement" column.
``autoincrement=True`` may also be set on a :class:`.Column`
that has an explicit client-side or server-side default,
subject to limitations of the backend database and dialect.
The setting *only* has an effect for columns which are:
* Integer derived (i.e. INT, SMALLINT, BIGINT).
* Part of the primary key
* Not referring to another column via :class:`.ForeignKey`, unless
the value is specified as ``'ignore_fk'``::
# turn on autoincrement for this column despite
# the ForeignKey()
Column('id', ForeignKey('other.id'),
primary_key=True, autoincrement='ignore_fk')
It is typically not desirable to have "autoincrement" enabled
on a column that refers to another via foreign key, as such a column
is required to refer to a value that originates from elsewhere.
The setting has these two effects on columns that meet the
above criteria:
* DDL issued for the column will include database-specific
keywords intended to signify this column as an
"autoincrement" column, such as AUTO INCREMENT on MySQL,
SERIAL on PostgreSQL, and IDENTITY on MS-SQL. It does
*not* issue AUTOINCREMENT for SQLite since this is a
special SQLite flag that is not required for autoincrementing
behavior.
.. seealso::
:ref:`sqlite_autoincrement`
* The column will be considered to be available using an
"autoincrement" method specific to the backend database, such
as calling upon ``cursor.lastrowid``, using RETURNING in an
INSERT statement to get at a sequence-generated value, or using
special functions such as "SELECT scope_identity()".
These methods are highly specific to the DBAPIs and databases in
use and vary greatly, so care should be taken when associating
``autoincrement=True`` with a custom default generation function.
:param default: A scalar, Python callable, or
:class:`.ColumnElement` expression representing the
*default value* for this column, which will be invoked upon insert
if this column is otherwise not specified in the VALUES clause of
the insert. This is a shortcut to using :class:`.ColumnDefault` as
a positional argument; see that class for full detail on the
structure of the argument.
Contrast this argument to :paramref:`.Column.server_default`
which creates a default generator on the database side.
.. seealso::
:ref:`metadata_defaults_toplevel`
:param doc: optional String that can be used by the ORM or similar
to document attributes. This attribute does not render SQL
comments (a future attribute 'comment' will achieve that).
:param key: An optional string identifier which will identify this
``Column`` object on the :class:`.Table`. When a key is provided,
this is the only identifier referencing the ``Column`` within the
application, including ORM attribute mapping; the ``name`` field
is used only when rendering SQL.
:param index: When ``True``, indicates that the column is indexed.
This is a shortcut for using a :class:`.Index` construct on the
table. To specify indexes with explicit names or indexes that
contain multiple columns, use the :class:`.Index` construct
instead.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param nullable: When set to ``False``, will cause the "NOT NULL"
phrase to be added when generating DDL for the column. When
``True``, will normally generate nothing (in SQL this defaults to
"NULL"), except in some very specific backend-specific edge cases
where "NULL" may render explicitly. Defaults to ``True`` unless
:paramref:`~.Column.primary_key` is also ``True``, in which case it
defaults to ``False``. This parameter is only used when issuing
CREATE TABLE statements.
:param onupdate: A scalar, Python callable, or
:class:`~sqlalchemy.sql.expression.ClauseElement` representing a
default value to be applied to the column within UPDATE
statements, which will be invoked upon update if this column is not
present in the SET clause of the update. This is a shortcut to
using :class:`.ColumnDefault` as a positional argument with
``for_update=True``.
.. seealso::
:ref:`metadata_defaults` - complete discussion of onupdate
:param primary_key: If ``True``, marks this column as a primary key
column. Multiple columns can have this flag set to specify
composite primary keys. As an alternative, the primary key of a
:class:`.Table` can be specified via an explicit
:class:`.PrimaryKeyConstraint` object.
:param server_default: A :class:`.FetchedValue` instance, str, Unicode
or :func:`~sqlalchemy.sql.expression.text` construct representing
the DDL DEFAULT value for the column.
String types will be emitted as-is, surrounded by single quotes::
Column('x', Text, server_default="val")
x TEXT DEFAULT 'val'
A :func:`~sqlalchemy.sql.expression.text` expression will be
rendered as-is, without quotes::
Column('y', DateTime, server_default=text('NOW()'))
y DATETIME DEFAULT NOW()
Strings and text() will be converted into a
:class:`.DefaultClause` object upon initialization.
Use :class:`.FetchedValue` to indicate that an already-existing
column will generate a default value on the database side which
will be available to SQLAlchemy for post-fetch after inserts. This
construct does not specify any DDL and the implementation is left
to the database, such as via a trigger.
.. seealso::
:ref:`server_defaults` - complete discussion of server side
defaults
:param server_onupdate: A :class:`.FetchedValue` instance
representing a database-side default generation function,
such as a trigger. This
indicates to SQLAlchemy that a newly generated value will be
available after updates. This construct does not actually
implement any kind of generation function within the database,
which instead must be specified separately.
.. seealso::
:ref:`triggered_columns`
:param quote: Force quoting of this column's name on or off,
corresponding to ``True`` or ``False``. When left at its default
of ``None``, the column identifier will be quoted according to
whether the name is case sensitive (identifiers with at least one
upper case character are treated as case sensitive), or if it's a
reserved word. This flag is only needed to force quoting of a
reserved word which is not known by the SQLAlchemy dialect.
:param unique: When ``True``, indicates that this column contains a
unique constraint, or if ``index`` is ``True`` as well, indicates
that the :class:`.Index` should be created with the unique flag.
To specify multiple columns in the constraint/index or to specify
an explicit name, use the :class:`.UniqueConstraint` or
:class:`.Index` constructs explicitly.
:param system: When ``True``, indicates this is a "system" column,
that is a column which is automatically made available by the
database, and should not be included in the columns list for a
``CREATE TABLE`` statement.
For more elaborate scenarios where columns should be
conditionally rendered differently on different backends,
consider custom compilation rules for :class:`.CreateColumn`.
.. versionadded:: 0.8.3 Added the ``system=True`` parameter to
:class:`.Column`.
"""
name = kwargs.pop('name', None)
type_ = kwargs.pop('type_', None)
args = list(args)
if args:
if isinstance(args[0], util.string_types):
if name is not None:
raise exc.ArgumentError(
"May not pass name positionally and as a keyword.")
name = args.pop(0)
if args:
coltype = args[0]
if hasattr(coltype, "_sqla_type"):
if type_ is not None:
raise exc.ArgumentError(
"May not pass type_ positionally and as a keyword.")
type_ = args.pop(0)
if name is not None:
name = quoted_name(name, kwargs.pop('quote', None))
elif "quote" in kwargs:
raise exc.ArgumentError("Explicit 'name' is required when "
"sending 'quote' argument")
super(Column, self).__init__(name, type_)
self.key = kwargs.pop('key', name)
self.primary_key = kwargs.pop('primary_key', False)
self.nullable = kwargs.pop('nullable', not self.primary_key)
self.default = kwargs.pop('default', None)
self.server_default = kwargs.pop('server_default', None)
self.server_onupdate = kwargs.pop('server_onupdate', None)
# these default to None because .index and .unique are *not*
# informational flags about Column - there can still be an
# Index or UniqueConstraint referring to this Column.
self.index = kwargs.pop('index', None)
self.unique = kwargs.pop('unique', None)
self.system = kwargs.pop('system', False)
self.doc = kwargs.pop('doc', None)
self.onupdate = kwargs.pop('onupdate', None)
self.autoincrement = kwargs.pop('autoincrement', "auto")
self.constraints = set()
self.foreign_keys = set()
# check if this Column is proxying another column
if '_proxies' in kwargs:
self._proxies = kwargs.pop('_proxies')
# otherwise, add DDL-related events
elif isinstance(self.type, SchemaEventTarget):
self.type._set_parent_with_dispatch(self)
if self.default is not None:
if isinstance(self.default, (ColumnDefault, Sequence)):
args.append(self.default)
else:
if getattr(self.type, '_warn_on_bytestring', False):
if isinstance(self.default, util.binary_type):
util.warn(
"Unicode column '%s' has non-unicode "
"default value %r specified." % (
self.key,
self.default
))
args.append(ColumnDefault(self.default))
if self.server_default is not None:
if isinstance(self.server_default, FetchedValue):
args.append(self.server_default._as_for_update(False))
else:
args.append(DefaultClause(self.server_default))
if self.onupdate is not None:
if isinstance(self.onupdate, (ColumnDefault, Sequence)):
args.append(self.onupdate)
else:
args.append(ColumnDefault(self.onupdate, for_update=True))
if self.server_onupdate is not None:
if isinstance(self.server_onupdate, FetchedValue):
args.append(self.server_onupdate._as_for_update(True))
else:
args.append(DefaultClause(self.server_onupdate,
for_update=True))
self._init_items(*args)
util.set_creation_order(self)
if 'info' in kwargs:
self.info = kwargs.pop('info')
if kwargs:
raise exc.ArgumentError(
"Unknown arguments passed to Column: " + repr(list(kwargs)))
# @property
# def quote(self):
# return getattr(self.name, "quote", None)
def __str__(self):
if self.name is None:
return "(no name)"
elif self.table is not None:
if self.table.named_with_column:
return (self.table.description + "." + self.description)
else:
return self.description
else:
return self.description
def references(self, column):
"""Return True if this Column references the given column via foreign
key."""
for fk in self.foreign_keys:
if fk.column.proxy_set.intersection(column.proxy_set):
return True
else:
return False
def append_foreign_key(self, fk):
fk._set_parent_with_dispatch(self)
def __repr__(self):
kwarg = []
if self.key != self.name:
kwarg.append('key')
if self.primary_key:
kwarg.append('primary_key')
if not self.nullable:
kwarg.append('nullable')
if self.onupdate:
kwarg.append('onupdate')
if self.default:
kwarg.append('default')
if self.server_default:
kwarg.append('server_default')
return "Column(%s)" % ', '.join(
[repr(self.name)] + [repr(self.type)] +
[repr(x) for x in self.foreign_keys if x is not None] +
[repr(x) for x in self.constraints] +
[(self.table is not None and "table=<%s>" %
self.table.description or "table=None")] +
["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg])
def _set_parent(self, table):
if not self.name:
raise exc.ArgumentError(
"Column must be constructed with a non-blank name or "
"assign a non-blank .name before adding to a Table.")
if self.key is None:
self.key = self.name
existing = getattr(self, 'table', None)
if existing is not None and existing is not table:
raise exc.ArgumentError(
"Column object '%s' already assigned to Table '%s'" % (
self.key,
existing.description
))
if self.key in table._columns:
col = table._columns.get(self.key)
if col is not self:
for fk in col.foreign_keys:
table.foreign_keys.remove(fk)
if fk.constraint in table.constraints:
# this might have been removed
# already, if it's a composite constraint
# and more than one col being replaced
table.constraints.remove(fk.constraint)
table._columns.replace(self)
if self.primary_key:
table.primary_key._replace(self)
elif self.key in table.primary_key:
raise exc.ArgumentError(
"Trying to redefine primary-key column '%s' as a "
"non-primary-key column on table '%s'" % (
self.key, table.fullname))
self.table = table
if self.index:
if isinstance(self.index, util.string_types):
raise exc.ArgumentError(
"The 'index' keyword argument on Column is boolean only. "
"To create indexes with a specific name, create an "
"explicit Index object external to the Table.")
Index(None, self, unique=bool(self.unique))
elif self.unique:
if isinstance(self.unique, util.string_types):
raise exc.ArgumentError(
"The 'unique' keyword argument on Column is boolean "
"only. To create unique constraints or indexes with a "
"specific name, append an explicit UniqueConstraint to "
"the Table's list of elements, or create an explicit "
"Index object external to the Table.")
table.append_constraint(UniqueConstraint(self.key))
self._setup_on_memoized_fks(lambda fk: fk._set_remote_table(table))
def _setup_on_memoized_fks(self, fn):
fk_keys = [
((self.table.key, self.key), False),
((self.table.key, self.name), True),
]
for fk_key, link_to_name in fk_keys:
if fk_key in self.table.metadata._fk_memos:
for fk in self.table.metadata._fk_memos[fk_key]:
if fk.link_to_name is link_to_name:
fn(fk)
def _on_table_attach(self, fn):
if self.table is not None:
fn(self, self.table)
else:
event.listen(self, 'after_parent_attach', fn)
def copy(self, **kw):
"""Create a copy of this ``Column``, unitialized.
This is used in ``Table.tometadata``.
"""
# Constraint objects plus non-constraint-bound ForeignKey objects
args = \
[c.copy(**kw) for c in self.constraints if not c._type_bound] + \
[c.copy(**kw) for c in self.foreign_keys if not c.constraint]
type_ = self.type
if isinstance(type_, SchemaEventTarget):
type_ = type_.copy(**kw)
c = self._constructor(
name=self.name,
type_=type_,
key=self.key,
primary_key=self.primary_key,
nullable=self.nullable,
unique=self.unique,
system=self.system,
# quote=self.quote,
index=self.index,
autoincrement=self.autoincrement,
default=self.default,
server_default=self.server_default,
onupdate=self.onupdate,
server_onupdate=self.server_onupdate,
doc=self.doc,
*args
)
return self._schema_item_copy(c)
def _make_proxy(self, selectable, name=None, key=None,
name_is_truncatable=False, **kw):
"""Create a *proxy* for this column.
This is a copy of this ``Column`` referenced by a different parent
(such as an alias or select statement). The column should
be used only in select scenarios, as its full DDL/default
information is not transferred.
"""
fk = [ForeignKey(f.column, _constraint=f.constraint)
for f in self.foreign_keys]
if name is None and self.name is None:
raise exc.InvalidRequestError(
"Cannot initialize a sub-selectable"
" with this Column object until its 'name' has "
"been assigned.")
try:
c = self._constructor(
_as_truncated(name or self.name) if
name_is_truncatable else (name or self.name),
self.type,
key=key if key else name if name else self.key,
primary_key=self.primary_key,
nullable=self.nullable,
_proxies=[self], *fk)
except TypeError:
util.raise_from_cause(
TypeError(
"Could not create a copy of this %r object. "
"Ensure the class includes a _constructor() "
"attribute or method which accepts the "
"standard Column constructor arguments, or "
"references the Column class itself." % self.__class__)
)
c.table = selectable
selectable._columns.add(c)
if selectable._is_clone_of is not None:
c._is_clone_of = selectable._is_clone_of.columns[c.key]
if self.primary_key:
selectable.primary_key.add(c)
c.dispatch.after_parent_attach(c, selectable)
return c
def get_children(self, schema_visitor=False, **kwargs):
if schema_visitor:
return [x for x in (self.default, self.onupdate)
if x is not None] + \
list(self.foreign_keys) + list(self.constraints)
else:
return ColumnClause.get_children(self, **kwargs)
class ForeignKey(DialectKWArgs, SchemaItem):
"""Defines a dependency between two columns.
``ForeignKey`` is specified as an argument to a :class:`.Column` object,
e.g.::
t = Table("remote_table", metadata,
Column("remote_id", ForeignKey("main_table.id"))
)
Note that ``ForeignKey`` is only a marker object that defines
a dependency between two columns. The actual constraint
is in all cases represented by the :class:`.ForeignKeyConstraint`
object. This object will be generated automatically when
a ``ForeignKey`` is associated with a :class:`.Column` which
in turn is associated with a :class:`.Table`. Conversely,
when :class:`.ForeignKeyConstraint` is applied to a :class:`.Table`,
``ForeignKey`` markers are automatically generated to be
present on each associated :class:`.Column`, which are also
associated with the constraint object.
Note that you cannot define a "composite" foreign key constraint,
that is a constraint between a grouping of multiple parent/child
columns, using ``ForeignKey`` objects. To define this grouping,
the :class:`.ForeignKeyConstraint` object must be used, and applied
to the :class:`.Table`. The associated ``ForeignKey`` objects
are created automatically.
The ``ForeignKey`` objects associated with an individual
:class:`.Column` object are available in the `foreign_keys` collection
of that column.
Further examples of foreign key configuration are in
:ref:`metadata_foreignkeys`.
"""
__visit_name__ = 'foreign_key'
def __init__(self, column, _constraint=None, use_alter=False, name=None,
onupdate=None, ondelete=None, deferrable=None,
initially=None, link_to_name=False, match=None,
info=None,
**dialect_kw):
r"""
Construct a column-level FOREIGN KEY.
The :class:`.ForeignKey` object when constructed generates a
:class:`.ForeignKeyConstraint` which is associated with the parent
:class:`.Table` object's collection of constraints.
:param column: A single target column for the key relationship. A
:class:`.Column` object or a column name as a string:
``tablename.columnkey`` or ``schema.tablename.columnkey``.
``columnkey`` is the ``key`` which has been assigned to the column
(defaults to the column name itself), unless ``link_to_name`` is
``True`` in which case the rendered name of the column is used.
.. versionadded:: 0.7.4
Note that if the schema name is not included, and the
underlying :class:`.MetaData` has a "schema", that value will
be used.
:param name: Optional string. An in-database name for the key if
`constraint` is not provided.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally
assigned ``key``.
:param use_alter: passed to the underlying
:class:`.ForeignKeyConstraint` to indicate the constraint should
be generated/dropped externally from the CREATE TABLE/ DROP TABLE
statement. See :paramref:`.ForeignKeyConstraint.use_alter`
for further description.
.. seealso::
:paramref:`.ForeignKeyConstraint.use_alter`
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. The
arguments are ultimately handled by a corresponding
:class:`.ForeignKeyConstraint`. See the documentation regarding
an individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
.. versionadded:: 0.9.2
"""
self._colspec = column
if isinstance(self._colspec, util.string_types):
self._table_column = None
else:
if hasattr(self._colspec, '__clause_element__'):
self._table_column = self._colspec.__clause_element__()
else:
self._table_column = self._colspec
if not isinstance(self._table_column, ColumnClause):
raise exc.ArgumentError(
"String, Column, or Column-bound argument "
"expected, got %r" % self._table_column)
elif not isinstance(
self._table_column.table, (util.NoneType, TableClause)):
raise exc.ArgumentError(
"ForeignKey received Column not bound "
"to a Table, got: %r" % self._table_column.table
)
# the linked ForeignKeyConstraint.
# ForeignKey will create this when parent Column
# is attached to a Table, *or* ForeignKeyConstraint
# object passes itself in when creating ForeignKey
# markers.
self.constraint = _constraint
self.parent = None
self.use_alter = use_alter
self.name = name
self.onupdate = onupdate
self.ondelete = ondelete
self.deferrable = deferrable
self.initially = initially
self.link_to_name = link_to_name
self.match = match
if info:
self.info = info
self._unvalidated_dialect_kw = dialect_kw
def __repr__(self):
return "ForeignKey(%r)" % self._get_colspec()
def copy(self, schema=None):
"""Produce a copy of this :class:`.ForeignKey` object.
The new :class:`.ForeignKey` will not be bound
to any :class:`.Column`.
This method is usually used by the internal
copy procedures of :class:`.Column`, :class:`.Table`,
and :class:`.MetaData`.
:param schema: The returned :class:`.ForeignKey` will
reference the original table and column name, qualified
by the given string schema name.
"""
fk = ForeignKey(
self._get_colspec(schema=schema),
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match,
**self._unvalidated_dialect_kw
)
return self._schema_item_copy(fk)
def _get_colspec(self, schema=None, table_name=None):
"""Return a string based 'column specification' for this
:class:`.ForeignKey`.
This is usually the equivalent of the string-based "tablename.colname"
argument first passed to the object's constructor.
"""
if schema:
_schema, tname, colname = self._column_tokens
if table_name is not None:
tname = table_name
return "%s.%s.%s" % (schema, tname, colname)
elif table_name:
schema, tname, colname = self._column_tokens
if schema:
return "%s.%s.%s" % (schema, table_name, colname)
else:
return "%s.%s" % (table_name, colname)
elif self._table_column is not None:
return "%s.%s" % (
self._table_column.table.fullname, self._table_column.key)
else:
return self._colspec
@property
def _referred_schema(self):
return self._column_tokens[0]
def _table_key(self):
if self._table_column is not None:
if self._table_column.table is None:
return None
else:
return self._table_column.table.key
else:
schema, tname, colname = self._column_tokens
return _get_table_key(tname, schema)
target_fullname = property(_get_colspec)
def references(self, table):
"""Return True if the given :class:`.Table` is referenced by this
:class:`.ForeignKey`."""
return table.corresponding_column(self.column) is not None
def get_referent(self, table):
"""Return the :class:`.Column` in the given :class:`.Table`
referenced by this :class:`.ForeignKey`.
Returns None if this :class:`.ForeignKey` does not reference the given
:class:`.Table`.
"""
return table.corresponding_column(self.column)
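# Illustrative sketch (editor's addition): inspecting a resolved ForeignKey
# once both tables are present on the same MetaData; table names are
# hypothetical.
#
#     user = Table('user', meta, Column('id', Integer, primary_key=True))
#     addr = Table('address', meta,
#                  Column('id', Integer, primary_key=True),
#                  Column('user_id', ForeignKey('user.id')))
#     fk = list(addr.c.user_id.foreign_keys)[0]
#     fk.column               # user.c.id, resolved via the shared MetaData
#     fk.references(user)     # True
#     fk.get_referent(user)   # user.c.id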
@util.memoized_property
def _column_tokens(self):
"""parse a string-based _colspec into its component parts."""
m = self._get_colspec().split('.')
if m is None:
raise exc.ArgumentError(
"Invalid foreign key column specification: %s" %
self._colspec)
if (len(m) == 1):
tname = m.pop()
colname = None
else:
colname = m.pop()
tname = m.pop()
# A FK between column 'bar' and table 'foo' can be
# specified as 'foo', 'foo.bar', 'dbo.foo.bar',
# 'otherdb.dbo.foo.bar'. Once we have the column name and
# the table name, treat everything else as the schema
# name. Some databases (e.g. Sybase) support
# inter-database foreign keys. See tickets#1341 and --
# indirectly related -- Ticket #594. This assumes that '.'
# will never appear *within* any component of the FK.
if (len(m) > 0):
schema = '.'.join(m)
else:
schema = None
return schema, tname, colname
def _resolve_col_tokens(self):
if self.parent is None:
raise exc.InvalidRequestError(
"this ForeignKey object does not yet have a "
"parent Column associated with it.")
elif self.parent.table is None:
raise exc.InvalidRequestError(
"this ForeignKey's parent column is not yet associated "
"with a Table.")
parenttable = self.parent.table
# assertion, can be commented out.
# basically Column._make_proxy() sends the actual
# target Column to the ForeignKey object, so the
# string resolution here is never called.
for c in self.parent.base_columns:
if isinstance(c, Column):
assert c.table is parenttable
break
else:
assert False
######################
schema, tname, colname = self._column_tokens
if schema is None and parenttable.metadata.schema is not None:
schema = parenttable.metadata.schema
tablekey = _get_table_key(tname, schema)
return parenttable, tablekey, colname
def _link_to_col_by_colstring(self, parenttable, table, colname):
if not hasattr(self.constraint, '_referred_table'):
self.constraint._referred_table = table
else:
assert self.constraint._referred_table is table
_column = None
if colname is None:
# colname is None in the case that ForeignKey argument
# was specified as table name only, in which case we
# match the column name to the same column on the
# parent.
key = self.parent
_column = table.c.get(self.parent.key, None)
elif self.link_to_name:
key = colname
for c in table.c:
if c.name == colname:
_column = c
else:
key = colname
_column = table.c.get(colname, None)
if _column is None:
raise exc.NoReferencedColumnError(
"Could not initialize target column "
"for ForeignKey '%s' on table '%s': "
"table '%s' has no column named '%s'" %
(self._colspec, parenttable.name, table.name, key),
table.name, key)
self._set_target_column(_column)
def _set_target_column(self, column):
# propagate TypeEngine to parent if it didn't have one
if self.parent.type._isnull:
self.parent.type = column.type
# super-edgy case, if other FKs point to our column,
# they'd get the type propagated out also.
if isinstance(self.parent.table, Table):
def set_type(fk):
if fk.parent.type._isnull:
fk.parent.type = column.type
self.parent._setup_on_memoized_fks(set_type)
self.column = column
@util.memoized_property
def column(self):
"""Return the target :class:`.Column` referenced by this
:class:`.ForeignKey`.
If no target column has been established, an exception
is raised.
.. versionchanged:: 0.9.0
Foreign key target column resolution now occurs as soon as both
the ForeignKey object and the remote Column to which it refers
are both associated with the same MetaData object.
"""
if isinstance(self._colspec, util.string_types):
parenttable, tablekey, colname = self._resolve_col_tokens()
if tablekey not in parenttable.metadata:
raise exc.NoReferencedTableError(
"Foreign key associated with column '%s' could not find "
"table '%s' with which to generate a "
"foreign key to target column '%s'" %
(self.parent, tablekey, colname),
tablekey)
elif parenttable.key not in parenttable.metadata:
raise exc.InvalidRequestError(
"Table %s is no longer associated with its "
"parent MetaData" % parenttable)
else:
raise exc.NoReferencedColumnError(
"Could not initialize target column for "
"ForeignKey '%s' on table '%s': "
"table '%s' has no column named '%s'" % (
self._colspec, parenttable.name, tablekey, colname),
tablekey, colname)
elif hasattr(self._colspec, '__clause_element__'):
_column = self._colspec.__clause_element__()
return _column
else:
_column = self._colspec
return _column
def _set_parent(self, column):
if self.parent is not None and self.parent is not column:
raise exc.InvalidRequestError(
"This ForeignKey already has a parent !")
self.parent = column
self.parent.foreign_keys.add(self)
self.parent._on_table_attach(self._set_table)
def _set_remote_table(self, table):
parenttable, tablekey, colname = self._resolve_col_tokens()
self._link_to_col_by_colstring(parenttable, table, colname)
self.constraint._validate_dest_table(table)
def _remove_from_metadata(self, metadata):
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if self in metadata._fk_memos[fk_key]:
# TODO: no test coverage for self not in memos
metadata._fk_memos[fk_key].remove(self)
def _set_table(self, column, table):
# standalone ForeignKey - create ForeignKeyConstraint
# on the hosting Table when attached to the Table.
if self.constraint is None and isinstance(table, Table):
self.constraint = ForeignKeyConstraint(
[], [], use_alter=self.use_alter, name=self.name,
onupdate=self.onupdate, ondelete=self.ondelete,
deferrable=self.deferrable, initially=self.initially,
match=self.match,
**self._unvalidated_dialect_kw
)
self.constraint._append_element(column, self)
self.constraint._set_parent_with_dispatch(table)
table.foreign_keys.add(self)
# set up remote ".column" attribute, or a note to pick it
# up when the other Table/Column shows up
if isinstance(self._colspec, util.string_types):
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if table_key in parenttable.metadata.tables:
table = parenttable.metadata.tables[table_key]
try:
self._link_to_col_by_colstring(
parenttable, table, colname)
except exc.NoReferencedColumnError:
# this is OK, we'll try later
pass
parenttable.metadata._fk_memos[fk_key].append(self)
elif hasattr(self._colspec, '__clause_element__'):
_column = self._colspec.__clause_element__()
self._set_target_column(_column)
else:
_column = self._colspec
self._set_target_column(_column)
class _NotAColumnExpr(object):
def _not_a_column_expr(self):
raise exc.InvalidRequestError(
"This %s cannot be used directly "
"as a column expression." % self.__class__.__name__)
__clause_element__ = self_group = lambda self: self._not_a_column_expr()
_from_objects = property(lambda self: self._not_a_column_expr())
class DefaultGenerator(_NotAColumnExpr, SchemaItem):
"""Base class for column *default* values."""
__visit_name__ = 'default_generator'
is_sequence = False
is_server_default = False
column = None
def __init__(self, for_update=False):
self.for_update = for_update
def _set_parent(self, column):
self.column = column
if self.for_update:
self.column.onupdate = self
else:
self.column.default = self
def execute(self, bind=None, **kwargs):
if bind is None:
bind = _bind_or_error(self)
return bind._execute_default(self, **kwargs)
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_default(self, multiparams, params)
@property
def bind(self):
"""Return the connectable associated with this default."""
if getattr(self, 'column', None) is not None:
return self.column.table.bind
else:
return None
class ColumnDefault(DefaultGenerator):
"""A plain default value on a column.
This could correspond to a constant, a callable function,
or a SQL clause.
:class:`.ColumnDefault` is generated automatically
whenever the ``default``, ``onupdate`` arguments of
:class:`.Column` are used. A :class:`.ColumnDefault`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, default=50)
Is equivalent to::
Column('foo', Integer, ColumnDefault(50))
"""
def __init__(self, arg, **kwargs):
""""Construct a new :class:`.ColumnDefault`.
:param arg: argument representing the default value.
May be one of the following:
* a plain non-callable Python value, such as a
string, integer, boolean, or other simple type.
The default value will be used as is each time.
* a SQL expression, that is one which derives from
:class:`.ColumnElement`. The SQL expression will
be rendered into the INSERT or UPDATE statement,
or in the case of a primary key column when
RETURNING is not used may be
pre-executed before an INSERT within a SELECT.
* A Python callable. The function will be invoked for each
new row subject to an INSERT or UPDATE.
The callable must accept exactly
zero or one positional arguments. The one-argument form
will receive an instance of the :class:`.ExecutionContext`,
which provides contextual information as to the current
:class:`.Connection` in use as well as the current
statement and parameters.
"""
super(ColumnDefault, self).__init__(**kwargs)
if isinstance(arg, FetchedValue):
raise exc.ArgumentError(
"ColumnDefault may not be a server-side default type.")
if util.callable(arg):
arg = self._maybe_wrap_callable(arg)
self.arg = arg
@util.memoized_property
def is_callable(self):
return util.callable(self.arg)
@util.memoized_property
def is_clause_element(self):
return isinstance(self.arg, ClauseElement)
@util.memoized_property
def is_scalar(self):
return not self.is_callable and \
not self.is_clause_element and \
not self.is_sequence
def _maybe_wrap_callable(self, fn):
"""Wrap callables that don't accept a context.
This allows easy compatibility with default callables
that weren't written to accept a context argument.
"""
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except TypeError:
return util.wrap_callable(lambda ctx: fn(), fn)
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
if positionals == 0:
return util.wrap_callable(lambda ctx: fn(), fn)
elif positionals == 1:
return fn
else:
raise exc.ArgumentError(
"ColumnDefault Python function takes zero or one "
"positional arguments")
def _visit_name(self):
if self.for_update:
return "column_onupdate"
else:
return "column_default"
__visit_name__ = property(_visit_name)
def __repr__(self):
return "ColumnDefault(%r)" % self.arg
class Sequence(DefaultGenerator):
"""Represents a named database sequence.
The :class:`.Sequence` object represents the name and configurational
parameters of a database sequence. It also represents
a construct that can be "executed" by a SQLAlchemy :class:`.Engine`
or :class:`.Connection`, rendering the appropriate "next value" function
for the target database and returning a result.
The :class:`.Sequence` is typically associated with a primary key column::
some_table = Table(
'some_table', metadata,
Column('id', Integer, Sequence('some_table_seq'),
primary_key=True)
)
When CREATE TABLE is emitted for the above :class:`.Table`, if the
target platform supports sequences, a CREATE SEQUENCE statement will
be emitted as well. For platforms that don't support sequences,
the :class:`.Sequence` construct is ignored.
.. seealso::
:class:`.CreateSequence`
:class:`.DropSequence`
"""
__visit_name__ = 'sequence'
is_sequence = True
def __init__(self, name, start=None, increment=None, minvalue=None,
maxvalue=None, nominvalue=None, nomaxvalue=None, cycle=None,
schema=None, cache=None, order=None, optional=False,
quote=None, metadata=None, quote_schema=None,
for_update=False):
"""Construct a :class:`.Sequence` object.
:param name: The name of the sequence.
:param start: the starting index of the sequence. This value is
used when the CREATE SEQUENCE command is emitted to the database
as the value of the "START WITH" clause. If ``None``, the
clause is omitted, which on most platforms indicates a starting
value of 1.
:param increment: the increment value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "INCREMENT BY" clause. If ``None``,
the clause is omitted, which on most platforms indicates an
increment of 1.
:param minvalue: the minimum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "MINVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
minvalue of 1 and -2^63-1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param maxvalue: the maximum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "MAXVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
maxvalue of 2^63-1 and -1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param nominvalue: no minimum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "NO MINVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
minvalue of 1 and -2^63-1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param nomaxvalue: no maximum value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "NO MAXVALUE" clause. If ``None``,
the clause is omitted, which on most platforms indicates a
maxvalue of 2^63-1 and -1 for ascending and descending sequences,
respectively.
.. versionadded:: 1.0.7
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached by an ascending or descending sequence
respectively. This value is used when the CREATE SEQUENCE command
is emitted to the database as the "CYCLE" clause. If the limit is
reached, the next number generated will be the minvalue or maxvalue,
respectively. If cycle=False (the default) any calls to nextval
after the sequence has reached its maximum value will return an
error.
.. versionadded:: 1.0.7
:param schema: Optional schema name for the sequence, if located
in a schema other than the default. The rules for selecting the
schema name when a :class:`.MetaData` is also present are the same
as that of :paramref:`.Table.schema`.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance. Renders the CACHE keyword
understood by Oracle and PostgreSQL.
.. versionadded:: 1.1.12
:param order: optional boolean value; if true, renders the
ORDER keyword, understood by Oracle, indicating the sequence is
definitively ordered. May be necessary to provide deterministic
ordering using Oracle RAC.
.. versionadded:: 1.1.12
:param optional: boolean value, when ``True``, indicates that this
:class:`.Sequence` object only needs to be explicitly generated
on backends that don't provide another way to generate primary
key identifiers. Currently, it essentially means, "don't create
this sequence on the PostgreSQL backend, where the SERIAL keyword
creates a sequence for us automatically".
:param quote: boolean value, when ``True`` or ``False``, explicitly
forces quoting of the schema name on or off. When left at its
default of ``None``, normal quoting rules based on casing and
reserved words take place.
:param quote_schema: set the quoting preferences for the ``schema``
name.
:param metadata: optional :class:`.MetaData` object which this
:class:`.Sequence` will be associated with. A :class:`.Sequence`
that is associated with a :class:`.MetaData` gains the following
capabilities:
* The :class:`.Sequence` will inherit the :paramref:`.MetaData.schema`
parameter specified to the target :class:`.MetaData`, which
affects the production of CREATE / DROP DDL, if any.
* The :meth:`.Sequence.create` and :meth:`.Sequence.drop` methods
automatically use the engine bound to the :class:`.MetaData`
object, if any.
* The :meth:`.MetaData.create_all` and :meth:`.MetaData.drop_all`
methods will emit CREATE / DROP for this :class:`.Sequence`,
even if the :class:`.Sequence` is not associated with any
:class:`.Table` / :class:`.Column` that's a member of this
:class:`.MetaData`.
The above behaviors can only occur if the :class:`.Sequence` is
explicitly associated with the :class:`.MetaData` via this parameter.
.. seealso::
:ref:`sequence_metadata` - full discussion of the
:paramref:`.Sequence.metadata` parameter.
:param for_update: Indicates this :class:`.Sequence`, when associated
with a :class:`.Column`, should be invoked for UPDATE statements
on that column's table, rather than for INSERT statements, when
no value is otherwise present for that column in the statement.
"""
super(Sequence, self).__init__(for_update=for_update)
self.name = quoted_name(name, quote)
self.start = start
self.increment = increment
self.minvalue = minvalue
self.maxvalue = maxvalue
self.nominvalue = nominvalue
self.nomaxvalue = nomaxvalue
self.cycle = cycle
self.cache = cache
self.order = order
self.optional = optional
if schema is BLANK_SCHEMA:
self.schema = schema = None
elif metadata is not None and schema is None and metadata.schema:
self.schema = schema = metadata.schema
else:
self.schema = quoted_name(schema, quote_schema)
self.metadata = metadata
self._key = _get_table_key(name, schema)
if metadata:
self._set_metadata(metadata)
@util.memoized_property
def is_callable(self):
return False
@util.memoized_property
def is_clause_element(self):
return False
@util.dependencies("sqlalchemy.sql.functions.func")
def next_value(self, func):
"""Return a :class:`.next_value` function element
which will render the appropriate increment function
for this :class:`.Sequence` within any SQL expression.
"""
return func.next_value(self, bind=self.bind)
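# Illustrative sketch (editor's addition): a Sequence used both as a column
# default and as a standalone "next value" expression; names are hypothetical
# and the backend is assumed to support sequences (e.g. PostgreSQL, Oracle).
#
#     order_id_seq = Sequence('order_id_seq', metadata=meta, start=1000)
#     orders = Table('orders', meta,
#                    Column('id', Integer, order_id_seq, primary_key=True))
#
#     next_id = connection.execute(order_id_seq)   # fires the "next value" function
#     stmt = orders.insert().values(id=order_id_seq.next_value())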
def _set_parent(self, column):
super(Sequence, self)._set_parent(column)
column._on_table_attach(self._set_table)
def _set_table(self, column, table):
self._set_metadata(table.metadata)
def _set_metadata(self, metadata):
self.metadata = metadata
self.metadata._sequences[self._key] = self
@property
def bind(self):
if self.metadata:
return self.metadata.bind
else:
return None
def create(self, bind=None, checkfirst=True):
"""Creates this sequence in the database."""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator,
self,
checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=True):
"""Drops this sequence from the database."""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper,
self,
checkfirst=checkfirst)
def _not_a_column_expr(self):
raise exc.InvalidRequestError(
"This %s cannot be used directly "
"as a column expression. Use func.next_value(sequence) "
"to produce a 'next value' function that's usable "
"as a column element."
% self.__class__.__name__)
@inspection._self_inspects
class FetchedValue(_NotAColumnExpr, SchemaEventTarget):
"""A marker for a transparent database-side default.
Use :class:`.FetchedValue` when the database is configured
to provide some automatic default for a column.
E.g.::
Column('foo', Integer, FetchedValue())
Would indicate that some trigger or default generator
will create a new value for the ``foo`` column during an
INSERT.
.. seealso::
:ref:`triggered_columns`
"""
is_server_default = True
reflected = False
has_argument = False
def __init__(self, for_update=False):
self.for_update = for_update
def _as_for_update(self, for_update):
if for_update == self.for_update:
return self
else:
return self._clone(for_update)
def _clone(self, for_update):
n = self.__class__.__new__(self.__class__)
n.__dict__.update(self.__dict__)
n.__dict__.pop('column', None)
n.for_update = for_update
return n
def _set_parent(self, column):
self.column = column
if self.for_update:
self.column.server_onupdate = self
else:
self.column.server_default = self
def __repr__(self):
return util.generic_repr(self)
class DefaultClause(FetchedValue):
"""A DDL-specified DEFAULT column value.
:class:`.DefaultClause` is a :class:`.FetchedValue`
that also generates a "DEFAULT" clause when
"CREATE TABLE" is emitted.
:class:`.DefaultClause` is generated automatically
whenever the ``server_default``, ``server_onupdate`` arguments of
:class:`.Column` are used. A :class:`.DefaultClause`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, server_default="50")
Is equivalent to::
Column('foo', Integer, DefaultClause("50"))
"""
has_argument = True
def __init__(self, arg, for_update=False, _reflected=False):
util.assert_arg_type(arg, (util.string_types[0],
ClauseElement,
TextClause), 'arg')
super(DefaultClause, self).__init__(for_update)
self.arg = arg
self.reflected = _reflected
def __repr__(self):
return "DefaultClause(%r, for_update=%r)" % \
(self.arg, self.for_update)
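# Illustrative sketch (editor's addition): pairing a DDL-level default with a
# trigger-maintained update timestamp; server_default renders DEFAULT in
# CREATE TABLE, while FetchedValue only marks the column for post-fetch of a
# database-generated value on UPDATE.  Column and type names are hypothetical.
#
#     Column('updated_at', DateTime,
#            server_default=text('CURRENT_TIMESTAMP'),
#            server_onupdate=FetchedValue())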
class PassiveDefault(DefaultClause):
"""A DDL-specified DEFAULT column value.
.. deprecated:: 0.6
:class:`.PassiveDefault` is deprecated.
Use :class:`.DefaultClause`.
"""
@util.deprecated("0.6",
":class:`.PassiveDefault` is deprecated. "
"Use :class:`.DefaultClause`.",
False)
def __init__(self, *arg, **kw):
DefaultClause.__init__(self, *arg, **kw)
class Constraint(DialectKWArgs, SchemaItem):
"""A table-level SQL constraint."""
__visit_name__ = 'constraint'
def __init__(self, name=None, deferrable=None, initially=None,
_create_rule=None, info=None, _type_bound=False,
**dialect_kw):
r"""Create a SQL constraint.
:param name:
Optional, the in-database name of this ``Constraint``.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param _create_rule:
a callable which is passed the DDLCompiler object during
compilation. Returns True or False to signal inline generation of
this Constraint.
The AddConstraint and DropConstraint DDL constructs provide
DDLElement's more comprehensive "conditional DDL" approach that is
passed a database connection when DDL is being issued. _create_rule
is instead called during any CREATE TABLE compilation, where there
may not be any transaction/connection in progress. However, it
allows conditional compilation of the constraint even for backends
which do not support addition of constraints through ALTER TABLE,
which currently includes SQLite.
_create_rule is used by some types to create constraints.
Currently, its call signature is subject to change at any time.
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
self.name = name
self.deferrable = deferrable
self.initially = initially
if info:
self.info = info
self._create_rule = _create_rule
self._type_bound = _type_bound
util.set_creation_order(self)
self._validate_dialect_kwargs(dialect_kw)
@property
def table(self):
try:
if isinstance(self.parent, Table):
return self.parent
except AttributeError:
pass
raise exc.InvalidRequestError(
"This constraint is not bound to a table. Did you "
"mean to call table.append_constraint(constraint) ?")
def _set_parent(self, parent):
self.parent = parent
parent.constraints.add(self)
def copy(self, **kw):
raise NotImplementedError()
def _to_schema_column(element):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, Column):
raise exc.ArgumentError("schema.Column object expected")
return element
def _to_schema_column_or_string(element):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, util.string_types + (ColumnElement, )):
msg = "Element %r is not a string name or column element"
raise exc.ArgumentError(msg % element)
return element
class ColumnCollectionMixin(object):
columns = None
"""A :class:`.ColumnCollection` of :class:`.Column` objects.
This collection represents the columns which are referred to by
this object.
"""
_allow_multiple_tables = False
def __init__(self, *columns, **kw):
_autoattach = kw.pop('_autoattach', True)
self.columns = ColumnCollection()
self._pending_colargs = [_to_schema_column_or_string(c)
for c in columns]
if _autoattach and self._pending_colargs:
self._check_attach()
@classmethod
def _extract_col_expression_collection(cls, expressions):
for expr in expressions:
strname = None
column = None
if hasattr(expr, '__clause_element__'):
expr = expr.__clause_element__()
if not isinstance(expr, (ColumnElement, TextClause)):
# this assumes a string
strname = expr
else:
cols = []
visitors.traverse(expr, {}, {'column': cols.append})
if cols:
column = cols[0]
add_element = column if column is not None else strname
yield expr, column, strname, add_element
def _check_attach(self, evt=False):
col_objs = [
c for c in self._pending_colargs
if isinstance(c, Column)
]
cols_w_table = [
c for c in col_objs if isinstance(c.table, Table)
]
cols_wo_table = set(col_objs).difference(cols_w_table)
if cols_wo_table:
# feature #3341 - place event listeners for Column objects
# such that when all those cols are attached, we autoattach.
assert not evt, "Should not reach here on event call"
# issue #3411 - don't do the per-column auto-attach if some of the
# columns are specified as strings.
has_string_cols = set(self._pending_colargs).difference(col_objs)
if not has_string_cols:
def _col_attached(column, table):
# this isinstance() corresponds with the
# isinstance() above; only want to count Table-bound
# columns
if isinstance(table, Table):
cols_wo_table.discard(column)
if not cols_wo_table:
self._check_attach(evt=True)
self._cols_wo_table = cols_wo_table
for col in cols_wo_table:
col._on_table_attach(_col_attached)
return
columns = cols_w_table
tables = set([c.table for c in columns])
if len(tables) == 1:
self._set_parent_with_dispatch(tables.pop())
elif len(tables) > 1 and not self._allow_multiple_tables:
table = columns[0].table
others = [c for c in columns[1:] if c.table is not table]
if others:
raise exc.ArgumentError(
"Column(s) %s are not part of table '%s'." %
(", ".join("'%s'" % c for c in others),
table.description)
)
def _set_parent(self, table):
for col in self._pending_colargs:
if isinstance(col, util.string_types):
col = table.c[col]
self.columns.add(col)
class ColumnCollectionConstraint(ColumnCollectionMixin, Constraint):
"""A constraint that proxies a ColumnCollection."""
def __init__(self, *columns, **kw):
r"""
:param \*columns:
A sequence of column names or Column objects.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param \**kw: other keyword arguments including dialect-specific
arguments are propagated to the :class:`.Constraint` superclass.
"""
_autoattach = kw.pop('_autoattach', True)
Constraint.__init__(self, **kw)
ColumnCollectionMixin.__init__(self, *columns, _autoattach=_autoattach)
columns = None
"""A :class:`.ColumnCollection` representing the set of columns
for this constraint.
"""
def _set_parent(self, table):
Constraint._set_parent(self, table)
ColumnCollectionMixin._set_parent(self, table)
def __contains__(self, x):
return x in self.columns
def copy(self, **kw):
c = self.__class__(name=self.name, deferrable=self.deferrable,
initially=self.initially, *self.columns.keys())
return self._schema_item_copy(c)
def contains_column(self, col):
"""Return True if this constraint contains the given column.
Note that this object also contains an attribute ``.columns``
which is a :class:`.ColumnCollection` of :class:`.Column` objects.
"""
return self.columns.contains_column(col)
def __iter__(self):
# inlining of
# return iter(self.columns)
# ColumnCollection->OrderedProperties->OrderedDict
ordered_dict = self.columns._data
return (ordered_dict[key] for key in ordered_dict._list)
def __len__(self):
return len(self.columns._data)
class CheckConstraint(ColumnCollectionConstraint):
"""A table- or column-level CHECK constraint.
Can be included in the definition of a Table or Column.
"""
_allow_multiple_tables = True
def __init__(self, sqltext, name=None, deferrable=None,
initially=None, table=None, info=None, _create_rule=None,
_autoattach=True, _type_bound=False):
r"""Construct a CHECK constraint.
:param sqltext:
A string containing the constraint definition, which will be used
verbatim, or a SQL expression construct. If given as a string,
the object is converted to a :class:`.Text` object. If the textual
string includes a colon character, escape this using a backslash::
CheckConstraint(r"foo ~ E'a(?\:b|c)d")
:param name:
Optional, the in-database name of the constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
"""
self.sqltext = _literal_as_text(sqltext, warn=False)
columns = []
visitors.traverse(self.sqltext, {}, {'column': columns.append})
super(CheckConstraint, self).\
__init__(
name=name, deferrable=deferrable,
initially=initially, _create_rule=_create_rule, info=info,
_type_bound=_type_bound, _autoattach=_autoattach,
*columns)
if table is not None:
self._set_parent_with_dispatch(table)
def __visit_name__(self):
if isinstance(self.parent, Table):
return "check_constraint"
else:
return "column_check_constraint"
__visit_name__ = property(__visit_name__)
def copy(self, target_table=None, **kw):
if target_table is not None:
def replace(col):
if self.table.c.contains_column(col):
return target_table.c[col.key]
else:
return None
sqltext = visitors.replacement_traverse(self.sqltext, {}, replace)
else:
sqltext = self.sqltext
c = CheckConstraint(sqltext,
name=self.name,
initially=self.initially,
deferrable=self.deferrable,
_create_rule=self._create_rule,
table=target_table,
_autoattach=False,
_type_bound=self._type_bound)
return self._schema_item_copy(c)
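# Illustrative sketch of table-level and column-level CHECK constraints
# (hypothetical table and column names):
#
#   from sqlalchemy import Table, Column, Integer, MetaData, CheckConstraint
#   metadata = MetaData()
#   payment = Table('payment', metadata,
#       Column('amount', Integer, CheckConstraint('amount > 0')),    # column-level
#       CheckConstraint('amount < 1000000', name='ck_payment_cap'))  # table-level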
class ForeignKeyConstraint(ColumnCollectionConstraint):
"""A table-level FOREIGN KEY constraint.
Defines a single column or composite FOREIGN KEY ... REFERENCES
constraint. For a no-frills, single column foreign key, adding a
:class:`.ForeignKey` to the definition of a :class:`.Column` is a
shorthand equivalent for an unnamed, single column
:class:`.ForeignKeyConstraint`.
Examples of foreign key configuration are in :ref:`metadata_foreignkeys`.
"""
__visit_name__ = 'foreign_key_constraint'
def __init__(self, columns, refcolumns, name=None, onupdate=None,
ondelete=None, deferrable=None, initially=None,
use_alter=False, link_to_name=False, match=None,
table=None, info=None, **dialect_kw):
r"""Construct a composite-capable FOREIGN KEY.
:param columns: A sequence of local column names. The named columns
must be defined and present in the parent Table. The names should
match the ``key`` given to each column (defaults to the name) unless
``link_to_name`` is True.
:param refcolumns: A sequence of foreign column names or Column
objects. The columns must all be located within the same Table.
:param name: Optional, the in-database name of the key.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
SET NULL and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally assigned
``key``.
:param use_alter: If True, do not emit the DDL for this constraint as
part of the CREATE TABLE definition. Instead, generate it via an
ALTER TABLE statement issued after the full collection of tables
have been created, and drop it via an ALTER TABLE statement before
the full collection of tables are dropped.
The use of :paramref:`.ForeignKeyConstraint.use_alter` is
particularly geared towards the case where two or more tables
are established within a mutually-dependent foreign key constraint
relationship; however, the :meth:`.MetaData.create_all` and
:meth:`.MetaData.drop_all` methods will perform this resolution
automatically, so the flag is normally not needed.
.. versionchanged:: 1.0.0 Automatic resolution of foreign key
cycles has been added, removing the need to use the
:paramref:`.ForeignKeyConstraint.use_alter` in typical use
cases.
.. seealso::
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
.. versionadded:: 0.9.2
"""
Constraint.__init__(
self, name=name, deferrable=deferrable, initially=initially,
info=info, **dialect_kw)
self.onupdate = onupdate
self.ondelete = ondelete
self.link_to_name = link_to_name
self.use_alter = use_alter
self.match = match
if len(set(columns)) != len(refcolumns):
if len(set(columns)) != len(columns):
# e.g. FOREIGN KEY (a, a) REFERENCES r (b, c)
raise exc.ArgumentError(
"ForeignKeyConstraint with duplicate source column "
"references are not supported."
)
else:
# e.g. FOREIGN KEY (a) REFERENCES r (b, c)
# paraphrasing https://www.postgresql.org/docs/9.2/static/\
# ddl-constraints.html
raise exc.ArgumentError(
"ForeignKeyConstraint number "
"of constrained columns must match the number of "
"referenced columns.")
# standalone ForeignKeyConstraint - create
# associated ForeignKey objects which will be applied to hosted
# Column objects (in col.foreign_keys), either now or when attached
# to the Table for string-specified names
self.elements = [
ForeignKey(
refcol,
_constraint=self,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
link_to_name=self.link_to_name,
match=self.match,
deferrable=self.deferrable,
initially=self.initially,
**self.dialect_kwargs
) for refcol in refcolumns
]
ColumnCollectionMixin.__init__(self, *columns)
if table is not None:
if hasattr(self, "parent"):
assert table is self.parent
self._set_parent_with_dispatch(table)
def _append_element(self, column, fk):
self.columns.add(column)
self.elements.append(fk)
columns = None
"""A :class:`.ColumnCollection` representing the set of columns
for this constraint.
"""
elements = None
"""A sequence of :class:`.ForeignKey` objects.
Each :class:`.ForeignKey` represents a single referring column/referred
column pair.
This collection is intended to be read-only.
"""
@property
def _elements(self):
# legacy - provide a dictionary view of (column_key, fk)
return util.OrderedDict(
zip(self.column_keys, self.elements)
)
@property
def _referred_schema(self):
for elem in self.elements:
return elem._referred_schema
else:
return None
@property
def referred_table(self):
"""The :class:`.Table` object to which this
:class:`.ForeignKeyConstraint` references.
This is a dynamically calculated attribute which may not be available
if the constraint and/or parent table is not yet associated with
a metadata collection that contains the referred table.
.. versionadded:: 1.0.0
"""
return self.elements[0].column.table
def _validate_dest_table(self, table):
table_keys = set([elem._table_key()
for elem in self.elements])
if None not in table_keys and len(table_keys) > 1:
elem0, elem1 = sorted(table_keys)[0:2]
raise exc.ArgumentError(
'ForeignKeyConstraint on %s(%s) refers to '
'multiple remote tables: %s and %s' % (
table.fullname,
self._col_description,
elem0,
elem1
))
@property
def column_keys(self):
"""Return a list of string keys representing the local
columns in this :class:`.ForeignKeyConstraint`.
This list is either the original string arguments sent
to the constructor of the :class:`.ForeignKeyConstraint`,
or if the constraint has been initialized with :class:`.Column`
objects, is the string .key of each element.
.. versionadded:: 1.0.0
"""
if hasattr(self, "parent"):
return self.columns.keys()
else:
return [
col.key if isinstance(col, ColumnElement)
else str(col) for col in self._pending_colargs
]
@property
def _col_description(self):
return ", ".join(self.column_keys)
def _set_parent(self, table):
Constraint._set_parent(self, table)
try:
ColumnCollectionConstraint._set_parent(self, table)
except KeyError as ke:
raise exc.ArgumentError(
"Can't create ForeignKeyConstraint "
"on table '%s': no column "
"named '%s' is present." % (table.description, ke.args[0]))
for col, fk in zip(self.columns, self.elements):
if not hasattr(fk, 'parent') or \
fk.parent is not col:
fk._set_parent_with_dispatch(col)
self._validate_dest_table(table)
def copy(self, schema=None, target_table=None, **kw):
fkc = ForeignKeyConstraint(
[x.parent.key for x in self.elements],
[x._get_colspec(
schema=schema,
table_name=target_table.name
if target_table is not None
and x._table_key() == x.parent.table.key
else None)
for x in self.elements],
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match
)
for self_fk, other_fk in zip(
self.elements,
fkc.elements):
self_fk._schema_item_copy(other_fk)
return self._schema_item_copy(fkc)
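# Illustrative sketch of a composite FOREIGN KEY declared at the table level
# (hypothetical tables; the referenced columns form a composite primary key):
#
#   from sqlalchemy import Table, Column, Integer, MetaData, ForeignKeyConstraint
#   metadata = MetaData()
#   parent = Table('parent', metadata,
#       Column('a', Integer, primary_key=True),
#       Column('b', Integer, primary_key=True))
#   child = Table('child', metadata,
#       Column('id', Integer, primary_key=True),
#       Column('pa', Integer),
#       Column('pb', Integer),
#       ForeignKeyConstraint(['pa', 'pb'], ['parent.a', 'parent.b'],
#                            name='fk_child_parent', ondelete='CASCADE'))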
class PrimaryKeyConstraint(ColumnCollectionConstraint):
"""A table-level PRIMARY KEY constraint.
The :class:`.PrimaryKeyConstraint` object is present automatically
on any :class:`.Table` object; it is assigned a set of
:class:`.Column` objects corresponding to those marked with
the :paramref:`.Column.primary_key` flag::
>>> my_table = Table('mytable', metadata,
... Column('id', Integer, primary_key=True),
... Column('version_id', Integer, primary_key=True),
... Column('data', String(50))
... )
>>> my_table.primary_key
PrimaryKeyConstraint(
Column('id', Integer(), table=<mytable>,
primary_key=True, nullable=False),
Column('version_id', Integer(), table=<mytable>,
primary_key=True, nullable=False)
)
The primary key of a :class:`.Table` can also be specified by using
a :class:`.PrimaryKeyConstraint` object explicitly; in this mode of usage,
the "name" of the constraint can also be specified, as well as other
options which may be recognized by dialects::
my_table = Table('mytable', metadata,
Column('id', Integer),
Column('version_id', Integer),
Column('data', String(50)),
PrimaryKeyConstraint('id', 'version_id',
name='mytable_pk')
)
The two styles of column-specification should generally not be mixed.
A warning is emitted if the columns present in the
:class:`.PrimaryKeyConstraint`
don't match the columns that were marked as ``primary_key=True``, if both
are present; in this case, the columns are taken strictly from the
:class:`.PrimaryKeyConstraint` declaration, and those columns otherwise
marked as ``primary_key=True`` are ignored. This behavior is intended to
be backwards compatible with previous behavior.
.. versionchanged:: 0.9.2 Using a mixture of columns within a
:class:`.PrimaryKeyConstraint` in addition to columns marked as
``primary_key=True`` now emits a warning if the lists don't match.
The ultimate behavior of ignoring those columns marked with the flag
only is currently maintained for backwards compatibility; this warning
may raise an exception in a future release.
For the use case where specific options are to be specified on the
:class:`.PrimaryKeyConstraint`, but the usual style of using
``primary_key=True`` flags is still desirable, an empty
:class:`.PrimaryKeyConstraint` may be specified, which will take on the
primary key column collection from the :class:`.Table` based on the
flags::
my_table = Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('version_id', Integer, primary_key=True),
Column('data', String(50)),
PrimaryKeyConstraint(name='mytable_pk',
mssql_clustered=True)
)
.. versionadded:: 0.9.2 an empty :class:`.PrimaryKeyConstraint` may now
be specified for the purposes of establishing keyword arguments with
the constraint, independently of the specification of "primary key"
columns within the :class:`.Table` itself; columns marked as
``primary_key=True`` will be gathered into the empty constraint's
column collection.
"""
__visit_name__ = 'primary_key_constraint'
def __init__(self, *columns, **kw):
self._implicit_generated = kw.pop('_implicit_generated', False)
super(PrimaryKeyConstraint, self).__init__(*columns, **kw)
def _set_parent(self, table):
super(PrimaryKeyConstraint, self)._set_parent(table)
if table.primary_key is not self:
table.constraints.discard(table.primary_key)
table.primary_key = self
table.constraints.add(self)
table_pks = [c for c in table.c if c.primary_key]
if self.columns and table_pks and \
set(table_pks) != set(self.columns.values()):
util.warn(
"Table '%s' specifies columns %s as primary_key=True, "
"not matching locally specified columns %s; setting the "
"current primary key columns to %s. This warning "
"may become an exception in a future release" %
(
table.name,
", ".join("'%s'" % c.name for c in table_pks),
", ".join("'%s'" % c.name for c in self.columns),
", ".join("'%s'" % c.name for c in self.columns)
)
)
table_pks[:] = []
for c in self.columns:
c.primary_key = True
c.nullable = False
self.columns.extend(table_pks)
def _reload(self, columns):
"""repopulate this :class:`.PrimaryKeyConstraint` given
a set of columns.
Existing columns in the table that are marked as primary_key=True
are maintained.
Also fires a new event.
This is basically like putting a whole new
:class:`.PrimaryKeyConstraint` object on the parent
:class:`.Table` object without actually replacing the object.
The ordering of the given list of columns is also maintained; these
columns will be appended to the list of columns after any which
are already present.
"""
# set the primary key flag on new columns.
# note any existing PK cols on the table also have their
# flag still set.
for col in columns:
col.primary_key = True
self.columns.extend(columns)
PrimaryKeyConstraint._autoincrement_column._reset(self)
self._set_parent_with_dispatch(self.table)
def _replace(self, col):
PrimaryKeyConstraint._autoincrement_column._reset(self)
self.columns.replace(col)
@property
def columns_autoinc_first(self):
autoinc = self._autoincrement_column
if autoinc is not None:
return [autoinc] + [c for c in self.columns if c is not autoinc]
else:
return list(self.columns)
@util.memoized_property
def _autoincrement_column(self):
def _validate_autoinc(col, autoinc_true):
if col.type._type_affinity is None or not issubclass(
col.type._type_affinity,
type_api.INTEGERTYPE._type_affinity):
if autoinc_true:
raise exc.ArgumentError(
"Column type %s on column '%s' is not "
"compatible with autoincrement=True" % (
col.type,
col
))
else:
return False
elif not isinstance(col.default, (type(None), Sequence)) and \
not autoinc_true:
return False
elif col.server_default is not None and not autoinc_true:
return False
elif (
col.foreign_keys and col.autoincrement
not in (True, 'ignore_fk')):
return False
return True
if len(self.columns) == 1:
col = list(self.columns)[0]
if col.autoincrement is True:
_validate_autoinc(col, True)
return col
elif (
col.autoincrement in ('auto', 'ignore_fk') and
_validate_autoinc(col, False)
):
return col
else:
autoinc = None
for col in self.columns:
if col.autoincrement is True:
_validate_autoinc(col, True)
if autoinc is not None:
raise exc.ArgumentError(
"Only one Column may be marked "
"autoincrement=True, found both %s and %s." %
(col.name, autoinc.name)
)
else:
autoinc = col
return autoinc
class UniqueConstraint(ColumnCollectionConstraint):
"""A table-level UNIQUE constraint.
Defines a single column or composite UNIQUE constraint. For a no-frills,
single column constraint, adding ``unique=True`` to the ``Column``
definition is a shorthand equivalent for an unnamed, single column
UniqueConstraint.
"""
__visit_name__ = 'unique_constraint'
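# Illustrative sketch of the shorthand vs. explicit forms described above
# (hypothetical tables):
#
#   from sqlalchemy import Table, Column, String, MetaData, UniqueConstraint
#   metadata = MetaData()
#   # shorthand: single-column, unnamed
#   t1 = Table('user1', metadata, Column('email', String(100), unique=True))
#   # explicit: composite and/or named
#   t2 = Table('user2', metadata,
#       Column('first', String(50)), Column('last', String(50)),
#       UniqueConstraint('first', 'last', name='uq_user2_name'))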
class Index(DialectKWArgs, ColumnCollectionMixin, SchemaItem):
"""A table-level INDEX.
Defines a composite (one or more column) INDEX.
E.g.::
sometable = Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100))
)
Index("some_index", sometable.c.name)
For a no-frills, single column index, adding
:class:`.Column` also supports ``index=True``::
sometable = Table("sometable", metadata,
Column("name", String(50), index=True)
)
For a composite index, multiple columns can be specified::
Index("some_index", sometable.c.name, sometable.c.address)
Functional indexes are supported as well, typically by using the
:data:`.func` construct in conjunction with table-bound
:class:`.Column` objects::
Index("some_index", func.lower(sometable.c.name))
.. versionadded:: 0.8 support for functional and expression-based indexes.
An :class:`.Index` can also be manually associated with a :class:`.Table`,
either through inline declaration or using
:meth:`.Table.append_constraint`. When this approach is used, the names
of the indexed columns can be specified as strings::
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", "name", "address")
)
To support functional or expression-based indexes in this form, the
:func:`.text` construct may be used::
from sqlalchemy import text
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", text("lower(name)"))
)
.. versionadded:: 0.9.5 the :func:`.text` construct may be used to
specify :class:`.Index` expressions, provided the :class:`.Index`
is explicitly associated with the :class:`.Table`.
.. seealso::
:ref:`schema_indexes` - General information on :class:`.Index`.
:ref:`postgresql_indexes` - PostgreSQL-specific options available for
the :class:`.Index` construct.
:ref:`mysql_indexes` - MySQL-specific options available for the
:class:`.Index` construct.
:ref:`mssql_indexes` - MSSQL-specific options available for the
:class:`.Index` construct.
"""
__visit_name__ = 'index'
def __init__(self, name, *expressions, **kw):
r"""Construct an index object.
:param name:
The name of the index
:param \*expressions:
Column expressions to include in the index. The expressions
are normally instances of :class:`.Column`, but may also
be arbitrary SQL expressions which ultimately refer to a
:class:`.Column`.
:param unique=False:
Keyword only argument; if True, create a unique index.
:param quote=None:
Keyword only argument; whether to apply quoting to the name of
the index. Works in the same manner as that of
:paramref:`.Column.quote`.
:param info=None: Optional data dictionary which will be populated
into the :attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self.table = None
columns = []
processed_expressions = []
for expr, column, strname, add_element in self.\
_extract_col_expression_collection(expressions):
if add_element is not None:
columns.append(add_element)
processed_expressions.append(expr)
self.expressions = processed_expressions
self.name = quoted_name(name, kw.pop("quote", None))
self.unique = kw.pop('unique', False)
if 'info' in kw:
self.info = kw.pop('info')
self._validate_dialect_kwargs(kw)
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(self, *columns)
def _set_parent(self, table):
ColumnCollectionMixin._set_parent(self, table)
if self.table is not None and table is not self.table:
raise exc.ArgumentError(
"Index '%s' is against table '%s', and "
"cannot be associated with table '%s'." % (
self.name,
self.table.description,
table.description
)
)
self.table = table
table.indexes.add(self)
self.expressions = [
expr if isinstance(expr, ClauseElement)
else colexpr
for expr, colexpr in util.zip_longest(self.expressions,
self.columns)
]
@property
def bind(self):
"""Return the connectable associated with this Index."""
return self.table.bind
def create(self, bind=None):
"""Issue a ``CREATE`` statement for this
:class:`.Index`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`.MetaData.create_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator, self)
return self
def drop(self, bind=None):
"""Issue a ``DROP`` statement for this
:class:`.Index`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`.MetaData.drop_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper, self)
def __repr__(self):
return 'Index(%s)' % (
", ".join(
[repr(self.name)] +
[repr(e) for e in self.expressions] +
(self.unique and ["unique=True"] or [])
))
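# Illustrative sketch of emitting DDL for a single Index directly via
# create()/drop(), assuming a hypothetical in-memory engine; in practice
# MetaData.create_all() normally covers index creation as well.
#
#   from sqlalchemy import create_engine, Table, Column, String, MetaData, Index
#   engine = create_engine("sqlite://")
#   metadata = MetaData()
#   sometable = Table('sometable', metadata, Column('name', String(50)))
#   idx = Index('ix_sometable_name', sometable.c.name)
#   metadata.create_all(engine)   # creates the table and its indexes
#   idx.drop(bind=engine)         # DROP INDEX via the given connectable
#   idx.create(bind=engine)       # CREATE INDEX again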
DEFAULT_NAMING_CONVENTION = util.immutabledict({
"ix": 'ix_%(column_0_label)s'
})
class MetaData(SchemaItem):
"""A collection of :class:`.Table` objects and their associated schema
constructs.
Holds a collection of :class:`.Table` objects as well as
an optional binding to an :class:`.Engine` or
:class:`.Connection`. If bound, the :class:`.Table` objects
in the collection and their columns may participate in implicit SQL
execution.
The :class:`.Table` objects themselves are stored in the
:attr:`.MetaData.tables` dictionary.
:class:`.MetaData` is a thread-safe object for read operations.
Construction of new tables within a single :class:`.MetaData` object,
either explicitly or via reflection, may not be completely thread-safe.
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
"""
__visit_name__ = 'metadata'
def __init__(self, bind=None, reflect=False, schema=None,
quote_schema=None,
naming_convention=DEFAULT_NAMING_CONVENTION,
info=None
):
"""Create a new MetaData object.
:param bind:
An Engine or Connection to bind to. May also be a string or URL
instance; these are passed to create_engine() and this MetaData will
be bound to the resulting engine.
:param reflect:
Optional, automatically load all tables from the bound database.
Defaults to False. ``bind`` is required when this option is set.
.. deprecated:: 0.8
Please use the :meth:`.MetaData.reflect` method.
:param schema:
The default schema to use for the :class:`.Table`,
:class:`.Sequence`, and potentially other objects associated with
this :class:`.MetaData`. Defaults to ``None``.
When this value is set, any :class:`.Table` or :class:`.Sequence`
which specifies ``None`` for the schema parameter will instead
have this schema name defined. To build a :class:`.Table`
or :class:`.Sequence` that still has ``None`` for the schema
even when this parameter is present, use the :attr:`.BLANK_SCHEMA`
symbol.
.. note::
As referred to above, the :paramref:`.MetaData.schema` parameter
only refers to the **default value** that will be applied to
the :paramref:`.Table.schema` parameter of an incoming
:class:`.Table` object. It does not refer to how the
:class:`.Table` is catalogued within the :class:`.MetaData`,
which remains consistent vs. a :class:`.MetaData` collection
that does not define this parameter. The :class:`.Table`
within the :class:`.MetaData` will still be keyed based on its
schema-qualified name, e.g.
``my_metadata.tables["some_schema.my_table"]``.
The current behavior of the :class:`.ForeignKey` object is to
circumvent this restriction, where it can locate a table given
the table name alone, where the schema will be assumed to be
present from this value as specified on the owning
:class:`.MetaData` collection. However, this implies that a
table qualified with BLANK_SCHEMA cannot currently be referred
to by string name from :class:`.ForeignKey`. Other parts of
SQLAlchemy such as Declarative may not have similar behaviors
built in, however may do so in a future release, along with a
consistent method of referring to a table in BLANK_SCHEMA.
.. seealso::
:paramref:`.Table.schema`
:paramref:`.Sequence.schema`
:param quote_schema:
Sets the ``quote_schema`` flag for those :class:`.Table`,
:class:`.Sequence`, and other objects which make usage of the
local ``schema`` name.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param naming_convention: a dictionary referring to values which
will establish default naming conventions for :class:`.Constraint`
and :class:`.Index` objects, for those objects which are not given
a name explicitly.
The keys of this dictionary may be:
* a constraint or Index class, e.g. the :class:`.UniqueConstraint`,
:class:`.ForeignKeyConstraint` class, the :class:`.Index` class
* a string mnemonic for one of the known constraint classes;
``"fk"``, ``"pk"``, ``"ix"``, ``"ck"``, ``"uq"`` for foreign key,
primary key, index, check, and unique constraint, respectively.
* the string name of a user-defined "token" that can be used
to define new naming tokens.
The values associated with each "constraint class" or "constraint
mnemonic" key are string naming templates, such as
``"uq_%(table_name)s_%(column_0_name)s"``,
which describe how the name should be composed. The values
associated with user-defined "token" keys should be callables of the
form ``fn(constraint, table)``, which accepts the constraint/index
object and :class:`.Table` as arguments, returning a string
result.
The built-in names are as follows, some of which may only be
available for certain types of constraint:
* ``%(table_name)s`` - the name of the :class:`.Table` object
associated with the constraint.
* ``%(referred_table_name)s`` - the name of the :class:`.Table`
object associated with the referencing target of a
:class:`.ForeignKeyConstraint`.
* ``%(column_0_name)s`` - the name of the :class:`.Column` at
index position "0" within the constraint.
* ``%(column_0_label)s`` - the label of the :class:`.Column` at
index position "0", e.g. :attr:`.Column.label`
* ``%(column_0_key)s`` - the key of the :class:`.Column` at
index position "0", e.g. :attr:`.Column.key`
* ``%(referred_column_0_name)s`` - the name of a :class:`.Column`
at index position "0" referenced by a
:class:`.ForeignKeyConstraint`.
* ``%(constraint_name)s`` - a special key that refers to the
existing name given to the constraint. When this key is
present, the :class:`.Constraint` object's existing name will be
replaced with one that is composed from template string that
uses this token. When this token is present, it is required that
the :class:`.Constraint` is given an explicit name ahead of time.
* user-defined: any additional token may be implemented by passing
it along with a ``fn(constraint, table)`` callable to the
naming_convention dictionary.
.. versionadded:: 0.9.2
.. seealso::
:ref:`constraint_naming_conventions` - for detailed usage
examples.
"""
self.tables = util.immutabledict()
self.schema = quoted_name(schema, quote_schema)
self.naming_convention = naming_convention
if info:
self.info = info
self._schemas = set()
self._sequences = {}
self._fk_memos = collections.defaultdict(list)
self.bind = bind
if reflect:
util.warn_deprecated("reflect=True is deprecate; please "
"use the reflect() method.")
if not bind:
raise exc.ArgumentError(
"A bind must be supplied in conjunction "
"with reflect=True")
self.reflect()
tables = None
"""A dictionary of :class:`.Table` objects keyed to their name or "table key".
The exact key is that determined by the :attr:`.Table.key` attribute;
for a table with no :attr:`.Table.schema` attribute, this is the same
as :attr:`.Table.name`. For a table with a schema, it is typically of the
form ``schemaname.tablename``.
.. seealso::
:attr:`.MetaData.sorted_tables`
"""
def __repr__(self):
return 'MetaData(bind=%r)' % self.bind
def __contains__(self, table_or_key):
if not isinstance(table_or_key, util.string_types):
table_or_key = table_or_key.key
return table_or_key in self.tables
def _add_table(self, name, schema, table):
key = _get_table_key(name, schema)
dict.__setitem__(self.tables, key, table)
if schema:
self._schemas.add(schema)
def _remove_table(self, name, schema):
key = _get_table_key(name, schema)
removed = dict.pop(self.tables, key, None)
if removed is not None:
for fk in removed.foreign_keys:
fk._remove_from_metadata(self)
if self._schemas:
self._schemas = set([t.schema
for t in self.tables.values()
if t.schema is not None])
def __getstate__(self):
return {'tables': self.tables,
'schema': self.schema,
'schemas': self._schemas,
'sequences': self._sequences,
'fk_memos': self._fk_memos,
'naming_convention': self.naming_convention
}
def __setstate__(self, state):
self.tables = state['tables']
self.schema = state['schema']
self.naming_convention = state['naming_convention']
self._bind = None
self._sequences = state['sequences']
self._schemas = state['schemas']
self._fk_memos = state['fk_memos']
def is_bound(self):
"""True if this MetaData is bound to an Engine or Connection."""
return self._bind is not None
def bind(self):
"""An :class:`.Engine` or :class:`.Connection` to which this
:class:`.MetaData` is bound.
Typically, a :class:`.Engine` is assigned to this attribute
so that "implicit execution" may be used, or alternatively
as a means of providing engine binding information to an
ORM :class:`.Session` object::
engine = create_engine("someurl://")
metadata.bind = engine
.. seealso::
:ref:`dbengine_implicit` - background on "bound metadata"
"""
return self._bind
@util.dependencies("sqlalchemy.engine.url")
def _bind_to(self, url, bind):
"""Bind this MetaData to an Engine, Connection, string or URL."""
if isinstance(bind, util.string_types + (url.URL, )):
self._bind = sqlalchemy.create_engine(bind)
else:
self._bind = bind
bind = property(bind, _bind_to)
def clear(self):
"""Clear all Table objects from this MetaData."""
dict.clear(self.tables)
self._schemas.clear()
self._fk_memos.clear()
def remove(self, table):
"""Remove the given Table object from this MetaData."""
self._remove_table(table.name, table.schema)
@property
def sorted_tables(self):
"""Returns a list of :class:`.Table` objects sorted in order of
foreign key dependency.
The sorting will place :class:`.Table` objects that have dependencies
first, before the dependencies themselves, representing the
order in which they can be created. To get the order in which
the tables would be dropped, use the ``reversed()`` Python built-in.
.. warning::
The :attr:`.sorted_tables` accessor cannot by itself accommodate
automatic resolution of dependency cycles between tables, which
are usually caused by mutually dependent foreign key constraints.
To resolve these cycles, either the
:paramref:`.ForeignKeyConstraint.use_alter` parameter may be applied
to those constraints, or use the
:func:`.schema.sort_tables_and_constraints` function which will break
out foreign key constraints involved in cycles separately.
.. seealso::
:func:`.schema.sort_tables`
:func:`.schema.sort_tables_and_constraints`
:attr:`.MetaData.tables`
:meth:`.Inspector.get_table_names`
:meth:`.Inspector.get_sorted_table_and_fkc_names`
"""
return ddl.sort_tables(sorted(self.tables.values(), key=lambda t: t.key))
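# Illustrative sketch of consuming sorted_tables (hypothetical metadata
# contents): iterate forwards for creation order, reversed for drop order.
#
#   for table in metadata.sorted_tables:
#       print("create:", table.key)
#   for table in reversed(metadata.sorted_tables):
#       print("drop:", table.key)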
def reflect(self, bind=None, schema=None, views=False, only=None,
extend_existing=False,
autoload_replace=True,
**dialect_kwargs):
r"""Load all available table definitions from the database.
Automatically creates ``Table`` entries in this ``MetaData`` for any
table available in the database but not yet present in the
``MetaData``. May be called multiple times to pick up tables recently
added to the database, however no special action is taken if a table
in this ``MetaData`` no longer exists in the database.
:param bind:
A :class:`.Connectable` used to access the database; if None, uses
the existing bind on this ``MetaData``, if any.
:param schema:
Optional, query and reflect tables from an alternate schema.
If None, the schema associated with this :class:`.MetaData`
is used, if any.
:param views:
If True, also reflect views.
:param only:
Optional. Load only a sub-set of available named tables. May be
specified as a sequence of names or a callable.
If a sequence of names is provided, only those tables will be
reflected. An error is raised if a table is requested but not
available. Named tables already present in this ``MetaData`` are
ignored.
If a callable is provided, it will be used as a boolean predicate to
filter the list of potential table names. The callable is called
with a table name and this ``MetaData`` instance as positional
arguments and should return a true value for any table to reflect.
:param extend_existing: Passed along to each :class:`.Table` as
:paramref:`.Table.extend_existing`.
.. versionadded:: 0.9.1
:param autoload_replace: Passed along to each :class:`.Table` as
:paramref:`.Table.autoload_replace`.
.. versionadded:: 0.9.1
:param \**dialect_kwargs: Additional keyword arguments not mentioned
above are dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
.. versionadded:: 0.9.2 - Added
:paramref:`.MetaData.reflect.**dialect_kwargs` to support
dialect-level reflection options for all :class:`.Table`
objects reflected.
"""
if bind is None:
bind = _bind_or_error(self)
with bind.connect() as conn:
reflect_opts = {
'autoload': True,
'autoload_with': conn,
'extend_existing': extend_existing,
'autoload_replace': autoload_replace,
'_extend_on': set()
}
reflect_opts.update(dialect_kwargs)
if schema is None:
schema = self.schema
if schema is not None:
reflect_opts['schema'] = schema
available = util.OrderedSet(
bind.engine.table_names(schema, connection=conn))
if views:
available.update(
bind.dialect.get_view_names(conn, schema)
)
if schema is not None:
available_w_schema = util.OrderedSet(["%s.%s" % (schema, name)
for name in available])
else:
available_w_schema = available
current = set(self.tables)
if only is None:
load = [name for name, schname in
zip(available, available_w_schema)
if extend_existing or schname not in current]
elif util.callable(only):
load = [name for name, schname in
zip(available, available_w_schema)
if (extend_existing or schname not in current)
and only(name, self)]
else:
missing = [name for name in only if name not in available]
if missing:
s = schema and (" schema '%s'" % schema) or ''
raise exc.InvalidRequestError(
'Could not reflect: requested table(s) not available '
'in %r%s: (%s)' %
(bind.engine, s, ', '.join(missing)))
load = [name for name in only if extend_existing or
name not in current]
for name in load:
Table(name, self, **reflect_opts)
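# Illustrative sketch of typical reflect() usage against an existing database
# (hypothetical URL and table names):
#
#   from sqlalchemy import create_engine, MetaData
#   engine = create_engine("postgresql://scott:tiger@localhost/test")
#   meta = MetaData()
#   meta.reflect(bind=engine, views=True, only=['user', 'address'])
#   user = meta.tables['user']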
def append_ddl_listener(self, event_name, listener):
"""Append a DDL event listener to this ``MetaData``.
.. deprecated:: 0.7
See :class:`.DDLEvents`.
"""
def adapt_listener(target, connection, **kw):
tables = kw['tables']
listener(event, target, connection, tables=tables)
event.listen(self, "" + event_name.replace('-', '_'), adapt_listener)
def create_all(self, bind=None, tables=None, checkfirst=True):
"""Create all tables stored in this metadata.
Conditional by default, will not attempt to recreate tables already
present in the target database.
:param bind:
A :class:`.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
:param tables:
Optional list of ``Table`` objects, which is a subset of the total
tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, don't issue CREATEs for tables already present
in the target database.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator,
self,
checkfirst=checkfirst,
tables=tables)
def drop_all(self, bind=None, tables=None, checkfirst=True):
"""Drop all tables stored in this metadata.
Conditional by default, will not attempt to drop tables not present in
the target database.
:param bind:
A :class:`.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
:param tables:
Optional list of ``Table`` objects, which is a subset of the
total tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, only issue DROPs for tables confirmed to be
present in the target database.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper,
self,
checkfirst=checkfirst,
tables=tables)
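# Illustrative sketch of create_all()/drop_all() together with a custom
# naming_convention, as described in the constructor docstring (hypothetical
# engine URL and convention templates):
#
#   from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
#   convention = {
#       "ix": "ix_%(column_0_label)s",
#       "uq": "uq_%(table_name)s_%(column_0_name)s",
#       "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
#       "pk": "pk_%(table_name)s",
#   }
#   metadata = MetaData(naming_convention=convention)
#   Table('account', metadata,
#         Column('id', Integer, primary_key=True),
#         Column('email', String(100), unique=True))
#   engine = create_engine("sqlite://")
#   metadata.create_all(engine, checkfirst=True)   # emits pk_account, uq_account_email
#   metadata.drop_all(engine)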
class ThreadLocalMetaData(MetaData):
"""A MetaData variant that presents a different ``bind`` in every thread.
Makes the ``bind`` property of the MetaData a thread-local value, allowing
this collection of tables to be bound to different ``Engine``
implementations or connections in each thread.
The ThreadLocalMetaData starts off bound to None in each thread. Binds
must be made explicitly by assigning to the ``bind`` property or using
``connect()``. You can also re-bind dynamically multiple times per
thread, just like a regular ``MetaData``.
"""
__visit_name__ = 'metadata'
def __init__(self):
"""Construct a ThreadLocalMetaData."""
self.context = util.threading.local()
self.__engines = {}
super(ThreadLocalMetaData, self).__init__()
def bind(self):
"""The bound Engine or Connection for this thread.
This property may be assigned an Engine or Connection, or assigned a
string or URL to automatically create a basic Engine for this bind
with ``create_engine()``."""
return getattr(self.context, '_engine', None)
@util.dependencies("sqlalchemy.engine.url")
def _bind_to(self, url, bind):
"""Bind to a Connectable in the caller's thread."""
if isinstance(bind, util.string_types + (url.URL, )):
try:
self.context._engine = self.__engines[bind]
except KeyError:
e = sqlalchemy.create_engine(bind)
self.__engines[bind] = e
self.context._engine = e
else:
# TODO: this is squirrely. we shouldn't have to hold onto engines
# in a case like this
if bind not in self.__engines:
self.__engines[bind] = bind
self.context._engine = bind
bind = property(bind, _bind_to)
def is_bound(self):
"""True if there is a bind for this thread."""
return (hasattr(self.context, '_engine') and
self.context._engine is not None)
def dispose(self):
"""Dispose all bound engines, in all thread contexts."""
for e in self.__engines.values():
if hasattr(e, 'dispose'):
e.dispose()
class _SchemaTranslateMap(object):
"""Provide translation of schema names based on a mapping.
Also provides helpers for producing cache keys and optimized
access when no mapping is present.
Used by the :paramref:`.Connection.execution_options.schema_translate_map`
feature.
.. versionadded:: 1.1
"""
__slots__ = 'map_', '__call__', 'hash_key', 'is_default'
_default_schema_getter = operator.attrgetter("schema")
def __init__(self, map_):
self.map_ = map_
if map_ is not None:
def schema_for_object(obj):
effective_schema = self._default_schema_getter(obj)
effective_schema = obj._translate_schema(
effective_schema, map_)
return effective_schema
self.__call__ = schema_for_object
self.hash_key = ";".join(
"%s=%s" % (k, map_[k])
for k in sorted(map_, key=str)
)
self.is_default = False
else:
self.hash_key = 0
self.__call__ = self._default_schema_getter
self.is_default = True
@classmethod
def _schema_getter(cls, map_):
if map_ is None:
return _default_schema_map
elif isinstance(map_, _SchemaTranslateMap):
return map_
else:
return _SchemaTranslateMap(map_)
_default_schema_map = _SchemaTranslateMap(None)
_schema_getter = _SchemaTranslateMap._schema_getter
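# Illustrative sketch of the schema_translate_map execution option that this
# helper supports (hypothetical schema names):
#
#   connection = engine.connect().execution_options(
#       schema_translate_map={None: "user_schema_one",
#                             "special": "special_schema"})
#   # Table objects with schema=None render as "user_schema_one.<name>";
#   # those with schema="special" render as "special_schema.<name>".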
|
paulondc/gaffer
|
refs/heads/master
|
python/GafferUITest/ButtonTest.py
|
5
|
##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import GafferUI
import GafferUITest
class ButtonTest( GafferUITest.TestCase ) :
def testConstructor( self ) :
b = GafferUI.Button( "" )
self.assertEqual( b.getText(), "" )
self.assertEqual( b.getImage(), None )
b = GafferUI.Button( "OK" )
self.assertEqual( b.getText(), "OK" )
self.assertEqual( b.getImage(), None )
b = GafferUI.Button( "", "arrowRight10.png" )
self.assertEqual( b.getText(), "" )
self.failUnless( isinstance( b.getImage(), GafferUI.Image ) )
def testAccessors( self ) :
b = GafferUI.Button()
b.setText( "a" )
self.assertEqual( b.getText(), "a" )
i = GafferUI.Image( "arrowRight10.png" )
b.setImage( i )
self.failUnless( b.getImage() is i )
b.setImage( "arrowRight10.png" )
self.failUnless( isinstance( b.getImage(), GafferUI.Image ) )
b.setImage( None )
self.assertEqual( b.getImage(), None )
self.assertEqual( b.getHasFrame(), True )
b.setHasFrame( False )
self.assertEqual( b.getHasFrame(), False )
def testAccessorTypeChecking( self ) :
b = GafferUI.Button()
self.assertRaises( Exception, b.setText, 1 )
self.assertRaises( Exception, b.setImage, 1 )
if __name__ == "__main__":
unittest.main()
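# A small usage sketch built only from the accessors exercised in the tests
# above (illustrative; not part of the test suite):
#
#   button = GafferUI.Button( "Render", "arrowRight10.png" )
#   button.setHasFrame( False )
#   assert button.getText() == "Render"
#   assert isinstance( button.getImage(), GafferUI.Image )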
|
gavin-feng/odoo
|
refs/heads/8.0
|
openerp/report/render/rml2pdf/trml2pdf.py
|
256
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import copy
import reportlab
import re
from reportlab.pdfgen import canvas
from reportlab import platypus
import utils
import color
import os
import logging
from lxml import etree
import base64
from distutils.version import LooseVersion
from reportlab.platypus.doctemplate import ActionFlowable
from openerp.tools.safe_eval import safe_eval as eval
from reportlab.lib.units import inch,cm,mm
from openerp.tools.misc import file_open
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.pagesizes import A4, letter
try:
from cStringIO import StringIO
_hush_pyflakes = [ StringIO ]
except ImportError:
from StringIO import StringIO
_logger = logging.getLogger(__name__)
encoding = 'utf-8'
def select_fontname(fontname, default_fontname):
if fontname not in pdfmetrics.getRegisteredFontNames()\
or fontname not in pdfmetrics.standardFonts:
# let reportlab attempt to find it
try:
pdfmetrics.getFont(fontname)
except Exception:
addition = ""
if " " in fontname:
addition = ". Your font contains spaces which is not valid in RML."
_logger.warning('Could not locate font %s, substituting default: %s%s',
fontname, default_fontname, addition)
fontname = default_fontname
return fontname
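# Illustrative call sketch (hypothetical font name): fall back to Helvetica
# when the requested face is not registered with reportlab.
#
#   fontname = select_fontname('UnknownFont', 'Helvetica')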
def _open_image(filename, path=None):
"""Attempt to open a binary file and return the descriptor
"""
if os.path.isfile(filename):
return open(filename, 'rb')
for p in (path or []):
if p and os.path.isabs(p):
fullpath = os.path.join(p, filename)
if os.path.isfile(fullpath):
return open(fullpath, 'rb')
try:
if p:
fullpath = os.path.join(p, filename)
else:
fullpath = filename
return file_open(fullpath)
except IOError:
pass
raise IOError("File %s cannot be found in image path" % filename)
class NumberedCanvas(canvas.Canvas):
def __init__(self, *args, **kwargs):
canvas.Canvas.__init__(self, *args, **kwargs)
self._saved_page_states = []
def showPage(self):
self._startPage()
def save(self):
"""add page info to each page (page x of y)"""
for state in self._saved_page_states:
self.__dict__.update(state)
self.draw_page_number()
canvas.Canvas.showPage(self)
canvas.Canvas.save(self)
def draw_page_number(self):
page_count = len(self._saved_page_states)
self.setFont("Helvetica", 8)
self.drawRightString((self._pagesize[0]-30), (self._pagesize[1]-40),
" %(this)i / %(total)i" % {
'this': self._pageNumber,
'total': page_count,
}
)
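# Illustrative sketch of how a Canvas subclass like this one is typically
# supplied to a platypus document build via reportlab's canvasmaker hook
# (hypothetical output path and story):
#
#   from reportlab.platypus import SimpleDocTemplate, Paragraph
#   from reportlab.lib.styles import getSampleStyleSheet
#   doc = SimpleDocTemplate("out.pdf")
#   story = [Paragraph("hello", getSampleStyleSheet()["Normal"])]
#   doc.build(story, canvasmaker=NumberedCanvas)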
class PageCount(platypus.Flowable):
def __init__(self, story_count=0):
platypus.Flowable.__init__(self)
self.story_count = story_count
def draw(self):
self.canv.beginForm("pageCount%d" % self.story_count)
self.canv.setFont("Helvetica", utils.unit_get(str(8)))
self.canv.drawString(0, 0, str(self.canv.getPageNumber()))
self.canv.endForm()
class PageReset(platypus.Flowable):
def draw(self):
"""Flag to close current story page numbering and prepare for the next
should be executed after the rendering of the full story"""
self.canv._doPageReset = True
class _rml_styles(object,):
def __init__(self, nodes, localcontext):
self.localcontext = localcontext
self.styles = {}
self.styles_obj = {}
self.names = {}
self.table_styles = {}
self.default_style = reportlab.lib.styles.getSampleStyleSheet()
for node in nodes:
for style in node.findall('blockTableStyle'):
self.table_styles[style.get('id')] = self._table_style_get(style)
for style in node.findall('paraStyle'):
sname = style.get('name')
self.styles[sname] = self._para_style_update(style)
if self.default_style.has_key(sname):
for key, value in self.styles[sname].items():
setattr(self.default_style[sname], key, value)
else:
self.styles_obj[sname] = reportlab.lib.styles.ParagraphStyle(sname, self.default_style["Normal"], **self.styles[sname])
for variable in node.findall('initialize'):
for name in variable.findall('name'):
self.names[ name.get('id')] = name.get('value')
def _para_style_update(self, node):
data = {}
for attr in ['textColor', 'backColor', 'bulletColor', 'borderColor']:
if node.get(attr):
data[attr] = color.get(node.get(attr))
for attr in ['bulletFontName', 'fontName']:
if node.get(attr):
fontname= select_fontname(node.get(attr), None)
if fontname is not None:
data['fontName'] = fontname
for attr in ['bulletText']:
if node.get(attr):
data[attr] = node.get(attr)
for attr in ['fontSize', 'leftIndent', 'rightIndent', 'spaceBefore', 'spaceAfter',
'firstLineIndent', 'bulletIndent', 'bulletFontSize', 'leading',
'borderWidth','borderPadding','borderRadius']:
if node.get(attr):
data[attr] = utils.unit_get(node.get(attr))
if node.get('alignment'):
align = {
'right':reportlab.lib.enums.TA_RIGHT,
'center':reportlab.lib.enums.TA_CENTER,
'justify':reportlab.lib.enums.TA_JUSTIFY
}
data['alignment'] = align.get(node.get('alignment').lower(), reportlab.lib.enums.TA_LEFT)
data['splitLongWords'] = 0
return data
def _table_style_get(self, style_node):
styles = []
for node in style_node:
start = utils.tuple_int_get(node, 'start', (0,0) )
stop = utils.tuple_int_get(node, 'stop', (-1,-1) )
if node.tag=='blockValign':
styles.append(('VALIGN', start, stop, str(node.get('value'))))
elif node.tag=='blockFont':
styles.append(('FONT', start, stop, str(node.get('name'))))
elif node.tag=='blockTextColor':
styles.append(('TEXTCOLOR', start, stop, color.get(str(node.get('colorName')))))
elif node.tag=='blockLeading':
styles.append(('LEADING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockAlignment':
styles.append(('ALIGNMENT', start, stop, str(node.get('value'))))
elif node.tag=='blockSpan':
styles.append(('SPAN', start, stop))
elif node.tag=='blockLeftPadding':
styles.append(('LEFTPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockRightPadding':
styles.append(('RIGHTPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockTopPadding':
styles.append(('TOPPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockBottomPadding':
styles.append(('BOTTOMPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockBackground':
styles.append(('BACKGROUND', start, stop, color.get(node.get('colorName'))))
if node.get('size'):
styles.append(('FONTSIZE', start, stop, utils.unit_get(node.get('size'))))
elif node.tag=='lineStyle':
kind = node.get('kind')
kind_list = [ 'GRID', 'BOX', 'OUTLINE', 'INNERGRID', 'LINEBELOW', 'LINEABOVE','LINEBEFORE', 'LINEAFTER' ]
assert kind in kind_list
thick = 1
if node.get('thickness'):
thick = float(node.get('thickness'))
styles.append((kind, start, stop, thick, color.get(node.get('colorName'))))
return platypus.tables.TableStyle(styles)
def para_style_get(self, node):
style = False
sname = node.get('style')
if sname:
if sname in self.styles_obj:
style = self.styles_obj[sname]
else:
_logger.debug('Warning: style not found, %s - setting default!', node.get('style'))
if not style:
style = self.default_style['Normal']
para_update = self._para_style_update(node)
if para_update:
# update style only is necessary
style = copy.deepcopy(style)
style.__dict__.update(para_update)
return style
class _rml_doc(object):
def __init__(self, node, localcontext=None, images=None, path='.', title=None):
if images is None:
images = {}
if localcontext is None:
localcontext = {}
self.localcontext = localcontext
self.etree = node
self.filename = self.etree.get('filename')
self.images = images
self.path = path
self.title = title
def docinit(self, els):
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
for node in els:
for font in node.findall('registerFont'):
name = font.get('fontName').encode('ascii')
fname = font.get('fontFile').encode('ascii')
if name not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(name, fname))
#by default, we map the fontName to each style (bold, italic, bold and italic), so that
#if there isn't any font defined for one of these style (via a font family), the system
#will fallback on the normal font.
addMapping(name, 0, 0, name) #normal
addMapping(name, 0, 1, name) #italic
addMapping(name, 1, 0, name) #bold
addMapping(name, 1, 1, name) #italic and bold
#if registerFontFamily is defined, we register the mapping of the fontName to use for each style.
for font_family in node.findall('registerFontFamily'):
family_name = font_family.get('normal').encode('ascii')
if font_family.get('italic'):
addMapping(family_name, 0, 1, font_family.get('italic').encode('ascii'))
if font_family.get('bold'):
addMapping(family_name, 1, 0, font_family.get('bold').encode('ascii'))
if font_family.get('boldItalic'):
addMapping(family_name, 1, 1, font_family.get('boldItalic').encode('ascii'))
def setTTFontMapping(self,face, fontname, filename, mode='all'):
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
if mode:
mode = mode.lower()
if fontname not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(fontname, filename))
if mode == 'all':
addMapping(face, 0, 0, fontname) #normal
addMapping(face, 0, 1, fontname) #italic
addMapping(face, 1, 0, fontname) #bold
addMapping(face, 1, 1, fontname) #italic and bold
elif mode in ['italic', 'oblique']:
addMapping(face, 0, 1, fontname) #italic
elif mode == 'bold':
addMapping(face, 1, 0, fontname) #bold
elif mode in ('bolditalic', 'bold italic','boldoblique', 'bold oblique'):
addMapping(face, 1, 1, fontname) #italic and bold
else:
addMapping(face, 0, 0, fontname) #normal
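    # Usage sketch for the mapping above (hypothetical face name and font file
    # paths, shown for illustration only):
    #   doc.setTTFontMapping('MyFace', 'MyFace', '/path/to/MyFace.ttf')                          # all styles
    #   doc.setTTFontMapping('MyFace', 'MyFace-Bold', '/path/to/MyFace-Bold.ttf', mode='bold')
    #   doc.setTTFontMapping('MyFace', 'MyFace-Italic', '/path/to/MyFace-Italic.ttf', mode='italic')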
def _textual_image(self, node):
rc = ''
for n in node:
            rc += (etree.tostring(n) or '') + (n.tail or '')
        return base64.decodestring(rc)
def _images(self, el):
result = {}
for node in el.findall('.//image'):
rc =( node.text or '')
result[node.get('name')] = base64.decodestring(rc)
return result
def render(self, out):
el = self.etree.findall('.//docinit')
if el:
self.docinit(el)
el = self.etree.findall('.//stylesheet')
self.styles = _rml_styles(el,self.localcontext)
el = self.etree.findall('.//images')
if el:
self.images.update( self._images(el[0]) )
el = self.etree.findall('.//template')
if len(el):
pt_obj = _rml_template(self.localcontext, out, el[0], self, images=self.images, path=self.path, title=self.title)
el = utils._child_get(self.etree, self, 'story')
pt_obj.render(el)
else:
self.canvas = canvas.Canvas(out)
pd = self.etree.find('pageDrawing')[0]
pd_obj = _rml_canvas(self.canvas, self.localcontext, None, self, self.images, path=self.path, title=self.title)
pd_obj.render(pd)
self.canvas.showPage()
self.canvas.save()
class _rml_canvas(object):
def __init__(self, canvas, localcontext, doc_tmpl=None, doc=None, images=None, path='.', title=None):
if images is None:
images = {}
self.localcontext = localcontext
self.canvas = canvas
self.styles = doc.styles
self.doc_tmpl = doc_tmpl
self.doc = doc
self.images = images
self.path = path
self.title = title
if self.title:
self.canvas.setTitle(self.title)
def _textual(self, node, x=0, y=0):
text = node.text and node.text.encode('utf-8') or ''
rc = utils._process_text(self, text)
for n in node:
if n.tag == 'seq':
from reportlab.lib.sequencer import getSequencer
seq = getSequencer()
rc += str(seq.next(n.get('id')))
if n.tag == 'pageCount':
if x or y:
self.canvas.translate(x,y)
self.canvas.doForm('pageCount%s' % (self.canvas._storyCount,))
if x or y:
self.canvas.translate(-x,-y)
if n.tag == 'pageNumber':
rc += str(self.canvas.getPageNumber())
rc += utils._process_text(self, n.tail)
return rc.replace('\n','')
def _drawString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
try:
self.canvas.drawString(text=text, **v)
except TypeError:
_logger.error("Bad RML: <drawString> tag requires attributes 'x' and 'y'!")
raise
def _drawCenteredString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
self.canvas.drawCentredString(text=text, **v)
def _drawRightString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
self.canvas.drawRightString(text=text, **v)
def _rect(self, node):
if node.get('round'):
self.canvas.roundRect(radius=utils.unit_get(node.get('round')), **utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
else:
self.canvas.rect(**utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
def _ellipse(self, node):
x1 = utils.unit_get(node.get('x'))
x2 = utils.unit_get(node.get('width'))
y1 = utils.unit_get(node.get('y'))
y2 = utils.unit_get(node.get('height'))
self.canvas.ellipse(x1,y1,x2,y2, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def _curves(self, node):
line_str = node.text.split()
while len(line_str)>7:
self.canvas.bezier(*[utils.unit_get(l) for l in line_str[0:8]])
line_str = line_str[8:]
def _lines(self, node):
line_str = node.text.split()
lines = []
while len(line_str)>3:
lines.append([utils.unit_get(l) for l in line_str[0:4]])
line_str = line_str[4:]
self.canvas.lines(lines)
def _grid(self, node):
xlist = [utils.unit_get(s) for s in node.get('xs').split(',')]
ylist = [utils.unit_get(s) for s in node.get('ys').split(',')]
self.canvas.grid(xlist, ylist)
def _translate(self, node):
dx = utils.unit_get(node.get('dx')) or 0
dy = utils.unit_get(node.get('dy')) or 0
self.canvas.translate(dx,dy)
def _circle(self, node):
self.canvas.circle(x_cen=utils.unit_get(node.get('x')), y_cen=utils.unit_get(node.get('y')), r=utils.unit_get(node.get('radius')), **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def _place(self, node):
flows = _rml_flowable(self.doc, self.localcontext, images=self.images, path=self.path, title=self.title, canvas=self.canvas).render(node)
infos = utils.attr_get(node, ['x','y','width','height'])
infos['y']+=infos['height']
for flow in flows:
w,h = flow.wrap(infos['width'], infos['height'])
if w<=infos['width'] and h<=infos['height']:
infos['y']-=h
flow.drawOn(self.canvas,infos['x'],infos['y'])
infos['height']-=h
else:
raise ValueError("Not enough space")
def _line_mode(self, node):
ljoin = {'round':1, 'mitered':0, 'bevelled':2}
lcap = {'default':0, 'round':1, 'square':2}
if node.get('width'):
self.canvas.setLineWidth(utils.unit_get(node.get('width')))
if node.get('join'):
self.canvas.setLineJoin(ljoin[node.get('join')])
if node.get('cap'):
self.canvas.setLineCap(lcap[node.get('cap')])
if node.get('miterLimit'):
            self.canvas.setMiterLimit(utils.unit_get(node.get('miterLimit')))
if node.get('dash'):
dashes = node.get('dash').split(',')
for x in range(len(dashes)):
dashes[x]=utils.unit_get(dashes[x])
            self.canvas.setDash(dashes)
def _image(self, node):
import urllib
import urlparse
from reportlab.lib.utils import ImageReader
nfile = node.get('file')
if not nfile:
if node.get('name'):
image_data = self.images[node.get('name')]
_logger.debug("Image %s used", node.get('name'))
s = StringIO(image_data)
else:
newtext = node.text
if self.localcontext:
res = utils._regex.findall(newtext)
for key in res:
newtext = eval(key, {}, self.localcontext) or ''
image_data = None
if newtext:
image_data = base64.decodestring(newtext)
if image_data:
s = StringIO(image_data)
else:
_logger.debug("No image data!")
return False
else:
if nfile in self.images:
s = StringIO(self.images[nfile])
else:
try:
up = urlparse.urlparse(str(nfile))
except ValueError:
up = False
if up and up.scheme:
# RFC: do we really want to open external URLs?
# Are we safe from cross-site scripting or attacks?
_logger.debug("Retrieve image from %s", nfile)
u = urllib.urlopen(str(nfile))
s = StringIO(u.read())
else:
_logger.debug("Open image file %s ", nfile)
s = _open_image(nfile, path=self.path)
try:
img = ImageReader(s)
(sx,sy) = img.getSize()
_logger.debug("Image is %dx%d", sx, sy)
args = { 'x': 0.0, 'y': 0.0, 'mask': 'auto'}
for tag in ('width','height','x','y'):
if node.get(tag):
args[tag] = utils.unit_get(node.get(tag))
if ('width' in args) and (not 'height' in args):
args['height'] = sy * args['width'] / sx
elif ('height' in args) and (not 'width' in args):
args['width'] = sx * args['height'] / sy
elif ('width' in args) and ('height' in args):
                if (float(args['width']) / args['height']) > (float(sx) / sy):
args['width'] = sx * args['height'] / sy
else:
args['height'] = sy * args['width'] / sx
self.canvas.drawImage(img, **args)
finally:
s.close()
# self.canvas._doc.SaveToFile(self.canvas._filename, self.canvas)
def _path(self, node):
self.path = self.canvas.beginPath()
self.path.moveTo(**utils.attr_get(node, ['x','y']))
for n in utils._child_get(node, self):
if not n.text :
if n.tag=='moveto':
vals = utils.text_get(n).split()
self.path.moveTo(utils.unit_get(vals[0]), utils.unit_get(vals[1]))
elif n.tag=='curvesto':
vals = utils.text_get(n).split()
while len(vals)>5:
pos=[]
while len(pos)<6:
pos.append(utils.unit_get(vals.pop(0)))
self.path.curveTo(*pos)
elif n.text:
data = n.text.split() # Not sure if I must merge all TEXT_NODE ?
while len(data)>1:
x = utils.unit_get(data.pop(0))
y = utils.unit_get(data.pop(0))
self.path.lineTo(x,y)
if (not node.get('close')) or utils.bool_get(node.get('close')):
self.path.close()
self.canvas.drawPath(self.path, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def setFont(self, node):
fontname = select_fontname(node.get('name'), self.canvas._fontname)
return self.canvas.setFont(fontname, utils.unit_get(node.get('size')))
def render(self, node):
tags = {
'drawCentredString': self._drawCenteredString,
'drawRightString': self._drawRightString,
'drawString': self._drawString,
'rect': self._rect,
'ellipse': self._ellipse,
'lines': self._lines,
'grid': self._grid,
'curves': self._curves,
'fill': lambda node: self.canvas.setFillColor(color.get(node.get('color'))),
'stroke': lambda node: self.canvas.setStrokeColor(color.get(node.get('color'))),
'setFont': self.setFont ,
'place': self._place,
'circle': self._circle,
'lineMode': self._line_mode,
'path': self._path,
'rotate': lambda node: self.canvas.rotate(float(node.get('degrees'))),
'translate': self._translate,
'image': self._image
}
for n in utils._child_get(node, self):
if n.tag in tags:
tags[n.tag](n)
class _rml_draw(object):
def __init__(self, localcontext, node, styles, images=None, path='.', title=None):
if images is None:
images = {}
self.localcontext = localcontext
self.node = node
self.styles = styles
self.canvas = None
self.images = images
self.path = path
self.canvas_title = title
def render(self, canvas, doc):
canvas.saveState()
cnv = _rml_canvas(canvas, self.localcontext, doc, self.styles, images=self.images, path=self.path, title=self.canvas_title)
cnv.render(self.node)
canvas.restoreState()
class _rml_Illustration(platypus.flowables.Flowable):
def __init__(self, node, localcontext, styles, self2):
self.localcontext = (localcontext or {}).copy()
self.node = node
self.styles = styles
self.width = utils.unit_get(node.get('width'))
self.height = utils.unit_get(node.get('height'))
self.self2 = self2
def wrap(self, *args):
return self.width, self.height
def draw(self):
drw = _rml_draw(self.localcontext ,self.node,self.styles, images=self.self2.images, path=self.self2.path, title=self.self2.title)
drw.render(self.canv, None)
# Workaround for issue #15: https://bitbucket.org/rptlab/reportlab/issue/15/infinite-pages-produced-when-splitting
original_pto_split = platypus.flowables.PTOContainer.split
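# The patched split() below discards a split result that contains nothing but
# the PTO header/trailer flowables plus filler space, so that platypus stops
# instead of emitting empty pages forever (see the issue linked above).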
def split(self, availWidth, availHeight):
res = original_pto_split(self, availWidth, availHeight)
if len(res) > 2 and len(self._content) > 0:
header = self._content[0]._ptoinfo.header
trailer = self._content[0]._ptoinfo.trailer
if isinstance(res[-2], platypus.flowables.UseUpSpace) and len(header + trailer) == len(res[:-2]):
return []
return res
platypus.flowables.PTOContainer.split = split
class _rml_flowable(object):
def __init__(self, doc, localcontext, images=None, path='.', title=None, canvas=None):
if images is None:
images = {}
self.localcontext = localcontext
self.doc = doc
self.styles = doc.styles
self.images = images
self.path = path
self.title = title
self.canvas = canvas
def _textual(self, node):
rc1 = utils._process_text(self, node.text or '')
for n in utils._child_get(node,self):
txt_n = copy.deepcopy(n)
for key in txt_n.attrib.keys():
if key in ('rml_except', 'rml_loop', 'rml_tag'):
del txt_n.attrib[key]
if not n.tag == 'bullet':
if n.tag == 'pageNumber':
txt_n.text = self.canvas and str(self.canvas.getPageNumber()) or ''
else:
txt_n.text = utils.xml2str(self._textual(n))
txt_n.tail = n.tail and utils.xml2str(utils._process_text(self, n.tail.replace('\n',''))) or ''
rc1 += etree.tostring(txt_n)
return rc1
def _table(self, node):
children = utils._child_get(node,self,'tr')
if not children:
return None
length = 0
colwidths = None
rowheights = None
data = []
styles = []
posy = 0
for tr in children:
paraStyle = None
if tr.get('style'):
st = copy.deepcopy(self.styles.table_styles[tr.get('style')])
for si in range(len(st._cmds)):
s = list(st._cmds[si])
s[1] = (s[1][0],posy)
s[2] = (s[2][0],posy)
st._cmds[si] = tuple(s)
styles.append(st)
if tr.get('paraStyle'):
paraStyle = self.styles.styles[tr.get('paraStyle')]
data2 = []
posx = 0
for td in utils._child_get(tr, self,'td'):
if td.get('style'):
st = copy.deepcopy(self.styles.table_styles[td.get('style')])
                    for si in range(len(st._cmds)):
                        s = list(st._cmds[si])
                        s[1] = (posx, posy)
                        s[2] = (posx, posy)
                        st._cmds[si] = tuple(s)
styles.append(st)
if td.get('paraStyle'):
# TODO: merge styles
paraStyle = self.styles.styles[td.get('paraStyle')]
posx += 1
flow = []
for n in utils._child_get(td, self):
if n.tag == etree.Comment:
n.text = ''
continue
fl = self._flowable(n, extra_style=paraStyle)
if isinstance(fl,list):
flow += fl
else:
flow.append( fl )
if not len(flow):
flow = self._textual(td)
data2.append( flow )
if len(data2)>length:
length=len(data2)
for ab in data:
while len(ab)<length:
ab.append('')
while len(data2)<length:
data2.append('')
data.append( data2 )
posy += 1
if node.get('colWidths'):
assert length == len(node.get('colWidths').split(','))
colwidths = [utils.unit_get(f.strip()) for f in node.get('colWidths').split(',')]
if node.get('rowHeights'):
rowheights = [utils.unit_get(f.strip()) for f in node.get('rowHeights').split(',')]
if len(rowheights) == 1:
rowheights = rowheights[0]
table = platypus.LongTable(data = data, colWidths=colwidths, rowHeights=rowheights, **(utils.attr_get(node, ['splitByRow'] ,{'repeatRows':'int','repeatCols':'int'})))
if node.get('style'):
table.setStyle(self.styles.table_styles[node.get('style')])
for s in styles:
table.setStyle(s)
return table
def _illustration(self, node):
return _rml_Illustration(node, self.localcontext, self.styles, self)
def _textual_image(self, node):
return base64.decodestring(node.text)
def _pto(self, node):
sub_story = []
pto_header = None
pto_trailer = None
for node in utils._child_get(node, self):
if node.tag == etree.Comment:
node.text = ''
continue
elif node.tag=='pto_header':
pto_header = self.render(node)
elif node.tag=='pto_trailer':
pto_trailer = self.render(node)
else:
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return platypus.flowables.PTOContainer(sub_story, trailer=pto_trailer, header=pto_header)
def _flowable(self, node, extra_style=None):
if node.tag=='pto':
return self._pto(node)
if node.tag=='para':
style = self.styles.para_style_get(node)
if extra_style:
style.__dict__.update(extra_style)
text_node = self._textual(node).strip().replace('\n\n', '\n').replace('\n', '<br/>')
instance = platypus.Paragraph(text_node, style, **(utils.attr_get(node, [], {'bulletText':'str'})))
result = [instance]
if LooseVersion(reportlab.Version) > LooseVersion('3.0') and not instance.getPlainText().strip() and instance.text.strip():
result.append(platypus.Paragraph(' <br/>', style, **(utils.attr_get(node, [], {'bulletText': 'str'}))))
return result
elif node.tag=='barCode':
try:
from reportlab.graphics.barcode import code128
from reportlab.graphics.barcode import code39
from reportlab.graphics.barcode import code93
from reportlab.graphics.barcode import common
from reportlab.graphics.barcode import fourstate
from reportlab.graphics.barcode import usps
from reportlab.graphics.barcode import createBarcodeDrawing
except ImportError:
_logger.warning("Cannot use barcode renderers:", exc_info=True)
return None
args = utils.attr_get(node, [], {'ratio':'float','xdim':'unit','height':'unit','checksum':'int','quiet':'int','width':'unit','stop':'bool','bearers':'int','barWidth':'float','barHeight':'float'})
codes = {
'codabar': lambda x: common.Codabar(x, **args),
'code11': lambda x: common.Code11(x, **args),
'code128': lambda x: code128.Code128(str(x), **args),
'standard39': lambda x: code39.Standard39(str(x), **args),
'standard93': lambda x: code93.Standard93(str(x), **args),
'i2of5': lambda x: common.I2of5(x, **args),
'extended39': lambda x: code39.Extended39(str(x), **args),
'extended93': lambda x: code93.Extended93(str(x), **args),
'msi': lambda x: common.MSI(x, **args),
'fim': lambda x: usps.FIM(x, **args),
'postnet': lambda x: usps.POSTNET(x, **args),
'ean13': lambda x: createBarcodeDrawing('EAN13', value=str(x), **args),
'qrcode': lambda x: createBarcodeDrawing('QR', value=x, **args),
}
code = 'code128'
if node.get('code'):
code = node.get('code').lower()
return codes[code](self._textual(node))
elif node.tag=='name':
self.styles.names[ node.get('id')] = node.get('value')
return None
elif node.tag=='xpre':
style = self.styles.para_style_get(node)
return platypus.XPreformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int','frags':'int'})))
elif node.tag=='pre':
style = self.styles.para_style_get(node)
return platypus.Preformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int'})))
elif node.tag=='illustration':
return self._illustration(node)
elif node.tag=='blockTable':
return self._table(node)
elif node.tag=='title':
styles = reportlab.lib.styles.getSampleStyleSheet()
style = styles['Title']
return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
elif re.match('^h([1-9]+[0-9]*)$', (node.tag or '')):
styles = reportlab.lib.styles.getSampleStyleSheet()
style = styles['Heading'+str(node.tag[1:])]
return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
elif node.tag=='image':
image_data = False
if not node.get('file'):
if node.get('name'):
if node.get('name') in self.doc.images:
_logger.debug("Image %s read ", node.get('name'))
image_data = self.doc.images[node.get('name')].read()
else:
_logger.warning("Image %s not defined", node.get('name'))
return False
else:
import base64
newtext = node.text
if self.localcontext:
newtext = utils._process_text(self, node.text or '')
image_data = base64.decodestring(newtext)
if not image_data:
_logger.debug("No inline image data")
return False
image = StringIO(image_data)
else:
_logger.debug("Image get from file %s", node.get('file'))
image = _open_image(node.get('file'), path=self.doc.path)
return platypus.Image(image, mask=(250,255,250,255,250,255), **(utils.attr_get(node, ['width','height'])))
elif node.tag=='spacer':
if node.get('width'):
width = utils.unit_get(node.get('width'))
else:
width = utils.unit_get('1cm')
length = utils.unit_get(node.get('length'))
return platypus.Spacer(width=width, height=length)
elif node.tag=='section':
return self.render(node)
elif node.tag == 'pageNumberReset':
return PageReset()
elif node.tag in ('pageBreak', 'nextPage'):
return platypus.PageBreak()
elif node.tag=='condPageBreak':
return platypus.CondPageBreak(**(utils.attr_get(node, ['height'])))
elif node.tag=='setNextTemplate':
return platypus.NextPageTemplate(str(node.get('name')))
elif node.tag=='nextFrame':
return platypus.CondPageBreak(1000) # TODO: change the 1000 !
elif node.tag == 'setNextFrame':
from reportlab.platypus.doctemplate import NextFrameFlowable
return NextFrameFlowable(str(node.get('name')))
elif node.tag == 'currentFrame':
from reportlab.platypus.doctemplate import CurrentFrameFlowable
return CurrentFrameFlowable(str(node.get('name')))
elif node.tag == 'frameEnd':
return EndFrameFlowable()
elif node.tag == 'hr':
width_hr=node.get('width') or '100%'
color_hr=node.get('color') or 'black'
thickness_hr=node.get('thickness') or 1
lineCap_hr=node.get('lineCap') or 'round'
return platypus.flowables.HRFlowable(width=width_hr,color=color.get(color_hr),thickness=float(thickness_hr),lineCap=str(lineCap_hr))
else:
sys.stderr.write('Warning: flowable not yet implemented: %s !\n' % (node.tag,))
return None
def render(self, node_story):
def process_story(node_story):
sub_story = []
for node in utils._child_get(node_story, self):
if node.tag == etree.Comment:
node.text = ''
continue
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return sub_story
return process_story(node_story)
class EndFrameFlowable(ActionFlowable):
def __init__(self,resume=0):
ActionFlowable.__init__(self,('frameEnd',resume))
class TinyDocTemplate(platypus.BaseDocTemplate):
def beforeDocument(self):
        # Store some useful values directly on the canvas, so they are
        # available while drawing flowables (needed for proper PageCount handling).
self.canv._doPageReset = False
self.canv._storyCount = 0
def ___handle_pageBegin(self):
self.page += 1
self.pageTemplate.beforeDrawPage(self.canv,self)
self.pageTemplate.checkPageSize(self.canv,self)
self.pageTemplate.onPage(self.canv,self)
for f in self.pageTemplate.frames: f._reset()
self.beforePage()
self._curPageFlowableCount = 0
if hasattr(self,'_nextFrameIndex'):
del self._nextFrameIndex
for f in self.pageTemplate.frames:
if f.id == 'first':
self.frame = f
break
self.handle_frameBegin()
def afterPage(self):
if isinstance(self.canv, NumberedCanvas):
# save current page states before eventual reset
self.canv._saved_page_states.append(dict(self.canv.__dict__))
if self.canv._doPageReset:
# Following a <pageReset/> tag:
# - we reset page number to 0
            # - we add a new PageCount flowable (relative to the current
            #   story number), except for NumberedCanvas, which handles page
            #   counting itself.
            # NOTE: the _rml_template render() method adds a PageReset flowable at the
            #   end of each story, so we are sure to pass here at least once per story.
if not isinstance(self.canv, NumberedCanvas):
self.handle_flowable([ PageCount(story_count=self.canv._storyCount) ])
self.canv._pageCount = self.page
self.page = 0
self.canv._flag = True
self.canv._pageNumber = 0
self.canv._doPageReset = False
self.canv._storyCount += 1
class _rml_template(object):
def __init__(self, localcontext, out, node, doc, images=None, path='.', title=None):
if images is None:
images = {}
if not localcontext:
localcontext={'internal_header':True}
self.localcontext = localcontext
self.images= images
self.path = path
self.title = title
pagesize_map = {'a4': A4,
'us_letter': letter
}
pageSize = A4
if self.localcontext.get('company'):
pageSize = pagesize_map.get(self.localcontext.get('company').rml_paper_format, A4)
if node.get('pageSize'):
ps = map(lambda x:x.strip(), node.get('pageSize').replace(')', '').replace('(', '').split(','))
pageSize = ( utils.unit_get(ps[0]),utils.unit_get(ps[1]) )
self.doc_tmpl = TinyDocTemplate(out, pagesize=pageSize, **utils.attr_get(node, ['leftMargin','rightMargin','topMargin','bottomMargin'], {'allowSplitting':'int','showBoundary':'bool','rotation':'int','title':'str','author':'str'}))
self.page_templates = []
self.styles = doc.styles
self.doc = doc
self.image=[]
pts = node.findall('pageTemplate')
for pt in pts:
frames = []
for frame_el in pt.findall('frame'):
frame = platypus.Frame( **(utils.attr_get(frame_el, ['x1','y1', 'width','height', 'leftPadding', 'rightPadding', 'bottomPadding', 'topPadding'], {'id':'str', 'showBoundary':'bool'})) )
if utils.attr_get(frame_el, ['last']):
frame.lastFrame = True
frames.append( frame )
try :
gr = pt.findall('pageGraphics')\
or pt[1].findall('pageGraphics')
except Exception: # FIXME: be even more specific, perhaps?
gr=''
if len(gr):
# self.image=[ n for n in utils._child_get(gr[0], self) if n.tag=='image' or not self.localcontext]
drw = _rml_draw(self.localcontext,gr[0], self.doc, images=images, path=self.path, title=self.title)
self.page_templates.append( platypus.PageTemplate(frames=frames, onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
else:
drw = _rml_draw(self.localcontext,node,self.doc,title=self.title)
self.page_templates.append( platypus.PageTemplate(frames=frames,onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
self.doc_tmpl.addPageTemplates(self.page_templates)
def render(self, node_stories):
        if self.localcontext and not self.localcontext.get('internal_header', False):
            # pop() so a missing 'internal_header' key does not raise a KeyError
            self.localcontext.pop('internal_header', None)
fis = []
r = _rml_flowable(self.doc,self.localcontext, images=self.images, path=self.path, title=self.title, canvas=None)
story_cnt = 0
for node_story in node_stories:
if story_cnt > 0:
fis.append(platypus.PageBreak())
fis += r.render(node_story)
# end of story numbering computation
fis.append(PageReset())
story_cnt += 1
try:
if self.localcontext and self.localcontext.get('internal_header',False):
self.doc_tmpl.afterFlowable(fis)
self.doc_tmpl.build(fis,canvasmaker=NumberedCanvas)
else:
self.doc_tmpl.build(fis)
except platypus.doctemplate.LayoutError, e:
e.name = 'Print Error'
e.value = 'The document you are trying to print contains a table row that does not fit on one page. Please try to split it in smaller rows or contact your administrator.'
raise
def parseNode(rml, localcontext=None, fout=None, images=None, path='.', title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
#try to override some font mappings
try:
from customfonts import SetCustomFonts
SetCustomFonts(r)
except ImportError:
# means there is no custom fonts mapping in this system.
pass
    except Exception:
        _logger.warning('Cannot set font mapping', exc_info=True)
fp = StringIO()
r.render(fp)
return fp.getvalue()
def parseString(rml, localcontext=None, fout=None, images=None, path='.', title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
#try to override some font mappings
try:
from customfonts import SetCustomFonts
SetCustomFonts(r)
except Exception:
pass
if fout:
        fp = open(fout, 'wb')
r.render(fp)
fp.close()
return fout
else:
fp = StringIO()
r.render(fp)
return fp.getvalue()
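# Minimal usage sketch (the RML snippet is illustrative only; a real report
# normally comes from the RML templates shipped with the reporting engine):
#
#   sample_rml = '''<?xml version="1.0"?>
#   <document filename="example.pdf">
#       <template>
#           <pageTemplate id="first">
#               <frame id="first" x1="2cm" y1="2cm" width="17cm" height="25cm"/>
#           </pageTemplate>
#       </template>
#       <stylesheet/>
#       <story>
#           <para>Hello world</para>
#       </story>
#   </document>'''
#   pdf_data = parseString(sample_rml)            # returns the PDF as a string
#   parseString(sample_rml, fout='example.pdf')   # or writes it to a file and returns the path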
def trml2pdf_help():
print 'Usage: trml2pdf input.rml >output.pdf'
print 'Render the standard input (RML) and output a PDF file'
sys.exit(0)
if __name__=="__main__":
if len(sys.argv)>1:
if sys.argv[1]=='--help':
trml2pdf_help()
        print parseString(open(sys.argv[1], 'r').read()),
else:
print 'Usage: trml2pdf input.rml >output.pdf'
print 'Try \'trml2pdf --help\' for more information.'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mistakes-consortium/Supybot-Titler
|
refs/heads/master
|
test.py
|
3
|
###
# Copyright (c) 2013, spline
# All rights reserved.
#
#
###
from supybot.test import *
class TitlerTestCase(PluginTestCase):
plugins = ('Titler',)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
kostyll/micropython
|
refs/heads/master
|
tests/basics/set_specialmeth.py
|
113
|
# set object with special methods
s = {1, 2}
print(s.__contains__(1))
print(s.__contains__(3))
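# Expected output:
# True
# False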
|
patricksnape/menpo
|
refs/heads/master
|
menpo/shape/adjacency.py
|
2
|
import numpy as np
def mask_adjacency_array(mask, adjacency_array):
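    # Drop every row of `adjacency_array` that references a point masked out
    # by the boolean `mask` (i.e. any index where mask is False).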
# Find the indices that have been asked to be removed
indices_to_remove = np.nonzero(~mask)[0]
# Set intersection to find any rows containing those elements,
# reshape back in to the same size as adjacency array
entries_to_remove = np.in1d(adjacency_array, indices_to_remove)
entries_to_remove = entries_to_remove.reshape([-1, adjacency_array.shape[1]])
# Only keep those entries that are not flagged for removal
indices_to_keep = ~entries_to_remove.any(axis=1)
return adjacency_array[indices_to_keep, :]
def reindex_adjacency_array(adjacency_array):
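    # Remap the point indices referenced by the adjacency array on to a
    # contiguous range starting at 0, preserving their relative order.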
remap_vector = np.arange(np.max(adjacency_array) + 1)
unique_values = np.unique(adjacency_array)
remap_vector[unique_values] = np.arange(unique_values.shape[0])
# Apply the mask
return remap_vector[adjacency_array]
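# Usage sketch (illustrative values only):
#   mask = np.array([True, True, False, True])
#   adjacency = np.array([[0, 1], [1, 2], [1, 3]])
#   masked = mask_adjacency_array(mask, adjacency)    # -> [[0, 1], [1, 3]]
#   reindexed = reindex_adjacency_array(masked)       # -> [[0, 1], [1, 2]]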
|