repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
LightWind/usenix-conference-system | refs/heads/master | NFC controller/nfc/handover/client.py | 5 | # -*- coding: latin-1 -*-
# -----------------------------------------------------------------------------
# Copyright 2009-2011 Stephen Tiedemann <stephen.tiedemann@googlemail.com>
#
# Licensed under the EUPL, Version 1.1 or - as soon they
# will be approved by the European Commission - subsequent
# versions of the EUPL (the "Licence");
# You may not use this work except in compliance with the
# Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
# -----------------------------------------------------------------------------
#
# Negotiated Connection Handover - Client Base Class
#
import logging
log = logging.getLogger(__name__)
import nfc.llcp
import time
class HandoverClient(object):
    """NFC Forum Connection Handover client.

    Connects to the remote handover service (``urn:nfc:sn:handover``)
    over an LLCP data link connection and exchanges NDEF handover
    request/select messages.
    """

    def __init__(self, llc):
        # The socket is created lazily in connect().
        self.socket = None
        self.llc = llc

    def connect(self, recv_miu=248, recv_buf=2):
        """Connect to the remote handover server if available. Raises
        :exc:`nfc.llcp.ConnectRefused` if the remote device does not
        have a handover service or the service does not accept any
        more connections.

        :param recv_miu: maximum information unit we are willing to receive.
        :param recv_buf: number of receive buffers to advertise.
        """
        socket = nfc.llcp.Socket(self.llc, nfc.llcp.DATA_LINK_CONNECTION)
        # Advertise our receive window before connecting.
        socket.setsockopt(nfc.llcp.SO_RCVBUF, recv_buf)
        socket.setsockopt(nfc.llcp.SO_RCVMIU, recv_miu)
        socket.connect("urn:nfc:sn:handover")
        server = socket.getpeername()
        log.debug("handover client connected to remote sap {0}".format(server))
        self.socket = socket

    def close(self):
        """Disconnect from the remote handover server."""
        if self.socket:
            self.socket.close()
            self.socket = None

    def send(self, message):
        """Send a handover request message to the remote server.

        Returns True if the fully encoded message was sent, False if
        the link broke mid-transfer, or None if encoding failed.
        """
        log.debug("sending '{0}' message".format(message.type))
        send_miu = self.socket.getsockopt(nfc.llcp.SO_SNDMIU)
        try:
            data = str(message)
        except nfc.llcp.EncodeError as e:
            log.error("message encoding failed: {0}".format(e))
        else:
            return self._send(data, send_miu)

    def _send(self, data, miu):
        # Fragment the encoded message into MIU-sized chunks; stop as
        # soon as a chunk cannot be delivered.
        while len(data) > 0:
            if self.socket.send(data[0:miu]):
                data = data[miu:]
            else:
                break
        return bool(len(data) == 0)

    def recv(self, timeout=None):
        """Receive a handover select message from the remote server.

        Returns the parsed :class:`nfc.ndef.HandoverSelectMessage`, or
        None if no message arrived in time, the connection closed, or
        the received message is not a handover select message.
        """
        message = self._recv(timeout)
        if message is None:
            # BUGFIX: the original fell through to the error branch and
            # evaluated message.type on None (AttributeError) whenever
            # _recv() timed out or the connection was closed.
            log.error("no message received")
            return None
        if message.type == "urn:nfc:wkt:Hs":
            log.debug("received '{0}' message".format(message.type))
            return nfc.ndef.HandoverSelectMessage(message)
        log.error("received invalid message type {0}".format(message.type))
        return None

    def _recv(self, timeout=None):
        # Reassemble a complete NDEF message from one or more LLCP
        # receives; returns None on timeout or closed connection.
        data = ''
        started = time.time()
        while self.socket.poll("recv", timeout):
            try:
                data += self.socket.recv()
                message = nfc.ndef.Message(data)
                log.debug("received message\n" + message.pretty())
                return message
            except nfc.ndef.LengthError:
                # More fragments needed; shrink the remaining timeout so
                # the overall wait still honors the caller's deadline.
                elapsed = time.time() - started
                log.debug("message is incomplete ({0} byte)".format(len(data)))
                if timeout:
                    timeout = timeout - elapsed
                    log.debug("{0:.3f} seconds left to timeout".format(timeout))
                continue  # incomplete message
            except TypeError:
                log.debug("data link connection closed")
                break  # recv() returned None

    def __enter__(self):
        self.connect()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
|
tseaver/google-cloud-python | refs/heads/master | websecurityscanner/google/cloud/websecurityscanner_v1alpha/proto/finding_pb2_grpc.py | 573 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
|
sergiorua/libcloud | refs/heads/trunk | libcloud/test/common/test_aws.py | 26 | import sys
import unittest
from datetime import datetime
import mock
from libcloud.common.aws import SignedAWSConnection
from libcloud.common.aws import AWSRequestSignerAlgorithmV4
from libcloud.test import LibcloudTestCase
class EC2MockDriver(object):
    """Minimal driver stand-in: exposes only the region name the signer reads."""

    region_name = 'my_region'
class AWSRequestSignerAlgorithmV4TestCase(LibcloudTestCase):
    """Unit tests for AWSRequestSignerAlgorithmV4 (AWS Signature Version 4).

    Uses a mock driver and a fixed timestamp so every expected header,
    scope, and signature string below is deterministic.
    """

    def setUp(self):
        # Patch class-level attributes so SignedAWSConnection can be
        # constructed without any network access.
        SignedAWSConnection.driver = EC2MockDriver()
        SignedAWSConnection.service_name = 'my_service'
        SignedAWSConnection.version = '2013-10-15'
        self.connection = SignedAWSConnection('my_key', 'my_secret')

        self.signer = AWSRequestSignerAlgorithmV4(access_key='my_key',
                                                  access_secret='my_secret',
                                                  version='2013-10-15',
                                                  connection=self.connection)

        SignedAWSConnection.action = '/my_action/'
        SignedAWSConnection.driver = EC2MockDriver()

        # Fixed point in time; the literal signatures below depend on it.
        self.now = datetime(2015, 3, 4, hour=17, minute=34, second=52)

    def test_v4_signature(self):
        # End-to-end check of the complete Authorization header.
        params = {
            'Action': 'DescribeInstances',
            'Version': '2013-10-15'
        }
        headers = {
            'Host': 'ec2.eu-west-1.amazonaws.com',
            'Accept-Encoding': 'gzip,deflate',
            'X-AMZ-Date': '20150304T173452Z',
            'User-Agent': 'libcloud/0.17.0 (Amazon EC2 (eu-central-1)) '
        }
        dt = self.now
        sig = self.signer._get_authorization_v4_header(params=params,
                                                       headers=headers,
                                                       dt=dt,
                                                       method='GET',
                                                       path='/my_action/')
        self.assertEqual(sig, 'AWS4-HMAC-SHA256 '
                              'Credential=my_key/20150304/my_region/my_service/aws4_request, '
                              'SignedHeaders=accept-encoding;host;user-agent;x-amz-date, '
                              'Signature=f9868f8414b3c3f856c7955019cc1691265541f5162b9b772d26044280d39bd3')

    def test_v4_signature_raises_error_if_request_method_not_GET(self):
        with self.assertRaises(Exception):
            self.signer._get_authorization_v4_header(params={}, headers={},
                                                     dt=self.now, method='POST')

    def test_v4_signature_contains_user_id(self):
        sig = self.signer._get_authorization_v4_header(params={}, headers={},
                                                       dt=self.now)
        self.assertIn('Credential=my_key/', sig)

    def test_v4_signature_contains_credential_scope(self):
        # Mock the scope helper to verify it is spliced into the header.
        with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_credential_scope') as mock_get_creds:
            mock_get_creds.return_value = 'my_credential_scope'
            sig = self.signer._get_authorization_v4_header(params={}, headers={}, dt=self.now)

        self.assertIn('Credential=my_key/my_credential_scope, ', sig)

    def test_v4_signature_contains_signed_headers(self):
        with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_signed_headers') as mock_get_headers:
            mock_get_headers.return_value = 'my_signed_headers'
            sig = self.signer._get_authorization_v4_header({}, {}, self.now,
                                                           method='GET',
                                                           path='/')
        self.assertIn('SignedHeaders=my_signed_headers, ', sig)

    def test_v4_signature_contains_signature(self):
        with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_signature') as mock_get_signature:
            mock_get_signature.return_value = 'my_signature'
            sig = self.signer._get_authorization_v4_header({}, {}, self.now)
        self.assertIn('Signature=my_signature', sig)

    def test_get_signature_(self):
        # Fake hmac helper: the 'H|' prefix marks the hex-digest variant,
        # so the composed result is easy to assert on.
        def _sign(key, msg, hex=False):
            if hex:
                return 'H|%s|%s' % (key, msg)
            else:
                return '%s|%s' % (key, msg)

        with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_key_to_sign_with') as mock_get_key:
            with mock.patch('libcloud.common.aws.AWSRequestSignerAlgorithmV4._get_string_to_sign') as mock_get_string:
                with mock.patch('libcloud.common.aws._sign', new=_sign):
                    mock_get_key.return_value = 'my_signing_key'
                    mock_get_string.return_value = 'my_string_to_sign'
                    sig = self.signer._get_signature({}, {}, self.now,
                                                     method='GET', path='/')

        self.assertEqual(sig, 'H|my_signing_key|my_string_to_sign')

    def test_get_string_to_sign(self):
        with mock.patch('hashlib.sha256') as mock_sha256:
            mock_sha256.return_value.hexdigest.return_value = 'chksum_of_canonical_request'
            to_sign = self.signer._get_string_to_sign({}, {}, self.now,
                                                      method='GET', path='/')

        self.assertEqual(to_sign,
                         'AWS4-HMAC-SHA256\n'
                         '20150304T173452Z\n'
                         '20150304/my_region/my_service/aws4_request\n'
                         'chksum_of_canonical_request')

    def test_get_key_to_sign_with(self):
        def _sign(key, msg, hex=False):
            return '%s|%s' % (key, msg)

        with mock.patch('libcloud.common.aws._sign', new=_sign):
            key = self.signer._get_key_to_sign_with(self.now)

        self.assertEqual(key, 'AWS4my_secret|20150304|my_region|my_service|aws4_request')

    def test_get_signed_headers_contains_all_headers_lowercased(self):
        headers = {'Content-Type': 'text/plain', 'Host': 'my_host', 'X-Special-Header': ''}
        signed_headers = self.signer._get_signed_headers(headers)

        self.assertIn('content-type', signed_headers)
        self.assertIn('host', signed_headers)
        self.assertIn('x-special-header', signed_headers)

    def test_get_signed_headers_concats_headers_sorted_lexically(self):
        headers = {'Host': 'my_host', 'X-Special-Header': '', '1St-Header': '2', 'Content-Type': 'text/plain'}
        signed_headers = self.signer._get_signed_headers(headers)

        self.assertEqual(signed_headers, '1st-header;content-type;host;x-special-header')

    def test_get_credential_scope(self):
        scope = self.signer._get_credential_scope(self.now)
        self.assertEqual(scope, '20150304/my_region/my_service/aws4_request')

    def test_get_canonical_headers_joins_all_headers(self):
        headers = {
            'accept-encoding': 'gzip,deflate',
            'host': 'my_host',
        }
        self.assertEqual(self.signer._get_canonical_headers(headers),
                         'accept-encoding:gzip,deflate\n'
                         'host:my_host\n')

    def test_get_canonical_headers_sorts_headers_lexically(self):
        headers = {
            'accept-encoding': 'gzip,deflate',
            'host': 'my_host',
            '1st-header': '2',
            'x-amz-date': '20150304T173452Z',
            'user-agent': 'my-ua'
        }
        self.assertEqual(self.signer._get_canonical_headers(headers),
                         '1st-header:2\n'
                         'accept-encoding:gzip,deflate\n'
                         'host:my_host\n'
                         'user-agent:my-ua\n'
                         'x-amz-date:20150304T173452Z\n')

    def test_get_canonical_headers_lowercases_headers_names(self):
        # Names are lowercased; values must be left untouched.
        headers = {
            'Accept-Encoding': 'GZIP,DEFLATE',
            'User-Agent': 'My-UA'
        }
        self.assertEqual(self.signer._get_canonical_headers(headers),
                         'accept-encoding:GZIP,DEFLATE\n'
                         'user-agent:My-UA\n')

    def test_get_canonical_headers_trims_header_values(self):
        # TODO: according to AWS spec (and RFC 2616 Section 4.2.) excess whitespace
        # from inside non-quoted strings should be stripped. Now we only strip the
        # start and end of the string. See
        # http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
        headers = {
            'accept-encoding': ' gzip,deflate',
            'user-agent': 'libcloud/0.17.0 '
        }
        self.assertEqual(self.signer._get_canonical_headers(headers),
                         'accept-encoding:gzip,deflate\n'
                         'user-agent:libcloud/0.17.0\n')

    def test_get_request_params_joins_params_sorted_lexically(self):
        self.assertEqual(self.signer._get_request_params({
            'Action': 'DescribeInstances',
            'Filter.1.Name': 'state',
            'Version': '2013-10-15'
        }),
            'Action=DescribeInstances&Filter.1.Name=state&Version=2013-10-15')

    def test_get_request_params_allows_integers_as_value(self):
        self.assertEqual(self.signer._get_request_params({'Action': 'DescribeInstances', 'Port': 22}),
                         'Action=DescribeInstances&Port=22')

    def test_get_request_params_urlquotes_params_keys(self):
        self.assertEqual(self.signer._get_request_params({'Action+Reaction': 'DescribeInstances'}),
                         'Action%2BReaction=DescribeInstances')

    def test_get_request_params_urlquotes_params_values(self):
        self.assertEqual(self.signer._get_request_params({
            'Action': 'DescribeInstances&Addresses',
            'Port-Range': '2000 3000'
        }),
            'Action=DescribeInstances%26Addresses&Port-Range=2000%203000')

    def test_get_request_params_urlquotes_params_values_allows_safe_chars_in_value(self):
        # http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
        self.assertEqual('Action=a~b.c_d-e',
                         self.signer._get_request_params({'Action': 'a~b.c_d-e'}))

    def test_get_payload_hash_returns_digest_of_empty_string_for_GET_requests(self):
        # Literal below is the SHA-256 of the empty string (GET has no body).
        SignedAWSConnection.method = 'GET'
        self.assertEqual(self.signer._get_payload_hash(),
                         'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')

    def test_get_canonical_request(self):
        req = self.signer._get_canonical_request(
            {'Action': 'DescribeInstances', 'Version': '2013-10-15'},
            {'Accept-Encoding': 'gzip,deflate', 'User-Agent': 'My-UA'},
            method='GET',
            path='/my_action/'
        )
        self.assertEqual(req, 'GET\n'
                              '/my_action/\n'
                              'Action=DescribeInstances&Version=2013-10-15\n'
                              'accept-encoding:gzip,deflate\n'
                              'user-agent:My-UA\n'
                              '\n'
                              'accept-encoding;user-agent\n'
                              'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
# Allow running this test module directly; exit status reflects the result.
if __name__ == '__main__':
    sys.exit(unittest.main())
|
kalaspuff/tomodachi | refs/heads/master | tests/services/dummy_protobuf_service.py | 1 | import tomodachi
from tomodachi.discovery.dummy_registry import DummyRegistry
from tomodachi.envelope.protobuf_base import ProtobufBase
@tomodachi.service
class DummyService(tomodachi.Service):
    """Test fixture service wired with the dummy registry and protobuf envelope."""

    name = "test_dummy_protobuf"
    discovery = [DummyRegistry]
    message_envelope = ProtobufBase
    # NOTE(review): credentials are placeholders and the amqp port/login look
    # deliberately invalid -- presumably this service is never meant to
    # establish real connections; confirm against the test that uses it.
    options = {
        "aws_sns_sqs": {
            "region_name": "eu-west-1",
            "aws_access_key_id": "XXXXXXXXX",
            "aws_secret_access_key": "XXXXXXXXX",
        },
        "amqp": {"port": 54321, "login": "invalid", "password": "invalid"},
    }

    # Lifecycle flags flipped by the hooks below.
    start = False
    started = False
    stop = False

    async def _start_service(self) -> None:
        self.start = True

    async def _started_service(self) -> None:
        self.started = True

    async def _stop_service(self) -> None:
        self.stop = True
|
alx-eu/django | refs/heads/stable/1.5.x | tests/regressiontests/comment_tests/urls_admin.py | 174 | from django.conf.urls import patterns, include
from django.contrib import admin
from django.contrib.comments.admin import CommentsAdmin
from django.contrib.comments.models import Comment
# Make a new AdminSite to avoid picking up the deliberately broken admin
# modules in other tests.
admin_site = admin.AdminSite()
admin_site.register(Comment, CommentsAdmin)

# To demonstrate proper functionality even when ``delete_selected`` is removed.
admin_site2 = admin.AdminSite()
admin_site2.disable_action('delete_selected')
admin_site2.register(Comment, CommentsAdmin)

# Both sites serve the same Comment admin; only the action set differs.
urlpatterns = patterns('',
    (r'^admin/', include(admin_site.urls)),
    (r'^admin2/', include(admin_site2.urls)),
)
|
GdZ/scriptfile | refs/heads/master | software/googleAppEngine/lib/django_1_3/django/conf/locale/ka/formats.py | 329 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Locale format declarations (path indicates the Georgian "ka" locale).
# The *_FORMAT strings use Django date format syntax; the *_INPUT_FORMATS
# use Python strftime syntax (see the header comments above).
DATE_FORMAT = 'l, j F, Y'
TIME_FORMAT = 'h:i:s a'
DATETIME_FORMAT = 'j F, Y h:i:s a'
YEAR_MONTH_FORMAT = 'F, Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j.M.Y'
SHORT_DATETIME_FORMAT = 'j.M.Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # (Monday)

# Input formats are tried in order when parsing user-entered values.
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y',  # '2006-10-25', '10/25/2006', '10/25/06'
    # '%d %b %Y', '%d %b, %Y', '%d %b. %Y',  # '25 Oct 2006', '25 Oct, 2006', '25 Oct. 2006'
    # '%d %B %Y', '%d %B, %Y',  # '25 October 2006', '25 October, 2006'
    # '%d.%m.%Y', '%d.%m.%y',  # '25.10.2006', '25.10.06'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',  # '14:30:59'
    '%H:%M',  # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'
    '%Y-%m-%d',  # '2006-10-25'
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M',  # '25.10.2006 14:30'
    '%d.%m.%Y',  # '25.10.2006'
    '%d.%m.%y %H:%M:%S',  # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M',  # '25.10.06 14:30'
    '%d.%m.%y',  # '25.10.06'
    '%m/%d/%Y %H:%M:%S',  # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M',  # '10/25/2006 14:30'
    '%m/%d/%Y',  # '10/25/2006'
    '%m/%d/%y %H:%M:%S',  # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M',  # '10/25/06 14:30'
    '%m/%d/%y',  # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = " "
NUMBER_GROUPING = 3
|
HousekeepLtd/django | refs/heads/master | django/core/serializers/python.py | 153 | """
A Python "serializer". Doesn't do much serializing per se -- just converts to
and from basic Python data types (lists, dicts, strings, etc.). Useful as a basis for
other serializers.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils import six
from django.utils.encoding import force_text, is_protected_type
class Serializer(base.Serializer):
    """
    Serialize a QuerySet into basic Python data structures.
    """
    internal_use_only = True

    def start_serialization(self):
        # Fresh accumulator for this serialization run.
        self._current = None
        self.objects = []

    def end_serialization(self):
        pass

    def start_object(self, obj):
        self._current = OrderedDict()

    def end_object(self, obj):
        self.objects.append(self.get_dump_object(obj))
        self._current = None

    def get_dump_object(self, obj):
        # Deferred instances report their proxy target, not the deferred class.
        model = obj.__class__ if not obj._deferred else obj._meta.proxy_for_model
        data = OrderedDict([('model', force_text(model._meta))])
        if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
            data["pk"] = force_text(obj._get_pk_val(), strings_only=True)
        data['fields'] = self._current
        return data

    def handle_field(self, obj, field):
        raw = field.value_from_object(obj)
        # Protected types (primitives such as None, numbers, dates and
        # Decimals) pass through unchanged; everything else is stringified.
        self._current[field.name] = (
            raw if is_protected_type(raw) else field.value_to_string(obj)
        )

    def handle_fk_field(self, obj, field):
        use_natural = (self.use_natural_foreign_keys and
                       hasattr(field.remote_field.model, 'natural_key'))
        if use_natural:
            related = getattr(obj, field.name)
            value = related.natural_key() if related else None
        else:
            value = getattr(obj, field.get_attname())
            if not is_protected_type(value):
                value = field.value_to_string(obj)
        self._current[field.name] = value

    def handle_m2m_field(self, obj, field):
        # Only auto-created through tables are serialized inline; explicit
        # through models are serialized as their own objects.
        if field.remote_field.through._meta.auto_created:
            if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
                def m2m_value(related):
                    return related.natural_key()
            else:
                def m2m_value(related):
                    return force_text(related._get_pk_val(), strings_only=True)
            self._current[field.name] = [
                m2m_value(related)
                for related in getattr(obj, field.name).iterator()
            ]

    def getvalue(self):
        return self.objects
def Deserializer(object_list, **options):
    """
    Deserialize simple Python objects back into Django ORM instances.

    It's expected that you pass the Python objects themselves (instead of a
    stream or a string) to the constructor.  Yields
    :class:`base.DeserializedObject` instances; M2M values are collected
    separately so they can be assigned after the instance is saved.
    """
    db = options.pop('using', DEFAULT_DB_ALIAS)
    # When True, unknown models and removed fields are skipped silently.
    ignore = options.pop('ignorenonexistent', False)

    for d in object_list:
        # Look up the model and starting build a dict of data for it.
        try:
            Model = _get_model(d["model"])
        except base.DeserializationError:
            if ignore:
                continue
            else:
                raise
        data = {}
        if 'pk' in d:
            try:
                data[Model._meta.pk.attname] = Model._meta.pk.to_python(d.get('pk'))
            except Exception as e:
                raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), None)
        m2m_data = {}
        field_names = {f.name for f in Model._meta.get_fields()}

        # Handle each field
        for (field_name, field_value) in six.iteritems(d["fields"]):

            if ignore and field_name not in field_names:
                # skip fields no longer on model
                continue

            if isinstance(field_value, str):
                # Re-decode using the configured charset; strings_only=True
                # leaves protected primitive types untouched.
                field_value = force_text(
                    field_value, options.get("encoding", settings.DEFAULT_CHARSET), strings_only=True
                )

            field = Model._meta.get_field(field_name)

            # Handle M2M relations
            if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
                model = field.remote_field.model
                if hasattr(model._default_manager, 'get_by_natural_key'):
                    def m2m_convert(value):
                        # A non-string iterable is a natural key tuple;
                        # anything else is treated as a plain pk.
                        if hasattr(value, '__iter__') and not isinstance(value, six.text_type):
                            return model._default_manager.db_manager(db).get_by_natural_key(*value).pk
                        else:
                            return force_text(model._meta.pk.to_python(value), strings_only=True)
                else:
                    m2m_convert = lambda v: force_text(model._meta.pk.to_python(v), strings_only=True)
                try:
                    m2m_data[field.name] = []
                    for pk in field_value:
                        m2m_data[field.name].append(m2m_convert(pk))
                except Exception as e:
                    raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), pk)
            # Handle FK fields
            elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
                model = field.remote_field.model
                if field_value is not None:
                    try:
                        default_manager = model._default_manager
                        field_name = field.remote_field.field_name
                        if hasattr(default_manager, 'get_by_natural_key'):
                            if hasattr(field_value, '__iter__') and not isinstance(field_value, six.text_type):
                                obj = default_manager.db_manager(db).get_by_natural_key(*field_value)
                                value = getattr(obj, field.remote_field.field_name)
                                # If this is a natural foreign key to an object that
                                # has a FK/O2O as the foreign key, use the FK value
                                if model._meta.pk.remote_field:
                                    value = value.pk
                            else:
                                value = model._meta.get_field(field_name).to_python(field_value)
                            data[field.attname] = value
                        else:
                            data[field.attname] = model._meta.get_field(field_name).to_python(field_value)
                    except Exception as e:
                        raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), field_value)
                else:
                    data[field.attname] = None
            # Handle all other fields
            else:
                try:
                    data[field.name] = field.to_python(field_value)
                except Exception as e:
                    raise base.DeserializationError.WithData(e, d['model'], d.get('pk'), field_value)

        obj = base.build_instance(Model, data, db)
        yield base.DeserializedObject(obj, m2m_data)
def _get_model(model_identifier):
    """Resolve an "app_label.model_name" string to its model class."""
    try:
        model = apps.get_model(model_identifier)
    except (LookupError, TypeError):
        raise base.DeserializationError("Invalid model identifier: '%s'" % model_identifier)
    return model
|
sujeet4github/MyLangUtils | refs/heads/master | LangPython/oreilly-intro-to-flask-video/venv/lib/python3.6/site-packages/werkzeug/datastructures.py | 56 | # -*- coding: utf-8 -*-
"""
werkzeug.datastructures
~~~~~~~~~~~~~~~~~~~~~~~
This module provides mixins and classes with an immutable interface.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import codecs
import mimetypes
from copy import deepcopy
from itertools import repeat
from collections import Container, Iterable, MutableSet
from werkzeug._internal import _missing, _empty_stream
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
PY2, text_type, integer_types, string_types, make_literal_wrapper, \
to_native
from werkzeug.filesystem import get_filesystem_encoding
_locale_delim_re = re.compile(r'[_-]')
def is_immutable(self):
    """Raise :exc:`TypeError` reporting that *self*'s type is read-only."""
    raise TypeError('%r objects are immutable' % self.__class__.__name__)
def iter_multi_items(mapping):
    """Yield ``(key, value)`` pairs from *mapping*, expanding list and
    tuple values of plain dicts so no value of a multi-valued structure
    is dropped.
    """
    if isinstance(mapping, MultiDict):
        # MultiDict knows how to emit every value per key itself.
        for pair in iteritems(mapping, multi=True):
            yield pair
    elif isinstance(mapping, dict):
        for key, value in iteritems(mapping):
            if not isinstance(value, (tuple, list)):
                yield key, value
            else:
                for single in value:
                    yield key, single
    else:
        # Assume an iterable of pairs already.
        for pair in mapping:
            yield pair
def native_itermethods(names):
    """Class decorator that back-fills Python 2 ``iter*`` and ``view*``
    variants for the listed method names.

    On Python 3 this is a no-op (the identity decorator is returned).  On
    Python 2 each listed method ``name`` is exposed three ways:
    ``iter<name>`` (the original iterator method), ``<name>`` (a
    list-returning wrapper) and ``view<name>`` (a lazy :class:`ViewItems`
    view).
    """
    if not PY2:
        return lambda x: x

    def setviewmethod(cls, name):
        # Build view<name>() returning a ViewItems over the iterator method.
        viewmethod_name = 'view%s' % name
        viewmethod = lambda self, *a, **kw: ViewItems(self, name, 'view_%s' % name, *a, **kw)
        viewmethod.__doc__ = \
            '"""`%s()` object providing a view on %s"""' % (viewmethod_name, name)
        setattr(cls, viewmethod_name, viewmethod)

    def setitermethod(cls, name):
        # Preserve the original under iter<name>, then shadow <name> with a
        # wrapper that materializes the iterator into a list.
        itermethod = getattr(cls, name)
        setattr(cls, 'iter%s' % name, itermethod)
        listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw))
        listmethod.__doc__ = \
            'Like :py:meth:`iter%s`, but returns a list.' % name
        setattr(cls, name, listmethod)

    def wrap(cls):
        for name in names:
            setitermethod(cls, name)
            setviewmethod(cls, name)
        return cls
    return wrap
class ImmutableListMixin(object):
    """Mixin that turns every mutating :class:`list` operation into a
    :exc:`TypeError`, making the subclass read-only (and hashable).

    .. versionadded:: 0.5

    :private:
    """

    # Lazily computed hash over the tuple of elements.
    _hash_cache = None

    def __hash__(self):
        if self._hash_cache is None:
            self._hash_cache = hash(tuple(self))
        return self._hash_cache

    def __reduce_ex__(self, protocol):
        # Pickle as a plain list of the current contents.
        return type(self), (list(self),)

    def __delitem__(self, key):
        is_immutable(self)

    def __iadd__(self, other):
        is_immutable(self)
    __imul__ = __iadd__

    def __setitem__(self, key, value):
        is_immutable(self)

    def append(self, item):
        is_immutable(self)
    remove = append

    def extend(self, iterable):
        is_immutable(self)

    def insert(self, pos, value):
        is_immutable(self)

    def pop(self, index=-1):
        is_immutable(self)

    def reverse(self):
        is_immutable(self)

    def sort(self, cmp=None, key=None, reverse=None):
        is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
    """A read-only :class:`list`.

    .. versionadded:: 0.5

    :private:
    """

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, list.__repr__(self))
class ImmutableDictMixin(object):
    """Mixin that makes a :class:`dict` read-only (and hashable) by
    raising :exc:`TypeError` from every mutating method.

    .. versionadded:: 0.5

    :private:
    """

    # Lazily computed hash over a frozenset of the items.
    _hash_cache = None

    @classmethod
    def fromkeys(cls, keys, value=None):
        # Bypass the normal constructor so subclasses with custom
        # __init__ signatures still work.
        instance = super(cls, cls).__new__(cls)
        instance.__init__(zip(keys, repeat(value)))
        return instance

    def __reduce_ex__(self, protocol):
        return type(self), (dict(self),)

    def _iter_hashitems(self):
        # Hook point: subclasses may hash over a richer item view.
        return iteritems(self)

    def __hash__(self):
        if self._hash_cache is None:
            self._hash_cache = hash(frozenset(self._iter_hashitems()))
        return self._hash_cache

    def setdefault(self, key, default=None):
        is_immutable(self)

    def update(self, *args, **kwargs):
        is_immutable(self)

    def pop(self, key, default=None):
        is_immutable(self)

    def popitem(self):
        is_immutable(self)

    def __setitem__(self, key, value):
        is_immutable(self)

    def __delitem__(self, key):
        is_immutable(self)

    def clear(self):
        is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
    """Mixin that makes a :class:`MultiDict` read-only.

    .. versionadded:: 0.5

    :private:
    """

    def __reduce_ex__(self, protocol):
        # Pickle as the full (key, value) pair list, keeping every value.
        return type(self), (list(iteritems(self, multi=True)),)

    def _iter_hashitems(self):
        # Hash over every pair, not just the first value per key.
        return iteritems(self, multi=True)

    def add(self, key, value):
        is_immutable(self)

    def popitemlist(self):
        is_immutable(self)

    def poplist(self, key):
        is_immutable(self)

    def setlist(self, key, new_list):
        is_immutable(self)

    def setlistdefault(self, key, default_list=None):
        is_immutable(self)
class UpdateDictMixin(object):
    """Makes dicts call `self.on_update` on modifications.

    .. versionadded:: 0.5

    :private:
    """

    # Callback invoked with ``self`` after each successful mutation;
    # ``None`` disables notification.
    on_update = None

    def calls_update(name):
        # Factory executed at class-body time: wraps the inherited method
        # ``name`` so a successful call fires on_update.  The factory
        # itself is removed from the namespace with ``del`` below.
        def oncall(self, *args, **kw):
            rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
            if self.on_update is not None:
                self.on_update(self)
            return rv
        oncall.__name__ = name
        return oncall

    def setdefault(self, key, default=None):
        # Fire on_update only when the key was actually inserted.
        modified = key not in self
        rv = super(UpdateDictMixin, self).setdefault(key, default)
        if modified and self.on_update is not None:
            self.on_update(self)
        return rv

    def pop(self, key, default=_missing):
        # Fire on_update only when the key existed (and was removed).
        modified = key in self
        if default is _missing:
            rv = super(UpdateDictMixin, self).pop(key)
        else:
            rv = super(UpdateDictMixin, self).pop(key, default)
        if modified and self.on_update is not None:
            self.on_update(self)
        return rv

    __setitem__ = calls_update('__setitem__')
    __delitem__ = calls_update('__delitem__')
    clear = calls_update('clear')
    popitem = calls_update('popitem')
    update = calls_update('update')
    del calls_update
class TypeConversionDict(dict):
    """Works like a regular dict but the :meth:`get` method can perform
    type conversions.  :class:`MultiDict` and :class:`CombinedMultiDict`
    are subclasses of this class and provide the same feature.

    .. versionadded:: 0.5
    """

    def get(self, key, default=None, type=None):
        """Return the value for *key*, optionally coerced through *type*.

        If *type* is a callable it is applied to the stored value; a
        :exc:`ValueError` raised by it (or a missing key) yields the
        default instead:

        >>> d = TypeConversionDict(foo='42', bar='blub')
        >>> d.get('foo', type=int)
        42
        >>> d.get('bar', -1, type=int)
        -1

        :param key: The key to be looked up.
        :param default: The default value to be returned if the key can't
                        be looked up.  If not further specified `None` is
                        returned.
        :param type: A callable that is used to cast the value in the
                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
                     by this callable the default value is returned.
        """
        try:
            value = self[key]
            return value if type is None else type(value)
        except (KeyError, ValueError):
            return default
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
    """A read-only flavour of :class:`TypeConversionDict`.

    .. versionadded:: 0.5
    """

    def copy(self):
        """Return a shallow *mutable* copy.  Note that the standard
        library's :func:`copy` is a no-op for this class, like for any
        other python immutable type (eg: :class:`tuple`).
        """
        return TypeConversionDict(self)

    def __copy__(self):
        # copy.copy() returns the same object: the dict cannot change.
        return self
class ViewItems(object):
    """Lazy view over one of a mapping's iterator methods.

    Each iteration (and ``repr``) re-invokes the named method, so the
    view always reflects the mapping's current contents.
    """

    def __init__(self, multi_dict, method, repr_name, *a, **kw):
        self.__multi_dict = multi_dict
        self.__method = method
        self.__repr_name = repr_name
        self.__a = a
        self.__kw = kw

    def __get_items(self):
        bound = getattr(self.__multi_dict, self.__method)
        return bound(*self.__a, **self.__kw)

    def __repr__(self):
        return '%s(%r)' % (self.__repr_name, list(self.__get_items()))

    def __iter__(self):
        return iter(self.__get_items())
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class MultiDict(TypeConversionDict):
"""A :class:`MultiDict` is a dictionary subclass customized to deal with
multiple values for the same key which is for example used by the parsing
functions in the wrappers. This is necessary because some HTML form
elements pass multiple values for the same key.
:class:`MultiDict` implements all standard dictionary methods.
Internally, it saves all values for a key as a list, but the standard dict
access methods will only return the first value for a key. If you want to
gain access to the other values, too, you have to use the `list` methods as
explained below.
Basic Usage:
>>> d = MultiDict([('a', 'b'), ('a', 'c')])
>>> d
MultiDict([('a', 'b'), ('a', 'c')])
>>> d['a']
'b'
>>> d.getlist('a')
['b', 'c']
>>> 'a' in d
True
It behaves like a normal dict thus all dict functions will only return the
first value when multiple values for one key are found.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
A :class:`MultiDict` can be constructed from an iterable of
``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
onwards some keyword parameters.
:param mapping: the initial value for the :class:`MultiDict`. Either a
regular dict, an iterable of ``(key, value)`` tuples
or `None`.
"""
def __init__(self, mapping=None):
if isinstance(mapping, MultiDict):
dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
elif isinstance(mapping, dict):
tmp = {}
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
if len(value) == 0:
continue
value = list(value)
else:
value = [value]
tmp[key] = value
dict.__init__(self, tmp)
else:
tmp = {}
for key, value in mapping or ():
tmp.setdefault(key, []).append(value)
dict.__init__(self, tmp)
def __getstate__(self):
return dict(self.lists())
def __setstate__(self, value):
dict.clear(self)
dict.update(self, value)
def __getitem__(self, key):
"""Return the first data value for this key;
raises KeyError if not found.
:param key: The key to be looked up.
:raise KeyError: if the key does not exist.
"""
if key in self:
lst = dict.__getitem__(self, key)
if len(lst) > 0:
return lst[0]
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
"""Like :meth:`add` but removes an existing key first.
:param key: the key for the value.
:param value: the value to set.
"""
dict.__setitem__(self, key, [value])
def add(self, key, value):
"""Adds a new value for the key.
.. versionadded:: 0.6
:param key: the key for the value.
:param value: the value to add.
"""
dict.setdefault(self, key, []).append(value)
def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just as `get`
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except ValueError:
pass
return result
def setlist(self, key, new_list):
"""Remove the old values for a key and add new ones. Note that the list
you pass the values in will be shallow-copied before it is inserted in
the dictionary.
>>> d = MultiDict()
>>> d.setlist('foo', ['1', '2'])
>>> d['foo']
'1'
>>> d.getlist('foo')
['1', '2']
:param key: The key for which the values are set.
:param new_list: An iterable with the new values for the key. Old values
are removed first.
"""
dict.__setitem__(self, key, list(new_list))
def setdefault(self, key, default=None):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key not in self:
self[key] = default
else:
default = self[key]
return default
def setlistdefault(self, key, default_list=None):
"""Like `setdefault` but sets multiple values. The list returned
is not a copy, but the list that is actually used internally. This
means that you can put new values into the dict by appending items
to the list:
>>> d = MultiDict({"foo": 1})
>>> d.setlistdefault("foo").extend([2, 3])
>>> d.getlist("foo")
[1, 2, 3]
:param key: The key to be looked up.
:param default: An iterable of default values. It is either copied
(in case it was a list) or converted into a list
before returned.
:return: a :class:`list`
"""
if key not in self:
default_list = list(default_list or ())
dict.__setitem__(self, key, default_list)
else:
default_list = dict.__getitem__(self, key)
return default_list
def items(self, multi=False):
"""Return an iterator of ``(key, value)`` pairs.
:param multi: If set to `True` the iterator returned will have a pair
for each value of each key. Otherwise it will only
contain pairs for the first value of each key.
"""
for key, values in iteritems(dict, self):
if multi:
for value in values:
yield key, value
else:
yield key, values[0]
def lists(self):
"""Return a list of ``(key, values)`` pairs, where values is the list
of all values associated with the key."""
for key, values in iteritems(dict, self):
yield key, list(values)
def keys(self):
return iterkeys(dict, self)
__iter__ = keys
def values(self):
"""Returns an iterator of the first value on every key's value list."""
for values in itervalues(dict, self):
yield values[0]
def listvalues(self):
"""Return an iterator of all values associated with a key. Zipping
:meth:`keys` and this is the same as calling :meth:`lists`:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> zip(d.keys(), d.listvalues()) == d.lists()
True
"""
return itervalues(dict, self)
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self)
def deepcopy(self, memo=None):
"""Return a deep copy of this object."""
return self.__class__(deepcopy(self.to_dict(flat=False), memo))
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first value for each key.
:return: a :class:`dict`
"""
if flat:
return dict(iteritems(self))
return dict(self.lists())
def update(self, other_dict):
"""update() extends rather than replaces existing key lists:
>>> a = MultiDict({'x': 1})
>>> b = MultiDict({'x': 2, 'y': 3})
>>> a.update(b)
>>> a
MultiDict([('y', 3), ('x', 1), ('x', 2)])
If the value list for a key in ``other_dict`` is empty, no new values
will be added to the dict and the key will not be created:
>>> x = {'empty_list': []}
>>> y = MultiDict()
>>> y.update(x)
>>> y
MultiDict([])
"""
for key, value in iter_multi_items(other_dict):
MultiDict.add(self, key, value)
    def pop(self, key, default=_missing):
        """Pop the first item for a list on the dict.  Afterwards the
        key is removed from the dict, so additional values are discarded:

        >>> d = MultiDict({"foo": [1, 2, 3]})
        >>> d.pop("foo")
        1
        >>> "foo" in d
        False

        :param key: the key to pop.
        :param default: if provided the value to return if the key was
                        not in the dictionary.
        """
        try:
            lst = dict.pop(self, key)
            # An empty value list counts as a missing key.  The error
            # raised here is itself a KeyError subclass, so it falls
            # through to the handler below and honours ``default``.
            if len(lst) == 0:
                raise exceptions.BadRequestKeyError()
            return lst[0]
        except KeyError as e:
            if default is not _missing:
                return default
            # re-raise as the HTTP-aware 400 error
            raise exceptions.BadRequestKeyError(str(e))
    def popitem(self):
        """Pop an item from the dict, returning ``(key, first_value)``.

        :raise exceptions.BadRequestKeyError: if the dict is empty or the
            popped key had an empty value list (note the key has already
            been removed by then).
        """
        try:
            item = dict.popitem(self)
            # Empty value list: treat like a missing key (the raised error
            # is a KeyError subclass, re-wrapped by the handler below).
            if len(item[1]) == 0:
                raise exceptions.BadRequestKeyError()
            return (item[0], item[1][0])
        except KeyError as e:
            raise exceptions.BadRequestKeyError(str(e))
def poplist(self, key):
"""Pop the list for a key from the dict. If the key is not in the dict
an empty list is returned.
.. versionchanged:: 0.5
If the key does no longer exist a list is returned instead of
raising an error.
"""
return dict.pop(self, key, [])
def popitemlist(self):
"""Pop a ``(key, list)`` tuple from the dict."""
try:
return dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
return self.deepcopy(memo=memo)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True)))
class _omd_bucket(object):
    """Doubly linked list node wrapping one ``(key, value)`` pair inside
    an :class:`OrderedMultiDict`.  The chain of buckets records insertion
    order across different keys, giving O(1) access and O(n) iteration at
    the cost of extra memory and slower operations.
    """
    __slots__ = ('prev', 'key', 'value', 'next')

    def __init__(self, omd, key, value):
        # Append this bucket to the tail of omd's chain.
        self.prev = omd._last_bucket
        self.next = None
        self.key = key
        self.value = value
        if omd._first_bucket is None:
            omd._first_bucket = self
        if omd._last_bucket is not None:
            omd._last_bucket.next = self
        omd._last_bucket = self

    def unlink(self, omd):
        # Splice this bucket out of omd's chain, fixing head/tail refs.
        prev, next = self.prev, self.next
        if prev:
            prev.next = next
        if next:
            next.prev = prev
        if omd._first_bucket is self:
            omd._first_bucket = next
        if omd._last_bucket is self:
            omd._last_bucket = prev
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class OrderedMultiDict(MultiDict):
    """Works like a regular :class:`MultiDict` but preserves the
    order of the fields.  To convert the ordered multi dict into a
    list you can use the :meth:`items` method and pass it ``multi=True``.

    In general an :class:`OrderedMultiDict` is an order of magnitude
    slower than a :class:`MultiDict`.

    .. admonition:: note

       Due to a limitation in Python you cannot convert an ordered
       multi dict into a regular dict by using ``dict(multidict)``.
       Instead you have to use the :meth:`to_dict` method, otherwise
       the internal bucket objects are exposed.
    """

    def __init__(self, mapping=None):
        # Bypass MultiDict.__init__: values are stored as _omd_bucket
        # chains, so everything must go through our own add().
        dict.__init__(self)
        self._first_bucket = self._last_bucket = None
        if mapping is not None:
            OrderedMultiDict.update(self, mapping)

    def __eq__(self, other):
        if not isinstance(other, MultiDict):
            return NotImplemented
        if isinstance(other, OrderedMultiDict):
            # Ordered vs ordered: every (key, value) pair must match in
            # the exact same sequence, and both must be the same length.
            iter1 = iteritems(self, multi=True)
            iter2 = iteritems(other, multi=True)
            try:
                for k1, v1 in iter1:
                    k2, v2 = next(iter2)
                    if k1 != k2 or v1 != v2:
                        return False
            except StopIteration:
                return False
            try:
                next(iter2)
            except StopIteration:
                return True
            return False
        # Ordered vs plain MultiDict: per-key value lists must match,
        # cross-key order is irrelevant.
        if len(self) != len(other):
            return False
        for key, values in iterlists(self):
            if other.getlist(key) != values:
                return False
        return True

    __hash__ = None

    def __ne__(self, other):
        # Fix: propagate NotImplemented instead of negating it.
        # ``not NotImplemented`` evaluates to False (and negating
        # NotImplemented is deprecated), which wrongly reported
        # equality when comparing against unrelated types.
        rv = self.__eq__(other)
        if rv is NotImplemented:
            return rv
        return not rv

    def __reduce_ex__(self, protocol):
        # Pickle as a flat list of (key, value) pairs to keep order.
        return type(self), (list(iteritems(self, multi=True)),)

    def __getstate__(self):
        return list(iteritems(self, multi=True))

    def __setstate__(self, values):
        dict.clear(self)
        for key, value in values:
            self.add(key, value)

    def __getitem__(self, key):
        """Return the first value for *key*; raises the HTTP-aware
        :exc:`~exceptions.BadRequestKeyError` if missing."""
        if key in self:
            # stored values are _omd_bucket objects; unwrap the first
            return dict.__getitem__(self, key)[0].value
        raise exceptions.BadRequestKeyError(key)

    def __setitem__(self, key, value):
        """Replace all values for *key* with the single *value*."""
        self.poplist(key)
        self.add(key, value)

    def __delitem__(self, key):
        self.pop(key)

    def keys(self):
        return (key for key, value in iteritems(self))
    __iter__ = keys

    def values(self):
        """Iterate over the first value of every key, in field order."""
        return (value for key, value in iteritems(self))

    def items(self, multi=False):
        """Iterate ``(key, value)`` pairs in insertion order; with
        ``multi=True`` every stored value is yielded."""
        ptr = self._first_bucket
        if multi:
            while ptr is not None:
                yield ptr.key, ptr.value
                ptr = ptr.next
        else:
            returned_keys = set()
            while ptr is not None:
                if ptr.key not in returned_keys:
                    returned_keys.add(ptr.key)
                    yield ptr.key, ptr.value
                ptr = ptr.next

    def lists(self):
        """Iterate ``(key, value_list)`` pairs, keys ordered by first
        appearance."""
        returned_keys = set()
        ptr = self._first_bucket
        while ptr is not None:
            if ptr.key not in returned_keys:
                yield ptr.key, self.getlist(ptr.key)
                returned_keys.add(ptr.key)
            ptr = ptr.next

    def listvalues(self):
        for key, values in iterlists(self):
            yield values

    def add(self, key, value):
        # The bucket links itself into the chain on construction.
        dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))

    def getlist(self, key, type=None):
        """Return all values for *key*; *type*-conversion failures drop
        the offending value, as in :meth:`MultiDict.getlist`."""
        try:
            rv = dict.__getitem__(self, key)
        except KeyError:
            return []
        if type is None:
            return [x.value for x in rv]
        result = []
        for item in rv:
            try:
                result.append(type(item.value))
            except ValueError:
                pass
        return result

    def setlist(self, key, new_list):
        """Replace all values for *key* with *new_list*'s items (the new
        values move to the end of the field order)."""
        self.poplist(key)
        for value in new_list:
            self.add(key, value)

    def setlistdefault(self, key, default_list=None):
        # Cannot hand out the internal list: it holds buckets, not values.
        raise TypeError('setlistdefault is unsupported for '
                        'ordered multi dicts')

    def update(self, mapping):
        """Extend (not replace) the value lists with *mapping*'s items."""
        for key, value in iter_multi_items(mapping):
            OrderedMultiDict.add(self, key, value)

    def poplist(self, key):
        """Remove *key*, returning its values; ``[]`` if absent."""
        buckets = dict.pop(self, key, ())
        for bucket in buckets:
            bucket.unlink(self)
        return [x.value for x in buckets]

    def pop(self, key, default=_missing):
        """Remove *key*, returning its first value (remaining values are
        discarded); *default* is returned for a missing key if given."""
        try:
            buckets = dict.pop(self, key)
        except KeyError as e:
            if default is not _missing:
                return default
            raise exceptions.BadRequestKeyError(str(e))
        for bucket in buckets:
            bucket.unlink(self)
        return buckets[0].value

    def popitem(self):
        """Pop a ``(key, first_value)`` tuple from the dict."""
        try:
            key, buckets = dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(str(e))
        for bucket in buckets:
            bucket.unlink(self)
        return key, buckets[0].value

    def popitemlist(self):
        """Pop a ``(key, value_list)`` tuple from the dict."""
        try:
            key, buckets = dict.popitem(self)
        except KeyError as e:
            raise exceptions.BadRequestKeyError(str(e))
        for bucket in buckets:
            bucket.unlink(self)
        return key, [x.value for x in buckets]
def _options_header_vkw(value, kw):
    # Render keyword arguments as header parameters, mapping the
    # Pythonic underscore back to the HTTP-style dash.
    options = dict((k.replace('_', '-'), v) for k, v in kw.items())
    return dump_options_header(value, options)
def _unicodify_header_value(value):
    # Header values are stored as text; bytes are decoded as latin-1
    # (the encoding HTTP/1.1 headers are defined in), anything else is
    # coerced with the text constructor.
    if isinstance(value, bytes):
        value = value.decode('latin-1')
    elif not isinstance(value, text_type):
        value = text_type(value)
    return value
@native_itermethods(['keys', 'values', 'items'])
class Headers(object):
    """An object that stores some headers.  It has a dict-like interface
    but is ordered and can store the same keys multiple times.

    This data structure is useful if you want a nicer way to handle WSGI
    headers which are stored as tuples in a list.

    From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
    also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
    and will render a page for a ``400 BAD REQUEST`` if caught in a
    catch-all for HTTP exceptions.

    Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
    class, with the exception of `__getitem__`.  :mod:`wsgiref` will return
    `None` for ``headers['missing']``, whereas :class:`Headers` will raise
    a :class:`KeyError`.

    To create a new :class:`Headers` object pass it a list or dict of headers
    which are used as default values.  This does not reuse the list passed
    to the constructor for internal usage.

    :param defaults: The list of default values for the :class:`Headers`.

    .. versionchanged:: 0.9
       This data structure now stores unicode values similar to how the
       multi dicts do it.  The main difference is that bytes can be set as
       well which will automatically be latin1 decoded.

    .. versionchanged:: 0.9
       The :meth:`linked` function was removed without replacement as it
       was an API that does not support the changes to the encoding model.
    """

    def __init__(self, defaults=None):
        # internal storage: a list of (key, value) tuples in order
        self._list = []
        if defaults is not None:
            if isinstance(defaults, (list, Headers)):
                # already (key, value) tuples: extend directly
                self._list.extend(defaults)
            else:
                # dicts/iterables go through extend(), which adds each
                # pair via add() (decoding/validating the values)
                self.extend(defaults)

    def __getitem__(self, key, _get_mode=False):
        """Look up a header by name (case-insensitive, first match wins),
        by integer index, or by slice.  `_get_mode` is internal: it makes
        a miss raise a plain ``KeyError`` for :meth:`get`/:meth:`__contains__`.
        """
        if not _get_mode:
            if isinstance(key, integer_types):
                return self._list[key]
            elif isinstance(key, slice):
                return self.__class__(self._list[key])
        if not isinstance(key, string_types):
            raise exceptions.BadRequestKeyError(key)
        ikey = key.lower()
        for k, v in self._list:
            if k.lower() == ikey:
                return v
        # micro optimization: if we are in get mode we will catch that
        # exception one stack level down so we can raise a standard
        # key error instead of our special one.
        if _get_mode:
            raise KeyError()
        raise exceptions.BadRequestKeyError(key)

    def __eq__(self, other):
        # NOTE: set comparison ignores ordering and duplicate headers;
        # only the distinct (key, value) tuples are compared.
        return other.__class__ is self.__class__ and \
            set(other._list) == set(self._list)

    __hash__ = None

    def __ne__(self, other):
        return not self.__eq__(other)

    def get(self, key, default=None, type=None, as_bytes=False):
        """Return the default value if the requested data doesn't exist.
        If `type` is provided and is a callable it should convert the value,
        return it or raise a :exc:`ValueError` if that is not possible.  In
        this case the function will return the default as if the value was not
        found:

        >>> d = Headers([('Content-Length', '42')])
        >>> d.get('Content-Length', type=int)
        42

        If a headers object is bound you must not add unicode strings
        because no encoding takes place.

        .. versionadded:: 0.9
           Added support for `as_bytes`.

        :param key: The key to be looked up.
        :param default: The default value to be returned if the key can't
                        be looked up.  If not further specified `None` is
                        returned.
        :param type: A callable that is used to cast the value in the
                     :class:`Headers`.  If a :exc:`ValueError` is raised
                     by this callable the default value is returned.
        :param as_bytes: return bytes instead of unicode strings.
        """
        try:
            rv = self.__getitem__(key, _get_mode=True)
        except KeyError:
            return default
        if as_bytes:
            rv = rv.encode('latin1')
        if type is None:
            return rv
        try:
            return type(rv)
        except ValueError:
            return default

    def getlist(self, key, type=None, as_bytes=False):
        """Return the list of items for a given key.  If that key is not in the
        :class:`Headers`, the return value will be an empty list.  Just as
        :meth:`get` :meth:`getlist` accepts a `type` parameter.  All items will
        be converted with the callable defined there.

        .. versionadded:: 0.9
           Added support for `as_bytes`.

        :param key: The key to be looked up.
        :param type: A callable that is used to cast the value in the
                     :class:`Headers`.  If a :exc:`ValueError` is raised
                     by this callable the value will be removed from the list.
        :return: a :class:`list` of all the values for the key.
        :param as_bytes: return bytes instead of unicode strings.
        """
        ikey = key.lower()
        result = []
        for k, v in self:
            if k.lower() == ikey:
                if as_bytes:
                    v = v.encode('latin1')
                if type is not None:
                    try:
                        v = type(v)
                    except ValueError:
                        # conversion failure drops the value silently
                        continue
                result.append(v)
        return result

    def get_all(self, name):
        """Return a list of all the values for the named field.

        This method is compatible with the :mod:`wsgiref`
        :meth:`~wsgiref.headers.Headers.get_all` method.
        """
        return self.getlist(name)

    def items(self, lower=False):
        """Iterate over ``(key, value)`` pairs; ``lower=True`` lowercases
        the keys on the fly."""
        for key, value in self:
            if lower:
                key = key.lower()
            yield key, value

    def keys(self, lower=False):
        """Iterate over the header names, optionally lowercased."""
        for key, _ in iteritems(self, lower):
            yield key

    def values(self):
        """Iterate over the header values."""
        for _, value in iteritems(self):
            yield value

    def extend(self, iterable):
        """Extend the headers with a dict or an iterable yielding keys and
        values.
        """
        if isinstance(iterable, dict):
            for key, value in iteritems(iterable):
                if isinstance(value, (tuple, list)):
                    # sequence values fan out into one header per item
                    for v in value:
                        self.add(key, v)
                else:
                    self.add(key, value)
        else:
            for key, value in iterable:
                self.add(key, value)

    def __delitem__(self, key, _index_operation=True):
        # integers/slices delete by position; strings delete every
        # header with that (case-insensitive) name
        if _index_operation and isinstance(key, (integer_types, slice)):
            del self._list[key]
            return
        key = key.lower()
        new = []
        for k, v in self._list:
            if k.lower() != key:
                new.append((k, v))
        self._list[:] = new

    def remove(self, key):
        """Remove a key.

        :param key: The key to be removed.
        """
        return self.__delitem__(key, _index_operation=False)

    def pop(self, key=None, default=_missing):
        """Removes and returns a key or index.

        :param key: The key to be popped.  If this is an integer the item at
                    that position is removed, if it's a string the value for
                    that key is.  If the key is omitted or `None` the last
                    item is removed.
        :return: an item.
        """
        if key is None:
            return self._list.pop()
        if isinstance(key, integer_types):
            return self._list.pop(key)
        try:
            rv = self[key]
            self.remove(key)
        except KeyError:
            if default is not _missing:
                return default
            raise
        return rv

    def popitem(self):
        """Removes a key or index and returns a (key, value) item."""
        return self.pop()

    def __contains__(self, key):
        """Check if a key is present."""
        try:
            self.__getitem__(key, _get_mode=True)
        except KeyError:
            return False
        return True

    has_key = __contains__

    def __iter__(self):
        """Yield ``(key, value)`` tuples."""
        return iter(self._list)

    def __len__(self):
        return len(self._list)

    def add(self, _key, _value, **kw):
        """Add a new header tuple to the list.

        Keyword arguments can specify additional parameters for the header
        value, with underscores converted to dashes::

        >>> d = Headers()
        >>> d.add('Content-Type', 'text/plain')
        >>> d.add('Content-Disposition', 'attachment', filename='foo.png')

        The keyword argument dumping uses :func:`dump_options_header`
        behind the scenes.

        .. versionadded:: 0.4.1
            keyword arguments were added for :mod:`wsgiref` compatibility.
        """
        if kw:
            _value = _options_header_vkw(_value, kw)
        _value = _unicodify_header_value(_value)
        self._validate_value(_value)
        self._list.append((_key, _value))

    def _validate_value(self, value):
        # Reject non-text and embedded newlines (header injection guard).
        if not isinstance(value, text_type):
            raise TypeError('Value should be unicode.')
        if u'\n' in value or u'\r' in value:
            raise ValueError('Detected newline in header value.  This is '
                             'a potential security problem')

    def add_header(self, _key, _value, **_kw):
        """Add a new header tuple to the list.

        An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
        :meth:`~wsgiref.headers.Headers.add_header` method.
        """
        self.add(_key, _value, **_kw)

    def clear(self):
        """Clears all headers."""
        del self._list[:]

    def set(self, _key, _value, **kw):
        """Remove all header tuples for `key` and add a new one.  The newly
        added key either appears at the end of the list if there was no
        entry or replaces the first one.

        Keyword arguments can specify additional parameters for the header
        value, with underscores converted to dashes.  See :meth:`add` for
        more information.

        .. versionchanged:: 0.6.1
           :meth:`set` now accepts the same arguments as :meth:`add`.

        :param key: The key to be inserted.
        :param value: The value to be inserted.
        """
        if kw:
            _value = _options_header_vkw(_value, kw)
        _value = _unicodify_header_value(_value)
        self._validate_value(_value)
        if not self._list:
            self._list.append((_key, _value))
            return
        listiter = iter(self._list)
        ikey = _key.lower()
        for idx, (old_key, old_value) in enumerate(listiter):
            if old_key.lower() == ikey:
                # replace first occurrence
                self._list[idx] = (_key, _value)
                break
        else:
            self._list.append((_key, _value))
            return
        # drop any remaining headers with the same name after the
        # replaced one
        self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey]

    def setdefault(self, key, value):
        """Returns the value for the key if it is in the dict, otherwise it
        returns `value` and sets that value for `key`.

        :param key: The key to be looked up.
        :param value: The value to be set (and returned) if the key is
                      not in the dict.
        """
        if key in self:
            return self[key]
        self.set(key, value)
        return value

    def __setitem__(self, key, value):
        """Like :meth:`set` but also supports index/slice based setting."""
        if isinstance(key, (slice, integer_types)):
            if isinstance(key, integer_types):
                value = [value]
            value = [(k, _unicodify_header_value(v)) for (k, v) in value]
            # list comprehension used for its side effect: each value
            # is validated before anything is written
            [self._validate_value(v) for (k, v) in value]
            if isinstance(key, integer_types):
                self._list[key] = value[0]
            else:
                self._list[key] = value
        else:
            self.set(key, value)

    def to_list(self, charset='iso-8859-1'):
        """Convert the headers into a list suitable for WSGI."""
        # deprecated alias for to_wsgi_list()
        from warnings import warn
        warn(DeprecationWarning('Method removed, use to_wsgi_list instead'),
             stacklevel=2)
        return self.to_wsgi_list()

    def to_wsgi_list(self):
        """Convert the headers into a list suitable for WSGI.

        The values are byte strings in Python 2 converted to latin1 and unicode
        strings in Python 3 for the WSGI server to encode.

        :return: list
        """
        if PY2:
            return [(to_native(k), v.encode('latin1')) for k, v in self]
        return list(self)

    def copy(self):
        return self.__class__(self._list)

    def __copy__(self):
        return self.copy()

    def __str__(self):
        """Returns formatted headers suitable for HTTP transmission."""
        strs = []
        for key, value in self.to_wsgi_list():
            strs.append('%s: %s' % (key, value))
        # the extra element yields the blank line terminating the header
        # block ("...\r\n\r\n")
        strs.append('\r\n')
        return '\r\n'.join(strs)

    def __repr__(self):
        return '%s(%r)' % (
            self.__class__.__name__,
            list(self)
        )
class ImmutableHeadersMixin(object):
    """Makes a :class:`Headers` immutable.  We do not mark them as
    hashable though since the only usecase for this datastructure
    in Werkzeug is a view on a mutable structure.

    All mutating methods are overridden to raise via :func:`is_immutable`.
    Their signatures accept the same arguments as the :class:`Headers`
    methods they shadow; previously the mismatched signatures (e.g.
    ``add(self, item)`` vs ``Headers.add(_key, _value, **kw)`` and
    ``__delitem__`` without the ``_index_operation`` keyword used by
    ``Headers.remove``) made many calls fail with ``TypeError`` instead
    of the intended immutability error.

    .. versionadded:: 0.5

    :private:
    """
    def __delitem__(self, key, **kwargs):
        # **kwargs absorbs Headers.remove()'s _index_operation keyword
        is_immutable(self)
    def __setitem__(self, key, value):
        is_immutable(self)
    def set(self, _key, _value, **kw):
        is_immutable(self)
    def add(self, _key, _value, **kw):
        is_immutable(self)
    def add_header(self, _key, _value, **_kw):
        is_immutable(self)
    def remove(self, key):
        is_immutable(self)
    def extend(self, iterable):
        is_immutable(self)
    def insert(self, pos, value):
        is_immutable(self)
    def pop(self, key=None, default=None):
        is_immutable(self)
    def popitem(self):
        is_immutable(self)
    def setdefault(self, key, default):
        is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
    """Read only version of the headers from a WSGI environment.  This
    provides the same interface as `Headers` and is constructed from
    a WSGI environment.

    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
    HTTP exceptions.
    """

    def __init__(self, environ):
        self.environ = environ

    def __eq__(self, other):
        # Two views are equal only when backed by the very same environ.
        return self.environ is other.environ

    __hash__ = None

    def __getitem__(self, key, _get_mode=False):
        # _get_mode is a no-op here: there is no positional indexing on
        # an environ; the parameter only exists because get() passes it.
        key = key.upper().replace('-', '_')
        if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
            # these two CGI variables carry no HTTP_ prefix
            return _unicodify_header_value(self.environ[key])
        return _unicodify_header_value(self.environ['HTTP_' + key])

    def __len__(self):
        # count by iterating ourselves; calling list(self) indirectly
        # would recurse back into __len__
        return sum(1 for _ in self)

    def __iter__(self):
        for key, value in iteritems(self.environ):
            if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
                yield (key.replace('_', '-').title(),
                       _unicodify_header_value(value))
            elif key.startswith('HTTP_') and key not in \
                    ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
                yield (key[5:].replace('_', '-').title(),
                       _unicodify_header_value(value))

    def copy(self):
        raise TypeError('cannot create %r copies' % self.__class__.__name__)
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
    """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
    instances as sequence and it will combine the return values of all wrapped
    dicts:

    >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
    >>> post = MultiDict([('foo', 'bar')])
    >>> get = MultiDict([('blub', 'blah')])
    >>> combined = CombinedMultiDict([get, post])
    >>> combined['foo']
    'bar'
    >>> combined['blub']
    'blah'

    This works for all read operations and will raise a `TypeError` for
    methods that usually change data which isn't possible.

    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
    exceptions.
    """

    def __reduce_ex__(self, protocol):
        # pickle as the list of wrapped dicts
        return type(self), (self.dicts,)

    def __init__(self, dicts=None):
        # the wrapped dicts are searched in order; earlier dicts win
        self.dicts = dicts or []

    @classmethod
    def fromkeys(cls):
        raise TypeError('cannot create %r instances by fromkeys' %
                        cls.__name__)

    def __getitem__(self, key):
        # first wrapped dict containing the key wins
        for d in self.dicts:
            if key in d:
                return d[key]
        raise exceptions.BadRequestKeyError(key)

    def get(self, key, default=None, type=None):
        for d in self.dicts:
            if key in d:
                if type is not None:
                    try:
                        return type(d[key])
                    except ValueError:
                        # conversion failure: fall through to later dicts
                        continue
                return d[key]
        return default

    def getlist(self, key, type=None):
        # concatenate the value lists of every wrapped dict, in order
        rv = []
        for d in self.dicts:
            rv.extend(d.getlist(key, type))
        return rv

    def _keys_impl(self):
        """This function exists so __len__ can be implemented more efficiently,
        saving one list creation from an iterator.

        Using this for Python 2's ``dict.keys`` behavior would be useless since
        `dict.keys` in Python 2 returns a list, while we have a set here.
        """
        rv = set()
        for d in self.dicts:
            rv.update(iterkeys(d))
        return rv

    def keys(self):
        return iter(self._keys_impl())

    __iter__ = keys

    def items(self, multi=False):
        # without multi, only the first occurrence of each key is yielded
        found = set()
        for d in self.dicts:
            for key, value in iteritems(d, multi):
                if multi:
                    yield key, value
                elif key not in found:
                    found.add(key)
                    yield key, value

    def values(self):
        for key, value in iteritems(self):
            yield value

    def lists(self):
        rv = {}
        for d in self.dicts:
            for key, values in iterlists(d):
                rv.setdefault(key, []).extend(values)
        return iteritems(rv)

    def listvalues(self):
        return (x[1] for x in self.lists())

    def copy(self):
        """Return a shallow copy of this object."""
        return self.__class__(self.dicts[:])

    def to_dict(self, flat=True):
        """Return the contents as regular dict.  If `flat` is `True` the
        returned dict will only have the first item present, if `flat` is
        `False` all values will be returned as lists.

        :param flat: If set to `False` the dict returned will have lists
                     with all the values in it.  Otherwise it will only
                     contain the first item for each key.
        :return: a :class:`dict`
        """
        rv = {}
        # reversed so that earlier dicts overwrite later ones, matching
        # the lookup precedence of __getitem__
        for d in reversed(self.dicts):
            rv.update(d.to_dict(flat))
        return rv

    def __len__(self):
        return len(self._keys_impl())

    def __contains__(self, key):
        for d in self.dicts:
            if key in d:
                return True
        return False

    has_key = __contains__

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.dicts)
class FileMultiDict(MultiDict):
    """A special :class:`MultiDict` that has convenience methods to add
    files to it.  This is used for :class:`EnvironBuilder` and generally
    useful for unittesting.

    .. versionadded:: 0.5
    """

    def add_file(self, name, file, filename=None, content_type=None):
        """Adds a new file to the dict.  `file` can be a file name or
        a :class:`file`-like or a :class:`FileStorage` object.

        :param name: the name of the field.
        :param file: a filename or :class:`file`-like object
        :param filename: an optional filename
        :param content_type: an optional content type
        """
        if isinstance(file, FileStorage):
            # already wrapped: stored as-is, filename/content_type ignored
            value = file
        else:
            if isinstance(file, string_types):
                # a path: the filename defaults to the path itself
                if filename is None:
                    filename = file
                file = open(file, 'rb')
            if filename and content_type is None:
                content_type = mimetypes.guess_type(filename)[0] or \
                    'application/octet-stream'
            value = FileStorage(file, filename, name, content_type)
        self.add(name, value)
class ImmutableDict(ImmutableDictMixin, dict):
    """An immutable :class:`dict`.

    .. versionadded:: 0.5
    """

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, dict.__repr__(self))

    def copy(self):
        """Return a shallow *mutable* copy of this object.  Keep in mind
        that the standard library's :func:`copy` function is a no-op for
        this class like for any other python immutable type (eg:
        :class:`tuple`).
        """
        return dict(self)

    def __copy__(self):
        # immutable, so copy.copy() can hand back the same object
        return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
    """An immutable :class:`MultiDict`.

    .. versionadded:: 0.5
    """

    def copy(self):
        """Return a shallow *mutable* copy of this object.  Keep in mind
        that the standard library's :func:`copy` function is a no-op for
        this class like for any other python immutable type (eg:
        :class:`tuple`).
        """
        return MultiDict(self)

    def __copy__(self):
        # immutable, so copy.copy() can hand back the same object
        return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
    """An immutable :class:`OrderedMultiDict`.

    .. versionadded:: 0.6
    """

    def _iter_hashitems(self):
        # include the position so the same pairs in a different order
        # hash differently
        return enumerate(iteritems(self, multi=True))

    def copy(self):
        """Return a shallow *mutable* copy of this object.  Keep in mind
        that the standard library's :func:`copy` function is a no-op for
        this class like for any other python immutable type (eg:
        :class:`tuple`).
        """
        return OrderedMultiDict(self)

    def __copy__(self):
        # immutable, so copy.copy() can hand back the same object
        return self
@native_itermethods(['values'])
class Accept(ImmutableList):
"""An :class:`Accept` object is just a list subclass for lists of
``(value, quality)`` tuples. It is automatically sorted by quality.
All :class:`Accept` objects work similar to a list but provide extra
functionality for working with the data. Containment checks are
normalized to the rules of that header:
>>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
>>> a.best
'ISO-8859-1'
>>> 'iso-8859-1' in a
True
>>> 'UTF8' in a
True
>>> 'utf7' in a
False
To get the quality for an item you can use normal item lookup:
>>> print a['utf-8']
0.7
>>> a['utf7']
0
.. versionchanged:: 0.5
:class:`Accept` objects are forced immutable now.
"""
def __init__(self, values=()):
if values is None:
list.__init__(self)
self.provided = False
elif isinstance(values, Accept):
self.provided = values.provided
list.__init__(self, values)
else:
self.provided = True
values = sorted(values, key=lambda x: (x[1], x[0]), reverse=True)
list.__init__(self, values)
def _value_matches(self, value, item):
"""Check if a value matches a given accept item."""
return item == '*' or item.lower() == value.lower()
def __getitem__(self, key):
"""Besides index lookup (getting item n) you can also pass it a string
to get the quality for the item. If the item is not in the list, the
returned quality is ``0``.
"""
if isinstance(key, string_types):
return self.quality(key)
return list.__getitem__(self, key)
def quality(self, key):
"""Returns the quality of the key.
.. versionadded:: 0.6
In previous versions you had to use the item-lookup syntax
(eg: ``obj[key]`` instead of ``obj.quality(key)``)
"""
for item, quality in self:
if self._value_matches(key, item):
return quality
return 0
def __contains__(self, value):
for item, quality in self:
if self._value_matches(value, item):
return True
return False
def __repr__(self):
return '%s([%s])' % (
self.__class__.__name__,
', '.join('(%r, %s)' % (x, y) for x, y in self)
)
def index(self, key):
"""Get the position of an entry or raise :exc:`ValueError`.
:param key: The key to be looked up.
.. versionchanged:: 0.5
This used to raise :exc:`IndexError`, which was inconsistent
with the list API.
"""
if isinstance(key, string_types):
for idx, (item, quality) in enumerate(self):
if self._value_matches(key, item):
return idx
raise ValueError(key)
return list.index(self, key)
def find(self, key):
"""Get the position of an entry or return -1.
:param key: The key to be looked up.
"""
try:
return self.index(key)
except ValueError:
return -1
def values(self):
"""Iterate over all values."""
for item in self:
yield item[0]
def to_header(self):
"""Convert the header set into an HTTP header string."""
result = []
for value, quality in self:
if quality != 1:
value = '%s;q=%s' % (value, quality)
result.append(value)
return ','.join(result)
    def __str__(self):
        # The string form of an accept list is its serialized header value.
        return self.to_header()
    def best_match(self, matches, default=None):
        """Returns the best match from a list of possible matches based
        on the quality of the client.  If two items have the same quality,
        the one is returned that comes first.

        :param matches: a list of matches to check for
        :param default: the value that is returned if none match
        """
        best_quality = -1
        result = default
        for server_item in matches:
            for client_item, quality in self:
                # self is sorted by descending quality (see __init__), so
                # once we reach an entry at or below the best quality found
                # so far, no later entry can win -- stop scanning early.
                if quality <= best_quality:
                    break
                # quality 0 explicitly means "not acceptable".
                if self._value_matches(server_item, client_item) \
                   and quality > 0:
                    best_quality = quality
                    result = server_item
        return result
@property
def best(self):
"""The best match as value."""
if self:
return self[0][0]
class MIMEAccept(Accept):
    """Like :class:`Accept` but with special methods and behavior for
    mimetypes.
    """

    def _value_matches(self, value, item):
        def _normalize(mimetype):
            mimetype = mimetype.lower()
            if mimetype == '*':
                return ('*', '*')
            return mimetype.split('/', 1)

        # ``value`` comes from the application and is trusted; invalid
        # values raise so developers notice mistakes immediately.
        if '/' not in value:
            raise ValueError('invalid mimetype %r' % value)
        value_type, value_subtype = _normalize(value)
        if value_type == '*' and value_subtype != '*':
            raise ValueError('invalid mimetype %r' % value)

        # Malformed items from the client simply never match.
        if '/' not in item:
            return False
        item_type, item_subtype = _normalize(item)
        if item_type == '*' and item_subtype != '*':
            return False

        if item_type == item_subtype == '*' or \
           value_type == value_subtype == '*':
            return True
        if item_type != value_type:
            return False
        return (item_subtype == '*' or value_subtype == '*'
                or item_subtype == value_subtype)

    @property
    def accept_html(self):
        """True if this object accepts HTML."""
        return ('text/html' in self
                or 'application/xhtml+xml' in self
                or self.accept_xhtml)

    @property
    def accept_xhtml(self):
        """True if this object accepts XHTML."""
        return ('application/xhtml+xml' in self
                or 'application/xml' in self)

    @property
    def accept_json(self):
        """True if this object accepts JSON."""
        return 'application/json' in self
class LanguageAccept(Accept):
    """Like :class:`Accept` but with normalization for languages."""

    def _value_matches(self, value, item):
        if item == '*':
            return True

        def _normalize(language):
            # Split on locale delimiters so e.g. 'en-US' and 'en_us' compare equal.
            return _locale_delim_re.split(language.lower())
        return _normalize(value) == _normalize(item)
class CharsetAccept(Accept):
    """Like :class:`Accept` but with normalization for charsets."""

    def _value_matches(self, value, item):
        def _normalize(name):
            # Resolve codec aliases to a canonical name; unknown charsets
            # fall back to a plain lowercase comparison.
            try:
                return codecs.lookup(name).name
            except LookupError:
                return name.lower()

        if item == '*':
            return True
        return _normalize(value) == _normalize(item)
def cache_property(key, empty, type):
    """Return a new property object for a cache header.  Useful if you
    want to add support for a cache extension in a subclass.

    :param key: the cache directive name (e.g. ``'max-age'``).
    :param empty: value reported when the directive is present without a value.
    :param type: callable used to coerce stored values, or `None`.
    """
    def fget(x):
        return x._get_cache_value(key, empty, type)

    def fset(x, v):
        x._set_cache_value(key, v, type)

    def fdel(x):
        x._del_cache_value(key)

    return property(fget, fset, fdel, 'accessor for %r' % key)
class _CacheControl(UpdateDictMixin, dict):
    """Subclass of a dict that stores values for a Cache-Control header.  It
    has accessors for all the cache-control directives specified in RFC 2616.
    The class does not differentiate between request and response directives.

    Because the cache-control directives in the HTTP header use dashes the
    python descriptors use underscores for that.

    To get a header of the :class:`CacheControl` object again you can convert
    the object into a string or call the :meth:`to_header` method.  If you plan
    to subclass it and add your own items have a look at the sourcecode for
    that class.

    .. versionchanged:: 0.4
       Setting `no_cache` or `private` to boolean `True` will set the implicit
       none-value which is ``*``:

       >>> cc = ResponseCacheControl()
       >>> cc.no_cache = True
       >>> cc
       <ResponseCacheControl 'no-cache'>
       >>> cc.no_cache
       '*'
       >>> cc.no_cache = None
       >>> cc
       <ResponseCacheControl ''>

       In versions before 0.5 the behavior documented here affected the now
       no longer existing `CacheControl` class.
    """

    # Directives shared by requests and responses (RFC 2616 section 14.9).
    no_cache = cache_property('no-cache', '*', None)
    no_store = cache_property('no-store', None, bool)
    max_age = cache_property('max-age', -1, int)
    no_transform = cache_property('no-transform', None, None)

    def __init__(self, values=(), on_update=None):
        dict.__init__(self, values or ())
        self.on_update = on_update
        # True unless explicitly constructed with ``values=None``; lets
        # callers distinguish "header absent" from "header empty".
        self.provided = values is not None

    def _get_cache_value(self, key, empty, type):
        """Used internally by the accessor properties."""
        if type is bool:
            # Boolean directives are valueless: presence means True.
            return key in self
        if key in self:
            value = self[key]
            if value is None:
                # Directive present without a value -> report `empty`.
                return empty
            elif type is not None:
                try:
                    value = type(value)
                except ValueError:
                    # Keep the raw string if it cannot be coerced.
                    pass
            return value

    def _set_cache_value(self, key, value, type):
        """Used internally by the accessor properties."""
        if type is bool:
            if value:
                self[key] = None
            else:
                self.pop(key, None)
        else:
            if value is None:
                # Bugfix: supply a default so clearing a directive that was
                # never set does not raise a KeyError (previously
                # ``self.pop(key)``).
                self.pop(key, None)
            elif value is True:
                self[key] = None
            else:
                self[key] = value

    def _del_cache_value(self, key):
        """Used internally by the accessor properties."""
        if key in self:
            del self[key]

    def to_header(self):
        """Convert the stored values into a cache control header."""
        return dump_header(self)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__,
            " ".join(
                "%s=%r" % (k, v) for k, v in sorted(self.items())
            ),
        )
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
    """A cache control for requests.  This is immutable and gives access
    to all the request-relevant cache control headers.

    To get a header of the :class:`RequestCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method.  If
    you plan to subclass it and add your own items have a look at the sourcecode
    for that class.

    .. versionadded:: 0.5
       In previous versions a `CacheControl` class existed that was used
       both for request and response.
    """
    # Request-only directives; `no_cache`, `no_store`, `max_age` and
    # `no_transform` come from _CacheControl (the latter is re-declared
    # here with identical arguments).
    max_stale = cache_property('max-stale', '*', int)
    min_fresh = cache_property('min-fresh', '*', int)
    no_transform = cache_property('no-transform', None, None)
    only_if_cached = cache_property('only-if-cached', None, bool)
class ResponseCacheControl(_CacheControl):
    """A cache control for responses.  Unlike :class:`RequestCacheControl`
    this is mutable and gives access to response-relevant cache control
    headers.

    To get a header of the :class:`ResponseCacheControl` object again you can
    convert the object into a string or call the :meth:`to_header` method.  If
    you plan to subclass it and add your own items have a look at the sourcecode
    for that class.

    .. versionadded:: 0.5
       In previous versions a `CacheControl` class existed that was used
       both for request and response.
    """
    # Response-only directives (RFC 2616 section 14.9); `private` may carry
    # a field-name list, hence the implicit '*' empty-value.
    public = cache_property('public', None, bool)
    private = cache_property('private', '*', None)
    must_revalidate = cache_property('must-revalidate', None, bool)
    proxy_revalidate = cache_property('proxy-revalidate', None, bool)
    s_maxage = cache_property('s-maxage', None, None)
# Attach cache_property to _CacheControl as a staticmethod so that
# subclasses can reuse it to declare additional cache directives.
_CacheControl.cache_property = staticmethod(cache_property)
class CallbackDict(UpdateDictMixin, dict):
    """A dict that calls a function passed every time something is changed.
    The function is passed the dict instance.
    """

    def __init__(self, initial=None, on_update=None):
        dict.__init__(self, initial or ())
        # UpdateDictMixin invokes this callback after every mutation.
        self.on_update = on_update

    def __repr__(self):
        return '<{0} {1}>'.format(self.__class__.__name__,
                                  dict.__repr__(self))
class HeaderSet(MutableSet):
    """Similar to the :class:`ETags` class this implements a set-like structure.
    Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
    content-language headers.

    If not constructed using the :func:`parse_set_header` function the
    instantiation works like this:

    >>> hs = HeaderSet(['foo', 'bar', 'baz'])
    >>> hs
    HeaderSet(['foo', 'bar', 'baz'])
    """

    def __init__(self, headers=None, on_update=None):
        # ``_headers`` preserves original casing and insertion order;
        # ``_set`` mirrors it in lowercase for O(1) case-insensitive lookups.
        self._headers = list(headers or ())
        self._set = set([x.lower() for x in self._headers])
        self.on_update = on_update

    def add(self, header):
        """Add a new header to the set."""
        self.update((header,))

    def remove(self, header):
        """Remove a header from the set.  This raises an :exc:`KeyError` if the
        header is not in the set.

        .. versionchanged:: 0.5
            In older versions a :exc:`IndexError` was raised instead of a
            :exc:`KeyError` if the object was missing.

        :param header: the header to be removed.
        """
        key = header.lower()
        if key not in self._set:
            raise KeyError(header)
        self._set.remove(key)
        for idx, stored in enumerate(self._headers):
            # Bugfix: compare the stored header (lowercased) against the
            # lowercased key.  The previous code compared against the
            # caller's original casing, so ``remove('Foo')`` removed 'foo'
            # from ``_set`` but left 'Foo' in ``_headers``, leaving the two
            # containers out of sync.
            if stored.lower() == key:
                del self._headers[idx]
                break
        if self.on_update is not None:
            self.on_update(self)

    def update(self, iterable):
        """Add all the headers from the iterable to the set.

        :param iterable: updates the set with the items from the iterable.
        """
        inserted_any = False
        for header in iterable:
            key = header.lower()
            if key not in self._set:
                self._headers.append(header)
                self._set.add(key)
                inserted_any = True
        # Only fire the callback if something actually changed.
        if inserted_any and self.on_update is not None:
            self.on_update(self)

    def discard(self, header):
        """Like :meth:`remove` but ignores errors.

        :param header: the header to be discarded.
        """
        try:
            return self.remove(header)
        except KeyError:
            pass

    def find(self, header):
        """Return the index of the header in the set or return -1 if not found.

        :param header: the header to be looked up.
        """
        header = header.lower()
        for idx, item in enumerate(self._headers):
            if item.lower() == header:
                return idx
        return -1

    def index(self, header):
        """Return the index of the header in the set or raise an
        :exc:`IndexError`.

        :param header: the header to be looked up.
        """
        rv = self.find(header)
        if rv < 0:
            raise IndexError(header)
        return rv

    def clear(self):
        """Clear the set."""
        self._set.clear()
        del self._headers[:]
        if self.on_update is not None:
            self.on_update(self)

    def as_set(self, preserve_casing=False):
        """Return the set as real python set type.  When calling this, all
        the items are converted to lowercase and the ordering is lost.

        :param preserve_casing: if set to `True` the items in the set returned
                                will have the original case like in the
                                :class:`HeaderSet`, otherwise they will
                                be lowercase.
        """
        if preserve_casing:
            return set(self._headers)
        return set(self._set)

    def to_header(self):
        """Convert the header set into an HTTP header string."""
        return ', '.join(map(quote_header_value, self._headers))

    def __getitem__(self, idx):
        return self._headers[idx]

    def __delitem__(self, idx):
        rv = self._headers.pop(idx)
        self._set.remove(rv.lower())
        if self.on_update is not None:
            self.on_update(self)

    def __setitem__(self, idx, value):
        old = self._headers[idx]
        self._set.remove(old.lower())
        self._headers[idx] = value
        self._set.add(value.lower())
        if self.on_update is not None:
            self.on_update(self)

    def __contains__(self, header):
        return header.lower() in self._set

    def __len__(self):
        return len(self._set)

    def __iter__(self):
        return iter(self._headers)

    def __nonzero__(self):
        # Python 2 truth hook; Python 3 falls back to __len__.
        return bool(self._set)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '%s(%r)' % (
            self.__class__.__name__,
            self._headers
        )
class ETags(Container, Iterable):
    """A set that can be used to check if one etag is present in a collection
    of etags.
    """

    def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
        # A star tag ("*") matches everything, so concrete strong tags
        # are discarded when it is set.
        if star_tag:
            self._strong = frozenset()
        else:
            self._strong = frozenset(strong_etags or ())
        self._weak = frozenset(weak_etags or ())
        self.star_tag = star_tag

    def as_set(self, include_weak=False):
        """Convert the `ETags` object into a python set.  Per default all the
        weak etags are not part of this set."""
        result = set(self._strong)
        if include_weak:
            result |= self._weak
        return result

    def is_weak(self, etag):
        """Check if an etag is weak."""
        return etag in self._weak

    def contains_weak(self, etag):
        """Check if an etag is part of the set including weak and strong tags."""
        return self.is_weak(etag) or self.contains(etag)

    def contains(self, etag):
        """Check if an etag is part of the set ignoring weak tags.
        It is also possible to use the ``in`` operator.
        """
        return True if self.star_tag else etag in self._strong

    def contains_raw(self, etag):
        """When passed a quoted tag it will check if this tag is part of the
        set.  If the tag is weak it is checked against weak and strong tags,
        otherwise strong only."""
        etag, weak = unquote_etag(etag)
        if weak:
            return self.contains_weak(etag)
        return self.contains(etag)

    def to_header(self):
        """Convert the etags set into a HTTP header string."""
        if self.star_tag:
            return '*'
        parts = ['"%s"' % tag for tag in self._strong]
        parts.extend('W/"%s"' % tag for tag in self._weak)
        return ', '.join(parts)

    def __call__(self, etag=None, data=None, include_weak=False):
        # Exactly one of `etag` and `data` must be supplied.
        if [etag, data].count(None) != 1:
            raise TypeError('either tag or data required, but at least one')
        if etag is None:
            etag = generate_etag(data)
        if include_weak and etag in self._weak:
            return True
        return etag in self._strong

    def __bool__(self):
        return bool(self.star_tag or self._strong or self._weak)

    __nonzero__ = __bool__

    def __str__(self):
        return self.to_header()

    def __iter__(self):
        return iter(self._strong)

    def __contains__(self, etag):
        return self.contains(etag)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))
class IfRange(object):
    """Very simple object that represents the `If-Range` header in parsed
    form.  It will either have neither a etag or date or one of either but
    never both.

    .. versionadded:: 0.7
    """

    def __init__(self, etag=None, date=None):
        #: The etag parsed and unquoted.  Ranges always operate on strong
        #: etags so the weakness information is not necessary.
        self.etag = etag
        #: The date in parsed format or `None`.
        self.date = date

    def to_header(self):
        """Converts the object back into an HTTP header."""
        # A date takes precedence over an etag if both happen to be set.
        if self.date is not None:
            return http_date(self.date)
        return quote_etag(self.etag) if self.etag is not None else ''

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))
class Range(object):
    """Represents a range header.  All the methods are only supporting bytes
    as unit.  It does store multiple ranges but :meth:`range_for_length` will
    only work if only one range is provided.

    .. versionadded:: 0.7
    """

    def __init__(self, units, ranges):
        #: The units of this range.  Usually "bytes".
        self.units = units
        #: A list of ``(begin, end)`` tuples for the range header provided.
        #: The ranges are non-inclusive.
        self.ranges = ranges

    def range_for_length(self, length):
        """If the range is for bytes, the length is not None and there is
        exactly one range and it is satisfiable it returns a ``(start, stop)``
        tuple, otherwise `None`.
        """
        if self.units != 'bytes' or length is None or len(self.ranges) != 1:
            return None
        start, end = self.ranges[0]
        if end is None:
            # Open-ended range runs to the end of the entity.
            end = length
        if start < 0:
            # Negative start means a suffix range ("last N bytes").
            start += length
        if is_byte_range_valid(start, end, length):
            return start, min(end, length)
        return None

    def make_content_range(self, length):
        """Creates a :class:`~werkzeug.datastructures.ContentRange` object
        from the current range and given content length.
        """
        rng = self.range_for_length(length)
        if rng is None:
            return None
        return ContentRange(self.units, rng[0], rng[1], length)

    def to_header(self):
        """Converts the object back into an HTTP header."""
        parts = []
        for begin, end in self.ranges:
            if end is None:
                # Open-ended ("500-") or suffix ("-500") range.
                parts.append('%s-' % begin if begin >= 0 else str(begin))
            else:
                # Stored stop is exclusive; the header uses inclusive ends.
                parts.append('%s-%s' % (begin, end - 1))
        return '%s=%s' % (self.units, ','.join(parts))

    def to_content_range_header(self, length):
        """Converts the object into `Content-Range` HTTP header,
        based on given length
        """
        rng = self.range_for_length(length)
        if rng is None:
            return None
        return '%s %d-%d/%d' % (self.units, rng[0], rng[1] - 1, length)

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))
class ContentRange(object):
    """Represents the content range header.

    .. versionadded:: 0.7
    """

    def __init__(self, units, start, stop, length=None, on_update=None):
        assert is_byte_range_valid(start, stop, length), \
            'Bad range provided'
        # Callback invoked whenever one of the range attributes is changed.
        self.on_update = on_update
        self.set(start, stop, length, units)

    # Property factory: each attribute is stored under the given
    # underscore name and triggers on_update when assigned.
    # NOTE(review): the factory is left on the class (no ``del``),
    # presumably so subclasses can reuse it -- confirm before removing.
    def _callback_property(name):
        def fget(self):
            return getattr(self, name)

        def fset(self, value):
            setattr(self, name, value)
            if self.on_update is not None:
                self.on_update(self)
        return property(fget, fset)

    #: The units to use, usually "bytes"
    units = _callback_property('_units')
    #: The start point of the range or `None`.
    start = _callback_property('_start')
    #: The stop point of the range (non-inclusive) or `None`.  Can only be
    #: `None` if also start is `None`.
    stop = _callback_property('_stop')
    #: The length of the range or `None`.
    length = _callback_property('_length')

    def set(self, start, stop, length=None, units='bytes'):
        """Simple method to update the ranges."""
        assert is_byte_range_valid(start, stop, length), \
            'Bad range provided'
        # Write the underscore slots directly so on_update fires only once.
        self._units = units
        self._start = start
        self._stop = stop
        self._length = length
        if self.on_update is not None:
            self.on_update(self)

    def unset(self):
        """Sets the units to `None` which indicates that the header should
        no longer be used.
        """
        self.set(None, None, units=None)

    def to_header(self):
        # An unset range serializes to the empty string.
        if self.units is None:
            return ''
        if self.length is None:
            length = '*'
        else:
            length = self.length
        if self.start is None:
            return '%s */%s' % (self.units, length)
        # Stored stop is exclusive; the header uses an inclusive end.
        return '%s %s-%s/%s' % (
            self.units,
            self.start,
            self.stop - 1,
            length
        )

    def __nonzero__(self):
        # Truthiness tracks whether the header is in use (units set).
        return self.units is not None

    __bool__ = __nonzero__

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, str(self))
class Authorization(ImmutableDictMixin, dict):
    """Represents an `Authorization` header sent by the client.  You should
    not create this kind of object yourself but use it when it's returned by
    the `parse_authorization_header` function.

    This object is a dict subclass and can be altered by setting dict items
    but it should be considered immutable as it's returned by the client and
    not meant for modifications.

    .. versionchanged:: 0.5
       This object became immutable.
    """

    def __init__(self, auth_type, data=None):
        dict.__init__(self, data or {})
        # The authentication scheme, e.g. 'basic' or 'digest'.
        self.type = auth_type

    # Read-only accessors over the parsed header fields.  A field that was
    # not transmitted reads as None via dict.get().
    username = property(lambda x: x.get('username'), doc='''
        The username transmitted. This is set for both basic and digest
        auth all the time.''')
    password = property(lambda x: x.get('password'), doc='''
        When the authentication type is basic this is the password
        transmitted by the client, else `None`.''')
    realm = property(lambda x: x.get('realm'), doc='''
        This is the server realm sent back for HTTP digest auth.''')
    nonce = property(lambda x: x.get('nonce'), doc='''
        The nonce the server sent for digest auth, sent back by the client.
        A nonce should be unique for every 401 response for HTTP digest
        auth.''')
    uri = property(lambda x: x.get('uri'), doc='''
        The URI from Request-URI of the Request-Line; duplicated because
        proxies are allowed to change the Request-Line in transit. HTTP
        digest auth only.''')
    nc = property(lambda x: x.get('nc'), doc='''
        The nonce count value transmitted by clients if a qop-header is
        also transmitted. HTTP digest auth only.''')
    cnonce = property(lambda x: x.get('cnonce'), doc='''
        If the server sent a qop-header in the ``WWW-Authenticate``
        header, the client has to provide this value for HTTP digest auth.
        See the RFC for more details.''')
    response = property(lambda x: x.get('response'), doc='''
        A string of 32 hex digits computed as defined in RFC 2617, which
        proves that the user knows a password. Digest auth only.''')
    opaque = property(lambda x: x.get('opaque'), doc='''
        The opaque header from the server returned unchanged by the client.
        It is recommended that this string be base64 or hexadecimal data.
        Digest auth only.''')

    @property
    def qop(self):
        """Indicates what "quality of protection" the client has applied to
        the message for HTTP digest auth."""
        def on_update(header_set):
            # Mutating the returned HeaderSet keeps the underlying dict
            # entry in sync.
            if not header_set and 'qop' in self:
                del self['qop']
            elif header_set:
                self['qop'] = header_set.to_header()
        return parse_set_header(self.get('qop'), on_update)
class WWWAuthenticate(UpdateDictMixin, dict):
    """Provides simple access to `WWW-Authenticate` headers."""

    #: list of keys that require quoting in the generated header
    _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm', 'qop'])

    def __init__(self, auth_type=None, values=None, on_update=None):
        dict.__init__(self, values or ())
        if auth_type:
            # The scheme is kept in the dict under a reserved key so that
            # to_header() can recover it.
            self['__auth_type__'] = auth_type
        self.on_update = on_update

    def set_basic(self, realm='authentication required'):
        """Clear the auth info and enable basic auth."""
        # dict.clear/dict.update bypass UpdateDictMixin so on_update fires
        # exactly once, below.
        dict.clear(self)
        dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
        if self.on_update:
            self.on_update(self)

    def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
                   algorithm=None, stale=False):
        """Clear the auth info and enable digest auth."""
        d = {
            '__auth_type__': 'digest',
            'realm': realm,
            'nonce': nonce,
            'qop': dump_header(qop)
        }
        if stale:
            d['stale'] = 'TRUE'
        if opaque is not None:
            d['opaque'] = opaque
        if algorithm is not None:
            d['algorithm'] = algorithm
        dict.clear(self)
        dict.update(self, d)
        if self.on_update:
            self.on_update(self)

    def to_header(self):
        """Convert the stored values into a WWW-Authenticate header."""
        d = dict(self)
        # Missing scheme defaults to basic.
        auth_type = d.pop('__auth_type__', None) or 'basic'
        return '%s %s' % (auth_type.title(), ', '.join([
            '%s=%s' % (key, quote_header_value(value,
                       allow_token=key not in self._require_quoting))
            for key, value in iteritems(d)
        ]))

    def __str__(self):
        return self.to_header()

    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.to_header()
        )

    def auth_property(name, doc=None):
        """A static helper function for subclasses to add extra authentication
        system properties onto a class::

            class FooAuthenticate(WWWAuthenticate):
                special_realm = auth_property('special_realm')

        For more information have a look at the sourcecode to see how the
        regular properties (:attr:`realm` etc.) are implemented.
        """
        def _set_value(self, value):
            if value is None:
                self.pop(name, None)
            else:
                self[name] = str(value)
        return property(lambda x: x.get(name), _set_value, doc=doc)

    # Factory for set-valued directives (``qop``, ``domain``): exposes the
    # dict entry as a HeaderSet kept in sync through its on_update hook.
    def _set_property(name, doc=None):
        def fget(self):
            def on_update(header_set):
                if not header_set and name in self:
                    del self[name]
                elif header_set:
                    self[name] = header_set.to_header()
            return parse_set_header(self.get(name), on_update)
        return property(fget, doc=doc)

    type = auth_property('__auth_type__', doc='''
        The type of the auth mechanism. HTTP currently specifies
        `Basic` and `Digest`.''')
    realm = auth_property('realm', doc='''
        A string to be displayed to users so they know which username and
        password to use. This string should contain at least the name of
        the host performing the authentication and might additionally
        indicate the collection of users who might have access.''')
    domain = _set_property('domain', doc='''
        A list of URIs that define the protection space. If a URI is an
        absolute path, it is relative to the canonical root URL of the
        server being accessed.''')
    nonce = auth_property('nonce', doc='''
        A server-specified data string which should be uniquely generated
        each time a 401 response is made. It is recommended that this
        string be base64 or hexadecimal data.''')
    opaque = auth_property('opaque', doc='''
        A string of data, specified by the server, which should be returned
        by the client unchanged in the Authorization header of subsequent
        requests with URIs in the same protection space. It is recommended
        that this string be base64 or hexadecimal data.''')
    algorithm = auth_property('algorithm', doc='''
        A string indicating a pair of algorithms used to produce the digest
        and a checksum. If this is not present it is assumed to be "MD5".
        If the algorithm is not understood, the challenge should be ignored
        (and a different one used, if there is more than one).''')
    qop = _set_property('qop', doc='''
        A set of quality-of-privacy directives such as auth and auth-int.''')

    def _get_stale(self):
        # Returns None when the directive is absent, True/False otherwise.
        val = self.get('stale')
        if val is not None:
            return val.lower() == 'true'

    def _set_stale(self, value):
        if value is None:
            self.pop('stale', None)
        else:
            self['stale'] = value and 'TRUE' or 'FALSE'

    stale = property(_get_stale, _set_stale, doc='''
        A flag, indicating that the previous request from the client was
        rejected because the nonce value was stale.''')
    del _get_stale, _set_stale

    # make auth_property a staticmethod so that subclasses of
    # `WWWAuthenticate` can use it for new properties.
    auth_property = staticmethod(auth_property)
    del _set_property
class FileStorage(object):
    """The :class:`FileStorage` class is a thin wrapper over incoming files.
    It is used by the request object to represent uploaded files.  All the
    attributes of the wrapper stream are proxied by the file storage so
    it's possible to do ``storage.read()`` instead of the long form
    ``storage.stream.read()``.
    """

    def __init__(self, stream=None, filename=None, name=None,
                 content_type=None, content_length=None,
                 headers=None):
        # Name of the form field this file was posted under.
        self.name = name
        self.stream = stream or _empty_stream

        # if no filename is provided we can attempt to get the filename
        # from the stream object passed. There we have to be careful to
        # skip things like <fdopen>, <stderr> etc. Python marks these
        # special filenames with angular brackets.
        if filename is None:
            filename = getattr(stream, 'name', None)
            s = make_literal_wrapper(filename)
            if filename and filename[0] == s('<') and filename[-1] == s('>'):
                filename = None

            # On Python 3 we want to make sure the filename is always unicode.
            # This might not be if the name attribute is bytes due to the
            # file being opened from the bytes API.
            if not PY2 and isinstance(filename, bytes):
                filename = filename.decode(get_filesystem_encoding(),
                                           'replace')

        self.filename = filename
        if headers is None:
            headers = Headers()
        self.headers = headers
        # Explicit arguments win over anything already in `headers`.
        if content_type is not None:
            headers['Content-Type'] = content_type
        if content_length is not None:
            headers['Content-Length'] = str(content_length)

    def _parse_content_type(self):
        # Lazily parse and cache the Content-Type header on first access.
        if not hasattr(self, '_parsed_content_type'):
            self._parsed_content_type = \
                parse_options_header(self.content_type)

    @property
    def content_type(self):
        """The content-type sent in the header. Usually not available"""
        return self.headers.get('content-type')

    @property
    def content_length(self):
        """The content-length sent in the header. Usually not available"""
        return int(self.headers.get('content-length') or 0)

    @property
    def mimetype(self):
        """Like :attr:`content_type`, but without parameters (eg, without
        charset, type etc.) and always lowercase.  For example if the content
        type is ``text/HTML; charset=utf-8`` the mimetype would be
        ``'text/html'``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[0].lower()

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict.  For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.7
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    def save(self, dst, buffer_size=16384):
        """Save the file to a destination path or file object.  If the
        destination is a file object you have to close it yourself after the
        call.  The buffer size is the number of bytes held in memory during
        the copy process.  It defaults to 16KB.

        For secure file saving also have a look at :func:`secure_filename`.

        :param dst: a filename or open file object the uploaded file
                    is saved to.
        :param buffer_size: the size of the buffer.  This works the same as
                            the `length` parameter of
                            :func:`shutil.copyfileobj`.
        """
        from shutil import copyfileobj
        close_dst = False
        if isinstance(dst, string_types):
            # A path was given -- open it ourselves and remember to close it.
            dst = open(dst, 'wb')
            close_dst = True
        try:
            copyfileobj(self.stream, dst, buffer_size)
        finally:
            if close_dst:
                dst.close()

    def close(self):
        """Close the underlying file if possible."""
        # Best effort: an unclosable/foreign stream must not raise here.
        try:
            self.stream.close()
        except Exception:
            pass

    def __nonzero__(self):
        # Truthiness mirrors whether an actual upload took place.
        return bool(self.filename)
    __bool__ = __nonzero__

    def __getattr__(self, name):
        # Proxy unknown attributes (read, seek, ...) to the wrapped stream.
        return getattr(self.stream, name)

    def __iter__(self):
        return iter(self.stream)

    def __repr__(self):
        return '<%s: %r (%r)>' % (
            self.__class__.__name__,
            self.filename,
            self.content_type
        )
# circular dependencies
from werkzeug.http import dump_options_header, dump_header, generate_etag, \
quote_header_value, parse_set_header, unquote_etag, quote_etag, \
parse_options_header, http_date, is_byte_range_valid
from werkzeug import exceptions
|
shepdelacreme/ansible | refs/heads/devel | lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase.py | 36 | #!/usr/bin/python
#
# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible's documentation and support tooling.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community',
}
DOCUMENTATION = '''
---
module: azure_rm_postgresqldatabase
version_added: "2.5"
short_description: Manage PostgreSQL Database instance.
description:
- Create, update and delete instance of PostgreSQL Database.
options:
resource_group:
description:
- The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
required: True
server_name:
description:
- The name of the server.
required: True
name:
description:
- The name of the database.
required: True
charset:
description:
- The charset of the database. Check PostgreSQL documentation for possible values.
- This is only set on creation, use I(force_update) to recreate a database if the
values don't match.
collation:
description:
- The collation of the database. Check PostgreSQL documentation for possible values.
- This is only set on creation, use I(force_update) to recreate a database if the
values don't match.
force_update:
description:
- When set to C(true), will delete and recreate the existing PostgreSQL database if any
of the properties don't match what is set.
- When set to C(false), no change will occur to the database even if any
of the properties do not match.
type: bool
default: 'no'
state:
description:
- Assert the state of the PostgreSQL database. Use 'present' to create or update a database and 'absent' to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- "Zim Kalinowski (@zikalino)"
'''
EXAMPLES = '''
- name: Create (or update) PostgreSQL Database
azure_rm_postgresqldatabase:
resource_group: TestGroup
server_name: testserver
name: db1
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: /subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/TestGroup/providers/Microsoft.DBforPostgreSQL/servers/testserver/databases/db1
name:
description:
- Resource name.
returned: always
type: str
sample: db1
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
    """Enumeration of the actions the module may take on the resource."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMDatabases(AzureRMModuleBase):
    """Configuration class for an Azure RM PostgreSQL Database resource."""

    def __init__(self):
        # Argument spec mirrors the DOCUMENTATION block above.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            server_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            charset=dict(
                type='str'
            ),
            collation=dict(
                type='str'
            ),
            force_update=dict(
                type='bool',
                default=False
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.server_name = None
        self.name = None
        # BUGFIX: force_update must be pre-declared on the instance so that
        # the hasattr() check in exec_module assigns the module parameter to
        # it; otherwise self.force_update raises AttributeError the first
        # time an update is required.
        self.force_update = None
        self.parameters = dict()

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.to_do = Actions.NoAction

        super(AzureRMDatabases, self).__init__(derived_arg_spec=self.module_arg_spec,
                                               supports_check_mode=True,
                                               supports_tags=False)

    def exec_module(self, **kwargs):
        """Main module execution method.

        Decides whether the database must be created, recreated (only when
        force_update is set, since charset/collation cannot change in
        place), deleted, or left alone, and returns the results dict.
        """
        # Known attributes are copied onto self; charset/collation feed the
        # create_or_update parameters dict instead.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                if key == "charset":
                    self.parameters["charset"] = kwargs[key]
                elif key == "collation":
                    self.parameters["collation"] = kwargs[key]

        old_response = None
        response = None

        self.mgmt_client = self.get_mgmt_svc_client(PostgreSQLManagementClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        resource_group = self.get_resource_group(self.resource_group)

        old_response = self.get_postgresqldatabase()

        if not old_response:
            self.log("PostgreSQL Database instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("PostgreSQL Database instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                self.log("Need to check if PostgreSQL Database instance has to be deleted or may be updated")
                if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']):
                    self.to_do = Actions.Update
                if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']):
                    self.to_do = Actions.Update
                if self.to_do == Actions.Update:
                    # charset/collation cannot be changed on an existing
                    # database; recreate it only when the user opted in.
                    if self.force_update:
                        if not self.check_mode:
                            self.delete_postgresqldatabase()
                    else:
                        self.to_do = Actions.NoAction

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the PostgreSQL Database instance")
            if self.check_mode:
                self.results['changed'] = True
                return self.results
            response = self.create_update_postgresqldatabase()
            self.results['changed'] = True
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("PostgreSQL Database instance deleted")
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.delete_postgresqldatabase()
            # make sure instance is actually deleted, for some Azure resources, instance is hanging around
            # for some time after deletion -- this should be really fixed in Azure
            while self.get_postgresqldatabase():
                time.sleep(20)
        else:
            self.log("PostgreSQL Database instance unchanged")
            self.results['changed'] = False
            response = old_response

        if response:
            self.results["id"] = response["id"]
            self.results["name"] = response["name"]
        return self.results

    def create_update_postgresqldatabase(self):
        '''
        Creates or updates PostgreSQL Database with the specified configuration.

        :return: deserialized PostgreSQL Database instance state dictionary
        '''
        self.log("Creating / Updating the PostgreSQL Database instance {0}".format(self.name))
        try:
            response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group,
                                                                   server_name=self.server_name,
                                                                   database_name=self.name,
                                                                   parameters=self.parameters)
            # Long-running operations come back as a poller; wait for them.
            if isinstance(response, LROPoller):
                response = self.get_poller_result(response)
        except CloudError as exc:
            self.log('Error attempting to create the PostgreSQL Database instance.')
            self.fail("Error creating the PostgreSQL Database instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_postgresqldatabase(self):
        '''
        Deletes specified PostgreSQL Database instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the PostgreSQL Database instance {0}".format(self.name))
        try:
            response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group,
                                                         server_name=self.server_name,
                                                         database_name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the PostgreSQL Database instance.')
            self.fail("Error deleting the PostgreSQL Database instance: {0}".format(str(e)))
        return True

    def get_postgresqldatabase(self):
        '''
        Gets the properties of the specified PostgreSQL Database.

        :return: deserialized PostgreSQL Database instance state dictionary,
                 or False when the database does not exist
        '''
        self.log("Checking if the PostgreSQL Database instance {0} is present".format(self.name))
        found = False
        try:
            response = self.mgmt_client.databases.get(resource_group_name=self.resource_group,
                                                      server_name=self.server_name,
                                                      database_name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("PostgreSQL Database instance : {0} found".format(response.name))
        except CloudError as e:
            # A CloudError here means the resource was not found.
            self.log('Did not find the PostgreSQL Database instance.')
        if found is True:
            return response.as_dict()
        return False
def main():
    """Module entry point: instantiating the class runs the module."""
    AzureRMDatabases()
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
mgraffg/simplegp | refs/heads/master | SimpleGP/tests/test_gpmae.py | 1 | # Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from SimpleGP import GPMAE
import numpy as np
def test_gpmae():
    """Check that GPMAE fits a quadratic polynomial acceptably well.

    Builds y = 0.2*x**2 - 0.3*x + 0.2 on 100 points, trains GPMAE with a
    fixed seed for reproducibility, and asserts the fitness of the best
    individual is above the reference threshold.
    """
    x = np.linspace(-10, 10, 100)
    pol = np.array([0.2, -0.3, 0.2])
    X = np.vstack((x**2, x, np.ones(x.shape[0])))
    y = (X.T * pol).sum(axis=1)
    x = x[:, np.newaxis]
    gp = GPMAE.init_cl(verbose=True,
                       generations=30, seed=0,
                       max_length=1000).train(x, y)
    gp.run()
    fit = gp.fitness(gp.get_best())
    # BUGFIX: bare ``print fit`` is a SyntaxError under Python 3; the
    # call form works on both Python 2 and 3.
    print(fit)
    assert fit >= -0.7906
|
asimshankar/tensorflow | refs/heads/master | tensorflow/contrib/distribute/python/estimator_integration_test.py | 7 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that show that DistributionStrategy works with canned Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.optimizer_v2 import adagrad
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import test
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator import training
from tensorflow.python.estimator.canned import dnn_linear_combined
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib as feature_column
from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary.writer import writer_cache
class DNNLinearCombinedClassifierIntegrationTest(test.TestCase,
                                                 parameterized.TestCase):
  """Runs train/evaluate/predict/export for a canned DNNLinearCombined
  estimator under each DistributionStrategy in the combinations below."""

  def setUp(self):
    # Fresh model directory per test; removed in tearDown.
    self._model_dir = tempfile.mkdtemp()

  def dataset_input_fn(self, x, y, batch_size, shuffle):
    """Returns an input_fn building a tf.data pipeline over (x, y)."""

    def input_fn():
      dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
      if shuffle:
        dataset = dataset.shuffle(batch_size)
      # Repeat for 10 epochs, then batch.
      dataset = dataset.repeat(10).batch(batch_size)
      return dataset

    return input_fn

  @combinations.generate(
      combinations.combine(
          mode=['graph'],
          distribution=[
              combinations.one_device_strategy,
              combinations.mirrored_strategy_with_gpu_and_cpu,
              combinations.mirrored_strategy_with_two_gpus,
              combinations.core_mirrored_strategy_with_gpu_and_cpu,
              combinations.core_mirrored_strategy_with_two_gpus
          ],
          use_train_and_evaluate=[True, False]))
  def test_complete_flow_with_mode(self, distribution, use_train_and_evaluate):
    """Full train -> evaluate -> predict -> export flow for one strategy."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    # Per-replica batch size: the global batch is split across replicas.
    train_input_fn = self.dataset_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size // distribution.num_replicas_in_sync,
        shuffle=True)
    eval_input_fn = self.dataset_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size // distribution.num_replicas_in_sync,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data}, batch_size=batch_size, shuffle=False)

    linear_feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    dnn_feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    feature_columns = linear_feature_columns + dnn_feature_columns
    # The same strategy is used for both training and evaluation.
    estimator = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=linear_feature_columns,
        dnn_hidden_units=(2, 2),
        dnn_feature_columns=dnn_feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir,
        # TODO(isaprykin): Work around the colocate_with error.
        dnn_optimizer=adagrad.AdagradOptimizer(0.001),
        linear_optimizer=adagrad.AdagradOptimizer(0.001),
        config=run_config.RunConfig(
            train_distribute=distribution, eval_distribute=distribution))

    num_steps = 10
    if use_train_and_evaluate:
      scores, _ = training.train_and_evaluate(
          estimator,
          training.TrainSpec(train_input_fn, max_steps=num_steps),
          training.EvalSpec(eval_input_fn))
    else:
      estimator.train(train_input_fn, steps=num_steps)
      scores = estimator.evaluate(eval_input_fn)

    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))

    predictions = np.array([
        x[prediction_keys.PredictionKeys.PREDICTIONS]
        for x in estimator.predict(predict_input_fn)
    ])
    self.assertAllEqual((batch_size, label_dimension), predictions.shape)

    # Round-trip through SavedModel export to verify serving still works.
    feature_spec = feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = estimator.export_saved_model(tempfile.mkdtemp(),
                                              serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))

  def tearDown(self):
    if self._model_dir:
      # Flush cached summary writers before removing the directory.
      writer_cache.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)
# Run the parameterized tests when executed directly.
if __name__ == '__main__':
  test.main()
|
unusedPhD/amoco | refs/heads/release | amoco/arch/arm/v7/formats.py | 5 | # -*- coding: utf-8 -*-
from .env import *
from .utils import *
from amoco.arch.core import Formatter
def mnemo(i):
    """Render the mnemonic, lower-cased and padded to 12 characters,
    appending 'S' when setflags is set and a '.cond' suffix for any
    condition other than AL."""
    name = i.mnemonic
    if hasattr(i, 'setflags') and i.setflags:
        name += 'S'
    if hasattr(i, 'cond') and i.cond != CONDITION_AL:
        name += '.%s' % CONDITION[i.cond][0]
    return name.lower().ljust(12)
def regs(i, limit=None):
    """Return the operands formatted as strings; a truthy *limit* keeps
    only the first *limit* of them."""
    selected = i.operands[:limit] if limit else i.operands
    return ['{0}'.format(op) for op in selected]
def reglist(i, pos=-1):
    """Render the register-list operand at *pos* as '{r1, r2, ...}'."""
    inner = ', '.join('{0}'.format(r) for r in i.operands[pos])
    return "{%s}" % inner
def deref(i, pos=-2):
    """Format a memory dereference from the (base, offset) operand pair
    starting at *pos*, honouring the add/wback/index flags."""
    assert len(i.operands) > 2
    base = i.operands[pos]
    offset = i.operands[pos + 1]
    sign = '+' if i.add else '-'
    if offset._is_cst:
        ostr = '#%c%d' % (sign, offset.value)
    else:
        ostr = sign + str(offset)
    # Pre-indexed form '[base, off]!?' only when both wback and index
    # are present; every other case is the post-indexed '[base], off'.
    if hasattr(i, 'wback') and i.index:
        wb = '!' if i.wback else ''
        loc = '[%s, %s]%s' % (base, ostr, wb)
    else:
        loc = '[%s], %s' % (base, ostr)
    return [loc]
def label(i, pos=0):
    """Return '*<target>' for the pc-relative operand at *pos*.

    NOTE(review): the 2*pcoffset term appears to model the pc read-ahead
    (8 bytes in ARM state, 4 in Thumb) -- confirm against the decoder.
    """
    base = i.address if i.address is not None else pc
    # isetstate 0 selects the ARM instruction set, otherwise Thumb.
    step = 4 if internals['isetstate'] == 0 else 2
    return '*' + str((base + 2 * step) + i.operands[pos])
def setend(i):
    """Format SETEND with its endianness specifier (BE or LE)."""
    return mnemo(i) + ('BE' if i.set_bigend else 'LE')
def plx(i):
    """Format a preload instruction: mnemonic then '[base, #+/-offset]'."""
    base, offset = i.operands[-2], i.operands[-1]
    sign = '+' if i.add else '-'
    if offset._is_cst:
        ostr = '#%c%d' % (sign, offset.value)
    else:
        ostr = sign + str(offset)
    return mnemo(i) + '[%s, %s]' % (base, ostr)
def specreg(i):
    """Format MSR operands: destination followed by APSR_<fields>, where
    the fields reflect the write_nzcvq / write_g flags."""
    fields = ''
    if i.write_nzcvq:
        fields += 'nzcvq'
    if i.write_g:
        fields += 'g'
    return '%s, %s_%s' % (i.operands[0], apsr, fields)
# Each format_* list is a sequence of callables; each one renders a
# fragment of the instruction text (presumably joined by Formatter --
# see amoco.arch.core).
format_allregs = [lambda i: ', '.join(regs(i))]
format_default = [mnemo]+format_allregs
format_sreg = format_default
format_label = [mnemo, label]
format_adr = [mnemo, lambda i: '{0}, '.format(i.operands[0]), lambda i: label(i,1)]
format_bits = format_default
format_reglist = [mnemo, (lambda i: ', '.join(regs(i,-1))), reglist]
format_deref = [mnemo, lambda i: ', '.join(regs(i,-2)+deref(i,-2))]
format_plx = [plx]
format_msr = [mnemo, specreg]
format_setend = [setend]

# Map of instruction format names to the callback lists above.
ARM_V7_full_formats = {
    'A_default' : format_default,
    'A_sreg' : format_sreg,
    'A_label' : format_label,
    'A_adr' : format_adr,
    'A_bits' : format_bits,
    'A_reglist' : format_reglist,
    'A_deref' : format_deref,
    'instr_PLx' : format_plx,
    'instr_MSR' : format_msr,
    'instr_SETEND' : format_setend,
}

# Full-syntax formatter instance exported by this module.
ARM_V7_full = Formatter(ARM_V7_full_formats)
|
MemeticParadigm/TensorFlow | refs/heads/master | tensorflow/python/training/momentum.py | 5 | """Momentum for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import constant_op
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class MomentumOptimizer(optimizer.Optimizer):
  """Optimizer that implements the Momentum algorithm.

  Keeps one "momentum" accumulator slot per variable and delegates the
  update to the fused `training_ops` momentum kernels.

  @@__init__
  """

  def __init__(self, learning_rate, momentum,
               use_locking=False, name="Momentum"):
    """Construct a new Momentum optimizer.

    Args:
      learning_rate: A `Tensor` or a floating point value. The learning rate.
      momentum: A `Tensor` or a floating point value. The momentum.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "Momentum".
    """
    super(MomentumOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate
    self._momentum = momentum

  def _create_slots(self, var_list):
    # One zero-initialized "momentum" accumulator per variable.
    for v in var_list:
      self._zeros_slot(v, "momentum", self._name)

  def _prepare(self):
    # Convert the (possibly Python scalar) hyperparameters to tensors
    # once per apply_gradients call.
    self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
                                                       name="learning_rate")
    self._momentum_tensor = ops.convert_to_tensor(self._momentum,
                                                  name="momentum")

  def _apply_dense(self, grad, var):
    # Fused kernel updates the accumulator and the variable together.
    mom = self.get_slot(var, "momentum")
    return training_ops.apply_momentum(
        var, mom,
        self._learning_rate_tensor, grad, self._momentum_tensor,
        use_locking=self._use_locking).op

  def _apply_sparse(self, grad, var):
    # Sparse variant: only the rows selected by grad.indices are updated.
    mom = self.get_slot(var, "momentum")
    return training_ops.sparse_apply_momentum(
        var, mom,
        self._learning_rate_tensor, grad.values, grad.indices,
        self._momentum_tensor, use_locking=self._use_locking).op
|
0k/OpenUpgrade | refs/heads/8.0 | addons/account_bank_statement_extensions/wizard/confirm_statement_line.py | 381 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class confirm_statement_line(osv.osv_memory):
    """Wizard that marks the selected bank statement lines as confirmed."""
    _name = 'confirm.statement.line'
    _description = 'Confirm selected statement lines'

    def confirm_lines(self, cr, uid, ids, context):
        """Set state='confirm' on the statement lines selected in the UI.

        The target ids come from context['active_ids'] (the list-view
        selection), not from `ids` (the wizard record itself).  Returns an
        empty dict so the client simply closes the wizard.
        """
        line_ids = context['active_ids']
        line_obj = self.pool.get('account.bank.statement.line')
        line_obj.write(cr, uid, line_ids, {'state': 'confirm'}, context=context)
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ryfeus/lambda-packs | refs/heads/master | LightGBM_sklearn_scipy_numpy/source/numpy/matrixlib/defmatrix.py | 10 | from __future__ import division, absolute_import, print_function
__all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
import ast
import numpy.core.numeric as N
from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray
from numpy.core.numerictypes import issubdtype
def _convert_from_string(data):
for char in '[]':
data = data.replace(char, '')
rows = data.split(';')
newdata = []
count = 0
for row in rows:
trow = row.split(',')
newrow = []
for col in trow:
temp = col.split()
newrow.extend(map(ast.literal_eval, temp))
if count == 0:
Ncols = len(newrow)
elif len(newrow) != Ncols:
raise ValueError("Rows not the same size.")
count += 1
newdata.append(newrow)
return newdata
def asmatrix(data, dtype=None):
    """
    Interpret the input as a matrix.

    Equivalent to ``matrix(data, copy=False)``: unlike `matrix`, no copy
    is performed when the input is already a matrix or an ndarray.

    Parameters
    ----------
    data : array_like
        Input data.
    dtype : data-type
        Data-type of the output matrix.

    Returns
    -------
    mat : matrix
        `data` interpreted as a matrix.

    Examples
    --------
    >>> x = np.array([[1, 2], [3, 4]])
    >>> m = np.asmatrix(x)
    >>> x[0,0] = 5
    >>> m
    matrix([[5, 2],
            [3, 4]])

    """
    result = matrix(data, dtype=dtype, copy=False)
    return result
def matrix_power(M, n):
    """
    Raise a square matrix to the (integer) power `n`.

    Uses repeated squaring (binary decomposition) for ``n > 3``.
    ``n == 0`` yields the identity of the same shape as `M`; a negative
    exponent inverts `M` first and raises the inverse to ``abs(n)``.

    Parameters
    ----------
    M : ndarray or matrix object
        Square matrix, i.e. ``M.shape == (m, m)`` with `m` a positive
        integer.
    n : int
        Any integer exponent: positive, negative, or zero.

    Returns
    -------
    M**n : ndarray or matrix object
        Same shape and type as `M`; elements are floating-point when the
        exponent is negative.

    Raises
    ------
    LinAlgError
        If the matrix is not numerically invertible.

    See Also
    --------
    matrix
        Provides an equivalent function as the exponentiation operator
        (``**``, not ``^``).
    """
    M = asanyarray(M)
    if M.ndim != 2 or M.shape[0] != M.shape[1]:
        raise ValueError("input must be a square array")
    if not issubdtype(type(n), N.integer):
        raise TypeError("exponent must be an integer")

    from numpy.linalg import inv

    if n == 0:
        # Identity with the same shape and subclass as M.
        M = M.copy()
        M[:] = identity(M.shape[0])
        return M
    if n < 0:
        M = inv(M)
        n = -n

    # Small exponents: plain repeated multiplication.
    if n <= 3:
        acc = M
        for _ in range(n - 1):
            acc = N.dot(acc, M)
        return acc

    # Binary decomposition keeps the multiplication count logarithmic
    # for n > 3.
    bits = binary_repr(n)
    nbits = len(bits)
    sq, skipped = M, 0
    # Skip trailing zero bits by squaring only.
    while bits[nbits - skipped - 1] == '0':
        sq = N.dot(sq, sq)
        skipped += 1
    acc = sq
    for k in range(skipped + 1, nbits):
        sq = N.dot(sq, sq)
        if bits[nbits - k - 1] == '1':
            acc = N.dot(acc, sq)
    return acc
class matrix(N.ndarray):
"""
matrix(data, dtype=None, copy=True)
Returns a matrix from an array-like object, or from a string of data.
A matrix is a specialized 2-D array that retains its 2-D nature
through operations. It has certain special operators, such as ``*``
(matrix multiplication) and ``**`` (matrix power).
Parameters
----------
data : array_like or string
If `data` is a string, it is interpreted as a matrix with commas
or spaces separating columns, and semicolons separating rows.
dtype : data-type
Data-type of the output matrix.
copy : bool
If `data` is already an `ndarray`, then this flag determines
whether the data is copied (the default), or whether a view is
constructed.
See Also
--------
array
Examples
--------
>>> a = np.matrix('1 2; 3 4')
>>> print(a)
[[1 2]
[3 4]]
>>> np.matrix([[1, 2], [3, 4]])
matrix([[1, 2],
[3, 4]])
"""
__array_priority__ = 10.0
    def __new__(subtype, data, dtype=None, copy=True):
        """Create a matrix from another matrix, an ndarray, a string, or
        any array-like; the result is always forced to 2-D."""
        if isinstance(data, matrix):
            dtype2 = data.dtype
            if (dtype is None):
                dtype = dtype2
            if (dtype2 == dtype) and (not copy):
                # Same dtype and no copy requested: reuse the instance.
                return data
            return data.astype(dtype)

        if isinstance(data, N.ndarray):
            if dtype is None:
                intype = data.dtype
            else:
                intype = N.dtype(dtype)
            # View as the matrix subclass; copy/cast only when needed.
            new = data.view(subtype)
            if intype != data.dtype:
                return new.astype(intype)
            if copy: return new.copy()
            else: return new

        if isinstance(data, str):
            # MATLAB-style string, e.g. "1 2; 3 4".
            data = _convert_from_string(data)

        # now convert data to an array
        arr = N.array(data, dtype=dtype, copy=copy)
        ndim = arr.ndim
        shape = arr.shape
        if (ndim > 2):
            raise ValueError("matrix must be 2-dimensional")
        elif ndim == 0:
            shape = (1, 1)
        elif ndim == 1:
            shape = (1, shape[0])

        order = 'C'
        if (ndim == 2) and arr.flags.fortran:
            order = 'F'

        # NOTE(review): `order` is always a non-empty string here, so this
        # condition is never true and the copy never runs -- confirm
        # whether `arr.flags.contiguous` alone was intended.
        if not (order or arr.flags.contiguous):
            arr = arr.copy()

        ret = N.ndarray.__new__(subtype, shape, arr.dtype,
                                buffer=arr,
                                order=order)
        return ret
    def __array_finalize__(self, obj):
        """Force any freshly-created view or result into 2-D matrix shape."""
        self._getitem = False
        # Skip reshaping while __getitem__ is in flight on the parent.
        if (isinstance(obj, matrix) and obj._getitem): return
        ndim = self.ndim
        if (ndim == 2):
            return
        if (ndim > 2):
            # Drop length-1 axes; only a genuinely 2-D result is allowed.
            newshape = tuple([x for x in self.shape if x > 1])
            ndim = len(newshape)
            if ndim == 2:
                self.shape = newshape
                return
            elif (ndim > 2):
                raise ValueError("shape too large to be a matrix.")
        else:
            newshape = self.shape
        if ndim == 0:
            # Scalar -> 1x1 matrix.
            self.shape = (1, 1)
        elif ndim == 1:
            # 1-D -> row vector.
            self.shape = (1, newshape[0])
        return
    def __getitem__(self, index):
        """ndarray indexing with matrix semantics: 0-d results are
        unwrapped to scalars, 1-D results become row or column matrices."""
        # Flag tells __array_finalize__ to skip the 2-D reshaping while
        # the base-class lookup runs.
        self._getitem = True

        try:
            out = N.ndarray.__getitem__(self, index)
        finally:
            self._getitem = False

        if not isinstance(out, N.ndarray):
            return out

        if out.ndim == 0:
            return out[()]
        if out.ndim == 1:
            sh = out.shape[0]
            # Determine when we should have a column array
            try:
                n = len(index)
            except Exception:
                n = 0
            if n > 1 and isscalar(index[1]):
                # A scalar second index selects a column -> shape (sh, 1).
                out.shape = (sh, 1)
            else:
                out.shape = (1, sh)
        return out
def __mul__(self, other):
if isinstance(other, (N.ndarray, list, tuple)) :
# This promotes 1-D vectors to row vectors
return N.dot(self, asmatrix(other))
if isscalar(other) or not hasattr(other, '__rmul__') :
return N.dot(self, other)
return NotImplemented
    def __rmul__(self, other):
        """Reflected multiplication: ``other * self`` as a dot product."""
        return N.dot(other, self)
    def __imul__(self, other):
        """In-place ``*=``: delegates to __mul__ and stores the result."""
        self[:] = self * other
        return self
    def __pow__(self, other):
        """Matrix power ``self ** other`` via matrix_power."""
        return matrix_power(self, other)
    def __ipow__(self, other):
        """In-place ``**=``: delegates to __pow__ and stores the result."""
        self[:] = self ** other
        return self
    def __rpow__(self, other):
        """Reflected ``**`` is not supported for matrix exponents."""
        return NotImplemented
def _align(self, axis):
"""A convenience function for operations that need to preserve axis
orientation.
"""
if axis is None:
return self[0, 0]
elif axis==0:
return self
elif axis==1:
return self.transpose()
else:
raise ValueError("unsupported axis")
def _collapse(self, axis):
"""A convenience function for operations that want to collapse
to a scalar like _align, but are using keepdims=True
"""
if axis is None:
return self[0, 0]
else:
return self
# Necessary because base-class tolist expects dimension
# reduction by x[0]
def tolist(self):
"""
Return the matrix as a (possibly nested) list.
See `ndarray.tolist` for full documentation.
See Also
--------
ndarray.tolist
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.tolist()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
"""
return self.__array__().tolist()
# To preserve orientation of result...
def sum(self, axis=None, dtype=None, out=None):
"""
Returns the sum of the matrix elements, along the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum
Notes
-----
This is the same as `ndarray.sum`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix([[1, 2], [4, 3]])
>>> x.sum()
10
>>> x.sum(axis=1)
matrix([[3],
[7]])
>>> x.sum(axis=1, dtype='float')
matrix([[ 3.],
[ 7.]])
>>> out = np.zeros((1, 2), dtype='float')
>>> x.sum(axis=1, dtype='float', out=out)
matrix([[ 3.],
[ 7.]])
"""
return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis)
# To update docstring from array to matrix...
def squeeze(self, axis=None):
"""
Return a possibly reshaped matrix.
Refer to `numpy.squeeze` for more documentation.
Parameters
----------
axis : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the shape.
If an axis is selected with shape entry greater than one,
an error is raised.
Returns
-------
squeezed : matrix
The matrix, but as a (1, N) matrix if it had shape (N, 1).
See Also
--------
numpy.squeeze : related function
Notes
-----
If `m` has a single column then that column is returned
as the single row of a matrix. Otherwise `m` is returned.
The returned matrix is always either `m` itself or a view into `m`.
Supplying an axis keyword argument will not affect the returned matrix
but it may cause an error to be raised.
Examples
--------
>>> c = np.matrix([[1], [2]])
>>> c
matrix([[1],
[2]])
>>> c.squeeze()
matrix([[1, 2]])
>>> r = c.T
>>> r
matrix([[1, 2]])
>>> r.squeeze()
matrix([[1, 2]])
>>> m = np.matrix([[1, 2], [3, 4]])
>>> m.squeeze()
matrix([[1, 2],
[3, 4]])
"""
return N.ndarray.squeeze(self, axis=axis)
# To update docstring from array to matrix...
def flatten(self, order='C'):
"""
Return a flattened copy of the matrix.
All `N` elements of the matrix are placed into a single row.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order. 'F' means to
flatten in column-major (Fortran-style) order. 'A' means to
flatten in column-major order if `m` is Fortran *contiguous* in
memory, row-major order otherwise. 'K' means to flatten `m` in
the order the elements occur in memory. The default is 'C'.
Returns
-------
y : matrix
A copy of the matrix, flattened to a `(1, N)` matrix where `N`
is the number of elements in the original matrix.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the matrix.
Examples
--------
>>> m = np.matrix([[1,2], [3,4]])
>>> m.flatten()
matrix([[1, 2, 3, 4]])
>>> m.flatten('F')
matrix([[1, 3, 2, 4]])
"""
return N.ndarray.flatten(self, order=order)
def mean(self, axis=None, dtype=None, out=None):
"""
Returns the average of the matrix elements along the given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean
Notes
-----
Same as `ndarray.mean` except that, where that returns an `ndarray`,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.mean()
5.5
>>> x.mean(0)
matrix([[ 4., 5., 6., 7.]])
>>> x.mean(1)
matrix([[ 1.5],
[ 5.5],
[ 9.5]])
"""
return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis)
def std(self, axis=None, dtype=None, out=None, ddof=0):
"""
Return the standard deviation of the array elements along the given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std
Notes
-----
This is the same as `ndarray.std`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.std()
3.4520525295346629
>>> x.std(0)
matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]])
>>> x.std(1)
matrix([[ 1.11803399],
[ 1.11803399],
[ 1.11803399]])
"""
return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def var(self, axis=None, dtype=None, out=None, ddof=0):
"""
Returns the variance of the matrix elements, along the given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var
Notes
-----
This is the same as `ndarray.var`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.var()
11.916666666666666
>>> x.var(0)
matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]])
>>> x.var(1)
matrix([[ 1.25],
[ 1.25],
[ 1.25]])
"""
return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def prod(self, axis=None, dtype=None, out=None):
"""
Return the product of the array elements over the given axis.
Refer to `prod` for full documentation.
See Also
--------
prod, ndarray.prod
Notes
-----
Same as `ndarray.prod`, except, where that returns an `ndarray`, this
returns a `matrix` object instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.prod()
0
>>> x.prod(0)
matrix([[ 0, 45, 120, 231]])
>>> x.prod(1)
matrix([[ 0],
[ 840],
[7920]])
"""
return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis)
def any(self, axis=None, out=None):
"""
Test whether any array element along a given axis evaluates to True.
Refer to `numpy.any` for full documentation.
Parameters
----------
axis : int, optional
Axis along which logical OR is performed
out : ndarray, optional
Output to existing array instead of creating new one, must have
same shape as expected output
Returns
-------
any : bool, ndarray
Returns a single bool if `axis` is ``None``; otherwise,
returns `ndarray`
"""
return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis)
def all(self, axis=None, out=None):
"""
Test whether all matrix elements along a given axis evaluate to True.
Parameters
----------
See `numpy.all` for complete descriptions
See Also
--------
numpy.all
Notes
-----
This is the same as `ndarray.all`, but it returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> y = x[0]; y
matrix([[0, 1, 2, 3]])
>>> (x == y)
matrix([[ True, True, True, True],
[False, False, False, False],
[False, False, False, False]])
>>> (x == y).all()
False
>>> (x == y).all(0)
matrix([[False, False, False, False]])
>>> (x == y).all(1)
matrix([[ True],
[False],
[False]])
"""
return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis)
def max(self, axis=None, out=None):
"""
Return the maximum value along an axis.
Parameters
----------
See `amax` for complete descriptions
See Also
--------
amax, ndarray.max
Notes
-----
This is the same as `ndarray.max`, but returns a `matrix` object
where `ndarray.max` would return an ndarray.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.max()
11
>>> x.max(0)
matrix([[ 8, 9, 10, 11]])
>>> x.max(1)
matrix([[ 3],
[ 7],
[11]])
"""
return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis)
def argmax(self, axis=None, out=None):
    """
    Indexes of the first occurrences of the maximum values along an axis.

    When `axis` is ``None`` the index refers to the flattened matrix.
    Identical to `ndarray.argmax` except that the result is re-aligned into
    a `matrix` of the appropriate orientation.

    Parameters
    ----------
    axis : int, optional
        Axis along which to search.
    out : ndarray, optional
        Preallocated output array of the expected shape.

    See Also
    --------
    numpy.argmax

    Examples
    --------
    >>> x = np.matrix(np.arange(12).reshape((3,4)))
    >>> x.argmax()
    11
    >>> x.argmax(0)
    matrix([[2, 2, 2, 2]])
    """
    # _align keeps row/column orientation consistent with the reduced axis.
    indices = N.ndarray.argmax(self, axis, out)
    return indices._align(axis)
def min(self, axis=None, out=None):
    """
    Return the minimum value along an axis.

    Identical to `ndarray.min` except that axis reductions return a
    `matrix` rather than an `ndarray`.

    Parameters
    ----------
    axis : int, optional
        Axis along which to take the minimum.
    out : ndarray, optional
        Preallocated output array of the expected shape.

    See Also
    --------
    amin, ndarray.min

    Examples
    --------
    >>> x = -np.matrix(np.arange(12).reshape((3,4)))
    >>> x.min()
    -11
    >>> x.min(1)
    matrix([[ -3],
            [ -7],
            [-11]])
    """
    # Reduce with keepdims, then collapse a full reduction to a scalar.
    reduced = N.ndarray.min(self, axis, out, keepdims=True)
    return reduced._collapse(axis)
def argmin(self, axis=None, out=None):
    """
    Indexes of the first occurrences of the minimum values along an axis.

    When `axis` is ``None`` the index refers to the flattened matrix.
    Identical to `ndarray.argmin` except that the result is re-aligned into
    a `matrix` of the appropriate orientation.

    Parameters
    ----------
    axis : int, optional
        Axis along which to search.
    out : ndarray, optional
        Preallocated output array of the expected shape.

    See Also
    --------
    numpy.argmin

    Examples
    --------
    >>> x = -np.matrix(np.arange(12).reshape((3,4)))
    >>> x.argmin()
    11
    >>> x.argmin(0)
    matrix([[2, 2, 2, 2]])
    """
    # _align keeps row/column orientation consistent with the reduced axis.
    indices = N.ndarray.argmin(self, axis, out)
    return indices._align(axis)
def ptp(self, axis=None, out=None):
    """
    Peak-to-peak (maximum - minimum) value along the given axis.

    Refer to `numpy.ptp` for full documentation.

    See Also
    --------
    numpy.ptp

    Notes
    -----
    Same as `ndarray.ptp`, except, where that would return an `ndarray`
    object, this returns a `matrix` object.

    Examples
    --------
    >>> x = np.matrix(np.arange(12).reshape((3,4))); x
    matrix([[ 0,  1,  2,  3],
            [ 4,  5,  6,  7],
            [ 8,  9, 10, 11]])
    >>> x.ptp()
    11
    >>> x.ptp(0)
    matrix([[8, 8, 8, 8]])
    >>> x.ptp(1)
    matrix([[3],
            [3],
            [3]])
    """
    # NOTE(review): relies on the ndarray.ptp *method*, which was removed in
    # NumPy 2.0 (np.ptp(arr) is the replacement) -- confirm the supported
    # NumPy version range for this file.
    # _align reshapes the reduced result into a row/column matrix matching
    # the reduced axis.
    return N.ndarray.ptp(self, axis, out)._align(axis)
def getI(self):
    """
    Returns the (multiplicative) inverse of invertible `self`.

    For a square matrix the true inverse is computed; for a non-square
    matrix the Moore-Penrose pseudo-inverse is returned instead.

    Parameters
    ----------
    None

    Returns
    -------
    ret : matrix object
        If `self` is non-singular, `ret` is such that ``ret * self`` ==
        ``self * ret`` == ``np.matrix(np.eye(self[0,:].size)`` all return
        ``True``.

    Raises
    ------
    numpy.linalg.LinAlgError: Singular matrix
        If `self` is square and singular.

    See Also
    --------
    linalg.inv

    Examples
    --------
    >>> m = np.matrix('[1, 2; 3, 4]'); m
    matrix([[1, 2],
            [3, 4]])
    >>> m.getI()
    matrix([[-2. ,  1. ],
            [ 1.5, -0.5]])
    >>> m.getI() * m
    matrix([[ 1.,  0.],
            [ 0.,  1.]])
    """
    # FIX: locals previously named `M, N`, shadowing the module's numpy
    # alias `N`; renamed to avoid the shadow.
    # FIX: imports previously came from `numpy.dual`, which is deprecated
    # and removed in modern NumPy; `numpy.linalg` provides the same
    # functions (numpy.dual merely dispatched to them, or to SciPy when
    # registered).
    nrows, ncols = self.shape
    if nrows == ncols:
        from numpy.linalg import inv as func
    else:
        # Non-square: fall back to the pseudo-inverse.
        from numpy.linalg import pinv as func
    return asmatrix(func(self))
def getA(self):
    """
    Return `self` as an `ndarray` object.

    Equivalent to ``np.asarray(self)``.

    Parameters
    ----------
    None

    Returns
    -------
    ret : ndarray
        `self` as a base-class `ndarray` (a view, not a copy).

    Examples
    --------
    >>> x = np.matrix(np.arange(12).reshape((3,4)))
    >>> x.getA()
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11]])
    """
    # asarray strips the matrix subclass without copying the data.
    return N.asarray(self)
def getA1(self):
    """
    Return `self` as a flattened 1-D `ndarray`.

    Equivalent to ``np.asarray(x).ravel()``.

    Parameters
    ----------
    None

    Returns
    -------
    ret : ndarray
        `self`, 1-D, as an `ndarray`.

    Examples
    --------
    >>> x = np.matrix(np.arange(12).reshape((3,4)))
    >>> x.getA1()
    array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
    """
    # Strip the matrix subclass first so ravel yields a true 1-D array
    # (matrix.ravel would stay 2-D).
    base = N.asarray(self)
    return base.ravel()
def ravel(self, order='C'):
    """
    Return a flattened matrix.

    Refer to `numpy.ravel` for more documentation.

    Parameters
    ----------
    order : {'C', 'F', 'A', 'K'}, optional
        Index order used to read the elements.  'C' (default) varies the
        last axis fastest; 'F' varies the first axis fastest; 'A' uses
        Fortran order if the data is Fortran-contiguous in memory, C order
        otherwise; 'K' follows memory order, reversing negative strides.

    Returns
    -------
    ret : matrix
        The matrix flattened to shape `(1, N)` where `N` is the number of
        elements in the original matrix.  A copy is made only if necessary.

    See Also
    --------
    matrix.flatten : similar output matrix but always a copy
    matrix.flat : a flat iterator on the array.
    numpy.ravel : related function which returns an ndarray
    """
    # Delegating to ndarray.ravel preserves the matrix subclass, so the
    # result stays 2-D with a single row.
    flattened = N.ndarray.ravel(self, order=order)
    return flattened
def getT(self):
    """
    Returns the transpose of the matrix.

    Does *not* conjugate!  For the complex conjugate transpose, use ``.H``.

    Parameters
    ----------
    None

    Returns
    -------
    ret : matrix object
        The (non-conjugated) transpose of the matrix.

    See Also
    --------
    transpose, getH

    Examples
    --------
    >>> m = np.matrix('[1, 2; 3, 4]')
    >>> m.getT()
    matrix([[1, 3],
            [2, 4]])
    """
    # Plain axis-swap; conjugation is deliberately left to getH.
    transposed = self.transpose()
    return transposed
def getH(self):
    """
    Returns the (complex) conjugate transpose of `self`.

    Equivalent to ``np.transpose(self)`` if `self` is real-valued.

    Parameters
    ----------
    None

    Returns
    -------
    ret : matrix object
        Complex conjugate transpose of `self`.

    Examples
    --------
    >>> x = np.matrix(np.arange(12).reshape((3,4)))
    >>> z = x - 1j*x
    >>> z.getH()
    matrix([[ 0. +0.j,  4. +4.j,  8. +8.j],
            [ 1. +1.j,  5. +5.j,  9. +9.j],
            [ 2. +2.j,  6. +6.j, 10.+10.j],
            [ 3. +3.j,  7. +7.j, 11.+11.j]])
    """
    # Transpose first; conjugate only when the dtype is actually complex,
    # avoiding a needless pass over real data.
    result = self.transpose()
    if issubclass(self.dtype.type, N.complexfloating):
        result = result.conjugate()
    return result
# Read-only convenience aliases for the get* accessors above:
T = property(getT, None)    # transpose
A = property(getA, None)    # self as a base-class ndarray
A1 = property(getA1, None)  # self as a flattened 1-D ndarray
H = property(getH, None)    # conjugate transpose
I = property(getI, None)    # inverse (pseudo-inverse when non-square)
def _from_string(data, gdict, ldict):
    """Build a block matrix from a ``'A, B; C, D'``-style string.

    Rows are separated by ``';'``; within a row, names are separated by
    commas and/or whitespace.  Each name is resolved first against
    `ldict`, then against `gdict`.

    Parameters
    ----------
    data : str
        The block specification string.
    gdict : dict
        Global-scope name lookup (fallback).
    ldict : dict
        Local-scope name lookup (tried first).

    Raises
    ------
    KeyError
        If a referenced name is found in neither dictionary.
    """
    # FIX: the parameter was previously named `str`, shadowing the builtin.
    rowtup = []
    for row in data.split(';'):
        # Split on commas, then whitespace, so "a, b" and "a b" both work.
        names = []
        for piece in row.split(','):
            names.extend(piece.split())
        coltup = []
        for col in names:
            col = col.strip()
            try:
                thismat = ldict[col]
            except KeyError:
                try:
                    thismat = gdict[col]
                except KeyError:
                    raise KeyError("%s not found" % (col,))
            coltup.append(thismat)
        rowtup.append(concatenate(coltup, axis=-1))
    return concatenate(rowtup, axis=0)
def bmat(obj, ldict=None, gdict=None):
    """
    Build a matrix object from a string, nested sequence, or array.

    Parameters
    ----------
    obj : str or array_like
        Input data.  If a string, variables in the current scope may be
        referenced by name.
    ldict : dict, optional
        A dictionary that replaces local operands in current frame.
        Ignored if `obj` is not a string or `gdict` is `None`.
    gdict : dict, optional
        A dictionary that replaces global operands in current frame.
        Ignored if `obj` is not a string.

    Returns
    -------
    out : matrix
        A matrix assembled from the given blocks.

    See Also
    --------
    block : N-d generalization returning plain ndarrays.

    Examples
    --------
    >>> A = np.mat('1 1; 1 1'); B = np.mat('2 2; 2 2')
    >>> np.bmat([[A, B]])
    matrix([[1, 1, 2, 2],
            [1, 1, 2, 2]])
    """
    if isinstance(obj, str):
        if gdict is None:
            # Resolve names against the caller's frame by default.
            caller = sys._getframe().f_back
            glob_dict = caller.f_globals
            loc_dict = caller.f_locals
        else:
            glob_dict = gdict
            loc_dict = ldict
        return matrix(_from_string(obj, glob_dict, loc_dict))
    if isinstance(obj, (tuple, list)):
        # Nested form [[A, B], [C, D]]; a flat [A, B] is detected when the
        # first "row" turns out to be an ndarray rather than a sequence.
        row_blocks = []
        for row in obj:
            if isinstance(row, N.ndarray):
                return matrix(concatenate(obj, axis=-1))
            row_blocks.append(concatenate(row, axis=-1))
        return matrix(concatenate(row_blocks, axis=0))
    if isinstance(obj, N.ndarray):
        return matrix(obj)
mat = asmatrix
|
OCA/bank-payment | refs/heads/12.0 | account_payment_sale/models/sale_order.py | 1 | # Copyright 2014-2016 Akretion - Alexis de Lattre
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api
class SaleOrder(models.Model):
    """Extend sale.order with a payment mode that is propagated to the
    invoices generated from the order."""
    _inherit = "sale.order"

    # Inbound-only payment mode; defaulted from the customer on change of
    # partner (see onchange_partner_id below).
    payment_mode_id = fields.Many2one(
        'account.payment.mode', string='Payment Mode',
        domain=[('payment_type', '=', 'inbound')])

    def _get_payment_mode_vals(self, vals):
        """Inject payment mode (and, when applicable, the fixed bank
        account) into an invoice values dict and return it."""
        if self.payment_mode_id:
            vals['payment_mode_id'] = self.payment_mode_id.id
            # Only a fixed-journal, manual payment method pins a specific
            # receiving bank account on the invoice.
            if (self.payment_mode_id.bank_account_link == 'fixed' and
                    self.payment_mode_id.payment_method_id.code == 'manual'):
                vals['partner_bank_id'] =\
                    self.payment_mode_id.fixed_journal_id.bank_account_id.id
        return vals

    @api.onchange('partner_id')
    def onchange_partner_id(self):
        # Default the order's payment mode from the customer, evaluated in
        # the order's company context (payment mode is company-dependent).
        res = super().onchange_partner_id()
        if self.partner_id:
            self.payment_mode_id = self.partner_id.with_context(
                force_company=self.company_id.id
            ).customer_payment_mode_id
        else:
            self.payment_mode_id = False
        return res

    @api.multi
    def _prepare_invoice(self):
        """Copy bank partner from sale order to invoice"""
        vals = super()._prepare_invoice()
        return self._get_payment_mode_vals(vals)

    def _finalize_invoices(self, invoices, references):
        """
        Invoked after creating invoices at the end of action_invoice_create.

        We must override this method since the onchange on partner is called
        by the base method and therefore will change the specific
        payment_mode set on the SO if one is defined on the partner.

        :param invoices: {group_key: invoice}
        :param references: {invoice: order}
        """
        # Snapshot the values set by _prepare_invoice so they can be
        # restored if the base class's partner onchange clobbers them.
        payment_vals_by_invoice = {}
        for invoice in invoices.values():
            payment_vals_by_invoice[invoice] = {
                'payment_mode_id': invoice.payment_mode_id.id,
                'partner_bank_id': invoice.partner_bank_id.id
            }
        res = super()._finalize_invoices(invoices, references)
        for invoice in invoices.values():
            payment_vals = payment_vals_by_invoice[invoice]
            # Write back only the fields the super() call actually changed.
            if invoice.payment_mode_id.id == payment_vals['payment_mode_id']:
                payment_vals.pop("payment_mode_id")
            if invoice.partner_bank_id.id == payment_vals["partner_bank_id"]:
                payment_vals.pop("partner_bank_id")
            if payment_vals:
                invoice.write(payment_vals)
        return res
|
jeyraof/python-social-auth | refs/heads/master | social/backends/douban.py | 82 | """
Douban OAuth1 and OAuth2 backends, docs at:
http://psa.matiasaguirre.net/docs/backends/douban.html
"""
from social.backends.oauth import BaseOAuth2, BaseOAuth1
class DoubanOAuth(BaseOAuth1):
    """Douban OAuth authentication backend"""
    name = 'douban'
    # Persist the numeric account id alongside the token data.
    EXTRA_DATA = [('id', 'id')]
    AUTHORIZATION_URL = 'http://www.douban.com/service/auth/authorize'
    REQUEST_TOKEN_URL = 'http://www.douban.com/service/auth/request_token'
    ACCESS_TOKEN_URL = 'http://www.douban.com/service/auth/access_token'

    def get_user_id(self, details, response):
        # The v1 API's Atom-style JSON nests the uid as {'db:uid': {'$t': ...}}.
        return response['db:uid']['$t']

    def get_user_details(self, response):
        """Return user details from Douban"""
        # The v1 API exposes no email address, so it is left empty.
        return {'username': response["db:uid"]["$t"],
                'email': ''}

    def user_data(self, access_token, *args, **kwargs):
        """Return user data provided"""
        return self.get_json('http://api.douban.com/people/%40me?&alt=json',
                             auth=self.oauth_auth(access_token))
class DoubanOAuth2(BaseOAuth2):
    """Douban OAuth2 authentication backend."""
    name = 'douban-oauth2'
    AUTHORIZATION_URL = 'https://www.douban.com/service/auth2/auth'
    ACCESS_TOKEN_URL = 'https://www.douban.com/service/auth2/token'
    ACCESS_TOKEN_METHOD = 'POST'
    REDIRECT_STATE = False
    EXTRA_DATA = [
        ('id', 'id'),
        ('uid', 'username'),
        ('refresh_token', 'refresh_token'),
    ]

    def get_user_details(self, response):
        """Map the Douban profile payload onto the common user-detail keys."""
        name_parts = self.get_user_names(response.get('name', ''))
        fullname, first_name, last_name = name_parts
        details = {
            'username': response.get('uid', ''),
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name,
            # No email is exposed by the API.
            'email': '',
        }
        return details

    def user_data(self, access_token, *args, **kwargs):
        """Fetch the authenticated user's profile from the v2 API."""
        headers = {'Authorization': 'Bearer {0}'.format(access_token)}
        return self.get_json(
            'https://api.douban.com/v2/user/~me',
            headers=headers
        )
|
xfournet/intellij-community | refs/heads/master | python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_0/_pkg0_0_0/_pkg0_0_0_0/_pkg0_0_0_0_0/_mod0_0_0_0_0_1.py | 30 | name0_0_0_0_0_1_0 = None
# Auto-generated fixture: module-level names consumed by the IDE's
# star-import completion tests; the values are intentionally None.
name0_0_0_0_0_1_1 = None
name0_0_0_0_0_1_2 = None
name0_0_0_0_0_1_3 = None
name0_0_0_0_0_1_4 = None
CryptoCoderz/INSN | refs/heads/master | share/qt/make_spinner.py | 4415 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
#
# Renders NUMFRAMES rotated copies of SRC with PIL, then shells out to
# ImageMagick's `convert` to assemble them into an MNG animation at DST.
# NOTE(review): Python 2 script (uses `xrange`); run under a py2 interpreter.
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen

SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'  # per-frame temp file pattern
NUMFRAMES=35
FRAMERATE=10.0  # passed to `convert -delay`
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)  # final frame size in pixels

im_src = Image.open(SRC)

# Mirror the source first; combined with the negated angle below this
# produces the clockwise spin.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    # Path of the temp file holding frame number `frame`.
    return path.join(TMPDIR, TMPNAME % frame)

frame_files = []
for frame in xrange(NUMFRAMES):
    # Offset by half a step so the rotation is centered within each frame.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

# "-dispose 2" controls per-frame disposal so frames don't stack visually.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
cryptodev35/Shark | refs/heads/master | share/qt/make_spinner.py | 4415 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
#
# Renders NUMFRAMES rotated copies of SRC with PIL, then shells out to
# ImageMagick's `convert` to assemble them into an MNG animation at DST.
# NOTE(review): Python 2 script (uses `xrange`); run under a py2 interpreter.
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen

SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'  # per-frame temp file pattern
NUMFRAMES=35
FRAMERATE=10.0  # passed to `convert -delay`
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)  # final frame size in pixels

im_src = Image.open(SRC)

# Mirror the source first; combined with the negated angle below this
# produces the clockwise spin.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    # Path of the temp file holding frame number `frame`.
    return path.join(TMPDIR, TMPNAME % frame)

frame_files = []
for frame in xrange(NUMFRAMES):
    # Offset by half a step so the rotation is centered within each frame.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

# "-dispose 2" controls per-frame disposal so frames don't stack visually.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
|
MarcosCommunity/odoo | refs/heads/marcos-8.0 | addons/l10n_bo/__openerp__.py | 259 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    "name": "Bolivia Localization Chart Account",
    "version": "1.0",
    "description": """
Bolivian accounting chart and tax localization.
Plan contable boliviano e impuestos de acuerdo a disposiciones vigentes
    """,
    "author": "Cubic ERP",
    "website": "http://cubicERP.com",
    "category": "Localization/Account Charts",
    "depends": [
        "account_chart",
    ],
    # FIX: "data" was declared twice in this manifest; the second, empty
    # declaration overwrote this list, so none of the localization XML
    # files were actually loaded on install.
    "data": [
        "account_tax_code.xml",
        "l10n_bo_chart.xml",
        "account_tax.xml",
        "l10n_bo_wizard.xml",
    ],
    "demo_xml": [
    ],
    "active": False,
    "installable": True,
    "certificate": "",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ESS-LLP/erpnext | refs/heads/develop | erpnext/projects/doctype/project_user/project_user.py | 56 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ProjectUser(Document):
    # Controller for the Project User child table; no Python-side behavior.
    # NOTE(review): presumably the fields are declared in the DocType JSON
    # schema, per Frappe convention -- confirm.
    pass
|
ogenstad/ansible | refs/heads/devel | lib/ansible/utils/module_docs_fragments/rackspace.py | 45 | # (c) 2014, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    # YAML snippets merged into rax_* modules' DOCUMENTATION via
    # `extends_documentation_fragment`; the strings are data consumed by
    # Ansible's documentation tooling, not executed code.

    # Standard Rackspace only documentation fragment
    DOCUMENTATION = """
options:
  api_key:
    description:
      - Rackspace API key, overrides I(credentials).
    aliases:
      - password
  credentials:
    description:
      - File to find the Rackspace credentials in. Ignored if I(api_key) and
        I(username) are provided.
    aliases:
      - creds_file
  env:
    description:
      - Environment as configured in I(~/.pyrax.cfg),
        see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
    version_added: 1.5
  region:
    description:
      - Region to create an instance in.
    default: DFW
  username:
    description:
      - Rackspace username, overrides I(credentials).
  verify_ssl:
    description:
      - Whether or not to require SSL validation of API endpoints.
    version_added: 1.5
requirements:
  - "python >= 2.6"
  - pyrax
notes:
  - The following environment variables can be used, C(RAX_USERNAME),
    C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
  - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
    appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
  - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
  - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
"""

    # Documentation fragment including attributes to enable communication
    # of other OpenStack clouds. Not all rax modules support this.
    OPENSTACK = """
options:
  api_key:
    description:
      - Rackspace API key, overrides I(credentials).
    aliases:
      - password
  auth_endpoint:
    description:
      - The URI of the authentication service.
    default: https://identity.api.rackspacecloud.com/v2.0/
    version_added: 1.5
  credentials:
    description:
      - File to find the Rackspace credentials in. Ignored if I(api_key) and
        I(username) are provided.
    aliases:
      - creds_file
  env:
    description:
      - Environment as configured in I(~/.pyrax.cfg),
        see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
    version_added: 1.5
  identity_type:
    description:
      - Authentication mechanism to use, such as rackspace or keystone.
    default: rackspace
    version_added: 1.5
  region:
    description:
      - Region to create an instance in.
    default: DFW
  tenant_id:
    description:
      - The tenant ID used for authentication.
    version_added: 1.5
  tenant_name:
    description:
      - The tenant name used for authentication.
    version_added: 1.5
  username:
    description:
      - Rackspace username, overrides I(credentials).
  verify_ssl:
    description:
      - Whether or not to require SSL validation of API endpoints.
    version_added: 1.5
requirements:
  - "python >= 2.6"
  - pyrax
notes:
  - The following environment variables can be used, C(RAX_USERNAME),
    C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
  - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
    appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
  - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
  - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
"""
|
joequery/django | refs/heads/master | django/template/base.py | 91 | """
This is the Django template system.
How it works:
The Lexer.tokenize() function converts a template string (i.e., a string containing
markup with custom template tags) to tokens, which can be either plain text
(TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK).
The Parser() class takes a list of tokens in its constructor, and its parse()
method returns a compiled template -- which is, under the hood, a list of
Node objects.
Each Node is responsible for creating some sort of output -- e.g. simple text
(TextNode), variable values in a given context (VariableNode), results of basic
logic (IfNode), results of looping (ForNode), or anything else. The core Node
types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can
define their own custom node types.
Each Node has a render() method, which takes a Context and returns a string of
the rendered node. For example, the render() method of a Variable Node returns
the variable's value as a string. The render() method of a ForNode returns the
rendered output of whatever was inside the loop, recursively.
The Template class is a convenient wrapper that takes care of template
compilation and rendering.
Usage:
The only thing you should ever use directly in this file is the Template class.
Create a compiled template object with a template_string, then call render()
with a context. In the compilation stage, the TemplateSyntaxError exception
will be raised if the template doesn't have proper syntax.
Sample code:
>>> from django import template
>>> s = '<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>'
>>> t = template.Template(s)
(t is now a compiled template, and its render() method can be called multiple
times with multiple contexts)
>>> c = template.Context({'test':True, 'varvalue': 'Hello'})
>>> t.render(c)
'<html><h1>Hello</h1></html>'
>>> c = template.Context({'test':False, 'varvalue': 'Hello'})
>>> t.render(c)
'<html></html>'
"""
from __future__ import unicode_literals
import inspect
import logging
import re
import warnings
from django.template.context import ( # NOQA: imported for backwards compatibility
BaseContext, Context, ContextPopException, RequestContext,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.formats import localize
from django.utils.html import conditional_escape, escape
from django.utils.inspect import getargspec
from django.utils.safestring import (
EscapeData, SafeData, mark_for_escaping, mark_safe,
)
from django.utils.text import (
get_text_list, smart_split, unescape_string_literal,
)
from django.utils.timezone import template_localtime
from django.utils.translation import pgettext_lazy, ugettext_lazy
from .exceptions import TemplateSyntaxError
# Token type codes used by the Lexer/Parser.
TOKEN_TEXT = 0
TOKEN_VAR = 1
TOKEN_BLOCK = 2
TOKEN_COMMENT = 3
# Human-readable names for the codes above (used by Token.__str__).
TOKEN_MAPPING = {
    TOKEN_TEXT: 'Text',
    TOKEN_VAR: 'Var',
    TOKEN_BLOCK: 'Block',
    TOKEN_COMMENT: 'Comment',
}

# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'

# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'

# match a variable or block tag and capture the entire tag, including start/end
# delimiters; non-greedy so adjacent tags on one line match separately
tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
          (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
           re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
           re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))

logger = logging.getLogger('django.template')
class TemplateEncodingError(Exception):
    # Raised by Template.__init__ when the template string is neither
    # unicode nor valid UTF-8.
    pass
@python_2_unicode_compatible
class VariableDoesNotExist(Exception):
    # `msg` is a printf-style format string and `params` its arguments;
    # they are combined lazily in __str__.
    def __init__(self, msg, params=()):
        self.msg = msg
        self.params = params

    def __str__(self):
        # errors='replace' so an undecodable param can't raise while
        # formatting the error message itself.
        return self.msg % tuple(force_text(p, errors='replace') for p in self.params)
class Origin(object):
    """Describes where a template came from: a name (usually a path),
    the template name it was requested as, and the loader that found it."""

    def __init__(self, name, template_name=None, loader=None):
        self.name = name
        self.template_name = template_name
        self.loader = loader

    def __str__(self):
        return self.name

    def __eq__(self, other):
        # Origins are equal when both name and loader match; anything that
        # isn't an Origin compares unequal.
        return (
            isinstance(other, Origin) and
            self.name == other.name and
            self.loader == other.loader
        )

    @property
    def loader_name(self):
        """Dotted path of the loader's class, or None without a loader."""
        if not self.loader:
            return None
        loader_cls = self.loader.__class__
        return '%s.%s' % (self.loader.__module__, loader_cls.__name__)
class Template(object):
    """A compiled template: parses `template_string` eagerly at
    construction time and renders the resulting nodelist on demand."""

    def __init__(self, template_string, origin=None, name=None, engine=None):
        try:
            template_string = force_text(template_string)
        except UnicodeDecodeError:
            raise TemplateEncodingError("Templates can only be constructed "
                                        "from unicode or UTF-8 strings.")
        # If Template is instantiated directly rather than from an Engine and
        # exactly one Django template engine is configured, use that engine.
        # This is required to preserve backwards-compatibility for direct use
        # e.g. Template('...').render(Context({...}))
        if engine is None:
            from .engine import Engine
            engine = Engine.get_default()
        if origin is None:
            origin = Origin(UNKNOWN_SOURCE)
        self.name = name
        self.origin = origin
        self.engine = engine
        self.source = template_string
        # Compile eagerly: syntax errors surface at construction, not render.
        self.nodelist = self.compile_nodelist()

    def __iter__(self):
        # Depth-first iteration over every node in the compiled tree.
        for node in self.nodelist:
            for subnode in node:
                yield subnode

    def _render(self, context):
        return self.nodelist.render(context)

    def render(self, context):
        "Display stage -- can be called many times"
        context.render_context.push()
        try:
            if context.template is None:
                # Outermost render: bind this template to the context so
                # nested renders reuse the same template/engine state.
                with context.bind_template(self):
                    context.template_name = self.name
                    return self._render(context)
            else:
                return self._render(context)
        finally:
            context.render_context.pop()

    def compile_nodelist(self):
        """
        Parse and compile the template source into a nodelist. If debug
        is True and an exception occurs during parsing, the exception is
        annotated with contextual line information where it occurred in the
        template source.
        """
        # DebugLexer records token positions, which get_exception_info needs.
        if self.engine.debug:
            lexer = DebugLexer(self.source)
        else:
            lexer = Lexer(self.source)

        tokens = lexer.tokenize()
        parser = Parser(
            tokens, self.engine.template_libraries, self.engine.template_builtins,
        )

        try:
            return parser.parse()
        except Exception as e:
            if self.engine.debug:
                e.template_debug = self.get_exception_info(e, e.token)
            raise

    def get_exception_info(self, exception, token):
        """
        Return a dictionary containing contextual line information of where
        the exception occurred in the template. The following information is
        provided:

        message
            The message of the exception raised.

        source_lines
            The lines before, after, and including the line the exception
            occurred on.

        line
            The line number the exception occurred on.

        before, during, after
            The line the exception occurred on split into three parts:
            1. The content before the token that raised the error.
            2. The token that raised the error.
            3. The content after the token that raised the error.

        total
            The number of lines in source_lines.

        top
            The line number where source_lines starts.

        bottom
            The line number where source_lines ends.

        start
            The start position of the token in the template source.

        end
            The end position of the token in the template source.
        """
        start, end = token.position
        context_lines = 10
        line = 0
        upto = 0
        source_lines = []
        before = during = after = ""
        # Walk line boundaries; the line containing [start, end) becomes the
        # error line. NOTE(review): `next` shadows the builtin in this loop.
        for num, next in enumerate(linebreak_iter(self.source)):
            if start >= upto and end <= next:
                line = num
                before = escape(self.source[upto:start])
                during = escape(self.source[start:end])
                after = escape(self.source[end:next])
            source_lines.append((num, escape(self.source[upto:next])))
            upto = next
        total = len(source_lines)

        top = max(1, line - context_lines)
        bottom = min(total, line + 1 + context_lines)

        # In some rare cases exc_value.args can be empty or an invalid
        # unicode string.
        try:
            message = force_text(exception.args[0])
        except (IndexError, UnicodeDecodeError):
            message = '(Could not get exception message)'

        return {
            'message': message,
            'source_lines': source_lines[top:bottom],
            'before': before,
            'during': during,
            'after': after,
            'top': top,
            'bottom': bottom,
            'total': total,
            'line': line,
            'name': self.origin.name,
            'start': start,
            'end': end,
        }
def linebreak_iter(template_source):
    """Yield the start offset of every line in `template_source`, followed
    by a final sentinel of ``len(template_source) + 1``."""
    yield 0
    for index, char in enumerate(template_source):
        if char == '\n':
            # The next line starts one past the newline.
            yield index + 1
    yield len(template_source) + 1
class Token(object):
    def __init__(self, token_type, contents, position=None, lineno=None):
        """
        A token representing a string from the template.

        token_type
            One of TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, or TOKEN_COMMENT.

        contents
            The token source string.

        position
            Optional (start, end) index of the token in the template source,
            used for traceback information when debug is on.

        lineno
            The line number the token appears on in the template source,
            used for traceback information and gettext files.
        """
        self.token_type = token_type
        self.contents = contents
        self.lineno = lineno
        self.position = position

    def __str__(self):
        token_name = TOKEN_MAPPING[self.token_type]
        preview = self.contents[:20].replace('\n', '')
        return '<%s token: "%s...">' % (token_name, preview)

    def split_contents(self):
        """Split contents shlex-style, keeping _("...") markers whole."""
        pieces = []
        bits = iter(smart_split(self.contents))
        for bit in bits:
            # Re-join the parts of a translation-marked string that
            # smart_split broke apart, e.g. _("foo bar") -> one piece.
            if bit.startswith(('_("', "_('")):
                sentinel = bit[2] + ')'
                trans_bit = [bit]
                while not bit.endswith(sentinel):
                    bit = next(bits)
                    trans_bit.append(bit)
                bit = ' '.join(trans_bit)
            pieces.append(bit)
        return pieces
class Lexer(object):
    """Splits a template string into TEXT/VAR/BLOCK/COMMENT tokens."""

    def __init__(self, template_string):
        self.template_string = template_string
        # While inside a {% verbatim %} section this holds the expected
        # closing tag name ('end<name>'); False otherwise.
        self.verbatim = False

    def tokenize(self):
        """
        Return a list of tokens from a given template_string.
        """
        in_tag = False
        lineno = 1
        result = []
        # tag_re.split alternates literal text and tag matches, starting
        # with (possibly empty) literal text -- hence the in_tag toggle.
        for bit in tag_re.split(self.template_string):
            if bit:
                result.append(self.create_token(bit, None, lineno, in_tag))
            in_tag = not in_tag
            lineno += bit.count('\n')
        return result

    def create_token(self, token_string, position, lineno, in_tag):
        """
        Convert the given token string into a new Token object and return it.
        If in_tag is True, we are processing something that matched a tag,
        otherwise it should be treated as a literal string.
        """
        if in_tag and token_string.startswith(BLOCK_TAG_START):
            # The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
            # We could do len(BLOCK_TAG_START) to be more "correct", but we've
            # hard-coded the 2s here for performance. And it's not like
            # the TAG_START values are going to change anytime, anyway.
            block_content = token_string[2:-2].strip()
            if self.verbatim and block_content == self.verbatim:
                # Matching end tag: leave verbatim mode.
                self.verbatim = False
        if in_tag and not self.verbatim:
            if token_string.startswith(VARIABLE_TAG_START):
                token = Token(TOKEN_VAR, token_string[2:-2].strip(), position, lineno)
            elif token_string.startswith(BLOCK_TAG_START):
                if block_content[:9] in ('verbatim', 'verbatim '):
                    # Entering verbatim mode; remember the closing tag name.
                    self.verbatim = 'end%s' % block_content
                token = Token(TOKEN_BLOCK, block_content, position, lineno)
            elif token_string.startswith(COMMENT_TAG_START):
                content = ''
                # NOTE(review): str.find returns -1 (truthy) when the marker
                # is absent and 0 (falsy) when it is at the very start, so
                # this keeps comment content unless the marker sits at
                # position 0 -- confirm this is the intended behavior.
                if token_string.find(TRANSLATOR_COMMENT_MARK):
                    content = token_string[2:-2].strip()
                token = Token(TOKEN_COMMENT, content, position, lineno)
        else:
            # Literal template text (or verbatim-mode tag text).
            token = Token(TOKEN_TEXT, token_string, position, lineno)
        return token
class DebugLexer(Lexer):
    # Same tokenization as Lexer, but records (start, end) source offsets on
    # every token so exceptions can point at the exact template span.
    def tokenize(self):
        """
        Split a template string into tokens and annotates each token with its
        start and end position in the source. This is slower than the default
        lexer so we only use it when debug is True.
        """
        lineno = 1
        result = []
        # `upto` tracks how far into the source we have emitted tokens.
        upto = 0
        for match in tag_re.finditer(self.template_string):
            start, end = match.span()
            if start > upto:
                # Literal text between the previous tag and this one.
                token_string = self.template_string[upto:start]
                result.append(self.create_token(token_string, (upto, start), lineno, in_tag=False))
                lineno += token_string.count('\n')
                upto = start
            # The tag itself.
            token_string = self.template_string[start:end]
            result.append(self.create_token(token_string, (start, end), lineno, in_tag=True))
            lineno += token_string.count('\n')
            upto = end
        # Trailing literal text after the last tag, if any.
        last_bit = self.template_string[upto:]
        if last_bit:
            result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), lineno, in_tag=False))
        return result
class Parser(object):
    # Consumes the token list produced by a Lexer and compiles it into a
    # NodeList. Tag/filter callbacks come from registered libraries.
    def __init__(self, tokens, libraries=None, builtins=None):
        # tokens: mutable list of Token objects; parse() pops from the front.
        self.tokens = tokens
        self.tags = {}
        self.filters = {}
        # Stack of (command, token) for currently-open block tags, used to
        # report which tag was left unclosed.
        self.command_stack = []
        if libraries is None:
            libraries = {}
        if builtins is None:
            builtins = []
        self.libraries = libraries
        for builtin in builtins:
            self.add_library(builtin)
    def parse(self, parse_until=None):
        """
        Iterate through the parser tokens and compiles each one into a node.
        If parse_until is provided, parsing will stop once one of the
        specified tokens has been reached. This is formatted as a list of
        tokens, e.g. ['elif', 'else', 'endif']. If no matching token is
        reached, raise an exception with the unclosed block tag details.
        """
        if parse_until is None:
            parse_until = []
        nodelist = NodeList()
        while self.tokens:
            token = self.next_token()
            # Use the raw values here for TOKEN_* for a tiny performance boost.
            if token.token_type == 0:  # TOKEN_TEXT
                self.extend_nodelist(nodelist, TextNode(token.contents), token)
            elif token.token_type == 1:  # TOKEN_VAR
                if not token.contents:
                    raise self.error(token, 'Empty variable tag')
                try:
                    filter_expression = self.compile_filter(token.contents)
                except TemplateSyntaxError as e:
                    raise self.error(token, e)
                var_node = VariableNode(filter_expression)
                self.extend_nodelist(nodelist, var_node, token)
            elif token.token_type == 2:  # TOKEN_BLOCK
                try:
                    command = token.contents.split()[0]
                except IndexError:
                    raise self.error(token, 'Empty block tag')
                if command in parse_until:
                    # A matching token has been reached. Return control to
                    # the caller. Put the token back on the token list so the
                    # caller knows where it terminated.
                    self.prepend_token(token)
                    return nodelist
                # Add the token to the command stack. This is used for error
                # messages if further parsing fails due to an unclosed block
                # tag.
                self.command_stack.append((command, token))
                # Get the tag callback function from the ones registered with
                # the parser.
                try:
                    compile_func = self.tags[command]
                except KeyError:
                    self.invalid_block_tag(token, command, parse_until)
                # Compile the callback into a node object and add it to
                # the node list.
                try:
                    compiled_result = compile_func(self, token)
                except Exception as e:
                    raise self.error(token, e)
                self.extend_nodelist(nodelist, compiled_result, token)
                # Compile success. Remove the token from the command stack.
                self.command_stack.pop()
        if parse_until:
            self.unclosed_block_tag(parse_until)
        return nodelist
    def skip_past(self, endtag):
        # Discard tokens until `endtag` is found; raise if it never appears.
        while self.tokens:
            token = self.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == endtag:
                return
        self.unclosed_block_tag([endtag])
    def extend_nodelist(self, nodelist, node, token):
        # Check that non-text nodes don't appear before an extends tag.
        if node.must_be_first and nodelist.contains_nontext:
            raise self.error(
                token, '%r must be the first tag in the template.' % node,
            )
        if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
            nodelist.contains_nontext = True
        # Set token here since we can't modify the node __init__ method
        node.token = token
        nodelist.append(node)
    def error(self, token, e):
        """
        Return an exception annotated with the originating token. Since the
        parser can be called recursively, check if a token is already set. This
        ensures the innermost token is highlighted if an exception occurs,
        e.g. a compile error within the body of an if statement.
        """
        if not isinstance(e, Exception):
            e = TemplateSyntaxError(e)
        if not hasattr(e, 'token'):
            e.token = token
        return e
    def invalid_block_tag(self, token, command, parse_until=None):
        # Raise a TemplateSyntaxError describing the unknown tag, listing
        # the expected closers when inside an open block.
        if parse_until:
            raise self.error(token, "Invalid block tag: '%s', expected %s" %
                (command, get_text_list(["'%s'" % p for p in parse_until])))
        raise self.error(token, "Invalid block tag: '%s'" % command)
    def unclosed_block_tag(self, parse_until):
        # Report the innermost still-open tag (top of the command stack).
        command, token = self.command_stack.pop()
        msg = "Unclosed tag '%s'. Looking for one of: %s." % (command, ', '.join(parse_until))
        raise self.error(token, msg)
    def next_token(self):
        # Pop the next token from the front of the stream.
        return self.tokens.pop(0)
    def prepend_token(self, token):
        # Push a token back so the caller (or a parent parse()) sees it.
        self.tokens.insert(0, token)
    def delete_first_token(self):
        del self.tokens[0]
    def add_library(self, lib):
        # Merge a template library's tags and filters into this parser.
        self.tags.update(lib.tags)
        self.filters.update(lib.filters)
    def compile_filter(self, token):
        """
        Convenient wrapper for FilterExpression
        """
        return FilterExpression(token, self)
    def find_filter(self, filter_name):
        if filter_name in self.filters:
            return self.filters[filter_name]
        else:
            raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
    'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"',  # double-quoted string
    'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'",  # single-quoted string
    'i18n_open': re.escape("_("),
    'i18n_close': re.escape(")"),
}
# The pattern is compiled with re.VERBOSE, so embedded newlines/whitespace
# are insignificant; strip them anyway to keep the composed pattern compact.
constant_string = constant_string.replace("\n", "")
# Matches one leading constant-or-variable, then repeated |filter:arg parts.
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
    (?P<filter_name>\w+)
    (?:%(arg_sep)s
        (?:
            (?P<constant_arg>%(constant)s)|
            (?P<var_arg>[%(var_chars)s]+|%(num)s)
        )
    )?
)""" % {
    'constant': constant_string,
    'num': r'[-+\.]?\d[\d\.e]*',
    # Raw string: "\w\." relied on Python passing unknown escapes through,
    # which raises DeprecationWarning since 3.6. Byte-identical value.
    'var_chars': r"\w\.",
    'filter_sep': re.escape(FILTER_SEPARATOR),
    'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
filter_re = re.compile(filter_raw_string, re.UNICODE | re.VERBOSE)
class FilterExpression(object):
    """
    Parses a variable token and its optional filters (all as a single string),
    and return a list of tuples of the filter name and arguments.
    Sample::
        >>> token = 'variable|default:"Default value"|date:"Y-m-d"'
        >>> p = Parser('')
        >>> fe = FilterExpression(token, p)
        >>> len(fe.filters)
        2
        >>> fe.var
        <Variable: 'variable'>
    """
    def __init__(self, token, parser):
        self.token = token
        # filter_re yields one match for the leading variable/constant and
        # one per |filter:arg segment; matches must be contiguous.
        matches = filter_re.finditer(token)
        var_obj = None
        filters = []
        upto = 0
        for match in matches:
            start = match.start()
            if upto != start:
                # Gap between matches means unparseable characters.
                raise TemplateSyntaxError("Could not parse some characters: "
                                          "%s|%s|%s" %
                                          (token[:upto], token[upto:start],
                                           token[start:]))
            if var_obj is None:
                # First match: the base variable or constant literal.
                var_obj, constant = match.group("var", "constant")
                if constant:
                    try:
                        var_obj = Variable(constant).resolve({})
                    except VariableDoesNotExist:
                        var_obj = None
                elif var_obj is None:
                    raise TemplateSyntaxError("Could not find variable at "
                                              "start of %s." % token)
                else:
                    var_obj = Variable(var_obj)
            else:
                # Subsequent matches: |filter_name[:arg] segments. Each arg
                # is stored as (needs_resolution, value).
                filter_name = match.group("filter_name")
                args = []
                constant_arg, var_arg = match.group("constant_arg", "var_arg")
                if constant_arg:
                    args.append((False, Variable(constant_arg).resolve({})))
                elif var_arg:
                    args.append((True, Variable(var_arg)))
                filter_func = parser.find_filter(filter_name)
                self.args_check(filter_name, filter_func, args)
                filters.append((filter_func, args))
            upto = match.end()
        if upto != len(token):
            raise TemplateSyntaxError("Could not parse the remainder: '%s' "
                                      "from '%s'" % (token[upto:], token))
        self.filters = filters
        self.var = var_obj
    def resolve(self, context, ignore_failures=False):
        # Resolve the base value, then pipe it through each filter in order.
        if isinstance(self.var, Variable):
            try:
                obj = self.var.resolve(context)
            except VariableDoesNotExist:
                if ignore_failures:
                    obj = None
                else:
                    string_if_invalid = context.template.engine.string_if_invalid
                    if string_if_invalid:
                        # '%s' placeholder in the invalid string receives the
                        # variable name; note filters are skipped entirely.
                        if '%s' in string_if_invalid:
                            return string_if_invalid % self.var
                        else:
                            return string_if_invalid
                    else:
                        obj = string_if_invalid
        else:
            # Base was a constant literal, already resolved at parse time.
            obj = self.var
        for func, args in self.filters:
            arg_vals = []
            for lookup, arg in args:
                if not lookup:
                    # Constant args were resolved at parse time; mark safe.
                    arg_vals.append(mark_safe(arg))
                else:
                    arg_vals.append(arg.resolve(context))
            if getattr(func, 'expects_localtime', False):
                obj = template_localtime(obj, context.use_tz)
            if getattr(func, 'needs_autoescape', False):
                new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
            else:
                new_obj = func(obj, *arg_vals)
            # Propagate safe/escape markers through safe filters.
            if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
                obj = mark_safe(new_obj)
            elif isinstance(obj, EscapeData):
                obj = mark_for_escaping(new_obj)
            else:
                obj = new_obj
        return obj
    # Defined without `self` and wrapped below — pre-decorator staticmethod
    # idiom; kept as-is so registered filters keep the same signature check.
    def args_check(name, func, provided):
        # Validate that the filter accepts `provided` plus the implicit
        # piped-in value; raise TemplateSyntaxError on arity mismatch.
        provided = list(provided)
        # First argument, filter input, is implied.
        plen = len(provided) + 1
        # Check to see if a decorator is providing the real function.
        func = getattr(func, '_decorated_function', func)
        args, _, _, defaults = getargspec(func)
        alen = len(args)
        dlen = len(defaults or [])
        # Not enough OR Too many
        if plen < (alen - dlen) or plen > alen:
            raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
                                      (name, alen - dlen, plen))
        return True
    args_check = staticmethod(args_check)
    def __str__(self):
        return self.token
def resolve_variable(path, context):
    """
    Returns the resolved variable, which may contain attribute syntax, within
    the given context.
    Deprecated; use the Variable class instead.
    """
    warnings.warn("resolve_variable() is deprecated. Use django.template."
                  "Variable(path).resolve(context) instead",
                  RemovedInDjango110Warning, stacklevel=2)
    variable = Variable(path)
    return variable.resolve(context)
class Variable(object):
    """
    A template variable, resolvable against a given context. The variable may
    be a hard-coded string (if it begins and ends with single or double quote
    marks)::
        >>> c = {'article': {'section':'News'}}
        >>> Variable('article.section').resolve(c)
        'News'
        >>> Variable('article').resolve(c)
        {'section': 'News'}
        >>> class AClass: pass
        >>> c = AClass()
        >>> c.article = AClass()
        >>> c.article.section = 'News'
    (The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
    """
    def __init__(self, var):
        self.var = var
        # Exactly one of `literal` / `lookups` ends up set: a parse-time
        # constant, or a tuple of dotted-path segments to resolve lazily.
        self.literal = None
        self.lookups = None
        self.translate = False
        self.message_context = None
        if not isinstance(var, six.string_types):
            raise TypeError(
                "Variable must be a string or number, got %s" % type(var))
        try:
            # First try to treat this variable as a number.
            #
            # Note that this could cause an OverflowError here that we're not
            # catching. Since this should only happen at compile time, that's
            # probably OK.
            self.literal = float(var)
            # So it's a float... is it an int? If the original value contained a
            # dot or an "e" then it was a float, not an int.
            if '.' not in var and 'e' not in var.lower():
                self.literal = int(self.literal)
            # "2." is invalid
            if var.endswith('.'):
                raise ValueError
        except ValueError:
            # A ValueError means that the variable isn't a number.
            if var.startswith('_(') and var.endswith(')'):
                # The result of the lookup should be translated at rendering
                # time.
                self.translate = True
                var = var[2:-1]
            # If it's wrapped with quotes (single or double), then
            # we're also dealing with a literal.
            try:
                self.literal = mark_safe(unescape_string_literal(var))
            except ValueError:
                # Otherwise we'll set self.lookups so that resolve() knows we're
                # dealing with a bonafide variable
                if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
                    raise TemplateSyntaxError("Variables and attributes may "
                                              "not begin with underscores: '%s'" %
                                              var)
                self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))
    def resolve(self, context):
        """Resolve this variable against a given context."""
        if self.lookups is not None:
            # We're dealing with a variable that needs to be resolved
            value = self._resolve_lookup(context)
        else:
            # We're dealing with a literal, so it's already been "resolved"
            value = self.literal
        if self.translate:
            # Escape literal '%' so gettext doesn't treat it as a placeholder,
            # preserving the safe-string marker if present.
            is_safe = isinstance(value, SafeData)
            msgid = value.replace('%', '%%')
            msgid = mark_safe(msgid) if is_safe else msgid
            if self.message_context:
                return pgettext_lazy(self.message_context, msgid)
            else:
                return ugettext_lazy(msgid)
        return value
    def __repr__(self):
        return "<%s: %r>" % (self.__class__.__name__, self.var)
    def __str__(self):
        return self.var
    def _resolve_lookup(self, context):
        """
        Performs resolution of a real variable (i.e. not a literal) against the
        given context.
        As indicated by the method's name, this method is an implementation
        detail and shouldn't be called by external code. Use Variable.resolve()
        instead.
        """
        # For each dotted segment, try in order: dict-style lookup, attribute
        # lookup, then integer index lookup; call the result if callable.
        current = context
        try:  # catch-all for silent variable failures
            for bit in self.lookups:
                try:  # dictionary lookup
                    current = current[bit]
                # ValueError/IndexError are for numpy.array lookup on
                # numpy < 1.9 and 1.9+ respectively
                except (TypeError, AttributeError, KeyError, ValueError, IndexError):
                    try:  # attribute lookup
                        # Don't return class attributes if the class is the context:
                        if isinstance(current, BaseContext) and getattr(type(current), bit):
                            raise AttributeError
                        current = getattr(current, bit)
                    except (TypeError, AttributeError) as e:
                        # Reraise an AttributeError raised by a @property
                        if (isinstance(e, AttributeError) and
                                not isinstance(current, BaseContext) and bit in dir(current)):
                            raise
                        try:  # list-index lookup
                            current = current[int(bit)]
                        except (IndexError,  # list index out of range
                                ValueError,  # invalid literal for int()
                                KeyError,  # current is a dict without `int(bit)` key
                                TypeError):  # unsubscriptable object
                            raise VariableDoesNotExist("Failed lookup for key "
                                                       "[%s] in %r",
                                                       (bit, current))  # missing attribute
                if callable(current):
                    if getattr(current, 'do_not_call_in_templates', False):
                        pass
                    elif getattr(current, 'alters_data', False):
                        # Refuse to call data-modifying methods from templates.
                        current = context.template.engine.string_if_invalid
                    else:
                        try:  # method call (assuming no args required)
                            current = current()
                        except TypeError:
                            try:
                                inspect.getcallargs(current)
                            except TypeError:  # arguments *were* required
                                current = context.template.engine.string_if_invalid  # invalid method call
                            else:
                                # Zero-arg call raised TypeError internally;
                                # don't mask the real error.
                                raise
        except Exception as e:
            template_name = getattr(context, 'template_name', 'unknown')
            logger.debug('{} - {}'.format(template_name, e))
            # Exceptions flagged silent_variable_failure degrade to the
            # engine's invalid-string; anything else propagates.
            if getattr(e, 'silent_variable_failure', False):
                current = context.template.engine.string_if_invalid
            else:
                raise
        return current
class Node(object):
    """Base class for all compiled template nodes."""
    # Set this to True for nodes that must be first in the template (although
    # they can be preceded by text nodes.
    must_be_first = False
    # Attribute names holding child NodeLists; walked by get_nodes_by_type().
    child_nodelists = ('nodelist',)
    # The originating lexer Token; set by Parser.extend_nodelist().
    token = None
    def render(self, context):
        """
        Return the node rendered as a string.
        """
        # Base implementation renders nothing; subclasses override.
        pass
    def render_annotated(self, context):
        """
        Render the node. If debug is True and an exception occurs during
        rendering, the exception is annotated with contextual line information
        where it occurred in the template. For internal usage this method is
        preferred over using the render method directly.
        """
        try:
            return self.render(context)
        except Exception as e:
            if context.template.engine.debug and not hasattr(e, 'template_debug'):
                e.template_debug = context.template.get_exception_info(e, self.token)
            raise
    def __iter__(self):
        yield self
    def get_nodes_by_type(self, nodetype):
        """
        Return a list of all nodes (within this node and its nodelist)
        of the given type
        """
        nodes = []
        if isinstance(self, nodetype):
            nodes.append(self)
        # Recurse into every child nodelist this node type declares.
        for attr in self.child_nodelists:
            nodelist = getattr(self, attr, None)
            if nodelist:
                nodes.extend(nodelist.get_nodes_by_type(nodetype))
        return nodes
class NodeList(list):
    """A list of Nodes whose render() concatenates each member's output."""
    # Set to True the first time a non-TextNode is inserted by
    # extend_nodelist().
    contains_nontext = False
    def render(self, context):
        # Render each child (via render_annotated for debug info); non-Node
        # entries are stringified as-is. Result is marked safe because each
        # node is responsible for its own escaping.
        bits = []
        for node in self:
            if isinstance(node, Node):
                bit = node.render_annotated(context)
            else:
                bit = node
            bits.append(force_text(bit))
        return mark_safe(''.join(bits))
    def get_nodes_by_type(self, nodetype):
        "Return a list of all nodes of the given type"
        nodes = []
        for node in self:
            nodes.extend(node.get_nodes_by_type(nodetype))
        return nodes
class TextNode(Node):
    """Literal template text; rendering returns the stored string verbatim."""
    def __init__(self, s):
        self.s = s
    def __repr__(self):
        preview = self.s[:25]
        text = "<%s: %r>" % (self.__class__.__name__, preview)
        return force_str(text, 'ascii', errors='replace')
    def render(self, context):
        return self.s
def render_value_in_context(value, context):
    """
    Converts any value to a string to become part of a rendered template. This
    means escaping, if required, and conversion to a unicode object. If value
    is a string, it is expected to have already been translated.
    """
    # Apply timezone and locale formatting before stringifying.
    value = template_localtime(value, use_tz=context.use_tz)
    value = localize(value, use_l10n=context.use_l10n)
    value = force_text(value)
    # Escape unless the value is already marked safe (or autoescape is off),
    # but always escape values explicitly marked for escaping.
    if ((context.autoescape and not isinstance(value, SafeData)) or
            isinstance(value, EscapeData)):
        return conditional_escape(value)
    else:
        return value
class VariableNode(Node):
    """Node for a ``{{ variable|filter }}`` expression."""
    def __init__(self, filter_expression):
        # Parsed FilterExpression to resolve at render time.
        self.filter_expression = filter_expression
    def __repr__(self):
        return "<Variable Node: %s>" % self.filter_expression
    def render(self, context):
        try:
            output = self.filter_expression.resolve(context)
        except UnicodeDecodeError:
            # Unicode conversion can fail sometimes for reasons out of our
            # control (e.g. exception rendering). In that case, we fail
            # quietly.
            return ''
        return render_value_in_context(output, context)
# Regex for token keyword arguments
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
    """
    A utility method for parsing token keyword arguments.
    :param bits: A list containing remainder of the token (split by spaces)
        that is to be checked for arguments. Valid arguments will be removed
        from this list.
    :param support_legacy: If set to ``True``, the legacy format
        ``1 as foo`` will be accepted. Otherwise, only the standard ``foo=1``
        format is allowed.
    :returns: A dictionary of the arguments retrieved from the ``bits`` token
        list.
    There is no requirement for all remaining token ``bits`` to be keyword
    arguments, so the dictionary will be returned as soon as an invalid
    argument format is reached.
    """
    if not bits:
        return {}
    # Decide the format from the first bit: 'key=value' vs legacy 'value as key'.
    first = kwarg_re.match(bits[0])
    kwarg_format = first and first.group(1)
    if not kwarg_format:
        if not support_legacy:
            return {}
        if len(bits) < 3 or bits[1] != 'as':
            return {}
    kwargs = {}
    while bits:
        if kwarg_format:
            m = kwarg_re.match(bits[0])
            if not m or not m.group(1):
                # Stop at the first non-kwarg bit, leaving it in `bits`.
                return kwargs
            key, value = m.groups()
            del bits[:1]
        else:
            if len(bits) < 3 or bits[1] != 'as':
                return kwargs
            key, value = bits[2], bits[0]
            del bits[:3]
        kwargs[key] = parser.compile_filter(value)
        if bits and not kwarg_format:
            # Legacy groups are joined by 'and'; anything else ends parsing.
            if bits[0] != 'and':
                return kwargs
            del bits[:1]
    return kwargs
|
crepererum/invenio | refs/heads/master | invenio/ext/session/interface.py | 13 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Flask :class:`~flask.sessions.SessionInterface` implementation."""
import six
from datetime import timedelta, datetime
from flask import current_app, request
from flask.helpers import locked_cached_property
from flask.sessions import SessionInterface as FlaskSessionInterface
from uuid import uuid4
from werkzeug.exceptions import BadRequest
from werkzeug.utils import import_string
from invenio.utils.serializers import ZlibPickle as Serializer
class SessionInterface(FlaskSessionInterface):
    """Extend :class:`~flask.sessions.SessionInterface` class.

    Pluggable session handling: the session class, serializer and storage
    backend are all resolved lazily from the application config (either as
    dotted import strings or as objects/classes).
    """
    @locked_cached_property
    def has_secure_url(self):
        """Return ``True`` if secure url is configured."""
        return current_app.config.get('CFG_SITE_SECURE_URL', '').\
            startswith("https://")
    @locked_cached_property
    def serializer(self):
        """Return serializer class."""
        serializer_string = current_app.config.get('SESSION_SERIALIZER',
                                                   Serializer)
        # Config may hold a dotted path or a class; instantiate either way.
        return import_string(serializer_string)() \
            if isinstance(serializer_string, six.string_types) \
            else serializer_string()
    @locked_cached_property
    def session_class(self):
        """Return session class."""
        session_class_string = current_app.config.get(
            'SESSION_CLASS', 'invenio.ext.session.legacy_session:Session')
        return import_string(session_class_string) \
            if isinstance(session_class_string, six.string_types) \
            else session_class_string
    @locked_cached_property
    def backend(self):
        """Return session backend."""
        storage_string = current_app.config.get(
            'SESSION_BACKEND', 'invenio.ext.session.backends.cache:Storage')
        return import_string(storage_string)() \
            if isinstance(storage_string, six.string_types) \
            else storage_string()
    def generate_sid(self):
        """Generate unique session identifier."""
        sid = uuid4().hex
        return sid
    def get_session_expiration_time(self, app, session):
        """Return session expiration time as a :class:`timedelta`."""
        if session.permanent:
            return app.permanent_session_lifetime
        # Non-permanent sessions still live for a day server-side.
        return timedelta(days=1)
    def open_session(self, app, request):
        """Return session instance.

        Looks up the session id in the cookie (or the ``session_id`` query
        argument), loads and deserializes the stored data, and validates the
        client IP. Any failure falls back to a fresh empty session.
        """
        sid = request.cookies.get(app.session_cookie_name) or \
            request.args.get('session_id')
        if not sid:
            sid = self.generate_sid()
            return self.session_class(sid=sid)
        try:
            data = self.backend.get(sid)
            if data:
                session = self.session_class(self.serializer.loads(data),
                                             sid=sid)
                if session.check_ip(request):
                    return session
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception while keeping the
        # best-effort "empty session on any load error" behavior.
        except Exception:
            current_app.logger.warning(
                "Load session error. Returning empty session.",
                exc_info=True)
        return self.session_class(sid=sid)
    def save_session(self, app, session, response):
        """Save current session.

        Persists modified sessions to the backend and (re)sets the session
        cookie, using secure cookies for authenticated users when the site
        is served over HTTPS.
        """
        domain = self.get_cookie_domain(app)
        if not session:
            current_app.logger.debug("Empty session: " + str(request.url))
            return
            # response.delete_cookie(app.session_cookie_name,
            #                       domain=domain)
            # response.delete_cookie(app.session_cookie_name + 'stub',
            #                       domain=domain)
            # return
        timeout = self.get_session_expiration_time(app, session)
        session_expiry = datetime.utcnow() + timeout
        max_age = cookie_expiry = None
        uid = session.uid
        # Only logged-in (uid > -1), permanent sessions get a persistent cookie.
        if uid > -1 and session.permanent:
            max_age = app.permanent_session_lifetime
            cookie_expiry = session_expiry
        sid = session.sid
        if session.logging_in:
            # # FIXME Do we really need to delete the session after login?
            # # The user just logged in, better change the session ID
            # sid = self.generate_sid()
            # flashes = get_flashed_messages(with_categories=True)
            # # And remove the cookie that has been set
            # self.backend.delete(session.sid)
            # session.clear()
            # response.delete_cookie(app.session_cookie_name, domain=domain)
            # response.delete_cookie(app.session_cookie_name + 'stub',
            #                        domain=domain)
            # session.sid = sid
            # session.uid = uid
            # # Fixes problem with lost flashes after login.
            # map(lambda (cat, msg): flash(msg, cat), flashes)
            pass
        # Set all user id keys for compatibility.
        if len(session.keys()) == 1 and '_id' in session:
            # Session only carries its own id: nothing worth persisting.
            session.delete()
            return
        elif not session.modified:
            # Unchanged sessions are not rewritten to the backend.
            return
        session.uid = uid
        session.save_ip(request)
        self.backend.set(sid,
                         self.serializer.dumps(dict(session)),
                         timeout=timeout)
        if not self.has_secure_url:
            response.set_cookie(app.session_cookie_name, sid,
                                expires=cookie_expiry, httponly=True,
                                domain=domain, max_age=max_age)
        elif session.uid > 0:
            # User is authenticated, we shall use HTTPS then
            if request.scheme == 'https':
                response.set_cookie(app.session_cookie_name, sid,
                                    expires=cookie_expiry, httponly=True,
                                    domain=domain, secure=True,
                                    max_age=max_age)
                # Stub cookie tells the HTTP side that an HTTPS session exists.
                response.set_cookie(app.session_cookie_name + 'stub', 'HTTPS',
                                    expires=cookie_expiry, httponly=True,
                                    domain=domain, max_age=max_age)
            else:
                raise BadRequest("The user is being authenticated over HTTP "
                                 "rather than HTTPS?")
        else:
            response.set_cookie(app.session_cookie_name, sid, httponly=True,
                                domain=domain)
            response.set_cookie(app.session_cookie_name + 'stub', 'NO',
                                httponly=True, domain=domain)
|
DarthMaulware/EquationGroupLeaks | refs/heads/master | Leak #5 - Lost In Translation/windows/Resources/ZBng/PyScripts/Lib/zbng/mca/security/cmd/logonasuser/data/__init__.py | 148 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
pass |
taoger/titanium_mobile | refs/heads/master | support/android/debugger.py | 37 | #!/usr/bin/env python
# This server acts as an intermediary between
# an ADB forwarded Titanium application w/ debugging
# and a local debug server that is listening for a connection
# TODO: this will work for on-device debugging
import asyncore
import socket
import sys
import time
def debug(msg):
    # Print a tagged debug line and flush immediately so output is not
    # buffered/lost when stdout is piped by the build tooling.
    print "[DEBUG] "+msg
    sys.stdout.flush()
class AsyncStream(asyncore.dispatcher_with_send):
    # Bidirectional relay between the (ADB-forwarded) device-side debugger
    # socket and a locally attached debug server. The first stream connects
    # to `host`; once 'ti.debugger.handshake' is received it opens a second
    # AsyncStream to `attach_host`, and the two forward each other's traffic
    # via `self.stream`.
    def __init__(self, host, attach_host=None, stream=None, retries=10):
        asyncore.dispatcher_with_send.__init__(self)
        self.stream = stream            # peer stream to forward data to
        self.host = host
        self.attach_host = attach_host
        self.retries = retries          # reconnect attempts before giving up
        self.rcvd_handshake = False
        self.create_connection()
    def create_connection(self):
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect(self.host)
    def attach_local(self):
        # Lazily open the local leg once the remote handshake arrives.
        if self.attach_host != None and self.stream is None:
            debug('Attaching to local %s port %d' % (self.attach_host[0], self.attach_host[1]))
            self.stream = AsyncStream(self.attach_host, stream=self, retries=0)
    def handle_connect(self):
        debug("Connected to %s port %d" % (self.host[0], self.host[1]))
    def handle_close(self):
        self.close()
        if self.retries > 0 and not self.rcvd_handshake:
            time.sleep(0.5)
            self.retries -= 1
            debug('retrying debugger connection (#%d)' % self.retries)
            self.create_connection()
        else:
            debug('closed from %s port %d' % (self.host[0], self.host[1]))
            # Robustness fix: the peer stream may never have been attached
            # (e.g. closed before the handshake); avoid AttributeError.
            if self.stream is not None:
                self.stream.close()
    def handle_read(self):
        data = self.recv(4096)
        if not data:
            # Peer closed the connection: nothing to forward. (Original had
            # a no-op `pass` here; behavior is unchanged since both branches
            # below were already false for empty data.)
            return
        if data == 'ti.debugger.handshake':
            self.rcvd_handshake = True
            self.attach_local()
        elif data != '':
            debug('read data: %s' % data)
            self.stream.send(data)
def to_addr(hostport):
    """Split a ``host:port`` string into a ``(host, int(port))`` tuple."""
    pieces = hostport.split(":")
    return (pieces[0], int(pieces[1]))
def run(local, remote):
    # Connect to the remote (forwarded) debugger endpoint and start the
    # asyncore event loop; the local endpoint is attached only after the
    # handshake arrives (see AsyncStream.attach_local).
    local_addr = to_addr(local)
    remote_addr = to_addr(remote)
    debug("Connecting to remote %s port %d..." % (remote_addr[0], remote_addr[1]))
    AsyncStream(remote_addr, attach_host=local_addr)
    # Blocks until all dispatchers are closed.
    asyncore.loop()
# CLI entry point: expects "<local host:port> <remote host:port>".
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print "Usage: %s <local host:local port> <remote host:remote port>" % sys.argv[0]
        sys.exit(1)
    run(sys.argv[1], sys.argv[2])
ms-iot/python | refs/heads/develop | cpython/Lib/test/test_crypt.py | 91 | from test import support
import unittest
crypt = support.import_module('crypt')
class CryptTestCase(unittest.TestCase):
    """Tests for the stdlib ``crypt`` module."""
    def test_crypt(self):
        # Classic two-character-salt DES call still works.
        c = crypt.crypt('mypassword', 'ab')
        if support.verbose:
            print('Test encryption: ', c)
    def test_salt(self):
        # mksalt output length = salt chars plus the '$id$' prefix (3 chars)
        # for methods that carry an ident.
        self.assertEqual(len(crypt._saltchars), 64)
        for method in crypt.methods:
            salt = crypt.mksalt(method)
            self.assertEqual(len(salt),
                             method.salt_chars + (3 if method.ident else 0))
    def test_saltedcrypt(self):
        # Both a method object and a pre-built salt string are accepted.
        for method in crypt.methods:
            pw = crypt.crypt('assword', method)
            self.assertEqual(len(pw), method.total_size)
            pw = crypt.crypt('assword', crypt.mksalt(method))
            self.assertEqual(len(pw), method.total_size)
    def test_methods(self):
        # Guarantee that METHOD_CRYPT is the last method in crypt.methods.
        self.assertTrue(len(crypt.methods) >= 1)
        self.assertEqual(crypt.METHOD_CRYPT, crypt.methods[-1])
# Run the test case directly when invoked as a script.
if __name__ == "__main__":
    unittest.main()
|
chriscabral/django-lean | refs/heads/master | docs/__init__.py | 887 | # Included so that Django's startproject comment runs against the docs directory
|
roshikouhai/courseEvalRetriever | refs/heads/master | CourseEvalRetriever/retrieveCourseEvalLinks.py | 1 | #TODO: Add functionality to save location, spot to resume download
#TODO: Set up how to download information
#TODO: Set up log file.
#TODO: Upload to online database.
"""
Web Page Scraper for Teacher Evals Page
Only goes through to find the links.
"""
import os, argparse, string
from selenium import webdriver
# Accumulates course evaluation page URLs across all index pages.
course_links =[]
# NOTE(review): launching Firefox at import time is a heavy side effect —
# importing this module opens a browser window. Consider moving into main().
browser = webdriver.Firefox()
def set_up_directory(directory):
    """Switch the working directory to *directory*.

    All downloaded data lands there. Temporary arrangement until a real
    database backend is chosen.
    """
    os.chdir(directory)
def clean_links(links):
    """Drop the navigation anchors (first 9 and last 3 of the page) and
    return the ``href`` attribute of each remaining course-eval link."""
    course_anchors = links[9:-3]
    return [anchor.get_attribute('href') for anchor in course_anchors]
def get_userpass():
    """Parse the UW NetID credentials supplied on the command line.

    Returns the argparse namespace carrying ``username`` and ``password``.
    """
    arg_parser = argparse.ArgumentParser(description='WebLogin to UW')
    arg_parser.add_argument('username')
    arg_parser.add_argument('password')
    namespace = arg_parser.parse_args()
    return namespace
def login(user_input):
    """
    Logs into WebLogin UW
    Username and Password given work
    Pass in Selenium browser to be in the same session
    """
    # Drives the module-level `browser` (shared Selenium session): fill the
    # NetID form fields and submit.
    browser.get('https://weblogin.washington.edu')
    loginElement = browser.find_element_by_id('weblogin_netid')
    loginElement.send_keys(user_input.username)
    passwordElement = browser.find_element_by_id('weblogin_password')
    passwordElement.send_keys(user_input.password)
    # Submitting the password field submits the whole form.
    passwordElement.submit()
def get_all_course_links(url):
    """
    Open a page listing all courses and retrieve all the teacher links
    """
    # Navigate the shared browser and strip navigation anchors via clean_links.
    browser.get(url)
    return clean_links(browser.find_elements_by_tag_name('a'))
def log_links(links):
    """Write every URL in *links* to ``CourseEvalLinks.txt`` in the current
    directory, one per line, overwriting any previous file."""
    with open('CourseEvalLinks.txt', 'w') as outfile:
        outfile.writelines(url + '\n' for url in links)
    print('Done saving')
def main():
    # NOTE(review): hard-coded absolute Windows path — breaks on any other
    # machine; should come from config or argv.
    set_up_directory("C:\\Users\\Leo\\Envs\\courseEvalRetriever\\courseEvalRetriever\\CourseEvalRetriever")
    login(get_userpass())
    # Index pages are letter-keyed a..w (first 23 letters of the alphabet).
    list_of_sites = ['https://www.washington.edu/cec/{0}-toc.html'.format(s) for s in string.ascii_lowercase[:23]]
    # Traverse Courses A-W
    for url in list_of_sites:
        course_links.extend(get_all_course_links(url))
        print('Done with {}'.format(url))
    log_links(course_links)
    browser.quit()
    print("Done")
print("Done")
if __name__ == '__main__':
main()
|
elenanst/HPOlib | refs/heads/master | optimizers/smac/smac_2_08_00-master.py | 2 | ##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import glob
import logging
import os
import re
import subprocess
import sys
import time
from HPOlib.optimizer_algorithm import OptimizerAlgorithm
import numpy as np
import HPOlib.wrapping_util as wrapping_uti
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
version_info = ["Algorithm Execution & Abstraction Toolkit ==> v2.08.00-master-766 (85fc099c674a)",
"Random Forest Library ==> v1.05.01-master-106 (7fba58fe4271)",
"SMAC ==> v2.08.00-master-731 (0e43c26c3d1f)"
]
class SMAC(OptimizerAlgorithm):
    """HPOlib wrapper for the SMAC 2.08.00 optimizer.

    Builds the SMAC command line, prepares/validates the experiment
    directory (optionally shared between parallel runs) and supports
    restoring interrupted runs.
    """

    def __init__(self):
        self.optimizer_name = 'SMAC'
        self.optimizer_dir = os.path.abspath("./smac_2_08_00-master")
        self.logger = logging.getLogger("HPOlib.smac_2_08_00-master")
        self.logger.info("optimizer_name:%s" % self.optimizer_name)
        self.logger.info("optimizer_dir:%s" % self.optimizer_dir)

    def get_algo_exec(self):
        # Quoted so SMAC treats 'python <wrapper script>' as one argument.
        return '"python ' + os.path.join(os.path.dirname(__file__),
                                         'SMAC_to_HPOlib.py') + '"'

    def check_dependencies(self):
        """Check that Java is on $PATH; warn when the version differs.

        Raises:
            Exception: if no ``java`` executable can be located.
        """
        process = subprocess.Popen("which java", stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, shell=True,
                                   executable="/bin/bash")
        stdoutdata, stderrdata = process.communicate()
        # communicate() returns bytes on Python 3 (str on Python 2);
        # b"java" compares correctly on both.
        if stdoutdata is not None and b"java" in stdoutdata:
            pass
        else:
            # Bug fix: message previously read "cannot not be found".
            raise Exception("Java cannot be found. "
                            "Are you sure that it's installed?\n"
                            "Your $PATH is: " + os.environ['PATH'])
        # Check Java Version; only a warning, never fatal.
        version_str = 'java version "1.7.0_65"'
        output = subprocess.check_output(["java", "-version"],
                                         stderr=subprocess.STDOUT)
        if not isinstance(output, str):
            # Python 3: check_output returns bytes.
            output = output.decode("utf-8", "replace")
        if version_str not in output:
            self.logger.critical("Java version (%s) does not contain %s,"
                                 "you continue at you own risk" % (output, version_str))

    def _get_state_run(self, optimizer_dir):
        """Return the single state-run directory inside *optimizer_dir*.

        When several rungroups exist the newest one (by ctime) is used.

        Raises:
            Exception: if no rungroup exists, or a rungroup contains more
                than one state-run.
        """
        rungroups = glob.glob(optimizer_dir + "/" + "scenario-SMAC*")
        if len(rungroups) == 0:
            raise Exception("Could not find a rungroup in %s" % optimizer_dir)
        if len(rungroups) == 1:
            rungroup = rungroups[0]
        else:
            self.logger.warning("Found multiple rungroups, take the newest one.")
            creation_times = []
            for i, filename in enumerate(rungroups):
                creation_times.append(float(os.path.getctime(filename)))
            newest = np.argmax(creation_times)
            rungroup = rungroups[newest]
            # Bug fix: logger.info takes a format string first; passing the
            # list itself triggered a formatting error inside logging.
            self.logger.info("creation times: %s, newest: %s, rungroup: %s",
                             creation_times, newest, rungroup)
        state_runs = glob.glob(rungroup + "/state-run*")
        if len(state_runs) != 1:
            raise Exception("wrapping.py can only restore runs with only one" +
                            " state-run. Please delete all others you don't want" +
                            "to use.")
        return state_runs[0]

    def build_call(self, config, options, optimizer_dir):
        """Assemble and return the SMAC command line string."""
        call = config.get('SMAC', 'path_to_optimizer') + "/smac"
        # Set all general parallel stuff here
        call = " ".join([call, '--numRun', str(options.seed),
                         '--cli-log-all-calls true',
                         '--cutoffTime', config.get('SMAC', 'cutoff_time'),
                         # The instance file does interfere with state restoration, it will only
                         # be loaded if no state is restored (look further down in the code
                         # '--instanceFile', config.get('SMAC', 'instanceFile'),
                         '--intraInstanceObj', config.get('SMAC', 'intra_instance_obj'),
                         '--runObj', config.get('SMAC', 'run_obj'),
                         # '--testInstanceFile', config.get('SMAC', 'testInstanceFile'),
                         '--algoExec', self.get_algo_exec(),
                         '--numIterations', config.get('SMAC', 'num_iterations'),
                         '--totalNumRunsLimit', config.get('SMAC', 'total_num_runs_limit'),
                         '--outputDirectory', optimizer_dir,
                         '--numConcurrentAlgoExecs', config.get('SMAC', 'num_concurrent_algo_execs'),
                         # '--runGroupName', config.get('SMAC', 'runGroupName'),
                         '--maxIncumbentRuns', config.get('SMAC', 'max_incumbent_runs'),
                         '--retryTargetAlgorithmRunCount',
                         config.get('SMAC', 'retry_target_algorithm_run_count'),
                         '--intensification-percentage',
                         config.get('SMAC', 'intensification_percentage'),
                         '--initial-incumbent', config.get('SMAC', 'initial_incumbent'),
                         '--rf-split-min', config.get('SMAC', 'rf_split_min'),
                         '--validation', config.get('SMAC', 'validation'),
                         '--runtime-limit', config.get('SMAC', 'runtime_limit'),
                         '--exec-mode', config.get('SMAC', 'exec_mode'),
                         '--rf-num-trees', config.get('SMAC', 'rf_num_trees'),
                         '--continous-neighbours', config.get('SMAC', 'continous_neighbours')])
        if config.getboolean('SMAC', 'save_runs_every_iteration'):
            call = " ".join([call, '--save-runs-every-iteration true'])
        else:
            call = " ".join([call, '--save-runs-every-iteration false'])
        if config.getboolean('SMAC', 'deterministic'):
            call = " ".join([call, '--deterministic true'])
        if config.getboolean('SMAC', 'adaptive_capping') and \
                config.get('SMAC', 'run_obj') == "RUNTIME":
            call = " ".join([call, '--adaptiveCapping true'])
        if config.getboolean('SMAC', 'rf_full_tree_bootstrap'):
            call = " ".join([call, '--rf-full-tree-bootstrap true'])
        # These options are set separately, because they depend on the
        # optimizer directory and might cause trouble when using a shared model
        if config.get('SMAC', 'shared_model') != 'False':
            call = " ".join([call, "--shared-model-mode true",
                             "--shared-model-mode-frequency",
                             config.get("SMAC", "shared_model_mode_frequency"),
                             '-p', os.path.join(optimizer_dir, os.path.basename(config.get('SMAC', 'p'))),
                             '--scenario-file', os.path.join(optimizer_dir, 'scenario.txt')])
        else:
            call = " ".join([call, '-p', os.path.join(optimizer_dir, os.path.basename(config.get('SMAC', 'p'))),
                             '--execDir', optimizer_dir,
                             '--scenario-file', os.path.join(optimizer_dir, 'scenario.txt')])
        if options.restore:
            raise NotImplementedError("Restoring has not been tested for this SMAC version")
            # NOTE: unreachable until the raise above is removed.
            # Bug fix: was a call to the bare name ``_get_state_run``
            # (NameError); it is a method of this class.
            state_run = self._get_state_run(optimizer_dir)
            restore_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                        os.getcwd(), state_run)
            call = " ".join([call, "--restore-scenario", restore_path])
        else:
            call = " ".join([call, '--instanceFile',
                             os.path.join(optimizer_dir, 'train.txt'),
                             '--testInstanceFile',
                             os.path.join(optimizer_dir, 'test.txt')])
        return call

    def restore(self, config, optimizer_dir, **kwargs):
        """
        Returns the number of restored runs.
        """
        ############################################################################
        # Run SMAC in a manner that it restores the files but then exits
        # Bug fix: use os.path.join instead of plain concatenation, which
        # wrote "<dir>smac_restart.out" next to (not inside) optimizer_dir
        # when the path had no trailing separator.
        restart_log = os.path.join(optimizer_dir, "smac_restart.out")
        fh = open(restart_log, "w")
        # NOTE(review): both paths below are used as unescaped regex
        # patterns; this relies on them containing no active metacharacters.
        smac_cmd = re.sub('python ' + os.path.dirname(os.path.realpath(__file__)) +
                          "/" + config.get('SMAC', 'algo_exec'), 'pwd',
                          kwargs['cmd'])
        smac_cmd = re.sub('--outputDirectory ' + optimizer_dir, '--outputDirectory '
                          + optimizer_dir + "restart_rungroups", smac_cmd)
        self.logger.info(smac_cmd)
        process = subprocess.Popen(smac_cmd, stdout=fh, stderr=fh, shell=True,
                                   executable="/bin/bash")
        self.logger.info("----------------------RUNNING--------------------------------")
        ret = process.wait()
        fh.close()
        self.logger.info("Finished with return code: " + str(ret))
        # os.remove("smac_restart.out")
        # read smac.out and look how many states are restored
        fh = open(restart_log)
        prog = re.compile(r"(Restored) ([0-9]{1,100}) (runs)")
        restored_runs = 0
        for line in fh.readlines():
            match = prog.search(line)
            if match:
                restored_runs = int(match.group(2))
        # Find out all rungroups and state-runs
        ############################################################################
        state_run = self._get_state_run(optimizer_dir)
        state_run_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                      os.getcwd(), state_run)
        state_runs = glob.glob(state_run_path + "/runs_and_results-it*.csv")
        state_run_iterations = []
        for state_run in state_runs:
            match = re.search(r"(runs_and_results-it)([0-9]{1,100})(.csv)",
                              state_run)
            if match:
                state_run_iterations.append(float(match.group(2)))
        # Use the runs_and_results file of the latest iteration.
        run_and_results_fn = state_runs[np.argmax(state_run_iterations)]
        runs_and_results = open(run_and_results_fn)
        lines = runs_and_results.readlines()
        # First line is the CSV header.
        state_run_iters = len(lines) - 1
        runs_and_results.close()
        fh.close()
        # TODO: Wait for a fix in SMAC
        # In SMAC, right now the number of restored iterations is at least one too high
        assert state_run_iters == restored_runs - 1, (state_run_iters, restored_runs)
        restored_runs = state_run_iters
        return restored_runs

    # setup directory where experiment will run
    def custom_setup(self, config, options, experiment_dir, optimizer_dir):
        """Create the optimizer directory and its input files.

        With a shared model, only the process that wins the ``mkdir`` race
        creates the files; all others wait until the files appear (or a
        timeout elapses, in which case the program aborts).
        """
        optimizer_str = os.path.splitext(os.path.basename(__file__))[0]
        # Find experiment directory
        if options.restore:
            if not os.path.exists(options.restore):
                raise Exception("The restore directory does not exist")
            optimizer_dir = options.restore
        elif config.get('SMAC', 'shared_model') != 'False':
            optimizer_dir = os.path.join(experiment_dir, optimizer_str + "_sharedModel_" +
                                         config.get('SMAC', 'shared_model'))
        # Set up experiment directory
        try:
            # mkdir doubles as the lock: exactly one process succeeds here.
            os.mkdir(optimizer_dir)
            # TODO: This can cause huge problems when the files are located
            # somewhere else?
            space = config.get('SMAC', "p")
            abs_space = os.path.abspath(space)
            parent_space = os.path.join(experiment_dir, optimizer_str, space)
            if os.path.exists(abs_space):
                space = abs_space
            elif os.path.exists(parent_space):
                space = parent_space
            else:
                raise Exception("SMAC search space not found. Searched at %s and "
                                "%s" % (abs_space, parent_space))
            os.symlink(os.path.join(experiment_dir, optimizer_str, space),
                       os.path.join(optimizer_dir, os.path.basename(space)))
            # Create the instance information: one line per CV fold.
            fh = open(os.path.join(optimizer_dir, 'train.txt'), "w")
            for i in range(config.getint('HPOLIB', 'number_cv_folds')):
                fh.write(str(i) + "\n")
            fh.close()
            fh = open(os.path.join(optimizer_dir, 'test.txt'), "w")
            for i in range(config.getint('HPOLIB', 'number_cv_folds')):
                fh.write(str(i) + "\n")
            fh.close()
            fh = open(os.path.join(optimizer_dir, "scenario.txt"), "w")
            fh.close()
        except OSError:
            # Directory already exists: another (shared-model) process is
            # setting it up -- wait until all expected files appear.
            space = config.get('SMAC', "p")
            abs_space = os.path.abspath(space)
            parent_space = os.path.join(experiment_dir, optimizer_str, space)
            ct = 0
            # ``missing`` holds the first path not yet present, or None
            # once everything was found.
            # Bug fix: the previous loop condition was inverted -- it gave
            # up after one second when a file was still missing, and slept
            # for the full timeout when everything was already there.
            missing = optimizer_dir
            while ct < config.getint('SMAC', 'wait_for_shared_model') and \
                    missing is not None:
                time.sleep(1)
                ct += 1
                missing = None
                if not os.path.isdir(optimizer_dir):
                    missing = optimizer_dir
                    continue
                if not os.path.exists(os.path.join(optimizer_dir, os.path.basename(space))) and \
                        not os.path.exists(parent_space):
                    missing = parent_space
                    continue
                if not os.path.exists(os.path.join(optimizer_dir, 'train.txt')):
                    missing = os.path.join(optimizer_dir, 'train.txt')
                    continue
                if not os.path.exists(os.path.join(optimizer_dir, 'test.txt')):
                    missing = os.path.join(optimizer_dir, 'test.txt')
                    continue
                if not os.path.exists(os.path.join(optimizer_dir, "scenario.txt")):
                    missing = os.path.join(optimizer_dir, "scenario.txt")
                    continue
            if missing is not None:
                self.logger.critical("Could not find all necessary files..abort. " +
                                     "Experiment directory %s is somehow created, but not complete\n" % optimizer_dir +
                                     "Missing: %s" % missing)
                sys.exit(1)
        return optimizer_dir

    def manipulate_config(self, config):
        """Derive missing SMAC options from HPOLIB settings, normalize the
        optimizer path and validate shared-model settings."""
        if not config.has_option('SMAC', 'cutoff_time'):
            # Leftover debug output; print() is valid on Python 2 and 3.
            print(config.get('HPOLIB', 'runsolver_time_limit'))
            if config.get('HPOLIB', 'runsolver_time_limit'):
                # Leave SMAC some headroom over the runsolver limit.
                config.set('SMAC', 'cutoff_time',
                           str(config.getint('HPOLIB', 'runsolver_time_limit') + 100))
            else:
                # SMACs maxint
                config.set('SMAC', 'cutoff_time', "2147483647")
        if not config.has_option('SMAC', 'total_num_runs_limit'):
            config.set('SMAC', 'total_num_runs_limit',
                       str(config.getint('HPOLIB', 'number_of_jobs') *
                           config.getint('HPOLIB', 'number_cv_folds')))
        if not config.has_option('SMAC', 'num_concurrent_algo_execs'):
            config.set('SMAC', 'num_concurrent_algo_execs',
                       config.get('HPOLIB', 'number_of_concurrent_jobs'))
        path_to_optimizer = config.get('SMAC', 'path_to_optimizer')
        if not os.path.isabs(path_to_optimizer):
            path_to_optimizer = os.path.join(os.path.dirname(os.path.realpath(__file__)), path_to_optimizer)
        path_to_optimizer = os.path.normpath(path_to_optimizer)
        if not os.path.exists(path_to_optimizer):
            self.logger.critical("Path to optimizer not found: %s" % path_to_optimizer)
            sys.exit(1)
        config.set('SMAC', 'path_to_optimizer', path_to_optimizer)
        config.set('SMAC', 'exec_mode', 'SMAC')
        shared_model = config.get('SMAC', 'shared_model')
        # Validates that wait_for_shared_model parses as an integer.
        config.getint('SMAC', 'wait_for_shared_model')
        if shared_model != 'False':
            # NOTE(review): result discarded -- presumably validates that
            # shared_model is an integer; confirm against callers.
            config.getint('SMAC', 'shared_model')
            if not os.path.isdir(shared_model):
                config.set('SMAC', 'shared_model_scenario_file', os.path.join(shared_model, 'scenario.txt'))
            if config.get('HPOLIB', 'temporary_output_directory') != '':
                self.logger.critical('Using a temp_out_dir and a shared model is not possible')
                sys.exit(1)
        return config
|
netsamir/dotfiles | refs/heads/master | files/vim/bundle/YouCompleteMe/third_party/ycmd/cpp/ycm/tests/gmock/gtest/test/gtest_shuffle_test.py | 3023 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
  """Returns the flag that makes disabled tests run too."""
  return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
  """Returns the --gtest_filter flag set to *test_filter*."""
  return '--gtest_filter={0}'.format(test_filter)
def RepeatFlag(n):
  """Returns the --gtest_repeat flag for *n* repetitions."""
  return '--gtest_repeat=' + str(n)
def ShuffleFlag():
  """Returns the flag that enables test shuffling."""
  return '--gtest_shuffle'
def RandomSeedFlag(n):
  """Returns the --gtest_random_seed flag set to seed *n*."""
  return '--gtest_random_seed=' + str(n)
def RunAndReturnOutput(extra_env, args):
  """Runs the test program with *extra_env* merged into the environment
  and returns its captured output."""
  merged_env = os.environ.copy()
  merged_env.update(extra_env)
  return gtest_test_utils.Subprocess([COMMAND] + args, env=merged_env).output
def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """
  iterations = []
  for output_line in RunAndReturnOutput(extra_env, args).split('\n'):
    if output_line.startswith('----'):
      # A dashed separator marks the start of a new iteration.
      batch = []
      iterations.append(batch)
    elif output_line.strip():
      batch.append(output_line.strip())  # 'TestCaseName.TestName'
  return iterations
def GetTestCases(tests):
  """Returns a list of test cases in the given full test names.

  Args:
    tests: a list of full test names ('TestCaseName.TestName')

  Returns:
    A list of the distinct test case names from 'tests', in the order of
    their first appearance.  (Docstring fix: ALL duplicates are removed,
    not just consecutive ones -- membership is checked against the whole
    accumulated list.)
  """
  test_cases = []
  for test in tests:
    test_case = test.split('.')[0]
    if test_case not in test_cases:
      test_cases.append(test_case)
  return test_cases
def CalculateTestLists():
  """Populates the module-level test lists (only on the first call).

  Each list caches the tests the binary runs under one particular
  flag/environment combination; lists that are already non-empty are
  left untouched.
  """
  shard_env = {TOTAL_SHARDS_ENV_VAR: '3', SHARD_INDEX_ENV_VAR: '1'}
  specs = [
      (ALL_TESTS, {}, [AlsoRunDisabledTestsFlag()]),
      (ACTIVE_TESTS, {}, []),
      (FILTERED_TESTS, {}, [FilterFlag(TEST_FILTER)]),
      (SHARDED_TESTS, shard_env, []),
      (SHUFFLED_ALL_TESTS, {},
       [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)]),
      (SHUFFLED_ACTIVE_TESTS, {}, [ShuffleFlag(), RandomSeedFlag(1)]),
      (SHUFFLED_FILTERED_TESTS, {},
       [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)]),
      (SHUFFLED_SHARDED_TESTS, shard_env,
       [ShuffleFlag(), RandomSeedFlag(1)]),
  ]
  for target_list, extra_env, flags in specs:
    if not target_list:
      target_list.extend(GetTestsForAllIterations(extra_env, flags)[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling.

  The expected/actual test lists compared below are the module-level
  globals lazily populated by CalculateTestLists(), so the test binary
  is only invoked once per flag combination for the whole suite.
  """
  def setUp(self):
    # Populates the module-level test lists on first use; subsequent
    # calls are no-ops because the lists are cached.
    CalculateTestLists()
  def testShufflePreservesNumberOfTests(self):
    # Shuffling must not add or drop tests under any flag combination.
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
  def testShuffleChangesTestOrder(self):
    # With a fixed non-zero seed, the shuffled order must differ from
    # the default order.  (The second argument is the failure message.)
    self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
    self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
    self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                 SHUFFLED_FILTERED_TESTS)
    self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                 SHUFFLED_SHARDED_TESTS)
  def testShuffleChangesTestCaseOrder(self):
    # The order of whole test cases (not just tests) must change too.
    self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                 GetTestCases(SHUFFLED_ALL_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
        GetTestCases(SHUFFLED_ACTIVE_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
        GetTestCases(SHUFFLED_FILTERED_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
        GetTestCases(SHUFFLED_SHARDED_TESTS))
  def testShuffleDoesNotRepeatTest(self):
    # Every test must appear exactly once after shuffling.
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))
  def testShuffleDoesNotCreateNewTest(self):
    # Shuffled lists may only contain tests from the unshuffled lists.
    for test in SHUFFLED_ALL_TESTS:
      self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
  def testShuffleIncludesAllTests(self):
    # Conversely, no test from the unshuffled lists may go missing.
    for test in ALL_TESTS:
      self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
  def testShuffleLeavesDeathTestsAtFront(self):
    # All death tests must come before the first non-death test.
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assert_(not non_death_test_found,
                     '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True
  def _VerifyTestCasesDoNotInterleave(self, tests):
    # Asserts that each test case forms one contiguous run in 'tests'.
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      if test_cases and test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))
  def testShuffleDoesNotInterleaveTestCases(self):
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively.  Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers.  This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)
    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)
    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)
  def testShuffleGeneratesNewOrderInEachIteration(self):
    # Repeated iterations must be shuffled differently from each other.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
    self.assert_(tests_in_iteration1 != tests_in_iteration2,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration1 != tests_in_iteration3,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration2 != tests_in_iteration3,
                 tests_in_iteration2)
  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)
# Delegate to the gtest test runner when executed directly.
if __name__ == '__main__':
  gtest_test_utils.Main()
|
markgw/jazzparser | refs/heads/master | src/jazzparser/taggers/segmidi/base.py | 1 | """Base classes for segmidi taggers.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <mark.granroth-wilding@ed.ac.uk>"
from jazzparser import settings
from jazzparser.taggers.models import ModelTagger
from jazzparser.utils.options import ModuleOption
class SegmidiTagger(ModelTagger):
    """
    Base class for segmented MIDI taggers.

    Inherits from L{jazzparser.taggers.models.ModelTagger}, so subclasses
    should implement the abstract methods of this.
    """
    # Formalisms this tagger family can produce signs for.
    COMPATIBLE_FORMALISMS = [
        'music_halfspan',
    ]
    # Accepts segmented MIDI input only.
    INPUT_TYPES = ['segmidi']

    def __init__(self, *args, **kwargs):
        super(SegmidiTagger, self).__init__(*args, **kwargs)

    def get_signs(self, offset=0):
        # Abstract: subclasses must produce the signs for the input.
        # Bug fix: 'raise X, "msg"' is Python-2-only syntax; the call
        # form below is valid on Python 2 and 3 alike.
        raise NotImplementedError("called base SegmidiTagger's get_signs()")

    def get_word(self, index):
        # Human-readable label for the input segment at *index*.
        return "<midi segment %d>" % index

    def get_string_input(self):
        # String form of the input: one token per segment index.
        return [str(i) for i in range(self.input_length)]
|
mdmintz/SeleniumBase | refs/heads/master | examples/custom_settings.py | 2 | """
To override default settings stored in seleniumbase/config/settings.py,
change the values here and add "--settings=custom_settings.py" when running.
"""
# Default timeout values for waiting for page elements to appear.
MINI_TIMEOUT = 2
SMALL_TIMEOUT = 6
LARGE_TIMEOUT = 10
EXTREME_TIMEOUT = 30
# If False, only logs from the most recent test run will be saved.
ARCHIVE_EXISTING_LOGS = False
ARCHIVE_EXISTING_DOWNLOADS = False
# Waiting for Document.readyState to be "Complete" after browser actions.
WAIT_FOR_RSC_ON_PAGE_LOADS = True
WAIT_FOR_RSC_ON_CLICKS = True
WAIT_FOR_ANGULARJS = True
# Changing the default behavior of Demo Mode. Activate with: --demo_mode
DEFAULT_DEMO_MODE_TIMEOUT = 0.5
HIGHLIGHTS = 4
DEFAULT_MESSAGE_DURATION = 2.55
# Disabling the Content Security Policy of the browser by default.
DISABLE_CSP_ON_FIREFOX = True
DISABLE_CSP_ON_CHROME = False
# If True and --proxy=IP_ADDRESS:PORT is invalid, then error immediately.
RAISE_INVALID_PROXY_STRING_EXCEPTION = True
# Default browser resolutions when opening new windows for tests.
# (Headless resolutions take priority, and include all browsers.)
# (Firefox starts maximized by default when running in GUI Mode.)
CHROME_START_WIDTH = 1250
CHROME_START_HEIGHT = 840
HEADLESS_START_WIDTH = 1440
HEADLESS_START_HEIGHT = 1880
# Changing the default behavior of MasterQA Mode.
MASTERQA_DEFAULT_VALIDATION_MESSAGE = "Does the page look good?"
MASTERQA_WAIT_TIME_BEFORE_VERIFY = 0.5
MASTERQA_START_IN_FULL_SCREEN_MODE = False
MASTERQA_MAX_IDLE_TIME_BEFORE_QUIT = 600
# Google Authenticator
# (For 2-factor authentication using a time-based one-time password algorithm)
# (See https://github.com/pyotp/pyotp and https://pypi.org/project/pyotp/ )
# (Also works with Authy and other compatible apps.)
# Usage: "self.get_google_auth_password()" (output based on timestamp)
# Usage with override: "self.get_google_auth_password(totp_key=TOTP_KEY)"
TOTP_KEY = "base32secretABCD"
# MySQL DB Credentials
# (For saving data from tests to a MySQL DB)
# Usage: "--with-db_reporting"
DB_HOST = "127.0.0.1"
DB_PORT = 3306
DB_USERNAME = "root"
DB_PASSWORD = "test"
DB_SCHEMA = "test_db"
# Amazon S3 Bucket Credentials
# (For saving screenshots and other log files from tests)
# (Bucket names are unique across all existing bucket names in Amazon S3)
# Usage: "--with-s3_logging"
S3_LOG_BUCKET = "[S3 BUCKET NAME]"
S3_BUCKET_URL = "https://s3.amazonaws.com/[S3 BUCKET NAME]/"
S3_SELENIUM_ACCESS_KEY = "[S3 ACCESS KEY]"
S3_SELENIUM_SECRET_KEY = "[S3 SECRET KEY]"
# Encryption Settings
# (Used for string/password obfuscation)
# (You should reset the Encryption Key for every clone of SeleniumBase)
ENCRYPTION_KEY = "Pg^.l!8UdJ+Y7dMIe&fl*%!p9@ej]/#tL~3E4%6?"
# These tokens are added to the beginning and end of obfuscated passwords.
# Helps identify which strings/passwords have been obfuscated.
OBFUSCATION_START_TOKEN = "$^*ENCRYPT="
OBFUSCATION_END_TOKEN = "?&#$"
|
UnbDroid/robomagellan | refs/heads/master | Codigos/Raspberry/desenvolvimentoRos/devel/lib/python2.7/dist-packages/visualization_msgs/msg/_MarkerArray.py | 1 | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from visualization_msgs/MarkerArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import genpy
import geometry_msgs.msg
import visualization_msgs.msg
import std_msgs.msg
class MarkerArray(genpy.Message):
_md5sum = "d155b9ce5188fbaf89745847fd5882d7"
_type = "visualization_msgs/MarkerArray"
_has_header = False #flag to mark the presence of a Header object
_full_text = """Marker[] markers
================================================================================
MSG: visualization_msgs/Marker
# See http://www.ros.org/wiki/rviz/DisplayTypes/Marker and http://www.ros.org/wiki/rviz/Tutorials/Markers%3A%20Basic%20Shapes for more information on using this message with rviz
uint8 ARROW=0
uint8 CUBE=1
uint8 SPHERE=2
uint8 CYLINDER=3
uint8 LINE_STRIP=4
uint8 LINE_LIST=5
uint8 CUBE_LIST=6
uint8 SPHERE_LIST=7
uint8 POINTS=8
uint8 TEXT_VIEW_FACING=9
uint8 MESH_RESOURCE=10
uint8 TRIANGLE_LIST=11
uint8 ADD=0
uint8 MODIFY=0
uint8 DELETE=2
uint8 DELETEALL=3
Header header # header for time/frame information
string ns # Namespace to place this object in... used in conjunction with id to create a unique name for the object
int32 id # object ID useful in conjunction with the namespace for manipulating and deleting the object later
int32 type # Type of object
int32 action # 0 add/modify an object, 1 (deprecated), 2 deletes an object, 3 deletes all objects
geometry_msgs/Pose pose # Pose of the object
geometry_msgs/Vector3 scale # Scale of the object 1,1,1 means default (usually 1 meter square)
std_msgs/ColorRGBA color # Color [0.0-1.0]
duration lifetime # How long the object should last before being automatically deleted. 0 means forever
bool frame_locked # If this marker should be frame-locked, i.e. retransformed into its frame every timestep
#Only used if the type specified has some use for them (eg. POINTS, LINE_STRIP, ...)
geometry_msgs/Point[] points
#Only used if the type specified has some use for them (eg. POINTS, LINE_STRIP, ...)
#number of colors must either be 0 or equal to the number of points
#NOTE: alpha is not yet used
std_msgs/ColorRGBA[] colors
# NOTE: only used for text markers
string text
# NOTE: only used for MESH_RESOURCE markers
string mesh_resource
bool mesh_use_embedded_materials
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
# It is only meant to represent a direction. Therefore, it does not
# make sense to apply a translation to it (e.g., when applying a
# generic rigid transformation to a Vector3, tf2 will only apply the
# rotation). If you want your data to be translatable too, use the
# geometry_msgs/Point message instead.
float64 x
float64 y
float64 z
================================================================================
MSG: std_msgs/ColorRGBA
float32 r
float32 g
float32 b
float32 a
"""
__slots__ = ['markers']  # the only serialized field of this message
_slot_types = ['visualization_msgs/Marker[]']  # ROS type string per slot, parallel to __slots__
def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       markers
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.

    NOTE(generated): this class is emitted by the genpy message generator;
    hand edits are lost on regeneration, so only documentation is added here.
    """
    if args or kwds:
        # Let the genpy Message base class map positional/keyword args onto slots.
        super(MarkerArray, self).__init__(*args, **kwds)
        # message fields cannot be None, assign default values for those that are
        if self.markers is None:
            self.markers = []
    else:
        # No arguments: initialize every field to its default value.
        self.markers = []
def _get_types(self):
    """
    internal API method: return the ROS type strings for this message's
    slots (parallel to ``__slots__``), used by genpy's (de)serializers.
    """
    return self._slot_types
def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``

    NOTE(generated): emitted by the genpy message generator. The statement
    order mirrors the ROS wire format exactly (little-endian, fields in
    .msg declaration order) and must not be hand-edited.
    """
    try:
        # Arrays are serialized as a uint32 length prefix followed by elements.
        length = len(self.markers)
        buff.write(_struct_I.pack(length))
        for val1 in self.markers:
            # --- std_msgs/Header ---
            _v1 = val1.header
            buff.write(_struct_I.pack(_v1.seq))
            _v2 = _v1.stamp
            _x = _v2
            buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
            # Strings: uint32 byte-length prefix + UTF-8 bytes.
            _x = _v1.frame_id
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack('<I%sB'%length, length, *_x))
            else:
                buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = val1.ns
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack('<I%sB'%length, length, *_x))
            else:
                buff.write(struct.pack('<I%ss'%length, length, _x))
            # id, type, action packed as three little-endian int32s.
            _x = val1
            buff.write(_struct_3i.pack(_x.id, _x.type, _x.action))
            # --- geometry_msgs/Pose (position + orientation) ---
            _v3 = val1.pose
            _v4 = _v3.position
            _x = _v4
            buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
            _v5 = _v3.orientation
            _x = _v5
            buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
            _v6 = val1.scale
            _x = _v6
            buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
            _v7 = val1.color
            _x = _v7
            buff.write(_struct_4f.pack(_x.r, _x.g, _x.b, _x.a))
            # lifetime is a ROS duration: two signed int32s (secs, nsecs).
            _v8 = val1.lifetime
            _x = _v8
            buff.write(_struct_2i.pack(_x.secs, _x.nsecs))
            buff.write(_struct_B.pack(val1.frame_locked))
            # Variable-length points/colors arrays, each with its own prefix.
            length = len(val1.points)
            buff.write(_struct_I.pack(length))
            for val2 in val1.points:
                _x = val2
                buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
            length = len(val1.colors)
            buff.write(_struct_I.pack(length))
            for val2 in val1.colors:
                _x = val2
                buff.write(_struct_4f.pack(_x.r, _x.g, _x.b, _x.a))
            _x = val1.text
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack('<I%sB'%length, length, *_x))
            else:
                buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = val1.mesh_resource
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack('<I%sB'%length, length, *_x))
            else:
                buff.write(struct.pack('<I%ss'%length, length, _x))
            buff.write(_struct_B.pack(val1.mesh_use_embedded_materials))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``

    NOTE(generated): emitted by the genpy message generator. The parameter
    is named ``str`` by the generator and deliberately shadows the builtin
    within this method. ``start``/``end`` walk the buffer; each field is
    unpacked in .msg declaration order (little-endian).
    """
    try:
        if self.markers is None:
            self.markers = None
        end = 0
        # uint32 array-length prefix for self.markers
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        self.markers = []
        for i in range(0, length):
            val1 = visualization_msgs.msg.Marker()
            # --- std_msgs/Header ---
            _v9 = val1.header
            start = end
            end += 4
            (_v9.seq,) = _struct_I.unpack(str[start:end])
            _v10 = _v9.stamp
            _x = _v10
            start = end
            end += 8
            (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
            # length-prefixed string: frame_id
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                _v9.frame_id = str[start:end].decode('utf-8')
            else:
                _v9.frame_id = str[start:end]
            # length-prefixed string: ns
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                val1.ns = str[start:end].decode('utf-8')
            else:
                val1.ns = str[start:end]
            # id, type, action: three little-endian int32s
            _x = val1
            start = end
            end += 12
            (_x.id, _x.type, _x.action,) = _struct_3i.unpack(str[start:end])
            # --- geometry_msgs/Pose ---
            _v11 = val1.pose
            _v12 = _v11.position
            _x = _v12
            start = end
            end += 24
            (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
            _v13 = _v11.orientation
            _x = _v13
            start = end
            end += 32
            (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
            _v14 = val1.scale
            _x = _v14
            start = end
            end += 24
            (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
            _v15 = val1.color
            _x = _v15
            start = end
            end += 16
            (_x.r, _x.g, _x.b, _x.a,) = _struct_4f.unpack(str[start:end])
            # lifetime duration: two signed int32s
            _v16 = val1.lifetime
            _x = _v16
            start = end
            end += 8
            (_x.secs, _x.nsecs,) = _struct_2i.unpack(str[start:end])
            start = end
            end += 1
            (val1.frame_locked,) = _struct_B.unpack(str[start:end])
            val1.frame_locked = bool(val1.frame_locked)
            # variable-length points array
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            val1.points = []
            for i in range(0, length):
                val2 = geometry_msgs.msg.Point()
                _x = val2
                start = end
                end += 24
                (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
                val1.points.append(val2)
            # variable-length colors array
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            val1.colors = []
            for i in range(0, length):
                val2 = std_msgs.msg.ColorRGBA()
                _x = val2
                start = end
                end += 16
                (_x.r, _x.g, _x.b, _x.a,) = _struct_4f.unpack(str[start:end])
                val1.colors.append(val2)
            # length-prefixed string: text
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                val1.text = str[start:end].decode('utf-8')
            else:
                val1.text = str[start:end]
            # length-prefixed string: mesh_resource
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                val1.mesh_resource = str[start:end].decode('utf-8')
            else:
                val1.mesh_resource = str[start:end]
            start = end
            end += 1
            (val1.mesh_use_embedded_materials,) = _struct_B.unpack(str[start:end])
            val1.mesh_use_embedded_materials = bool(val1.mesh_use_embedded_materials)
            self.markers.append(val1)
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module

    NOTE(generated): byte-identical wire format to :meth:`serialize`;
    genpy emits a separate variant so messages holding numpy-backed
    arrays share one code path. Do not hand-edit.
    """
    try:
        # uint32 length prefix, then each Marker in .msg field order.
        length = len(self.markers)
        buff.write(_struct_I.pack(length))
        for val1 in self.markers:
            _v17 = val1.header
            buff.write(_struct_I.pack(_v17.seq))
            _v18 = _v17.stamp
            _x = _v18
            buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
            _x = _v17.frame_id
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack('<I%sB'%length, length, *_x))
            else:
                buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = val1.ns
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack('<I%sB'%length, length, *_x))
            else:
                buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = val1
            buff.write(_struct_3i.pack(_x.id, _x.type, _x.action))
            _v19 = val1.pose
            _v20 = _v19.position
            _x = _v20
            buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
            _v21 = _v19.orientation
            _x = _v21
            buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
            _v22 = val1.scale
            _x = _v22
            buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
            _v23 = val1.color
            _x = _v23
            buff.write(_struct_4f.pack(_x.r, _x.g, _x.b, _x.a))
            _v24 = val1.lifetime
            _x = _v24
            buff.write(_struct_2i.pack(_x.secs, _x.nsecs))
            buff.write(_struct_B.pack(val1.frame_locked))
            length = len(val1.points)
            buff.write(_struct_I.pack(length))
            for val2 in val1.points:
                _x = val2
                buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
            length = len(val1.colors)
            buff.write(_struct_I.pack(length))
            for val2 in val1.colors:
                _x = val2
                buff.write(_struct_4f.pack(_x.r, _x.g, _x.b, _x.a))
            _x = val1.text
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack('<I%sB'%length, length, *_x))
            else:
                buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = val1.mesh_resource
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            if python3:
                buff.write(struct.pack('<I%sB'%length, length, *_x))
            else:
                buff.write(struct.pack('<I%ss'%length, length, _x))
            buff.write(_struct_B.pack(val1.mesh_use_embedded_materials))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module

    NOTE(generated): same wire layout as :meth:`deserialize`; the parameter
    named ``str`` shadows the builtin by generator convention. Do not
    hand-edit.
    """
    try:
        if self.markers is None:
            self.markers = None
        end = 0
        # uint32 array-length prefix for self.markers
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        self.markers = []
        for i in range(0, length):
            val1 = visualization_msgs.msg.Marker()
            # --- std_msgs/Header ---
            _v25 = val1.header
            start = end
            end += 4
            (_v25.seq,) = _struct_I.unpack(str[start:end])
            _v26 = _v25.stamp
            _x = _v26
            start = end
            end += 8
            (_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                _v25.frame_id = str[start:end].decode('utf-8')
            else:
                _v25.frame_id = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                val1.ns = str[start:end].decode('utf-8')
            else:
                val1.ns = str[start:end]
            # id, type, action: three little-endian int32s
            _x = val1
            start = end
            end += 12
            (_x.id, _x.type, _x.action,) = _struct_3i.unpack(str[start:end])
            # --- geometry_msgs/Pose ---
            _v27 = val1.pose
            _v28 = _v27.position
            _x = _v28
            start = end
            end += 24
            (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
            _v29 = _v27.orientation
            _x = _v29
            start = end
            end += 32
            (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
            _v30 = val1.scale
            _x = _v30
            start = end
            end += 24
            (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
            _v31 = val1.color
            _x = _v31
            start = end
            end += 16
            (_x.r, _x.g, _x.b, _x.a,) = _struct_4f.unpack(str[start:end])
            # lifetime duration: two signed int32s
            _v32 = val1.lifetime
            _x = _v32
            start = end
            end += 8
            (_x.secs, _x.nsecs,) = _struct_2i.unpack(str[start:end])
            start = end
            end += 1
            (val1.frame_locked,) = _struct_B.unpack(str[start:end])
            val1.frame_locked = bool(val1.frame_locked)
            # variable-length points array
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            val1.points = []
            for i in range(0, length):
                val2 = geometry_msgs.msg.Point()
                _x = val2
                start = end
                end += 24
                (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
                val1.points.append(val2)
            # variable-length colors array
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            val1.colors = []
            for i in range(0, length):
                val2 = std_msgs.msg.ColorRGBA()
                _x = val2
                start = end
                end += 16
                (_x.r, _x.g, _x.b, _x.a,) = _struct_4f.unpack(str[start:end])
                val1.colors.append(val2)
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                val1.text = str[start:end].decode('utf-8')
            else:
                val1.text = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                val1.mesh_resource = str[start:end].decode('utf-8')
            else:
                val1.mesh_resource = str[start:end]
            start = end
            end += 1
            (val1.mesh_use_embedded_materials,) = _struct_B.unpack(str[start:end])
            val1.mesh_use_embedded_materials = bool(val1.mesh_use_embedded_materials)
            self.markers.append(val1)
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled struct.Struct instances shared by the (de)serialization code
# above; every format is little-endian ('<') per the ROS wire format.
_struct_I = genpy.struct_I
_struct_B = struct.Struct("<B")
_struct_2i = struct.Struct("<2i")
_struct_3i = struct.Struct("<3i")
_struct_4f = struct.Struct("<4f")
_struct_4d = struct.Struct("<4d")
_struct_2I = struct.Struct("<2I")
_struct_3d = struct.Struct("<3d")
|
SOKP/external_chromium_org | refs/heads/sokp-l5.1 | tools/win/split_link/graph_dependencies.py | 145 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import subprocess
import sys
# Directory containing this script; used to locate the bundled viz.js.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

def main():
    """Generate an HTML report of undesirable gyp dependency paths.

    Python 2 script. Runs gyp with the dump_dependency_json generator,
    asks gyp-explain.py for all browser -> webcore dependency paths in
    DOT form, deduplicates consecutive edge pairs, and writes a
    self-contained HTML page (rendered client-side by viz.js) to the
    path given as the single command-line argument. Returns a nonzero
    exit code on usage error or subprocess failure.
    """
    if len(sys.argv) != 2:
        print 'usage: %s <output.html>' % sys.argv[0]
        return 1
    env = os.environ.copy()
    env['GYP_GENERATORS'] = 'dump_dependency_json'
    print 'Dumping dependencies...'
    popen = subprocess.Popen(
        ['python', 'build/gyp_chromium'],
        shell=True, env=env)
    popen.communicate()
    if popen.returncode != 0:
        return popen.returncode
    print 'Finding problems...'
    popen = subprocess.Popen(
        ['python', 'tools/gyp-explain.py', '--dot',
         'chrome.gyp:browser#', 'core.gyp:webcore#'],
        stdout=subprocess.PIPE,
        shell=True)
    out, _ = popen.communicate()
    if popen.returncode != 0:
        return popen.returncode
    # Break into pairs to uniq to make graph less of a mess.
    print 'Simplifying...'
    deduplicated = set()
    # Skip the DOT header (first two lines) and the closing brace (last line).
    lines = out.splitlines()[2:-1]
    for line in lines:
        line = line.strip('\r\n ;')
        pairs = line.split(' -> ')
        for i in range(len(pairs) - 1):
            deduplicated.add('%s -> %s;' % (pairs[i], pairs[i + 1]))
    graph = 'strict digraph {\n' + '\n'.join(sorted(deduplicated)) + '\n}'
    print 'Writing report to %s...' % sys.argv[1]
    path_count = len(out.splitlines())
    with open(os.path.join(BASE_DIR, 'viz.js', 'viz.js')) as f:
        viz_js = f.read()
    with open(sys.argv[1], 'w') as f:
        f.write(PREFIX % path_count)
        f.write(graph)
        f.write(SUFFIX % viz_js)
    print 'Done.'
# HTML scaffolding around the DOT graph; the single %d is filled with the
# number of dependency paths found.
PREFIX = r'''<!DOCTYPE html>
<html>
 <head>
  <meta charset="utf-8">
  <title>Undesirable Dependencies</title>
 </head>
 <body>
  <h1>Undesirable Dependencies</h1>
  <h2>browser &rarr; webcore</h2>
  <h3>%d paths</h3>
  <script type="text/vnd.graphviz" id="graph">
'''
# The %s is filled with the contents of viz.js so the report is fully
# self-contained; the graph is rendered to SVG client-side.
SUFFIX = r'''
  </script>
  <script>%s</script>
  <div id="output">Rendering...</div>
  <script>
   setTimeout(function() {
     document.getElementById("output").innerHTML =
         Viz(document.getElementById("graph").innerHTML, "svg");
   }, 1);
  </script>
 </body>
</html>
'''

if __name__ == '__main__':
    sys.exit(main())
|
SUSE/azure-sdk-for-python | refs/heads/master | azure-keyvault/azure/keyvault/models/certificate_operation.py | 4 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CertificateOperation(Model):
    """A certificate operation is returned in case of asynchronous requests.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    NOTE(generated): AutoRest-generated model (see file header); hand edits
    are lost on regeneration, so only documentation is touched here. The
    ``_attribute_map`` drives msrest (de)serialization and must match the
    service wire format exactly.

    :ivar id: The certificate id.
    :vartype id: str
    :param issuer_parameters: Parameters for the issuer of the X509 component
     of a certificate.
    :type issuer_parameters: :class:`IssuerParameters
     <azure.keyvault.models.IssuerParameters>`
    :param csr: The certificate signing request (CSR) that is being used in
     the certificate operation.
    :type csr: bytearray
    :param cancellation_requested: Indicates if cancellation was requested on
     the certificate operation.
    :type cancellation_requested: bool
    :param status: Status of the certificate operation.
    :type status: str
    :param status_details: The status details of the certificate operation.
    :type status_details: str
    :param error: Error encountered, if any, during the certificate operation.
    :type error: :class:`Error <azure.keyvault.models.Error>`
    :param target: Location which contains the result of the certificate
     operation.
    :type target: str
    :param request_id: Identifier for the certificate operation.
    :type request_id: str
    """

    _validation = {
        'id': {'readonly': True},  # server-assigned; never sent by the client
    }

    # Maps Python attribute name -> wire key and msrest type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'issuer_parameters': {'key': 'issuer', 'type': 'IssuerParameters'},
        'csr': {'key': 'csr', 'type': 'bytearray'},
        'cancellation_requested': {'key': 'cancellation_requested', 'type': 'bool'},
        'status': {'key': 'status', 'type': 'str'},
        'status_details': {'key': 'status_details', 'type': 'str'},
        'error': {'key': 'error', 'type': 'Error'},
        'target': {'key': 'target', 'type': 'str'},
        'request_id': {'key': 'request_id', 'type': 'str'},
    }

    def __init__(self, issuer_parameters=None, csr=None, cancellation_requested=None, status=None, status_details=None, error=None, target=None, request_id=None):
        # 'id' is read-only (populated by the server), so it is always None here.
        self.id = None
        self.issuer_parameters = issuer_parameters
        self.csr = csr
        self.cancellation_requested = cancellation_requested
        self.status = status
        self.status_details = status_details
        self.error = error
        self.target = target
        self.request_id = request_id
|
lshain-android-source/external-chromium_org | refs/heads/master | tools/valgrind/browser_wrapper_win.py | 80 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import re
import sys
import subprocess
# TODO(timurrrr): we may use it on POSIX too to avoid code duplication once we
# support layout_tests, remove Dr. Memory specific code and verify it works
# on a "clean" Mac.
# Python 2 wrapper script: extracts an optional --test-name flag, gives the
# wrapped tool (Dr. Memory) a per-testcase log directory, then execs the
# remaining command line and propagates its exit code.
testcase_name = None
for arg in sys.argv:
    m = re.match("\-\-test\-name=(.*)", arg)
    if m:
        # Only one --test-name flag is expected on the command line.
        assert testcase_name is None
        testcase_name = m.groups()[0]

# arg #0 is the path to this python script
cmd_to_run = sys.argv[1:]

# TODO(timurrrr): this is Dr. Memory-specific
# Usually, we pass "-logdir" "foo\bar\spam path" args to Dr. Memory.
# To group reports per UI test, we want to put the reports for each test into a
# separate directory. This code can be simplified when we have
# http://code.google.com/p/drmemory/issues/detail?id=684 fixed.
logdir_idx = cmd_to_run.index("-logdir")
old_logdir = cmd_to_run[logdir_idx + 1]

wrapper_pid = str(os.getpid())

# On Windows, there is a chance of PID collision. We avoid it by appending the
# number of entries in the logdir at the end of wrapper_pid.
# This number is monotonic and we can't have two simultaneously running wrappers
# with the same PID.
wrapper_pid += "_%d" % len(glob.glob(old_logdir + "\\*"))

cmd_to_run[logdir_idx + 1] += "\\testcase.%s.logs" % wrapper_pid
os.makedirs(cmd_to_run[logdir_idx + 1])

# Record the testcase name alongside the logs so reports can be attributed.
if testcase_name:
    f = open(old_logdir + "\\testcase.%s.name" % wrapper_pid, "w")
    print >>f, testcase_name
    f.close()

exit(subprocess.call(cmd_to_run))
|
ASCrookes/django | refs/heads/master | django/conf/locale/bg/formats.py | 619 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Settings left commented out fall back to Django's global defaults.
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '  # Non-breaking space
# NUMBER_GROUPING =
|
aaannndddyyy/Replicating-DeepMind | refs/heads/master | libraries/cuda-convnet2/python_util/gpumodel.py | 175 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as n
import os
from time import time, asctime, localtime, strftime
from util import *
from data import *
from options import *
from math import ceil, floor, sqrt
from data import DataProvider, dp_types
import sys
import shutil
import platform
from os import linesep as NL
from threading import Thread
import tempfile as tf
class ModelStateException(Exception):
    """Raised when a model's state cannot be initialized or is inconsistent."""
class CheckpointWriter(Thread):
    """Background thread that atomically saves a checkpoint dictionary.

    The dictionary is pickled to a temporary file first and then renamed
    onto the final path, so a crash mid-write never leaves a truncated
    checkpoint behind. Older checkpoints in the same directory are removed
    afterwards, keeping only the newest one.
    """
    def __init__(self, path, dic):
        Thread.__init__(self)
        self.path = path
        self.dic = dic

    def run(self):
        target_dir = os.path.dirname(self.path)
        target_name = os.path.basename(self.path)
        # Write checkpoint to a temporary file in the parent directory.
        tmpfile = tf.NamedTemporaryFile(dir=os.path.dirname(target_dir), delete=False)
        pickle(tmpfile, self.dic)  # also closes tmpfile
        # Atomically move it onto the final filename.
        os.rename(tmpfile.name, self.path)
        # Prune every stale checkpoint, leaving only the one just written.
        for stale in os.listdir(target_dir):
            if stale != target_name:
                os.remove(os.path.join(target_dir, stale))
# GPU Model interface
class IGPUModel:
    """Base class driving training/testing of a GPU-backed model (Python 2).

    Subclasses implement the hook methods (``fill_excused_options``,
    ``init_model_state``, ``init_model_lib`` ...) and the class coordinates
    option parsing, data providers, the train/test loop, and asynchronous
    checkpointing via CheckpointWriter.
    """
    def __init__(self, model_name, op, load_dic, filename_options=[], dp_params={}):
        # NOTE(review): mutable default arguments ([] and {}) are shared
        # across calls; left as-is since this legacy codebase may rely on it.
        # these are input parameters
        self.model_name = model_name
        self.op = op
        self.options = op.options
        self.load_dic = load_dic
        self.filename_options = filename_options
        self.dp_params = dp_params
        self.device_ids = self.op.get_value('gpu')
        self.fill_excused_options()
        self.checkpoint_writer = None
        #assert self.op.all_values_given()

        # Mirror every parsed option as an attribute of the model.
        for o in op.get_options_list():
            setattr(self, o.name, o.value)
        self.loaded_from_checkpoint = load_dic is not None
        # these are things that the model must remember but they're not input parameters
        if self.loaded_from_checkpoint:
            self.model_state = load_dic["model_state"]
            self.save_file = self.options["save_file_override"].value if self.options["save_file_override"].value_given else self.options['load_file'].value
            if not os.path.isdir(self.save_file) and os.path.exists(self.save_file):
                self.save_file = os.path.dirname(self.save_file)
            # print self.options["save_file_override"].value, self.save_file
        else:
            self.model_state = {}
            self.save_file = self.options["save_file_override"].value if self.options["save_file_override"].value_given else os.path.join(self.options['save_path'].value, model_name + "_" + '_'.join(['%s_%s' % (char, self.options[opt].get_str_value()) for opt, char in filename_options]) + '_' + strftime('%Y-%m-%d_%H.%M.%S'))
            self.model_state["train_outputs"] = []
            self.model_state["test_outputs"] = []
            self.model_state["epoch"] = 1
            self.model_state["batchnum"] = self.train_batch_range[0]
        # print self.save_file
        self.init_data_providers()
        if load_dic:
            self.train_data_provider.advance_batch()

        # model state often requries knowledge of data provider, so it's initialized after
        try:
            self.init_model_state()
        except ModelStateException, e:
            print e
            sys.exit(1)
        for var, val in self.model_state.iteritems():
            setattr(self, var, val)

        self.import_model()
        self.init_model_lib()

    def import_model(self):
        # The compiled C++ backend is a module named "_<model_name>".
        print "========================="
        print "Importing %s C++ module" % ('_' + self.model_name)
        self.libmodel = __import__('_' + self.model_name)

    def fill_excused_options(self):
        # Hook for subclasses: fill options excused from "all values given".
        pass

    def init_data_providers(self):
        """Create the train/test DataProvider instances (exits on failure)."""
        self.dp_params['convnet'] = self
        try:
            self.test_data_provider = DataProvider.get_instance(self.data_path, self.test_batch_range,
                                                                type=self.dp_type, dp_params=self.dp_params, test=True)
            self.train_data_provider = DataProvider.get_instance(self.data_path, self.train_batch_range,
                                                                 self.model_state["epoch"], self.model_state["batchnum"],
                                                                 type=self.dp_type, dp_params=self.dp_params, test=False)
        except DataProviderException, e:
            print "Unable to create data provider: %s" % e
            self.print_data_providers()
            sys.exit()

    def init_model_state(self):
        # Hook for subclasses.
        pass

    def init_model_lib(self):
        # Hook for subclasses.
        pass

    def start(self):
        """Entry point: run test-only or full training, then exit(0)."""
        if self.test_only:
            self.test_outputs += [self.get_test_error()]
            self.print_test_results()
        else:
            self.train()
        self.cleanup()
        # force_save: write a final checkpoint and wait for the writer thread.
        if self.force_save:
            self.save_state().join()
        sys.exit(0)

    def train(self):
        """Main training loop: overlaps batch loading with GPU computation."""
        print "========================="
        print "Training %s" % self.model_name
        self.op.print_values()
        print "========================="
        self.print_model_state()
        print "Running on CUDA device(s) %s" % ", ".join("%d" % d for d in self.device_ids)
        print "Current time: %s" % asctime(localtime())
        print "Saving checkpoints to %s" % self.save_file
        print "========================="
        next_data = self.get_next_batch()
        while self.epoch <= self.num_epochs:
            data = next_data
            self.epoch, self.batchnum = data[0], data[1]
            self.print_iteration()
            sys.stdout.flush()
            compute_time_py = time()
            self.start_batch(data)

            # load the next batch while the current one is computing
            next_data = self.get_next_batch()

            batch_output = self.finish_batch()
            self.train_outputs += [batch_output]
            self.print_train_results()

            # Periodically evaluate on the test set and checkpoint.
            if self.get_num_batches_done() % self.testing_freq == 0:
                self.sync_with_host()
                self.test_outputs += [self.get_test_error()]
                self.print_test_results()
                self.print_test_status()
                self.conditional_save()

            self.print_elapsed_time(time() - compute_time_py)

    def cleanup(self):
        # Wait for any in-flight checkpoint write to finish.
        if self.checkpoint_writer is not None:
            self.checkpoint_writer.join()
            self.checkpoint_writer = None

    def print_model_state(self):
        # Hook for subclasses.
        pass

    def get_num_batches_done(self):
        return len(self.train_batch_range) * (self.epoch - 1) + self.batchnum - self.train_batch_range[0] + 1

    def get_next_batch(self, train=True):
        dp = self.train_data_provider
        if not train:
            dp = self.test_data_provider
        return self.parse_batch_data(dp.get_next_batch(), train=train)

    def parse_batch_data(self, batch_data, train=True):
        # Returns (epoch, batchnum, data).
        return batch_data[0], batch_data[1], batch_data[2]['data']

    def start_batch(self, batch_data, train=True):
        # Kick off asynchronous GPU computation for this batch.
        self.libmodel.startBatch(batch_data[2], not train)

    def finish_batch(self):
        # Block until the GPU finishes the current batch; returns outputs.
        return self.libmodel.finishBatch()

    def print_iteration(self):
        print "\t%d.%d..." % (self.epoch, self.batchnum),

    def print_elapsed_time(self, compute_time_py):
        print "(%.3f sec)" % (compute_time_py)

    def print_train_results(self):
        batch_error = self.train_outputs[-1][0]
        # Sanity check: a wildly out-of-range error means training diverged.
        if not (batch_error > 0 and batch_error < 2e20):
            print "Crazy train error: %.6f" % batch_error
            self.cleanup()

        print "Train error: %.6f " % (batch_error),

    def print_test_results(self):
        batch_error = self.test_outputs[-1][0]
        print "%s\t\tTest error: %.6f" % (NL, batch_error),

    def print_test_status(self):
        # "ok" if the test error improved (or first measurement), else "WORSE".
        status = (len(self.test_outputs) == 1 or self.test_outputs[-1][0] < self.test_outputs[-2][0]) and "ok" or "WORSE"
        print status,

    def sync_with_host(self):
        # Finish pending checkpoint I/O, then sync GPU state back to host.
        if self.checkpoint_writer is not None:
            self.checkpoint_writer.join()
            self.checkpoint_writer = None
        self.libmodel.syncWithHost()

    def conditional_save(self):
        # Only checkpoint when the test error is sane and below the threshold.
        batch_error = self.test_outputs[-1][0]
        if batch_error > 0 and batch_error < self.max_test_err:
            self.save_state()
        else:
            print "\tTest error > %g, not saving." % self.max_test_err,

    def aggregate_test_outputs(self, test_outputs):
        # Average each output component over the test batches run.
        test_error = tuple([sum(t[r] for t in test_outputs) / (1 if self.test_one else len(self.test_batch_range)) for r in range(len(test_outputs[-1]))])
        return test_error

    def get_test_error(self):
        """Run the test set (or one batch if test_one) and aggregate errors."""
        next_data = self.get_next_batch(train=False)
        test_outputs = []
        while True:
            data = next_data
            start_time_test = time()
            self.start_batch(data, train=False)
            load_next = (not self.test_one or self.test_only) and data[1] < self.test_batch_range[-1]
            if load_next: # load next batch
                next_data = self.get_next_batch(train=False)
            test_outputs += [self.finish_batch()]
            if self.test_only: # Print the individual batch results for safety
                print "batch %d: %s" % (data[1], str(test_outputs[-1])),
                self.print_elapsed_time(time() - start_time_test)
            if not load_next:
                break
            sys.stdout.flush()

        return self.aggregate_test_outputs(test_outputs)

    def set_var(self, var_name, var_val):
        # Keep the attribute and the persisted model_state entry in sync.
        setattr(self, var_name, var_val)
        self.model_state[var_name] = var_val
        return var_val

    def get_var(self, var_name):
        return self.model_state[var_name]

    def has_var(self, var_name):
        return var_name in self.model_state

    def save_state(self):
        """Snapshot model_state and write it asynchronously; returns the writer thread."""
        for att in self.model_state:
            if hasattr(self, att):
                self.model_state[att] = getattr(self, att)

        dic = {"model_state": self.model_state,
               "op": self.op}

        checkpoint_file = "%d.%d" % (self.epoch, self.batchnum)
        checkpoint_file_full_path = os.path.join(self.save_file, checkpoint_file)
        if not os.path.exists(self.save_file):
            os.makedirs(self.save_file)
        # Only one checkpoint write may be in flight at a time.
        assert self.checkpoint_writer is None
        self.checkpoint_writer = CheckpointWriter(checkpoint_file_full_path, dic)
        self.checkpoint_writer.start()
        print "-------------------------------------------------------"
        print "Saved checkpoint to %s" % self.save_file
        print "=======================================================",
        return self.checkpoint_writer

    def get_progress(self):
        # Fraction of total training batches completed, clamped to [0, 1].
        num_batches_total = self.num_epochs * len(self.train_batch_range)
        return min(1.0, max(0.0, float(self.get_num_batches_done()-1) / num_batches_total))

    @staticmethod
    def load_checkpoint(load_dir):
        # A directory holds one file per checkpoint named "<epoch>.<batch>";
        # load the most recent one (natural sort order).
        if os.path.isdir(load_dir):
            return unpickle(os.path.join(load_dir, sorted(os.listdir(load_dir), key=alphanum_key)[-1]))
        return unpickle(load_dir)

    @staticmethod
    def get_options_parser():
        """Build the common command-line OptionsParser shared by all models."""
        op = OptionsParser()
        op.add_option("load-file", "load_file", StringOptionParser, "Load file", default="", excuses=OptionsParser.EXCUSE_ALL)
        op.add_option("save-path", "save_path", StringOptionParser, "Save path", excuses=['save_file_override'])
        op.add_option("save-file", "save_file_override", StringOptionParser, "Save file override", excuses=['save_path'])
        op.add_option("train-range", "train_batch_range", RangeOptionParser, "Data batch range: training")
        op.add_option("test-range", "test_batch_range", RangeOptionParser, "Data batch range: testing")
        op.add_option("data-provider", "dp_type", StringOptionParser, "Data provider", default="default")
        op.add_option("test-freq", "testing_freq", IntegerOptionParser, "Testing frequency", default=25)
        op.add_option("epochs", "num_epochs", IntegerOptionParser, "Number of epochs", default=500)
        op.add_option("data-path", "data_path", StringOptionParser, "Data path")

        op.add_option("max-test-err", "max_test_err", FloatOptionParser, "Maximum test error for saving")
        op.add_option("test-only", "test_only", BooleanOptionParser, "Test and quit?", default=0)
        op.add_option("test-one", "test_one", BooleanOptionParser, "Test on one batch at a time?", default=1)
        op.add_option("force-save", "force_save", BooleanOptionParser, "Force save before quitting", default=0)
        op.add_option("gpu", "gpu", ListOptionParser(IntegerOptionParser), "GPU override")
        return op

    @staticmethod
    def print_data_providers():
        print "Available data providers:"
        for dp, desc in dp_types.iteritems():
            print "    %s: %s" % (dp, desc)

    @staticmethod
    def parse_options(op):
        """Parse options, merging with a loaded checkpoint's options if given.

        Returns (op, load_dic); exits the process on any parse/load error.
        """
        try:
            load_dic = None
            options = op.parse()
            load_location = None
            # print options['load_file'].value_given, options['save_file_override'].value_given
            # print options['save_file_override'].value
            if options['load_file'].value_given:
                load_location = options['load_file'].value
            elif options['save_file_override'].value_given and os.path.exists(options['save_file_override'].value):
                load_location = options['save_file_override'].value

            if load_location is not None:
                load_dic = IGPUModel.load_checkpoint(load_location)
                old_op = load_dic["op"]
                # Checkpointed options win; merge current values on top.
                old_op.merge_from(op)
                op = old_op
            op.eval_expr_defaults()
            return op, load_dic
        except OptionMissingException, e:
            print e
            op.print_usage()
        except OptionException, e:
            print e
        except UnpickleError, e:
            print "Error loading checkpoint:"
            print e
        sys.exit()
|
nfallen/servo | refs/heads/master | tests/jquery/run_jquery.py | 215 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import re
import subprocess
import sys
import BaseHTTPServer
import SimpleHTTPServer
import SocketServer
import threading
import urlparse
# List of jQuery modules that will be tested.
# TODO(gw): Disabled most of them as something has been
# introduced very recently that causes the resource task
# to panic - and hard fail doesn't exit the servo
# process when this happens.
# See https://github.com/servo/servo/issues/6210 and
# https://github.com/servo/servo/issues/6211
JQUERY_MODULES = [
    # "ajax", # panics
    # "attributes",
    # "callbacks",
    # "core", # mozjs crash
    # "css",
    # "data",
    # "deferred",
    # "dimensions",
    # "effects",
    # "event", # panics
    # "manipulation", # mozjs crash
    # "offset",
    # "queue",
    "selector",
    # "serialize",
    # "support",
    # "traversing",
    # "wrap"
]

# Port to run the HTTP server on for jQuery.
TEST_SERVER_PORT = 8192

# A regex for matching console.log output lines from the test runner.
# Fix: use a raw string so the \[ escapes reach the regex engine verbatim
# (non-raw strings with unknown escapes are deprecated in Python 3).
REGEX_PATTERN = r"^\[jQuery test\] \[([0-9]+)/([0-9]+)/([0-9]+)] (.*)"
# The result of a single test group.
class TestResult:
    """Outcome of one jQuery test group: pass/fail/total counts plus raw line."""

    def __init__(self, success, fail, total, text):
        self.success = int(success)
        self.fail = int(fail)
        self.total = int(total)
        self.text = text

    def _identity(self):
        # Tuple that defines equality and hashing for this result.
        return (self.success, self.fail, self.total, self.text)

    def __eq__(self, other):
        return self._identity() == other._identity()

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self._identity())

    def __repr__(self):
        return "ok={0} fail={1} total={2}".format(self.success, self.fail, self.total)
# Parse a console.log line from the test runner into (name, TestResult).
# Raises AttributeError when the line does not match REGEX_PATTERN
# (re.match returns None, so .groups() fails).
def parse_line_to_result(line):
    ok, failed, total, name = re.match(REGEX_PATTERN, line).groups()
    return name, TestResult(ok, failed, total, line)
# Parse an entire buffer of lines to a dictionary of test results,
# keyed by the test name. Later duplicates overwrite earlier ones,
# exactly like the original loop did.
def parse_string_to_results(buffer):
    return dict(parse_line_to_result(line) for line in buffer.splitlines())
# Run servo against a specific jQuery test module, yielding
# (name, TestResult) pairs parsed from its stdout. Lines that are not
# test-runner output are silently skipped.
def run_servo(servo_exe, module):
    url = "http://localhost:{0}/jquery/test/?module={1}".format(TEST_SERVER_PORT, module)
    command = [servo_exe, url, "-z", "-f"]
    proc = subprocess.Popen(command, stdout=subprocess.PIPE)
    # readline returns '' at EOF, which is the iter() sentinel here.
    for raw_line in iter(proc.stdout.readline, ''):
        try:
            yield parse_line_to_result(raw_line.rstrip())
        except AttributeError:
            # Not a "[jQuery test] ..." line; ignore it.
            pass
# Build the filename for a module's expected-results file.
def module_filename(module):
    return "expected_%s.txt" % module
# Read a module's stored expected results and parse them into a dict.
def read_existing_results(module):
    with open(module_filename(module), 'r') as fh:
        return parse_string_to_results(fh.read())
# Write a set of results to file.
# Bug fix: the loop previously iterated the module-global `test_results`
# instead of the `results` parameter, which only worked by accident when
# called from the "update" path (where a global of that name existed).
def write_results(module, results):
    with open(module_filename(module), 'w') as file:
        for result in results.itervalues():
            file.write(result.text + '\n')
# Print usage when the command line arguments are incorrect.
def print_usage():
    usage = "USAGE: {0} test|update servo_binary jquery_base_dir"
    print(usage.format(sys.argv[0]))
# Run a simple HTTP server to serve up the jQuery test suite.
# Blocks forever handling one request at a time on the main loop, with each
# request dispatched to a worker thread (ThreadingMixIn). Intended to be run
# on a daemon thread.
def run_http_server():
    class ThreadingSimpleServer(SocketServer.ThreadingMixIn,
                                BaseHTTPServer.HTTPServer):
        # Allow quick restarts without waiting for TIME_WAIT sockets.
        allow_reuse_address = True
    class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        # TODO(gw): HACK copy the fixed version from python
        # main repo - due to https://bugs.python.org/issue23112
        def send_head(self):
            """Serve a file or directory index; returns an open file or None."""
            path = self.translate_path(self.path)
            f = None
            if os.path.isdir(path):
                parts = urlparse.urlsplit(self.path)
                if not parts.path.endswith('/'):
                    # redirect browser - doing basically what apache does
                    self.send_response(301)
                    new_parts = (parts[0], parts[1], parts[2] + '/',
                                 parts[3], parts[4])
                    new_url = urlparse.urlunsplit(new_parts)
                    self.send_header("Location", new_url)
                    self.end_headers()
                    return None
                # Prefer an index file; otherwise fall through (for-else)
                # to a generated directory listing.
                for index in "index.html", "index.htm":
                    index = os.path.join(path, index)
                    if os.path.exists(index):
                        path = index
                        break
                else:
                    return self.list_directory(path)
            ctype = self.guess_type(path)
            try:
                # Always read in binary mode. Opening files in text mode may cause
                # newline translations, making the actual size of the content
                # transmitted *less* than the content-length!
                f = open(path, 'rb')
            except IOError:
                self.send_error(404, "File not found")
                return None
            try:
                self.send_response(200)
                self.send_header("Content-type", ctype)
                fs = os.fstat(f.fileno())
                self.send_header("Content-Length", str(fs[6]))
                self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
                self.end_headers()
                return f
            except:
                # Make sure the file is closed if header writing fails.
                f.close()
                raise
        def log_message(self, format, *args):
            # Suppress per-request logging; keeps test output readable.
            return
    server = ThreadingSimpleServer(('', TEST_SERVER_PORT), RequestHandler)
    while True:
        sys.stdout.flush()
        server.handle_request()
if __name__ == '__main__':
    # Entry point: <script> test|update <servo_binary> <jquery_base_dir>
    if len(sys.argv) == 4:
        cmd = sys.argv[1]
        servo_exe = sys.argv[2]
        base_dir = sys.argv[3]
        os.chdir(base_dir)
        # Ensure servo binary can be found
        if not os.path.isfile(servo_exe):
            print("Unable to find {0}. This script expects an existing build of Servo.".format(servo_exe))
            sys.exit(1)
        # Start the test server on a daemon thread so it dies with the process.
        httpd_thread = threading.Thread(target=run_http_server)
        httpd_thread.setDaemon(True)
        httpd_thread.start()
        if cmd == "test":
            print("Testing jQuery on Servo!")
            test_count = 0
            unexpected_count = 0
            individual_success = 0
            individual_total = 0
            # Test each module separately
            for module in JQUERY_MODULES:
                print("\t{0}".format(module))
                prev_test_results = read_existing_results(module)
                for name, current_result in run_servo(servo_exe, module):
                    test_count += 1
                    individual_success += current_result.success
                    individual_total += current_result.total
                    # If this test was in the previous results, compare them.
                    if name in prev_test_results:
                        prev_result = prev_test_results[name]
                        if prev_result == current_result:
                            print("\t\tOK: {0}".format(name))
                        else:
                            unexpected_count += 1
                            print("\t\tFAIL: {0}: WAS {1} NOW {2}".format(name, prev_result, current_result))
                        del prev_test_results[name]
                    else:
                        # There was a new test that wasn't expected
                        unexpected_count += 1
                        print("\t\tNEW: {0}".format(current_result.text))
                # Check what's left over, these are tests that were expected but didn't run this time.
                for name in prev_test_results:
                    test_count += 1
                    unexpected_count += 1
                    print("\t\tMISSING: {0}".format(prev_test_results[name].text))
            print("\tRan {0} test groups. {1} unexpected results.".format(test_count, unexpected_count))
            # Bug fix: guard against ZeroDivisionError when no tests ran at
            # all (e.g. servo crashed before emitting any results).
            success_percent = (100.0 * individual_success / individual_total
                               if individual_total else 0.0)
            print("\t{0} tests succeeded of {1} ({2:.2f}%)".format(individual_success,
                                                                   individual_total,
                                                                   success_percent))
            if unexpected_count > 0:
                sys.exit(1)
        elif cmd == "update":
            print("Updating jQuery expected results")
            for module in JQUERY_MODULES:
                print("\t{0}".format(module))
                test_results = {}
                for name, test_result in run_servo(servo_exe, module):
                    print("\t\t{0} {1}".format(name, test_result))
                    test_results[name] = test_result
                write_results(module, test_results)
        else:
            print_usage()
    else:
        print_usage()
|
wemanuel/smry | refs/heads/master | server-auth/ls/google-cloud-sdk/platform/gsutil/gslib/copy_helper.py | 11 | # -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
# Copyright 2011, Nexenta Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for copy functionality."""
from __future__ import absolute_import
import base64
from collections import namedtuple
import csv
import datetime
import errno
import gzip
from hashlib import md5
import json
import logging
import mimetypes
import multiprocessing
import os
import pickle
import random
import re
import shutil
import stat
import subprocess
import tempfile
import textwrap
import time
import traceback
from boto import config
import crcmod
import gslib
from gslib.cloud_api import ArgumentException
from gslib.cloud_api import CloudApi
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import PreconditionException
from gslib.cloud_api import Preconditions
from gslib.cloud_api import ResumableDownloadException
from gslib.cloud_api import ResumableUploadAbortException
from gslib.cloud_api import ResumableUploadException
from gslib.cloud_api import ResumableUploadStartOverException
from gslib.cloud_api_helper import GetDownloadSerializationDict
from gslib.commands.compose import MAX_COMPOSE_ARITY
from gslib.commands.config import DEFAULT_PARALLEL_COMPOSITE_UPLOAD_COMPONENT_SIZE
from gslib.commands.config import DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD
from gslib.cs_api_map import ApiSelector
from gslib.daisy_chain_wrapper import DaisyChainWrapper
from gslib.exception import CommandException
from gslib.exception import HashMismatchException
from gslib.file_part import FilePart
from gslib.hashing_helper import Base64EncodeHash
from gslib.hashing_helper import CalculateB64EncodedMd5FromContents
from gslib.hashing_helper import CalculateHashesFromContents
from gslib.hashing_helper import GetDownloadHashAlgs
from gslib.hashing_helper import GetUploadHashAlgs
from gslib.hashing_helper import HashingFileUploadWrapper
from gslib.parallelism_framework_util import ThreadAndProcessSafeDict
from gslib.parallelism_framework_util import ThreadSafeDict
from gslib.progress_callback import ConstructAnnounceText
from gslib.progress_callback import FileProgressCallbackHandler
from gslib.progress_callback import ProgressCallbackWithBackoff
from gslib.resumable_streaming_upload import ResumableStreamingJsonUploadWrapper
from gslib.storage_url import ContainsWildcard
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.tracker_file import DeleteTrackerFile
from gslib.tracker_file import GetTrackerFilePath
from gslib.tracker_file import RaiseUnwritableTrackerFileException
from gslib.tracker_file import ReadOrCreateDownloadTrackerFile
from gslib.tracker_file import TrackerFileType
from gslib.translation_helper import AddS3MarkerAclToObjectMetadata
from gslib.translation_helper import CopyObjectMetadata
from gslib.translation_helper import DEFAULT_CONTENT_TYPE
from gslib.translation_helper import GenerationFromUrlAndString
from gslib.translation_helper import ObjectMetadataFromHeaders
from gslib.translation_helper import PreconditionsFromHeaders
from gslib.translation_helper import S3MarkerAclFromObjectMetadata
from gslib.util import CreateLock
from gslib.util import DEFAULT_FILE_BUFFER_SIZE
from gslib.util import GetCloudApiInstance
from gslib.util import GetFileSize
from gslib.util import GetJsonResumableChunkSize
from gslib.util import GetMaxRetryDelay
from gslib.util import GetNumRetries
from gslib.util import GetStreamFromFileUrl
from gslib.util import HumanReadableToBytes
from gslib.util import IS_WINDOWS
from gslib.util import IsCloudSubdirPlaceholder
from gslib.util import MakeHumanReadable
from gslib.util import MIN_SIZE_COMPUTE_LOGGING
from gslib.util import MultiprocessingIsAvailable
from gslib.util import ResumableThreshold
from gslib.util import TEN_MIB
from gslib.util import UTF8
from gslib.wildcard_iterator import CreateWildcardIterator
# pylint: disable=g-import-not-at-top
if IS_WINDOWS:
import msvcrt
from ctypes import c_int
from ctypes import c_uint64
from ctypes import c_char_p
from ctypes import c_wchar_p
from ctypes import windll
from ctypes import POINTER
from ctypes import WINFUNCTYPE
from ctypes import WinError
# Declare copy_helper_opts as a global because namedtuple isn't aware of
# assigning to a class member (which breaks pickling done by multiprocessing).
# For details see
# http://stackoverflow.com/questions/16377215/how-to-pickle-a-namedtuple-instance-correctly
# Similarly can't pickle logger.
# pylint: disable=global-at-module-level
global global_copy_helper_opts, global_logger
# In-memory map of local files that are currently opened for write. Used to
# ensure that if we write to the same file twice (say, for example, because the
# user specified two identical source URLs), the writes occur serially.
global open_files_map
# A process-safe dict is only needed (and available) when multiprocessing
# works; on Windows or without multiprocessing a thread-safe dict suffices.
open_files_map = (
    ThreadSafeDict() if (IS_WINDOWS or not MultiprocessingIsAvailable()[0])
    else ThreadAndProcessSafeDict(multiprocessing.Manager()))
# For debugging purposes; if True, files and objects that fail hash validation
# will be saved with the below suffix appended.
_RENAME_ON_HASH_MISMATCH = False
_RENAME_ON_HASH_MISMATCH_SUFFIX = '_corrupt'
PARALLEL_UPLOAD_TEMP_NAMESPACE = (
    u'/gsutil/tmp/parallel_composite_uploads/for_details_see/gsutil_help_cp/')
PARALLEL_UPLOAD_STATIC_SALT = u"""
PARALLEL_UPLOAD_SALT_TO_PREVENT_COLLISIONS.
The theory is that no user will have prepended this to the front of
one of their object names and then done an MD5 hash of the name, and
then prepended PARALLEL_UPLOAD_TEMP_NAMESPACE to the front of their object
name. Note that there will be no problems with object name length since we
hash the original name.
"""
# When uploading a file, get the following fields in the response for
# filling in command output and manifests.
UPLOAD_RETURN_FIELDS = ['crc32c', 'etag', 'generation', 'md5Hash', 'size']
# This tuple is used only to encapsulate the arguments needed for
# command.Apply() in the parallel composite upload case.
# Note that content_type is used instead of a full apitools Object() because
# apitools objects are not picklable.
# filename: String name of file.
# file_start: start byte of file (may be in the middle of a file for partitioned
#     files).
# file_length: length of upload (may not be the entire length of a file for
#     partitioned files).
# src_url: FileUrl describing the source file.
# dst_url: CloudUrl describing the destination component file.
# canned_acl: canned_acl to apply to the uploaded file/component.
# content_type: content-type for final object, used for setting content-type
#     of components and final object.
# tracker_file: tracker file for this component.
# tracker_file_lock: tracker file lock for tracker file(s).
PerformParallelUploadFileToObjectArgs = namedtuple(
    'PerformParallelUploadFileToObjectArgs',
    'filename file_start file_length src_url dst_url canned_acl '
    'content_type tracker_file tracker_file_lock')
ObjectFromTracker = namedtuple('ObjectFromTracker',
                               'object_name generation')
# TODO: Refactor this file to be less cumbersome. In particular, some of the
# different paths (e.g., uploading a file to an object vs. downloading an
# object to a file) could be split into separate files.
# Chunk size to use while zipping/unzipping gzip files.
GZIP_CHUNK_SIZE = 8192
# File size above which gsutil suggests using parallel composite uploads.
PARALLEL_COMPOSITE_SUGGESTION_THRESHOLD = 150 * 1024 * 1024
# S3 requires special Multipart upload logic (that we currently don't implement)
# for files > 5GiB in size.
S3_MAX_UPLOAD_SIZE = 5 * 1024 * 1024 * 1024
# Tracks whether the parallel-composite-upload suggestion was already printed
# during this invocation.
suggested_parallel_composites = False
class FileConcurrencySkipError(Exception):
  """Signals that a file was skipped because a duplicate copy of it is
  already being written concurrently (see open_files_map)."""
def _RmExceptionHandler(cls, e):
"""Simple exception handler to allow post-completion status."""
cls.logger.error(str(e))
def _ParallelUploadCopyExceptionHandler(cls, e):
"""Simple exception handler to allow post-completion status."""
cls.logger.error(str(e))
cls.op_failure_count += 1
cls.logger.debug('\n\nEncountered exception while copying:\n%s\n',
traceback.format_exc())
def _PerformParallelUploadFileToObject(cls, args, thread_state=None):
  """Function argument to Apply for performing parallel composite uploads.

  Args:
    cls: Calling Command class.
    args: PerformParallelUploadFileToObjectArgs tuple describing the target.
    thread_state: gsutil Cloud API instance to use for the operation.

  Returns:
    StorageUrl representing a successfully uploaded component.
  """
  fp = FilePart(args.filename, args.file_start, args.file_length)
  gsutil_api = GetCloudApiInstance(cls, thread_state=thread_state)
  with fp:
    # We take many precautions with the component names that make collisions
    # effectively impossible. Specifying preconditions will just allow us to
    # reach a state in which uploads will always fail on retries.
    preconditions = None
    # Fill in content type if one was provided.
    dst_object_metadata = apitools_messages.Object(
        name=args.dst_url.object_name,
        bucket=args.dst_url.bucket_name,
        contentType=args.content_type)
    try:
      if global_copy_helper_opts.canned_acl:
        # No canned ACL support in JSON, force XML API to be used for
        # upload/copy operations.
        orig_prefer_api = gsutil_api.prefer_api
        gsutil_api.prefer_api = ApiSelector.XML
      ret = _UploadFileToObject(args.src_url, fp, args.file_length,
                                args.dst_url, dst_object_metadata,
                                preconditions, gsutil_api, cls.logger, cls,
                                _ParallelUploadCopyExceptionHandler,
                                gzip_exts=None, allow_splitting=False)
    finally:
      # Restore the API preference even if the upload raised, so this
      # thread's API instance is left in its original state.
      if global_copy_helper_opts.canned_acl:
        gsutil_api.prefer_api = orig_prefer_api
    # NOTE(review): ret[2] appears to be the uploaded component's StorageUrl
    # (per the docstring) -- confirm against _UploadFileToObject's return.
    component = ret[2]
    _AppendComponentTrackerToParallelUploadTrackerFile(
        args.tracker_file, component, args.tracker_file_lock)
  return ret
# Options shared by CopyHelper and copy-related functionality in CpCommand,
# RsyncCommand, and the Command class. Field order is part of the interface.
CopyHelperOpts = namedtuple('CopyHelperOpts', [
    'perform_mv', 'no_clobber', 'daisy_chain', 'read_args_from_stdin',
    'print_ver', 'use_manifest', 'preserve_acl', 'canned_acl',
    'skip_unsupported_objects', 'test_callback_file'])


# pylint: disable=global-variable-undefined
def CreateCopyHelperOpts(perform_mv=False, no_clobber=False, daisy_chain=False,
                         read_args_from_stdin=False, print_ver=False,
                         use_manifest=False, preserve_acl=False,
                         canned_acl=None, skip_unsupported_objects=False,
                         test_callback_file=None):
  """Builds, stores globally, and returns a CopyHelperOpts tuple."""
  global global_copy_helper_opts
  # Positional construction is safe because the argument order above matches
  # the namedtuple field order exactly.
  global_copy_helper_opts = CopyHelperOpts(
      perform_mv, no_clobber, daisy_chain, read_args_from_stdin, print_ver,
      use_manifest, preserve_acl, canned_acl, skip_unsupported_objects,
      test_callback_file)
  return global_copy_helper_opts
# pylint: disable=global-variable-undefined
# pylint: disable=global-variable-not-assigned
def GetCopyHelperOpts():
  """Returns the module-level CopyHelperOpts previously stored by
  CreateCopyHelperOpts(); raises NameError if called before it."""
  global global_copy_helper_opts
  return global_copy_helper_opts
def _SelectDownloadStrategy(dst_url):
  """Get download strategy based on the destination object.

  Args:
    dst_url: Destination StorageUrl.

  Returns:
    gsutil Cloud API DownloadStrategy.
  """
  # Cloud destinations always use the resumable strategy.
  if not dst_url.IsFileUrl():
    return CloudApi.DownloadStrategy.RESUMABLE
  # Special files (devnull, character devices) can't seek, so they must be
  # downloaded in one shot. Check devnull explicitly first because os.stat
  # doesn't work on 'nul' in Windows.
  if dst_url.object_name == os.devnull:
    return CloudApi.DownloadStrategy.ONE_SHOT
  try:
    if stat.S_ISCHR(os.stat(dst_url.object_name).st_mode):
      return CloudApi.DownloadStrategy.ONE_SHOT
  except OSError:
    pass
  return CloudApi.DownloadStrategy.RESUMABLE
def _GetUploadTrackerData(tracker_file_name, logger):
"""Reads tracker data from an upload tracker file if it exists.
Args:
tracker_file_name: Tracker file name for this upload.
logger: for outputting log messages.
Returns:
Serialization data if the tracker file already exists (resume existing
upload), None otherwise.
"""
tracker_file = None
# If we already have a matching tracker file, get the serialization data
# so that we can resume the upload.
try:
tracker_file = open(tracker_file_name, 'r')
tracker_data = tracker_file.read()
return tracker_data
except IOError as e:
# Ignore non-existent file (happens first time a upload is attempted on an
# object, or when re-starting an upload after a
# ResumableUploadStartOverException), but warn user for other errors.
if e.errno != errno.ENOENT:
logger.warn('Couldn\'t read upload tracker file (%s): %s. Restarting '
'upload from scratch.', tracker_file_name, e.strerror)
finally:
if tracker_file:
tracker_file.close()
def InsistDstUrlNamesContainer(exp_dst_url, have_existing_dst_container,
                               command_name):
  """Ensures the destination URL names a container.

  Acceptable containers include directory, bucket, bucket
  subdir, and non-existent bucket subdir.

  Args:
    exp_dst_url: Wildcard-expanded destination StorageUrl.
    have_existing_dst_container: bool indicator of whether exp_dst_url
        names a container (directory, bucket, or existing bucket subdir).
    command_name: Name of command making call. May not be the same as the
        calling class's self.command_name in the case of commands implemented
        atop other commands (like mv command).

  Raises:
    CommandException: if the URL being checked does not name a container.
  """
  names_non_container = (
      (exp_dst_url.IsFileUrl() and not exp_dst_url.IsDirectory()) or
      (exp_dst_url.IsCloudUrl() and exp_dst_url.IsBucket() and
       not have_existing_dst_container))
  if names_non_container:
    raise CommandException('Destination URL must name a directory, bucket, '
                           'or bucket\nsubdirectory for the multiple '
                           'source form of the %s command.' % command_name)
def _ShouldTreatDstUrlAsBucketSubDir(have_multiple_srcs, dst_url,
have_existing_dest_subdir,
src_url_names_container,
recursion_requested):
"""Checks whether dst_url should be treated as a bucket "sub-directory".
The decision about whether something constitutes a bucket "sub-directory"
depends on whether there are multiple sources in this request and whether
there is an existing bucket subdirectory. For example, when running the
command:
gsutil cp file gs://bucket/abc
if there's no existing gs://bucket/abc bucket subdirectory we should copy
file to the object gs://bucket/abc. In contrast, if
there's an existing gs://bucket/abc bucket subdirectory we should copy
file to gs://bucket/abc/file. And regardless of whether gs://bucket/abc
exists, when running the command:
gsutil cp file1 file2 gs://bucket/abc
we should copy file1 to gs://bucket/abc/file1 (and similarly for file2).
Finally, for recursive copies, if the source is a container then we should
copy to a container as the target. For example, when running the command:
gsutil cp -r dir1 gs://bucket/dir2
we should copy the subtree of dir1 to gs://bucket/dir2.
Note that we don't disallow naming a bucket "sub-directory" where there's
already an object at that URL. For example it's legitimate (albeit
confusing) to have an object called gs://bucket/dir and
then run the command
gsutil cp file1 file2 gs://bucket/dir
Doing so will end up with objects gs://bucket/dir, gs://bucket/dir/file1,
and gs://bucket/dir/file2.
Args:
have_multiple_srcs: Bool indicator of whether this is a multi-source
operation.
dst_url: StorageUrl to check.
have_existing_dest_subdir: bool indicator whether dest is an existing
subdirectory.
src_url_names_container: bool indicator of whether the source URL
is a container.
recursion_requested: True if a recursive operation has been requested.
Returns:
bool indicator.
"""
if have_existing_dest_subdir:
return True
if dst_url.IsCloudUrl():
return (have_multiple_srcs or
(src_url_names_container and recursion_requested))
def _ShouldTreatDstUrlAsSingleton(have_multiple_srcs,
have_existing_dest_subdir, dst_url,
recursion_requested):
"""Checks that dst_url names a single file/object after wildcard expansion.
It is possible that an object path might name a bucket sub-directory.
Args:
have_multiple_srcs: Bool indicator of whether this is a multi-source
operation.
have_existing_dest_subdir: bool indicator whether dest is an existing
subdirectory.
dst_url: StorageUrl to check.
recursion_requested: True if a recursive operation has been requested.
Returns:
bool indicator.
"""
if recursion_requested:
return False
if dst_url.IsFileUrl():
return not dst_url.IsDirectory()
else: # dst_url.IsCloudUrl()
return (not have_multiple_srcs and
not have_existing_dest_subdir and
dst_url.IsObject())
def ConstructDstUrl(src_url, exp_src_url, src_url_names_container,
                    have_multiple_srcs, exp_dst_url, have_existing_dest_subdir,
                    recursion_requested):
  """Constructs the destination URL for a given exp_src_url/exp_dst_url pair.

  Uses context-dependent naming rules that mimic Linux cp and mv behavior.

  Args:
    src_url: Source StorageUrl to be copied.
    exp_src_url: Single StorageUrl from wildcard expansion of src_url.
    src_url_names_container: True if src_url names a container (including the
        case of a wildcard-named bucket subdir (like gs://bucket/abc,
        where gs://bucket/abc/* matched some objects).
    have_multiple_srcs: True if this is a multi-source request. This can be
        true if src_url wildcard-expanded to multiple URLs or if there were
        multiple source URLs in the request.
    exp_dst_url: the expanded StorageUrl requested for the cp destination.
        Final written path is constructed from this plus a context-dependent
        variant of src_url.
    have_existing_dest_subdir: bool indicator whether dest is an existing
        subdirectory.
    recursion_requested: True if a recursive operation has been requested.

  Returns:
    StorageUrl to use for copy.

  Raises:
    CommandException if destination object name not specified for
    source and source is a stream.
  """
  if _ShouldTreatDstUrlAsSingleton(
      have_multiple_srcs, have_existing_dest_subdir, exp_dst_url,
      recursion_requested):
    # We're copying one file or object to one file or object.
    return exp_dst_url
  if exp_src_url.IsFileUrl() and exp_src_url.IsStream():
    # Streams have no name to derive a destination from.
    if have_existing_dest_subdir:
      raise CommandException('Destination object name needed when '
                             'source is a stream')
    return exp_dst_url
  if not recursion_requested and not have_multiple_srcs:
    # We're copying one file or object to a subdirectory. Append final comp
    # of exp_src_url to exp_dst_url.
    src_final_comp = exp_src_url.object_name.rpartition(src_url.delim)[-1]
    return StorageUrlFromString('%s%s%s' % (
        exp_dst_url.url_string.rstrip(exp_dst_url.delim),
        exp_dst_url.delim, src_final_comp))
  # Else we're copying multiple sources to a directory, bucket, or a bucket
  # "sub-directory".
  # Ensure exp_dst_url ends in delim char if we're doing a multi-src copy or
  # a copy to a directory. (The check for copying to a directory needs
  # special-case handling so that the command:
  #   gsutil cp gs://bucket/obj dir
  # will turn into file://dir/ instead of file://dir -- the latter would cause
  # the file "dirobj" to be created.)
  # Note: need to check have_multiple_srcs or src_url.names_container()
  # because src_url could be a bucket containing a single object, named
  # as gs://bucket.
  if ((have_multiple_srcs or src_url_names_container or
       (exp_dst_url.IsFileUrl() and exp_dst_url.IsDirectory()))
      and not exp_dst_url.url_string.endswith(exp_dst_url.delim)):
    exp_dst_url = StorageUrlFromString('%s%s' % (exp_dst_url.url_string,
                                                 exp_dst_url.delim))
  # Making naming behavior match how things work with local Linux cp and mv
  # operations depends on many factors, including whether the destination is a
  # container, the plurality of the source(s), and whether the mv command is
  # being used:
  # 1. For the "mv" command that specifies a non-existent destination subdir,
  #    renaming should occur at the level of the src subdir, vs appending that
  #    subdir beneath the dst subdir like is done for copying. For example:
  #      gsutil rm -r gs://bucket
  #      gsutil cp -r dir1 gs://bucket
  #      gsutil cp -r dir2 gs://bucket/subdir1
  #      gsutil mv gs://bucket/subdir1 gs://bucket/subdir2
  #    would (if using cp naming behavior) end up with paths like:
  #      gs://bucket/subdir2/subdir1/dir2/.svn/all-wcprops
  #    whereas mv naming behavior should result in:
  #      gs://bucket/subdir2/dir2/.svn/all-wcprops
  # 2. Copying from directories, buckets, or bucket subdirs should result in
  #    objects/files mirroring the source directory hierarchy. For example:
  #      gsutil cp dir1/dir2 gs://bucket
  #    should create the object gs://bucket/dir2/file2, assuming dir1/dir2
  #    contains file2).
  #    To be consistent with Linux cp behavior, there's one more wrinkle when
  #    working with subdirs: The resulting object names depend on whether the
  #    destination subdirectory exists. For example, if gs://bucket/subdir
  #    exists, the command:
  #      gsutil cp -r dir1/dir2 gs://bucket/subdir
  #    should create objects named like gs://bucket/subdir/dir2/a/b/c. In
  #    contrast, if gs://bucket/subdir does not exist, this same command
  #    should create objects named like gs://bucket/subdir/a/b/c.
  # 3. Copying individual files or objects to dirs, buckets or bucket subdirs
  #    should result in objects/files named by the final source file name
  #    component. Example:
  #      gsutil cp dir1/*.txt gs://bucket
  #    should create the objects gs://bucket/f1.txt and gs://bucket/f2.txt,
  #    assuming dir1 contains f1.txt and f2.txt.
  recursive_move_to_new_subdir = False
  if (global_copy_helper_opts.perform_mv and recursion_requested
      and src_url_names_container and not have_existing_dest_subdir):
    # Case 1. Handle naming rules for bucket subdir mv. Here we want to
    # line up the src_url against its expansion, to find the base to build
    # the new name. For example, running the command:
    #   gsutil mv gs://bucket/abcd gs://bucket/xyz
    # when processing exp_src_url=gs://bucket/abcd/123
    # exp_src_url_tail should become /123
    # Note: mv.py code disallows wildcard specification of source URL.
    recursive_move_to_new_subdir = True
    exp_src_url_tail = (
        exp_src_url.url_string[len(src_url.url_string):])
    dst_key_name = '%s/%s' % (exp_dst_url.object_name.rstrip('/'),
                              exp_src_url_tail.strip('/'))
  elif src_url_names_container and (exp_dst_url.IsCloudUrl() or
                                    exp_dst_url.IsDirectory()):
    # Case 2. Container copy to a destination other than a file.
    # Build dst_key_name from subpath of exp_src_url past
    # where src_url ends. For example, for src_url=gs://bucket/ and
    # exp_src_url=gs://bucket/src_subdir/obj, dst_key_name should be
    # src_subdir/obj.
    src_url_path_sans_final_dir = GetPathBeforeFinalDir(src_url)
    dst_key_name = exp_src_url.versionless_url_string[
        len(src_url_path_sans_final_dir):].lstrip(src_url.delim)
    # Handle case where dst_url is a non-existent subdir.
    if not have_existing_dest_subdir:
      dst_key_name = dst_key_name.partition(src_url.delim)[-1]
    # Handle special case where src_url was a directory named with '.' or
    # './', so that running a command like:
    #   gsutil cp -r . gs://dest
    # will produce obj names of the form gs://dest/abc instead of
    # gs://dest/./abc.
    if dst_key_name.startswith('.%s' % os.sep):
      dst_key_name = dst_key_name[2:]
  else:
    # Case 3. Single-file-per-destination naming: use the final source
    # name component.
    dst_key_name = exp_src_url.object_name.rpartition(src_url.delim)[-1]
  if (not recursive_move_to_new_subdir and (
      exp_dst_url.IsFileUrl() or _ShouldTreatDstUrlAsBucketSubDir(
          have_multiple_srcs, exp_dst_url, have_existing_dest_subdir,
          src_url_names_container, recursion_requested))):
    # Prefix the destination subdir (if any) onto the computed key name.
    if exp_dst_url.object_name and exp_dst_url.object_name.endswith(
        exp_dst_url.delim):
      dst_key_name = '%s%s%s' % (
          exp_dst_url.object_name.rstrip(exp_dst_url.delim),
          exp_dst_url.delim, dst_key_name)
    else:
      delim = exp_dst_url.delim if exp_dst_url.object_name else ''
      dst_key_name = '%s%s%s' % (exp_dst_url.object_name or '',
                                 delim, dst_key_name)
  new_exp_dst_url = exp_dst_url.Clone()
  # Normalize source delimiters to the destination's delimiter.
  new_exp_dst_url.object_name = dst_key_name.replace(src_url.delim,
                                                     exp_dst_url.delim)
  return new_exp_dst_url
def _CreateDigestsFromDigesters(digesters):
digests = {}
if digesters:
for alg in digesters:
digests[alg] = base64.encodestring(
digesters[alg].digest()).rstrip('\n')
return digests
def _CreateDigestsFromLocalFile(logger, algs, file_name, src_obj_metadata):
  """Creates a base64 CRC32C and/or MD5 digest from file_name.

  Args:
    logger: for outputting log messages.
    algs: list of algorithms to compute.
    file_name: file to digest.
    src_obj_metadata: metadata of source object.

  Returns:
    Dict of algorithm name : base 64 encoded digest
  """
  hash_dict = {}
  if 'md5' in algs:
    # Hashing a large file takes a while; tell the user what's happening.
    if src_obj_metadata.size and src_obj_metadata.size > TEN_MIB:
      logger.info(
          'Computing MD5 for %s...', file_name)
    hash_dict['md5'] = md5()
  if 'crc32c' in algs:
    hash_dict['crc32c'] = crcmod.predefined.Crc('crc-32c')
  with open(file_name, 'rb') as fp:
    CalculateHashesFromContents(
        fp, hash_dict, ProgressCallbackWithBackoff(
            src_obj_metadata.size,
            FileProgressCallbackHandler(
                ConstructAnnounceText('Hashing', file_name), logger).call))
  digests = {}
  # items() rather than the Python-2-only iteritems(): the dict has at most
  # two entries, so there is no meaningful cost to materializing it, and the
  # code stays portable.
  for alg_name, digest in hash_dict.items():
    digests[alg_name] = Base64EncodeHash(digest.hexdigest())
  return digests
def _CheckCloudHashes(logger, src_url, dst_url, src_obj_metadata,
dst_obj_metadata):
"""Validates integrity of two cloud objects copied via daisy-chain.
Args:
logger: for outputting log messages.
src_url: CloudUrl for source cloud object.
dst_url: CloudUrl for destination cloud object.
src_obj_metadata: Cloud Object metadata for object being downloaded from.
dst_obj_metadata: Cloud Object metadata for object being uploaded to.
Raises:
CommandException: if cloud digests don't match local digests.
"""
checked_one = False
download_hashes = {}
upload_hashes = {}
if src_obj_metadata.md5Hash:
download_hashes['md5'] = src_obj_metadata.md5Hash
if src_obj_metadata.crc32c:
download_hashes['crc32c'] = src_obj_metadata.crc32c
if dst_obj_metadata.md5Hash:
upload_hashes['md5'] = dst_obj_metadata.md5Hash
if dst_obj_metadata.crc32c:
upload_hashes['crc32c'] = dst_obj_metadata.crc32c
for alg, upload_b64_digest in upload_hashes.iteritems():
if alg not in download_hashes:
continue
download_b64_digest = download_hashes[alg]
logger.debug(
'Comparing source vs destination %s-checksum for %s. (%s/%s)', alg,
dst_url, download_b64_digest, upload_b64_digest)
if download_b64_digest != upload_b64_digest:
raise HashMismatchException(
'%s signature for source object (%s) doesn\'t match '
'destination object digest (%s). Object (%s) will be deleted.' % (
alg, download_b64_digest, upload_b64_digest, dst_url))
checked_one = True
if not checked_one:
# One known way this can currently happen is when downloading objects larger
# than 5 GiB from S3 (for which the etag is not an MD5).
logger.warn(
'WARNING: Found no hashes to validate object downloaded from %s and '
'uploaded to %s. Integrity cannot be assured without hashes.',
src_url, dst_url)
def _CheckHashes(logger, obj_url, obj_metadata, file_name, digests,
is_upload=False):
"""Validates integrity by comparing cloud digest to local digest.
Args:
logger: for outputting log messages.
obj_url: CloudUrl for cloud object.
obj_metadata: Cloud Object being downloaded from or uploaded to.
file_name: Local file name on disk being downloaded to or uploaded from.
digests: Computed Digests for the object.
is_upload: If true, comparing for an uploaded object (controls logging).
Raises:
CommandException: if cloud digests don't match local digests.
"""
local_hashes = digests
cloud_hashes = {}
if obj_metadata.md5Hash:
cloud_hashes['md5'] = obj_metadata.md5Hash.rstrip('\n')
if obj_metadata.crc32c:
cloud_hashes['crc32c'] = obj_metadata.crc32c.rstrip('\n')
checked_one = False
for alg in local_hashes:
if alg not in cloud_hashes:
continue
local_b64_digest = local_hashes[alg]
cloud_b64_digest = cloud_hashes[alg]
logger.debug(
'Comparing local vs cloud %s-checksum for %s. (%s/%s)', alg, file_name,
local_b64_digest, cloud_b64_digest)
if local_b64_digest != cloud_b64_digest:
raise HashMismatchException(
'%s signature computed for local file (%s) doesn\'t match '
'cloud-supplied digest (%s). %s (%s) will be deleted.' % (
alg, local_b64_digest, cloud_b64_digest,
'Cloud object' if is_upload else 'Local file',
obj_url if is_upload else file_name))
checked_one = True
if not checked_one:
if is_upload:
logger.warn(
'WARNING: Found no hashes to validate object uploaded to %s. '
'Integrity cannot be assured without hashes.', obj_url)
else:
# One known way this can currently happen is when downloading objects larger
# than 5 GB from S3 (for which the etag is not an MD5).
logger.warn(
'WARNING: Found no hashes to validate object downloaded to %s. '
'Integrity cannot be assured without hashes.', file_name)
def IsNoClobberServerException(e):
  """Checks to see if the server attempted to clobber a file.

  In this case we specified via a precondition that we didn't want the file
  clobbered.

  Args:
    e: The Exception that was generated by a failed copy operation

  Returns:
    bool indicator - True indicates that the server did attempt to clobber
        an existing file.
  """
  if isinstance(e, PreconditionException):
    return True
  # A resumable upload that failed with HTTP 412 (Precondition Failed) also
  # indicates a precondition-protected clobber attempt.
  return isinstance(e, ResumableUploadException) and '412' in e.message
def CheckForDirFileConflict(exp_src_url, dst_url):
  """Checks whether copying exp_src_url into dst_url is not possible.

  This happens if a directory exists in local file system where a file
  needs to go or vice versa. In that case we print an error message and
  exits. Example: if the file "./x" exists and you try to do:
    gsutil cp gs://mybucket/x/y .
  the request can't succeed because it requires a directory where
  the file x exists.

  Note that we don't enforce any corresponding restrictions for buckets,
  because the flat namespace semantics for buckets doesn't prohibit such
  cases the way hierarchical file systems do. For example, if a bucket
  contains an object called gs://bucket/dir and then you run the command:
    gsutil cp file1 file2 gs://bucket/dir
  you'll end up with objects gs://bucket/dir, gs://bucket/dir/file1, and
  gs://bucket/dir/file2.

  Args:
    exp_src_url: Expanded source StorageUrl.
    dst_url: Destination StorageUrl.

  Raises:
    CommandException: if errors encountered.
  """
  # Only file destination URLs can conflict with the local filesystem.
  if dst_url.IsCloudUrl():
    return
  dst_path = dst_url.object_name
  containing_dir = os.path.dirname(dst_path)
  # A file sitting where the containing directory must be created.
  if os.path.isfile(containing_dir):
    raise CommandException('Cannot retrieve %s because a file exists '
                           'where a directory needs to be created (%s).' %
                           (exp_src_url.url_string, containing_dir))
  # A directory sitting where the destination file must be created.
  if os.path.isdir(dst_path):
    raise CommandException('Cannot retrieve %s because a directory exists '
                           '(%s) where the file needs to be created.' %
                           (exp_src_url.url_string, dst_path))
def _PartitionFile(fp, file_size, src_url, content_type, canned_acl,
                   dst_bucket_url, random_prefix, tracker_file,
                   tracker_file_lock):
  """Partitions a file into FilePart objects to be uploaded and later composed.

  These objects, when composed, will match the original file. This entails
  splitting the file into parts, naming and forming a destination URL for each
  part, and also providing the PerformParallelUploadFileToObjectArgs
  corresponding to each part.

  Args:
    fp: The file object to be partitioned.
    file_size: The size of fp, in bytes.
    src_url: Source FileUrl from the original command.
    content_type: content type for the component and final objects.
    canned_acl: The user-provided canned_acl, if applicable.
    dst_bucket_url: CloudUrl for the destination bucket
    random_prefix: The randomly-generated prefix used to prevent collisions
                   among the temporary component names.
    tracker_file: The path to the parallel composite upload tracker file.
    tracker_file_lock: The lock protecting access to the tracker file.

  Returns:
    dst_args: Dict mapping each temporary component object name to the
        PerformParallelUploadFileToObjectArgs used to upload that component.
  """
  parallel_composite_upload_component_size = HumanReadableToBytes(
      config.get('GSUtil', 'parallel_composite_upload_component_size',
                 DEFAULT_PARALLEL_COMPOSITE_UPLOAD_COMPONENT_SIZE))
  (num_components, component_size) = _GetPartitionInfo(
      file_size, MAX_COMPOSE_ARITY, parallel_composite_upload_component_size)
  # Note: the previous version of this function also accumulated the component
  # names in an unused local list (file_names); that dead code was removed.
  dst_args = {}  # Arguments to create commands and pass to subprocesses.
  for i in range(num_components):
    # "Salt" the object name with something a user is very unlikely to have
    # used in an object name, then hash the extended name to make sure
    # we don't run into problems with name length. Using a deterministic
    # naming scheme for the temporary components allows users to take
    # advantage of resumable uploads for each component.
    encoded_name = (PARALLEL_UPLOAD_STATIC_SALT + fp.name).encode(UTF8)
    content_md5 = md5()
    content_md5.update(encoded_name)
    digest = content_md5.hexdigest()
    # The trailing '_<i>' is relied upon for ordering components at compose
    # time (see _GetComponentNumber in _DoParallelCompositeUpload).
    temp_file_name = (random_prefix + PARALLEL_UPLOAD_TEMP_NAMESPACE +
                      digest + '_' + str(i))
    tmp_dst_url = dst_bucket_url.Clone()
    tmp_dst_url.object_name = temp_file_name
    if i < (num_components - 1):
      # Every component except possibly the last is the same size.
      file_part_length = component_size
    else:
      # The last component just gets all of the remaining bytes.
      file_part_length = (file_size - ((num_components - 1) * component_size))
    offset = i * component_size
    func_args = PerformParallelUploadFileToObjectArgs(
        fp.name, offset, file_part_length, src_url, tmp_dst_url, canned_acl,
        content_type, tracker_file, tracker_file_lock)
    dst_args[temp_file_name] = func_args
  return dst_args
def _DoParallelCompositeUpload(fp, src_url, dst_url, dst_obj_metadata,
                               canned_acl, file_size, preconditions, gsutil_api,
                               command_obj, copy_exception_handler):
  """Uploads a local file to a cloud object using parallel composite upload.

  The file is partitioned into parts, and then the parts are uploaded in
  parallel, composed to form the original destination object, and deleted.

  Args:
    fp: The file object to be uploaded.
    src_url: FileUrl representing the local file.
    dst_url: CloudUrl representing the destination file.
    dst_obj_metadata: apitools Object describing the destination object.
    canned_acl: The canned acl to apply to the object, if any.
    file_size: The size of the source file in bytes.
    preconditions: Cloud API Preconditions for the final object.
    gsutil_api: gsutil Cloud API instance to use.
    command_obj: Command object (for calling Apply).
    copy_exception_handler: Copy exception handler (for use in Apply).

  Returns:
    Elapsed upload time, uploaded Object with generation, crc32c, and size
    fields populated.

  Raises:
    CommandException: if one or more components failed to upload (the
        successfully-uploaded components are left in place for a later retry).
  """
  start_time = time.time()
  dst_bucket_url = StorageUrlFromString(dst_url.bucket_url_string)
  api_selector = gsutil_api.GetApiSelector(provider=dst_url.scheme)
  # Determine which components, if any, have already been successfully
  # uploaded.
  tracker_file = GetTrackerFilePath(dst_url, TrackerFileType.PARALLEL_UPLOAD,
                                    api_selector, src_url)
  tracker_file_lock = CreateLock()
  (random_prefix, existing_components) = (
      _ParseParallelUploadTrackerFile(tracker_file, tracker_file_lock))
  # Create the initial tracker file for the upload.
  _CreateParallelUploadTrackerFile(tracker_file, random_prefix,
                                   existing_components, tracker_file_lock)
  # Get the set of all components that should be uploaded.
  # dst_args maps temporary component object name -> upload args for it.
  dst_args = _PartitionFile(
      fp, file_size, src_url, dst_obj_metadata.contentType, canned_acl,
      dst_bucket_url, random_prefix, tracker_file, tracker_file_lock)
  # Split into: still-needed uploads, reusable components from a prior run,
  # and stale objects from a prior run that must be deleted.
  (components_to_upload, existing_components, existing_objects_to_delete) = (
      FilterExistingComponents(dst_args, existing_components, dst_bucket_url,
                               gsutil_api))
  # In parallel, copy all of the file parts that haven't already been
  # uploaded to temporary objects.
  cp_results = command_obj.Apply(
      _PerformParallelUploadFileToObject, components_to_upload,
      copy_exception_handler, ('op_failure_count', 'total_bytes_transferred'),
      arg_checker=gslib.command.DummyArgChecker,
      parallel_operations_override=True, should_return_results=True)
  uploaded_components = []
  for cp_result in cp_results:
    # NOTE(review): cp_result appears to be a tuple whose third element is
    # the uploaded component's URL — confirm against
    # _PerformParallelUploadFileToObject's return value.
    uploaded_components.append(cp_result[2])
  components = uploaded_components + existing_components
  if len(components) == len(dst_args):
    # Only try to compose if all of the components were uploaded successfully.

    def _GetComponentNumber(component):
      # Component names end with '_<index>' (see _PartitionFile).
      return int(component.object_name[component.object_name.rfind('_')+1:])
    # Sort the components so that they will be composed in the correct order.
    components = sorted(components, key=_GetComponentNumber)
    request_components = []
    for component_url in components:
      src_obj_metadata = (
          apitools_messages.ComposeRequest.SourceObjectsValueListEntry(
              name=component_url.object_name))
      if component_url.HasGeneration():
        src_obj_metadata.generation = long(component_url.generation)
      request_components.append(src_obj_metadata)
    composed_object = gsutil_api.ComposeObject(
        request_components, dst_obj_metadata, preconditions=preconditions,
        provider=dst_url.scheme, fields=['generation', 'crc32c', 'size'])
    try:
      # Make sure only to delete things that we know were successfully
      # uploaded (as opposed to all of the objects that we attempted to
      # create) so that we don't delete any preexisting objects, except for
      # those that were uploaded by a previous, failed run and have since
      # changed (but still have an old generation lying around).
      objects_to_delete = components + existing_objects_to_delete
      command_obj.Apply(_DeleteObjectFn, objects_to_delete, _RmExceptionHandler,
                        arg_checker=gslib.command.DummyArgChecker,
                        parallel_operations_override=True)
    except Exception:  # pylint: disable=broad-except
      # If some of the delete calls fail, don't cause the whole command to
      # fail. The copy was successful iff the compose call succeeded, so
      # reduce this to a warning.
      logging.warning(
          'Failed to delete some of the following temporary objects:\n' +
          '\n'.join(dst_args.keys()))
    finally:
      # The upload is complete (or the cleanup failed non-fatally), so the
      # tracker file is no longer needed either way.
      with tracker_file_lock:
        if os.path.exists(tracker_file):
          os.unlink(tracker_file)
  else:
    # Some of the components failed to upload. In this case, we want to exit
    # without deleting the objects.
    raise CommandException(
        'Some temporary components were not uploaded successfully. '
        'Please retry this upload.')
  elapsed_time = time.time() - start_time
  return elapsed_time, composed_object
def _ShouldDoParallelCompositeUpload(logger, allow_splitting, src_url, dst_url,
                                     file_size, canned_acl=None):
  """Decides whether the parallel composite upload strategy should be used.

  Args:
    logger: for outputting log messages.
    allow_splitting: If false, then this function returns false.
    src_url: FileUrl corresponding to a local file.
    dst_url: CloudUrl corresponding to destination cloud object.
    file_size: The size of the source file, in bytes.
    canned_acl: Canned ACL to apply to destination object, if any.

  Returns:
    True iff a parallel upload should be performed on the source file.
  """
  global suggested_parallel_composites
  threshold = HumanReadableToBytes(config.get(
      'GSUtil', 'parallel_composite_upload_threshold',
      DEFAULT_PARALLEL_COMPOSITE_UPLOAD_THRESHOLD))
  # Every eligibility condition except the size threshold itself.
  eligible_except_size = (
      allow_splitting  # Don't split the pieces multiple times.
      and not src_url.IsStream()  # We can't partition streams.
      and dst_url.scheme == 'gs'  # Compose is only for gs.
      and not canned_acl)  # TODO: Implement canned ACL support for compose.
  # Since parallel composite uploads are disabled by default (threshold == 0),
  # make the user aware of them - but only once per process.
  # TODO: Once compiled crcmod is being distributed by major Linux
  # distributions remove this check.
  if (eligible_except_size and threshold == 0
      and file_size >= PARALLEL_COMPOSITE_SUGGESTION_THRESHOLD
      and not suggested_parallel_composites):
    logger.info('\n'.join(textwrap.wrap(
        '==> NOTE: You are uploading one or more large file(s), which would '
        'run significantly faster if you enable parallel composite uploads. '
        'This feature can be enabled by editing the '
        '"parallel_composite_upload_threshold" value in your .boto '
        'configuration file. However, note that if you do this you and any '
        'users that download such composite files will need to have a compiled '
        'crcmod installed (see "gsutil help crcmod").')) + '\n')
    suggested_parallel_composites = True
  return (eligible_except_size
          and threshold > 0
          and file_size >= threshold)
def ExpandUrlToSingleBlr(url_str, gsutil_api, debug, project_id,
                         treat_nonexistent_object_as_subdir=False):
  """Expands wildcard if present in url_str.

  Args:
    url_str: String representation of requested url.
    gsutil_api: gsutil Cloud API instance to use.
    debug: debug level to use (for iterators).
    project_id: project ID to use (for iterators).
    treat_nonexistent_object_as_subdir: indicates if should treat a non-existent
        object as a subdir.

  Returns:
    (exp_url, have_existing_dst_container) where exp_url is a StorageUrl and
    have_existing_dst_container is a bool indicating whether exp_url names an
    existing directory, bucket, or bucket subdirectory. In the case where we
    match a subdirectory AND an object, the object is returned.

  Raises:
    CommandException: if url_str matched more than 1 URL.
  """
  # A wildcarded destination must expand to exactly one URL.
  if ContainsWildcard(url_str):
    matches = list(CreateWildcardIterator(url_str, gsutil_api,
                                          debug=debug,
                                          project_id=project_id))
    if len(matches) != 1:
      raise CommandException('Destination (%s) must match exactly 1 URL' %
                             url_str)
    single_match = matches[0]
    # The BLR is either an OBJECT, PREFIX, or BUCKET; the latter two
    # represent directories.
    return (StorageUrlFromString(single_match.url_string),
            not single_match.IsObject())
  # Handle non-wildcarded URL.
  storage_url = StorageUrlFromString(url_str)
  if storage_url.IsFileUrl():
    return (storage_url, storage_url.IsDirectory())
  # At this point we have a cloud URL.
  if storage_url.IsBucket():
    return (storage_url, True)
  # For object/prefix URLs check 3 cases: (a) if the name ends with '/' treat
  # as a subdir; otherwise, use the wildcard iterator with url to
  # find if (b) there's a Prefix matching url, or (c) name is of form
  # dir_$folder$ (and in both these cases also treat dir as a subdir).
  # Cloud subdirs are always considered to be an existing container.
  if IsCloudSubdirPlaceholder(storage_url):
    return (storage_url, True)
  # Check for the special case where we have a folder marker object.
  folder_markers = CreateWildcardIterator(
      storage_url.versionless_url_string + '_$folder$', gsutil_api,
      debug=debug, project_id=project_id).IterAll(
          bucket_listing_fields=['name'])
  # any() short-circuits on the first listing result, so at most one item
  # is fetched from the iterator.
  if any(True for _ in folder_markers):
    return (storage_url, True)
  listing = CreateWildcardIterator(url_str, gsutil_api,
                                   debug=debug,
                                   project_id=project_id).IterAll(
                                       bucket_listing_fields=['name'])
  found_anything = False
  for item in listing:
    found_anything = True
    if item.IsPrefix():
      return (storage_url, True)
  # Nothing matched: optionally treat the non-existent object as a subdir.
  return (storage_url,
          not found_anything and treat_nonexistent_object_as_subdir)
def FixWindowsNaming(src_url, dst_url):
  """Translates Windows pathnames to cloud pathnames.

  Rewrites the destination URL built by ConstructDstUrl().

  Args:
    src_url: Source StorageUrl to be copied.
    dst_url: The destination StorageUrl built by ConstructDstUrl().

  Returns:
    StorageUrl to use for copy.
  """
  needs_translation = (src_url.IsFileUrl() and src_url.delim == '\\'
                       and dst_url.IsCloudUrl())
  if not needs_translation:
    return dst_url
  # Replace every backslash in the destination with the cloud delimiter.
  translated = re.sub(r'\\', '/', dst_url.url_string)
  return StorageUrlFromString(translated)
def SrcDstSame(src_url, dst_url):
  """Checks if src_url and dst_url represent the same object or file.

  We don't handle anything about hard or symbolic links.

  Args:
    src_url: Source StorageUrl.
    dst_url: Destination StorageUrl.

  Returns:
    Bool indicator.
  """
  if src_url.IsFileUrl() and dst_url.IsFileUrl():
    # Normalize paths (e.g. a/b/./c -> a/b/c) so the equality test below
    # compares like with like.
    return (os.path.normpath(src_url.object_name) ==
            os.path.normpath(dst_url.object_name))
  # Cloud (or mixed) case: same URL string and same generation.
  return (src_url.url_string == dst_url.url_string and
          src_url.generation == dst_url.generation)
def _LogCopyOperation(logger, src_url, dst_url, dst_obj_metadata):
"""Logs copy operation, including Content-Type if appropriate.
Args:
logger: logger instance to use for output.
src_url: Source StorageUrl.
dst_url: Destination StorageUrl.
dst_obj_metadata: Object-specific metadata that should be overidden during
the copy.
"""
if (dst_url.IsCloudUrl() and dst_obj_metadata and
dst_obj_metadata.contentType):
content_type_msg = ' [Content-Type=%s]' % dst_obj_metadata.contentType
else:
content_type_msg = ''
if src_url.IsFileUrl() and src_url.IsStream():
logger.info('Copying from <STDIN>%s...', content_type_msg)
else:
logger.info('Copying %s%s...', src_url.url_string, content_type_msg)
# pylint: disable=undefined-variable
def _CopyObjToObjInTheCloud(src_url, src_obj_metadata, dst_url,
                            dst_obj_metadata, preconditions, gsutil_api,
                            logger):
  """Performs copy-in-the cloud from specified src to dest object.

  Args:
    src_url: Source CloudUrl.
    src_obj_metadata: Metadata for source object; must include etag and size.
    dst_url: Destination CloudUrl.
    dst_obj_metadata: Object-specific metadata that should be overidden during
        the copy.
    preconditions: Preconditions to use for the copy.
    gsutil_api: gsutil Cloud API instance to use for the copy.
    logger: logging.Logger for log message output.

  Returns:
    (elapsed_time, bytes_transferred, dst_url with generation,
    md5 hash of destination) excluding overhead like initial GET.

  Raises:
    CommandException: if errors encountered.
  """
  start_time = time.time()
  progress_callback = FileProgressCallbackHandler(
      ConstructAnnounceText('Copying', dst_url.url_string), logger).call
  # A test hook may substitute a pickled progress callback.
  if global_copy_helper_opts.test_callback_file:
    with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
      progress_callback = pickle.loads(test_fp.read()).call
  copied_obj = gsutil_api.CopyObject(
      src_obj_metadata, dst_obj_metadata, src_generation=src_url.generation,
      canned_acl=global_copy_helper_opts.canned_acl,
      preconditions=preconditions, progress_callback=progress_callback,
      provider=dst_url.scheme, fields=UPLOAD_RETURN_FIELDS)
  elapsed = time.time() - start_time
  result_url = dst_url.Clone()
  result_url.generation = GenerationFromUrlAndString(result_url,
                                                     copied_obj.generation)
  return (elapsed, src_obj_metadata.size, result_url, copied_obj.md5Hash)
def _CheckFreeSpace(path):
  """Return path/drive free space (in bytes).

  Args:
    path: Filesystem path whose volume is checked. Note: on Windows the
        code below queries the SystemDrive environment variable's drive
        rather than the drive containing `path`.

  Returns:
    Number of free bytes available to the calling user on the volume.
  """
  if IS_WINDOWS:
    # pylint: disable=g-import-not-at-top
    try:
      # pylint: disable=invalid-name
      # Build a ctypes prototype for kernel32's GetDiskFreeSpaceExW.
      get_disk_free_space_ex = WINFUNCTYPE(c_int, c_wchar_p,
                                           POINTER(c_uint64),
                                           POINTER(c_uint64),
                                           POINTER(c_uint64))
      # Bind the prototype with paramflags: 1 = input parameter,
      # 2 = output parameter (ctypes allocates these and returns them).
      get_disk_free_space_ex = get_disk_free_space_ex(
          ('GetDiskFreeSpaceExW', windll.kernel32), (
              (1, 'lpszPathName'),
              (2, 'lpFreeUserSpace'),
              (2, 'lpTotalSpace'),
              (2, 'lpFreeSpace'),))
    except AttributeError:
      # Wide-char variant unavailable; fall back to the ANSI entry point
      # with a c_char_p path argument.
      get_disk_free_space_ex = WINFUNCTYPE(c_int, c_char_p,
                                           POINTER(c_uint64),
                                           POINTER(c_uint64),
                                           POINTER(c_uint64))
      get_disk_free_space_ex = get_disk_free_space_ex(
          ('GetDiskFreeSpaceExA', windll.kernel32), (
              (1, 'lpszPathName'),
              (2, 'lpFreeUserSpace'),
              (2, 'lpTotalSpace'),
              (2, 'lpFreeSpace'),))

    def GetDiskFreeSpaceExErrCheck(result, unused_func, args):
      # A zero result indicates failure; surface it as a WindowsError.
      if not result:
        raise WinError()
      # args[1] is the first output param (lpFreeUserSpace): bytes
      # available to the caller.
      return args[1].value
    get_disk_free_space_ex.errcheck = GetDiskFreeSpaceExErrCheck
    return get_disk_free_space_ex(os.getenv('SystemDrive'))
  else:
    # POSIX: free bytes for unprivileged users = fragment size * available
    # block count.
    (_, f_frsize, _, _, f_bavail, _, _, _, _, _) = os.statvfs(path)
    return f_frsize * f_bavail
def _SetContentTypeFromFile(src_url, dst_obj_metadata):
"""Detects and sets Content-Type if src_url names a local file.
Args:
src_url: Source StorageUrl.
dst_obj_metadata: Object-specific metadata that should be overidden during
the copy.
"""
# contentType == '' if user requested default type.
if (dst_obj_metadata.contentType is None and src_url.IsFileUrl()
and not src_url.IsStream()):
# Only do content type recognition if src_url is a file. Object-to-object
# copies with no -h Content-Type specified re-use the content type of the
# source object.
object_name = src_url.object_name
content_type = None
# Streams (denoted by '-') are expected to be 'application/octet-stream'
# and 'file' would partially consume them.
if object_name != '-':
if config.getbool('GSUtil', 'use_magicfile', False):
p = subprocess.Popen(['file', '--mime-type', object_name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
p.stdout.close()
p.stderr.close()
if p.returncode != 0 or error:
raise CommandException(
'Encountered error running "file --mime-type %s" '
'(returncode=%d).\n%s' % (object_name, p.returncode, error))
# Parse output by removing line delimiter and splitting on last ":
content_type = output.rstrip().rpartition(': ')[2]
else:
content_type = mimetypes.guess_type(object_name)[0]
if not content_type:
content_type = DEFAULT_CONTENT_TYPE
dst_obj_metadata.contentType = content_type
# pylint: disable=undefined-variable
def _UploadFileToObjectNonResumable(src_url, src_obj_filestream,
                                    src_obj_size, dst_url, dst_obj_metadata,
                                    preconditions, gsutil_api, logger):
  """Uploads the file using a non-resumable strategy.

  Args:
    src_url: Source StorageUrl to upload.
    src_obj_filestream: File pointer to uploadable bytes.
    src_obj_size: Size of the source object.
    dst_url: Destination StorageUrl for the upload.
    dst_obj_metadata: Metadata for the target object.
    preconditions: Preconditions for the upload, if any.
    gsutil_api: gsutil Cloud API instance to use for the upload.
    logger: For outputting log messages.

  Returns:
    Elapsed upload time, uploaded Object with generation, md5, and size fields
    populated.
  """
  progress_callback = FileProgressCallbackHandler(
      ConstructAnnounceText('Uploading', dst_url.url_string), logger).call
  # A test hook may substitute a pickled progress callback.
  if global_copy_helper_opts.test_callback_file:
    with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
      progress_callback = pickle.loads(test_fp.read()).call
  start_time = time.time()
  if src_url.IsStream():
    # TODO: gsutil-beta: Provide progress callbacks for streaming uploads.
    # Streams have unknown length, so no size argument is passed.
    upload_func = gsutil_api.UploadObjectStreaming
    size_kwargs = {}
  else:
    upload_func = gsutil_api.UploadObject
    size_kwargs = {'size': src_obj_size}
  uploaded_object = upload_func(
      src_obj_filestream, object_metadata=dst_obj_metadata,
      canned_acl=global_copy_helper_opts.canned_acl,
      preconditions=preconditions, progress_callback=progress_callback,
      provider=dst_url.scheme, fields=UPLOAD_RETURN_FIELDS,
      **size_kwargs)
  elapsed_time = time.time() - start_time
  return elapsed_time, uploaded_object
# pylint: disable=undefined-variable
def _UploadFileToObjectResumable(src_url, src_obj_filestream,
                                 src_obj_size, dst_url, dst_obj_metadata,
                                 preconditions, gsutil_api, logger):
  """Uploads the file using a resumable strategy.

  Args:
    src_url: Source FileUrl to upload.  Must not be a stream.
    src_obj_filestream: File pointer to uploadable bytes.
    src_obj_size: Size of the source object.
    dst_url: Destination StorageUrl for the upload.
    dst_obj_metadata: Metadata for the target object.
    preconditions: Preconditions for the upload, if any.
    gsutil_api: gsutil Cloud API instance to use for the upload.
    logger: for outputting log messages.

  Returns:
    Elapsed upload time, uploaded Object with generation, md5, and size fields
    populated.
  """
  tracker_file_name = GetTrackerFilePath(
      dst_url, TrackerFileType.UPLOAD,
      gsutil_api.GetApiSelector(provider=dst_url.scheme))

  def _UploadTrackerCallback(serialization_data):
    """Creates a new tracker file for starting an upload from scratch.

    This function is called by the gsutil Cloud API implementation and
    the serialization data is implementation-specific.

    Args:
      serialization_data: Serialization data used in resuming the upload.
    """
    tracker_file = None
    try:
      tracker_file = open(tracker_file_name, 'w')
      tracker_file.write(str(serialization_data))
    except IOError as e:
      RaiseUnwritableTrackerFileException(tracker_file_name, e.strerror)
    finally:
      if tracker_file:
        tracker_file.close()
  # This contains the upload URL, which will uniquely identify the
  # destination object.
  tracker_data = _GetUploadTrackerData(tracker_file_name, logger)
  if tracker_data:
    logger.info(
        'Resuming upload for %s', src_url.url_string)
  retryable = True
  progress_callback = FileProgressCallbackHandler(
      ConstructAnnounceText('Uploading', dst_url.url_string), logger).call
  # A test hook may substitute a pickled progress callback.
  if global_copy_helper_opts.test_callback_file:
    with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
      progress_callback = pickle.loads(test_fp.read()).call
  start_time = time.time()
  num_startover_attempts = 0
  # This loop causes us to retry when the resumable upload failed in a way that
  # requires starting over with a new upload ID. Retries within a single upload
  # ID within the current process are handled in
  # gsutil_api.UploadObjectResumable, and retries within a single upload ID
  # spanning processes happens if an exception occurs not caught below (which
  # will leave the tracker file in place, and cause the upload ID to be reused
  # the next time the user runs gsutil and attempts the same upload).
  while retryable:
    try:
      uploaded_object = gsutil_api.UploadObjectResumable(
          src_obj_filestream, object_metadata=dst_obj_metadata,
          canned_acl=global_copy_helper_opts.canned_acl,
          preconditions=preconditions, provider=dst_url.scheme,
          size=src_obj_size, serialization_data=tracker_data,
          fields=UPLOAD_RETURN_FIELDS,
          tracker_callback=_UploadTrackerCallback,
          progress_callback=progress_callback)
      retryable = False
    # NOTE: Python 2 'except X, e' syntax; change to 'except X as e' if this
    # file is ever ported to Python 3.
    except ResumableUploadStartOverException, e:
      # This can happen, for example, if the server sends a 410 response code.
      # In that case the current resumable upload ID can't be reused, so delete
      # the tracker file and try again up to max retries.
      num_startover_attempts += 1
      retryable = (num_startover_attempts < GetNumRetries())
      if not retryable:
        raise
      # If the server sends a 404 response code, then the upload should only
      # be restarted if it was the object (and not the bucket) that was missing.
      try:
        gsutil_api.GetBucket(dst_obj_metadata.bucket, provider=dst_url.scheme)
      except NotFoundException:
        raise
      logger.info('Restarting upload from scratch after exception %s', e)
      DeleteTrackerFile(tracker_file_name)
      tracker_data = None
      # Rewind the source stream so the new upload starts from byte 0.
      src_obj_filestream.seek(0)
      # Reset the progress callback handler.
      progress_callback = FileProgressCallbackHandler(
          ConstructAnnounceText('Uploading', dst_url.url_string), logger).call
      logger.info('\n'.join(textwrap.wrap(
          'Resumable upload of %s failed with a response code indicating we '
          'need to start over with a new resumable upload ID. Backing off '
          'and retrying.' % src_url.url_string)))
      # Exponential backoff with random jitter, capped at the max retry delay.
      time.sleep(min(random.random() * (2 ** num_startover_attempts),
                     GetMaxRetryDelay()))
    except ResumableUploadAbortException:
      retryable = False
      raise
    finally:
      # On success or a non-retryable failure the tracker file is obsolete;
      # a retryable failure keeps it so a later run can resume.
      if not retryable:
        DeleteTrackerFile(tracker_file_name)
  end_time = time.time()
  elapsed_time = end_time - start_time
  return (elapsed_time, uploaded_object)
def _CompressFileForUpload(src_url, src_obj_filestream, src_obj_size, logger):
  """Compresses a to-be-uploaded local file to save bandwidth.

  Args:
    src_url: Source FileUrl.
    src_obj_filestream: Read stream of the source file - will be consumed
        and closed.
    src_obj_size: Size of the source file.
    logger: for outputting log messages.

  Returns:
    StorageUrl path to compressed file, compressed file size.
  """
  # TODO: Compress using a streaming model as opposed to all at once here.
  if src_obj_size >= MIN_SIZE_COMPUTE_LOGGING:
    logger.info(
        'Compressing %s (to tmp)...', src_url)
  (gzip_fh, gzip_path) = tempfile.mkstemp()
  gzip_fp = None
  try:
    # Check for temp space. Assume the compressed object is at most 2x
    # the size of the object (normally should compress to smaller than
    # the object)
    if _CheckFreeSpace(gzip_path) < 2 * int(src_obj_size):
      raise CommandException('Inadequate temp space available to compress '
                             '%s. See the CHANGING TEMP DIRECTORIES section '
                             'of "gsutil help cp" for more info.' % src_url)
    gzip_fp = gzip.open(gzip_path, 'wb')
    # Copy the source stream into the gzip file in fixed-size chunks.
    while True:
      chunk = src_obj_filestream.read(GZIP_CHUNK_SIZE)
      if not chunk:
        break
      gzip_fp.write(chunk)
  finally:
    if gzip_fp:
      gzip_fp.close()
    os.close(gzip_fh)
    src_obj_filestream.close()
  return StorageUrlFromString(gzip_path), os.path.getsize(gzip_path)
def _UploadFileToObject(src_url, src_obj_filestream, src_obj_size,
                        dst_url, dst_obj_metadata, preconditions, gsutil_api,
                        logger, command_obj, copy_exception_handler,
                        gzip_exts=None, allow_splitting=True):
  """Uploads a local file to an object.

  Args:
    src_url: Source FileUrl.
    src_obj_filestream: Read stream of the source file to be read and closed.
    src_obj_size: Size of the source file.
    dst_url: Destination CloudUrl.
    dst_obj_metadata: Metadata to be applied to the destination object.
    preconditions: Preconditions to use for the copy.
    gsutil_api: gsutil Cloud API to use for the copy.
    logger: for outputting log messages.
    command_obj: command object for use in Apply in parallel composite uploads.
    copy_exception_handler: For handling copy exceptions during Apply.
    gzip_exts: List of file extensions to gzip prior to upload, if any.
    allow_splitting: Whether to allow the file to be split into component
                     pieces for an parallel composite upload.

  Returns:
    (elapsed_time, bytes_transferred, dst_url with generation,
    md5 hash of destination) excluding overhead like initial GET.

  Raises:
    CommandException: if errors encountered.
  """
  if not dst_obj_metadata or not dst_obj_metadata.contentLanguage:
    content_language = config.get_value('GSUtil', 'content_language')
    if content_language:
      dst_obj_metadata.contentLanguage = content_language

  fname_parts = src_url.object_name.split('.')
  upload_url = src_url
  upload_stream = src_obj_filestream
  upload_size = src_obj_size
  zipped_file = False
  if gzip_exts and len(fname_parts) > 1 and fname_parts[-1] in gzip_exts:
    # Compress to a temp file; we delete it in the finally block below.
    upload_url, upload_size = _CompressFileForUpload(
        src_url, src_obj_filestream, src_obj_size, logger)
    upload_stream = open(upload_url.object_name, 'rb')
    dst_obj_metadata.contentEncoding = 'gzip'
    zipped_file = True

  elapsed_time = None
  uploaded_object = None
  hash_algs = GetUploadHashAlgs()
  digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})

  parallel_composite_upload = _ShouldDoParallelCompositeUpload(
      logger, allow_splitting, upload_url, dst_url, src_obj_size,
      canned_acl=global_copy_helper_opts.canned_acl)

  if (src_url.IsStream() and
      gsutil_api.GetApiSelector(provider=dst_url.scheme) == ApiSelector.JSON):
    orig_stream = upload_stream
    # Add limited seekable properties to the stream via buffering.
    upload_stream = ResumableStreamingJsonUploadWrapper(
        orig_stream, GetJsonResumableChunkSize())

  if not parallel_composite_upload and hash_algs:
    # Parallel composite uploads calculate hashes per-component in subsequent
    # calls to this function, but the composition of the final object is a
    # cloud-only operation.
    wrapped_filestream = HashingFileUploadWrapper(upload_stream, digesters,
                                                  hash_algs, upload_url, logger)
  else:
    wrapped_filestream = upload_stream

  try:
    if parallel_composite_upload:
      elapsed_time, uploaded_object = _DoParallelCompositeUpload(
          upload_stream, upload_url, dst_url, dst_obj_metadata,
          global_copy_helper_opts.canned_acl, upload_size, preconditions,
          gsutil_api, command_obj, copy_exception_handler)
    elif upload_size < ResumableThreshold() or src_url.IsStream():
      elapsed_time, uploaded_object = _UploadFileToObjectNonResumable(
          upload_url, wrapped_filestream, upload_size, dst_url,
          dst_obj_metadata, preconditions, gsutil_api, logger)
    else:
      elapsed_time, uploaded_object = _UploadFileToObjectResumable(
          upload_url, wrapped_filestream, upload_size, dst_url,
          dst_obj_metadata, preconditions, gsutil_api, logger)
  finally:
    if zipped_file:
      try:
        os.unlink(upload_url.object_name)
      # Windows sometimes complains the temp file is locked when you try to
      # delete it.
      except Exception:  # pylint: disable=broad-except
        logger.warning(
            'Could not delete %s. This can occur in Windows because the '
            'temporary file is still locked.', upload_url.object_name)
    # In the gzip case, this is the gzip stream. _CompressFileForUpload will
    # have already closed the original source stream.
    upload_stream.close()

  if not parallel_composite_upload:
    try:
      digests = _CreateDigestsFromDigesters(digesters)
      _CheckHashes(logger, dst_url, uploaded_object, src_url.object_name,
                   digests, is_upload=True)
    except HashMismatchException:
      if _RENAME_ON_HASH_MISMATCH:
        # Keep the corrupted bytes around (renamed) for post-mortem debugging.
        corrupted_obj_metadata = apitools_messages.Object(
            name=dst_obj_metadata.name,
            bucket=dst_obj_metadata.bucket,
            etag=uploaded_object.etag)
        dst_obj_metadata.name = (dst_url.object_name +
                                 _RENAME_ON_HASH_MISMATCH_SUFFIX)
        gsutil_api.CopyObject(corrupted_obj_metadata,
                              dst_obj_metadata, provider=dst_url.scheme)
      # If the digest doesn't match, delete the object.
      gsutil_api.DeleteObject(dst_url.bucket_name, dst_url.object_name,
                              generation=uploaded_object.generation,
                              provider=dst_url.scheme)
      raise

  result_url = dst_url.Clone()
  # Fix: the original assigned the raw generation here and then immediately
  # overwrote it with the normalized value -- the dead store is removed.
  result_url.generation = GenerationFromUrlAndString(
      result_url, uploaded_object.generation)

  return (elapsed_time, uploaded_object.size, result_url,
          uploaded_object.md5Hash)
# TODO: Refactor this long function into smaller pieces.
# pylint: disable=too-many-statements
def _DownloadObjectToFile(src_url, src_obj_metadata, dst_url,
                          gsutil_api, logger, test_method=None):
  """Downloads an object to a local file.

  Handles one-shot and resumable strategies; for resumable downloads it
  manages a tracker file and catches the hash digesters up over any bytes
  already present on disk before requesting the remaining range.

  Args:
    src_url: Source CloudUrl.
    src_obj_metadata: Metadata from the source object.
    dst_url: Destination FileUrl.
    gsutil_api: gsutil Cloud API instance to use for the download.
    logger: for outputting log messages.
    test_method: Optional test method for modifying the file before validation
                 during unit tests.

  Returns:
    (elapsed_time, bytes_transferred, dst_url, md5), excluding overhead like
    initial GET.

  Raises:
    FileConcurrencySkipError: if another process/thread is already writing
        to the destination file.
    CommandException: if errors encountered.
  """
  global open_files_map
  file_name = dst_url.object_name
  dir_name = os.path.dirname(file_name)
  if dir_name and not os.path.exists(dir_name):
    # Do dir creation in try block so can ignore case where dir already
    # exists. This is needed to avoid a race condition when running gsutil
    # -m cp.
    try:
      os.makedirs(dir_name)
    except OSError, e:
      if e.errno != errno.EEXIST:
        raise
  api_selector = gsutil_api.GetApiSelector(provider=src_url.scheme)
  # For gzipped objects download to a temp file and unzip. For the XML API,
  # this represents the result of a HEAD request. For the JSON API, this is
  # the stored encoding which the service may not respect. However, if the
  # server sends decompressed bytes for a file that is stored compressed
  # (double compressed case), there is no way we can validate the hash and
  # we will fail our hash check for the object.
  if (src_obj_metadata.contentEncoding and
      src_obj_metadata.contentEncoding.lower().endswith('gzip')):
    # We can't use tempfile.mkstemp() here because we need a predictable
    # filename for resumable downloads.
    download_file_name = _GetDownloadZipFileName(file_name)
    logger.info(
        'Downloading to temp gzip filename %s', download_file_name)
    need_to_unzip = True
  else:
    download_file_name = file_name
    need_to_unzip = False

  if download_file_name.endswith(dst_url.delim):
    logger.warn('\n'.join(textwrap.wrap(
        'Skipping attempt to download to filename ending with slash (%s). This '
        'typically happens when using gsutil to download from a subdirectory '
        'created by the Cloud Console (https://cloud.google.com/console)'
        % download_file_name)))
    return (0, 0, dst_url, '')

  # Set up hash digesters.
  hash_algs = GetDownloadHashAlgs(
      logger, src_has_md5=src_obj_metadata.md5Hash,
      src_has_crc32c=src_obj_metadata.crc32c)
  digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})

  fp = None
  # Tracks whether the server used a gzip encoding.
  server_encoding = None
  download_complete = False
  download_strategy = _SelectDownloadStrategy(dst_url)
  download_start_point = 0
  # This is used for resuming downloads, but also for passing the mediaLink
  # and size into the download for new downloads so that we can avoid
  # making an extra HTTP call.
  serialization_data = None
  serialization_dict = GetDownloadSerializationDict(src_obj_metadata)
  # Names registered in open_files_map by this call; unregistered in finally.
  open_files = []
  try:
    if download_strategy is CloudApi.DownloadStrategy.ONE_SHOT:
      fp = open(download_file_name, 'wb')
    elif download_strategy is CloudApi.DownloadStrategy.RESUMABLE:
      # If this is a resumable download, we need to open the file for append and
      # manage a tracker file.
      if open_files_map.get(download_file_name, False):
        # Ensure another process/thread is not already writing to this file.
        raise FileConcurrencySkipError
      open_files.append(download_file_name)
      open_files_map[download_file_name] = True
      fp = open(download_file_name, 'ab')

      resuming = ReadOrCreateDownloadTrackerFile(
          src_obj_metadata, dst_url, api_selector)
      if resuming:
        # Find out how far along we are so we can request the appropriate
        # remaining range of the object.
        existing_file_size = GetFileSize(fp, position_to_eof=True)
        if existing_file_size > src_obj_metadata.size:
          DeleteTrackerFile(GetTrackerFilePath(
              dst_url, TrackerFileType.DOWNLOAD, api_selector))
          raise CommandException(
              '%s is larger (%d) than %s (%d).\nDeleting tracker file, so '
              'if you re-try this download it will start from scratch' %
              (download_file_name, existing_file_size, src_url.object_name,
               src_obj_metadata.size))
        else:
          if existing_file_size == src_obj_metadata.size:
            logger.info(
                'Download already complete for file %s, skipping download but '
                'will run integrity checks.', download_file_name)
            download_complete = True
          else:
            download_start_point = existing_file_size
            serialization_dict['progress'] = download_start_point
            logger.info('Resuming download for %s', src_url.url_string)
          # Catch up our digester with the hash data.
          # The log message below is only emitted for large files; the
          # catch-up read itself always runs when resuming.
          if existing_file_size > TEN_MIB:
            for alg_name in digesters:
              logger.info(
                  'Catching up %s for %s', alg_name, download_file_name)
          with open(download_file_name, 'rb') as hash_fp:
            while True:
              data = hash_fp.read(DEFAULT_FILE_BUFFER_SIZE)
              if not data:
                break
              for alg_name in digesters:
                digesters[alg_name].update(data)
      else:
        # Starting a new download, blow away whatever is already there.
        fp.truncate(0)
        fp.seek(0)
    else:
      raise CommandException('Invalid download strategy %s chosen for'
                             'file %s' % (download_strategy, fp.name))

    if not dst_url.IsStream():
      serialization_data = json.dumps(serialization_dict)

    progress_callback = FileProgressCallbackHandler(
        ConstructAnnounceText('Downloading', dst_url.url_string),
        logger).call
    if global_copy_helper_opts.test_callback_file:
      # Test hook: unpickles a callback from a local, test-provided file.
      with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
        progress_callback = pickle.loads(test_fp.read()).call

    start_time = time.time()
    # TODO: With gzip encoding (which may occur on-the-fly and not be part of
    # the object's metadata), when we request a range to resume, it's possible
    # that the server will just resend the entire object, which means our
    # caught-up hash will be incorrect. We recalculate the hash on
    # the local file in the case of a failed gzip hash anyway, but it would
    # be better if we actively detected this case.
    if not download_complete:
      server_encoding = gsutil_api.GetObjectMedia(
          src_url.bucket_name, src_url.object_name, fp,
          start_byte=download_start_point, generation=src_url.generation,
          object_size=src_obj_metadata.size,
          download_strategy=download_strategy, provider=src_url.scheme,
          serialization_data=serialization_data, digesters=digesters,
          progress_callback=progress_callback)

    end_time = time.time()

    # If a custom test method is defined, call it here. For the copy command,
    # test methods are expected to take one argument: an open file pointer,
    # and are used to perturb the open file during download to exercise
    # download error detection.
    if test_method:
      test_method(fp)
  except ResumableDownloadException as e:
    logger.warning('Caught ResumableDownloadException (%s) for file %s.',
                   e.reason, file_name)
    raise
  finally:
    if fp:
      fp.close()
    for file_name in open_files:
      # open_files_map is a project-provided dict-like object; delete() is
      # its removal API (not a builtin dict method). Note this loop rebinds
      # file_name, which is only used for logging above.
      open_files_map.delete(file_name)

  # If we decompressed a content-encoding gzip file on the fly, this may not
  # be accurate, but it is the best we can do without going deep into the
  # underlying HTTP libraries. Note that this value is only used for
  # reporting in log messages; inaccuracy doesn't impact the integrity of the
  # download.
  bytes_transferred = src_obj_metadata.size - download_start_point
  server_gzip = server_encoding and server_encoding.lower().endswith('gzip')
  local_md5 = _ValidateDownloadHashes(logger, src_url, src_obj_metadata,
                                      dst_url, need_to_unzip, server_gzip,
                                      digesters, hash_algs, api_selector,
                                      bytes_transferred)

  return (end_time - start_time, bytes_transferred, dst_url, local_md5)
def _GetDownloadZipFileName(file_name):
"""Returns the file name for a temporarily compressed downloaded file."""
return '%s_.gztmp' % file_name
def _ValidateDownloadHashes(logger, src_url, src_obj_metadata, dst_url,
                            need_to_unzip, server_gzip, digesters, hash_algs,
                            api_selector, bytes_transferred):
  """Validates a downloaded file's integrity.

  On a verified mismatch the corrupted local file is renamed (when
  _RENAME_ON_HASH_MISMATCH is set) or deleted, the download tracker file is
  removed, and the mismatch exception is re-raised. Gzipped content may defer
  verification until after decompression.

  Args:
    logger: For outputting log messages.
    src_url: StorageUrl for the source object.
    src_obj_metadata: Metadata for the source object, potentially containing
                      hash values.
    dst_url: StorageUrl describing the destination file.
    need_to_unzip: If true, a temporary zip file was used and must be
                   uncompressed as part of validation.
    server_gzip: If true, the server gzipped the bytes (regardless of whether
                 the object metadata claimed it was gzipped).
    digesters: dict of {string, hash digester} that contains up-to-date digests
               computed during the download. If a digester for a particular
               algorithm is None, an up-to-date digest is not available and the
               hash must be recomputed from the local file.
    hash_algs: dict of {string, hash algorithm} that can be used if digesters
               don't have up-to-date digests.
    api_selector: The Cloud API implementation used (used tracker file naming).
    bytes_transferred: Number of bytes downloaded (used for logging).

  Returns:
    An MD5 of the local file, if one was calculated as part of the integrity
    check; None otherwise (implicit).

  Raises:
    HashMismatchException: if the digests do not match after any required
        decompression and recalculation.
  """
  file_name = dst_url.object_name
  download_file_name = (_GetDownloadZipFileName(file_name) if need_to_unzip else
                        file_name)
  digesters_succeeded = True
  for alg in digesters:
    # If we get a digester with a None algorithm, the underlying
    # implementation failed to calculate a digest, so we will need to
    # calculate one from scratch.
    if not digesters[alg]:
      digesters_succeeded = False
      break

  if digesters_succeeded:
    local_hashes = _CreateDigestsFromDigesters(digesters)
  else:
    local_hashes = _CreateDigestsFromLocalFile(
        logger, hash_algs, download_file_name, src_obj_metadata)

  digest_verified = True
  hash_invalid_exception = None
  try:
    _CheckHashes(logger, src_url, src_obj_metadata, download_file_name,
                 local_hashes)
    DeleteTrackerFile(GetTrackerFilePath(
        dst_url, TrackerFileType.DOWNLOAD, api_selector))
  except HashMismatchException, e:
    # If an non-gzipped object gets sent with gzip content encoding, the hash
    # we calculate will match the gzipped bytes, not the original object. Thus,
    # we'll need to calculate and check it after unzipping.
    if server_gzip:
      logger.debug(
          'Hash did not match but server gzipped the content, will '
          'recalculate.')
      digest_verified = False
    elif api_selector == ApiSelector.XML:
      logger.debug(
          'Hash did not match but server may have gzipped the content, will '
          'recalculate.')
      # Save off the exception in case this isn't a gzipped file.
      hash_invalid_exception = e
      digest_verified = False
    else:
      # Verified mismatch: remove tracker and quarantine/delete the file.
      DeleteTrackerFile(GetTrackerFilePath(
          dst_url, TrackerFileType.DOWNLOAD, api_selector))
      if _RENAME_ON_HASH_MISMATCH:
        os.rename(download_file_name,
                  download_file_name + _RENAME_ON_HASH_MISMATCH_SUFFIX)
      else:
        os.unlink(download_file_name)
      raise

  if server_gzip and not need_to_unzip:
    # Server compressed bytes on-the-fly, thus we need to rename and decompress.
    # We can't decompress on-the-fly because prior to Python 3.2 the gzip
    # module makes a bunch of seek calls on the stream.
    download_file_name = _GetDownloadZipFileName(file_name)
    os.rename(file_name, download_file_name)

  if need_to_unzip or server_gzip:
    # Log that we're uncompressing if the file is big enough that
    # decompressing would make it look like the transfer "stalled" at the end.
    if bytes_transferred > TEN_MIB:
      logger.info(
          'Uncompressing downloaded tmp file to %s...', file_name)

    # Downloaded gzipped file to a filename w/o .gz extension, so unzip.
    gzip_fp = None
    try:
      gzip_fp = gzip.open(download_file_name, 'rb')
      with open(file_name, 'wb') as f_out:
        data = gzip_fp.read(GZIP_CHUNK_SIZE)
        while data:
          f_out.write(data)
          data = gzip_fp.read(GZIP_CHUNK_SIZE)
    except IOError, e:
      # In the XML case where we don't know if the file was gzipped, raise
      # the original hash exception if we find that it wasn't.
      if 'Not a gzipped file' in str(e) and hash_invalid_exception:
        # Linter improperly thinks we're raising None despite the above check.
        # pylint: disable=raising-bad-type
        raise hash_invalid_exception
    finally:
      if gzip_fp:
        gzip_fp.close()

    os.unlink(download_file_name)

  if not digest_verified:
    try:
      # Recalculate hashes on the unzipped local file.
      local_hashes = _CreateDigestsFromLocalFile(logger, hash_algs, file_name,
                                                 src_obj_metadata)
      _CheckHashes(logger, src_url, src_obj_metadata, file_name, local_hashes)
      DeleteTrackerFile(GetTrackerFilePath(
          dst_url, TrackerFileType.DOWNLOAD, api_selector))
    except HashMismatchException:
      DeleteTrackerFile(GetTrackerFilePath(
          dst_url, TrackerFileType.DOWNLOAD, api_selector))
      if _RENAME_ON_HASH_MISMATCH:
        os.rename(file_name,
                  file_name + _RENAME_ON_HASH_MISMATCH_SUFFIX)
      else:
        os.unlink(file_name)
      raise

  # Implicitly returns None when no MD5 was computed (e.g. CRC32C-only case).
  if 'md5' in local_hashes:
    return local_hashes['md5']
def _CopyFileToFile(src_url, dst_url):
  """Copies a local file to a local file.

  Args:
    src_url: Source FileUrl.
    dst_url: Destination FileUrl.

  Returns:
    (elapsed_time, bytes_transferred, dst_url, md5=None).

  Raises:
    CommandException: if errors encountered.
  """
  src_fp = GetStreamFromFileUrl(src_url)
  dir_name = os.path.dirname(dst_url.object_name)
  if dir_name and not os.path.exists(dir_name):
    # Tolerate the directory being created concurrently by another
    # thread/process (e.g. gsutil -m cp), mirroring _DownloadObjectToFile.
    try:
      os.makedirs(dir_name)
    except OSError as e:
      if e.errno != errno.EEXIST:
        raise
  start_time = time.time()
  # Fix: the destination handle was previously never closed (leaked until GC);
  # use a context manager so it is closed even if the copy fails. The source
  # stream is left open because it may be stdin when copying from a stream.
  with open(dst_url.object_name, 'wb') as dst_fp:
    shutil.copyfileobj(src_fp, dst_fp)
  end_time = time.time()
  return (end_time - start_time, os.path.getsize(dst_url.object_name),
          dst_url, None)
def _DummyTrackerCallback(_):
pass
# pylint: disable=undefined-variable
def _CopyObjToObjDaisyChainMode(src_url, src_obj_metadata, dst_url,
                                dst_obj_metadata, preconditions, gsutil_api,
                                logger):
  """Copies from src_url to dst_url in "daisy chain" mode.

  See -D OPTION documentation about what daisy chain mode is: bytes are
  streamed down from the source object and back up to the destination
  through this process.

  Args:
    src_url: Source CloudUrl
    src_obj_metadata: Metadata from source object
    dst_url: Destination CloudUrl
    dst_obj_metadata: Object-specific metadata that should be overridden during
                      the copy.
    preconditions: Preconditions to use for the copy.
    gsutil_api: gsutil Cloud API to use for the copy.
    logger: For outputting log messages.

  Returns:
    (elapsed_time, bytes_transferred, dst_url with generation,
    md5 hash of destination) excluding overhead like initial GET.

  Raises:
    NotImplementedError: for cross-provider copies with ACL preservation.
    CommandException: if errors encountered.
  """
  # We don't attempt to preserve ACLs across providers because
  # GCS and S3 support different ACLs and disjoint principals.
  if (global_copy_helper_opts.preserve_acl
      and src_url.scheme != dst_url.scheme):
    raise NotImplementedError(
        'Cross-provider cp -p not supported')
  if not global_copy_helper_opts.preserve_acl:
    dst_obj_metadata.acl = []

  # Don't use callbacks for downloads on the daisy chain wrapper because
  # upload callbacks will output progress, but respect test hooks if present.
  progress_callback = None
  if global_copy_helper_opts.test_callback_file:
    # Test hook: the callback is unpickled from a local, test-provided file
    # (trusted input; pickle is not applied to remote data here).
    with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
      progress_callback = pickle.loads(test_fp.read()).call

  start_time = time.time()
  upload_fp = DaisyChainWrapper(src_url, src_obj_metadata.size, gsutil_api,
                                progress_callback=progress_callback)
  uploaded_object = None
  if src_obj_metadata.size == 0:
    # Resumable uploads of size 0 are not supported.
    uploaded_object = gsutil_api.UploadObject(
        upload_fp, object_metadata=dst_obj_metadata,
        canned_acl=global_copy_helper_opts.canned_acl,
        preconditions=preconditions, provider=dst_url.scheme,
        fields=UPLOAD_RETURN_FIELDS, size=src_obj_metadata.size)
  else:
    # TODO: Support process-break resumes. This will resume across connection
    # breaks and server errors, but the tracker callback is a no-op so this
    # won't resume across gsutil runs.
    # TODO: Test retries via test_callback_file.
    uploaded_object = gsutil_api.UploadObjectResumable(
        upload_fp, object_metadata=dst_obj_metadata,
        canned_acl=global_copy_helper_opts.canned_acl,
        preconditions=preconditions, provider=dst_url.scheme,
        fields=UPLOAD_RETURN_FIELDS, size=src_obj_metadata.size,
        progress_callback=FileProgressCallbackHandler(
            ConstructAnnounceText('Uploading', dst_url.url_string),
            logger).call,
        tracker_callback=_DummyTrackerCallback)
  end_time = time.time()

  try:
    _CheckCloudHashes(logger, src_url, dst_url, src_obj_metadata,
                      uploaded_object)
  except HashMismatchException:
    if _RENAME_ON_HASH_MISMATCH:
      # Keep the corrupted bytes around (renamed) for post-mortem debugging.
      corrupted_obj_metadata = apitools_messages.Object(
          name=dst_obj_metadata.name,
          bucket=dst_obj_metadata.bucket,
          etag=uploaded_object.etag)
      dst_obj_metadata.name = (dst_url.object_name +
                               _RENAME_ON_HASH_MISMATCH_SUFFIX)
      gsutil_api.CopyObject(corrupted_obj_metadata,
                            dst_obj_metadata, provider=dst_url.scheme)
    # If the digest doesn't match, delete the object.
    gsutil_api.DeleteObject(dst_url.bucket_name, dst_url.object_name,
                            generation=uploaded_object.generation,
                            provider=dst_url.scheme)
    raise

  result_url = dst_url.Clone()
  result_url.generation = GenerationFromUrlAndString(
      result_url, uploaded_object.generation)

  return (end_time - start_time, src_obj_metadata.size, result_url,
          uploaded_object.md5Hash)
# pylint: disable=undefined-variable
# pylint: disable=too-many-statements
def PerformCopy(logger, src_url, dst_url, gsutil_api, command_obj,
                copy_exception_handler, allow_splitting=True,
                headers=None, manifest=None, gzip_exts=None, test_method=None):
  """Performs copy from src_url to dst_url, handling various special cases.

  Dispatches to download, in-the-cloud copy, daisy-chain copy, upload, or
  local file-to-file copy based on the source/destination URL types.

  Args:
    logger: for outputting log messages.
    src_url: Source StorageUrl.
    dst_url: Destination StorageUrl.
    gsutil_api: gsutil Cloud API instance to use for the copy.
    command_obj: command object for use in Apply in parallel composite uploads.
    copy_exception_handler: for handling copy exceptions during Apply.
    allow_splitting: Whether to allow the file to be split into component
                     pieces for an parallel composite upload.
    headers: optional headers to use for the copy operation.
    manifest: optional manifest for tracking copy operations.
    gzip_exts: List of file extensions to gzip for uploads, if any.
    test_method: optional test method for modifying files during unit tests.

  Returns:
    (elapsed_time, bytes_transferred, version-specific dst_url) excluding
    overhead like initial GET.

  Raises:
    ItemExistsError: if no clobber flag is specified and the destination
        object already exists.
    SkipUnsupportedObjectError: if skip_unsupported_objects flag is specified
        and the source is an unsupported type.
    CommandException: if other errors encountered.
  """
  if headers:
    dst_obj_headers = headers.copy()
  else:
    dst_obj_headers = {}

  # Create a metadata instance for each destination object so metadata
  # such as content-type can be applied per-object.
  # Initialize metadata from any headers passed in via -h.
  dst_obj_metadata = ObjectMetadataFromHeaders(dst_obj_headers)

  if dst_url.IsCloudUrl() and dst_url.scheme == 'gs':
    preconditions = PreconditionsFromHeaders(dst_obj_headers)
  else:
    preconditions = Preconditions()

  src_obj_metadata = None
  src_obj_filestream = None
  if src_url.IsCloudUrl():
    src_obj_fields = None
    if dst_url.IsCloudUrl():
      # For cloud or daisy chain copy, we need every copyable field.
      # If we're not modifying or overriding any of the fields, we can get
      # away without retrieving the object metadata because the copy
      # operation can succeed with just the destination bucket and object
      # name. But if we are sending any metadata, the JSON API will expect a
      # complete object resource. Since we want metadata like the object size
      # for our own tracking, we just get all of the metadata here.
      src_obj_fields = ['cacheControl', 'componentCount',
                        'contentDisposition', 'contentEncoding',
                        'contentLanguage', 'contentType', 'crc32c',
                        'etag', 'generation', 'md5Hash', 'mediaLink',
                        'metadata', 'metageneration', 'size']
      # We only need the ACL if we're going to preserve it.
      if global_copy_helper_opts.preserve_acl:
        src_obj_fields.append('acl')
      if (src_url.scheme == dst_url.scheme
          and not global_copy_helper_opts.daisy_chain):
        copy_in_the_cloud = True
      else:
        copy_in_the_cloud = False
    else:
      # Just get the fields needed to validate the download.
      src_obj_fields = ['crc32c', 'contentEncoding', 'contentType', 'etag',
                        'mediaLink', 'md5Hash', 'size']

    if (src_url.scheme == 's3' and
        global_copy_helper_opts.skip_unsupported_objects):
      src_obj_fields.append('storageClass')

    try:
      src_generation = GenerationFromUrlAndString(src_url, src_url.generation)
      src_obj_metadata = gsutil_api.GetObjectMetadata(
          src_url.bucket_name, src_url.object_name,
          generation=src_generation, provider=src_url.scheme,
          fields=src_obj_fields)
    except NotFoundException:
      raise CommandException(
          'NotFoundException: Could not retrieve source object %s.' %
          src_url.url_string)
    if (src_url.scheme == 's3' and
        global_copy_helper_opts.skip_unsupported_objects and
        src_obj_metadata.storageClass == 'GLACIER'):
      raise SkipGlacierError()

    src_obj_size = src_obj_metadata.size
    dst_obj_metadata.contentType = src_obj_metadata.contentType
    if global_copy_helper_opts.preserve_acl:
      dst_obj_metadata.acl = src_obj_metadata.acl
      # Special case for S3-to-S3 copy URLs using
      # global_copy_helper_opts.preserve_acl.
      # dst_url will be verified in _CopyObjToObjDaisyChainMode if it
      # is not s3 (and thus differs from src_url).
      if src_url.scheme == 's3':
        acl_text = S3MarkerAclFromObjectMetadata(src_obj_metadata)
        if acl_text:
          AddS3MarkerAclToObjectMetadata(dst_obj_metadata, acl_text)
  else:
    try:
      src_obj_filestream = GetStreamFromFileUrl(src_url)
    except Exception, e:  # pylint: disable=broad-except
      raise CommandException('Error opening file "%s": %s.' % (src_url,
                                                               e.message))
    if src_url.IsStream():
      # Stream sources (e.g. stdin) have no knowable size up front.
      src_obj_size = None
    else:
      src_obj_size = os.path.getsize(src_url.object_name)

  if global_copy_helper_opts.use_manifest:
    # Set the source size in the manifest.
    manifest.Set(src_url.url_string, 'size', src_obj_size)

  # NOTE(review): `src_url != 's3'` compares a StorageUrl object against a
  # string, which appears to always be True; presumably
  # `src_url.scheme != 's3'` was intended -- confirm before changing.
  if (dst_url.scheme == 's3' and src_obj_size > S3_MAX_UPLOAD_SIZE
      and src_url != 's3'):
    raise CommandException(
        '"%s" exceeds the maximum gsutil-supported size for an S3 upload. S3 '
        'objects greater than %s in size require multipart uploads, which '
        'gsutil does not support.' % (src_url,
                                      MakeHumanReadable(S3_MAX_UPLOAD_SIZE)))

  # On Windows, stdin is opened as text mode instead of binary which causes
  # problems when piping a binary file, so this switches it to binary mode.
  if IS_WINDOWS and src_url.IsFileUrl() and src_url.IsStream():
    msvcrt.setmode(GetStreamFromFileUrl(src_url).fileno(), os.O_BINARY)

  if global_copy_helper_opts.no_clobber:
    # There are two checks to prevent clobbering:
    # 1) The first check is to see if the URL
    #    already exists at the destination and prevent the upload/download
    #    from happening. This is done by the exists() call.
    # 2) The second check is only relevant if we are writing to gs. We can
    #    enforce that the server only writes the object if it doesn't exist
    #    by specifying the header below. This check only happens at the
    #    server after the complete file has been uploaded. We specify this
    #    header to prevent a race condition where a destination file may
    #    be created after the first check and before the file is fully
    #    uploaded.
    # In order to save on unnecessary uploads/downloads we perform both
    # checks. However, this may come at the cost of additional HTTP calls.
    if preconditions.gen_match:
      raise ArgumentException('Specifying x-goog-if-generation-match is '
                              'not supported with cp -n')
    else:
      preconditions.gen_match = 0
    if dst_url.IsFileUrl() and os.path.exists(dst_url.object_name):
      # The local file may be a partial. Check the file sizes.
      if src_obj_size == os.path.getsize(dst_url.object_name):
        raise ItemExistsError()
    elif dst_url.IsCloudUrl():
      try:
        dst_object = gsutil_api.GetObjectMetadata(
            dst_url.bucket_name, dst_url.object_name, provider=dst_url.scheme)
      except NotFoundException:
        dst_object = None
      if dst_object:
        raise ItemExistsError()

  if dst_url.IsCloudUrl():
    # Cloud storage API gets object and bucket name from metadata.
    dst_obj_metadata.name = dst_url.object_name
    dst_obj_metadata.bucket = dst_url.bucket_name
    if src_url.IsCloudUrl():
      # Preserve relevant metadata from the source object if it's not already
      # provided from the headers.
      CopyObjectMetadata(src_obj_metadata, dst_obj_metadata, override=False)
      src_obj_metadata.name = src_url.object_name
      src_obj_metadata.bucket = src_url.bucket_name
    else:
      _SetContentTypeFromFile(src_url, dst_obj_metadata)
  else:
    # Files don't have Cloud API metadata.
    dst_obj_metadata = None

  _LogCopyOperation(logger, src_url, dst_url, dst_obj_metadata)

  if src_url.IsCloudUrl():
    if dst_url.IsFileUrl():
      return _DownloadObjectToFile(src_url, src_obj_metadata, dst_url,
                                   gsutil_api, logger, test_method=test_method)
    elif copy_in_the_cloud:
      # copy_in_the_cloud is only bound on the cloud-to-cloud path above,
      # hence the pylint disable=undefined-variable on this function.
      return _CopyObjToObjInTheCloud(src_url, src_obj_metadata, dst_url,
                                     dst_obj_metadata, preconditions,
                                     gsutil_api, logger)
    else:
      return _CopyObjToObjDaisyChainMode(src_url, src_obj_metadata,
                                         dst_url, dst_obj_metadata,
                                         preconditions, gsutil_api, logger)
  else:  # src_url.IsFileUrl()
    if dst_url.IsCloudUrl():
      return _UploadFileToObject(
          src_url, src_obj_filestream, src_obj_size, dst_url,
          dst_obj_metadata, preconditions, gsutil_api, logger, command_obj,
          copy_exception_handler, gzip_exts=gzip_exts,
          allow_splitting=allow_splitting)
    else:  # dst_url.IsFileUrl()
      return _CopyFileToFile(src_url, dst_url)
class Manifest(object):
"""Stores the manifest items for the CpCommand class."""
def __init__(self, path):
# self.items contains a dictionary of rows
self.items = {}
self.manifest_filter = {}
self.lock = CreateLock()
self.manifest_path = os.path.expanduser(path)
self._ParseManifest()
self._CreateManifestFile()
def _ParseManifest(self):
"""Load and parse a manifest file.
This information will be used to skip any files that have a skip or OK
status.
"""
try:
if os.path.exists(self.manifest_path):
with open(self.manifest_path, 'rb') as f:
first_row = True
reader = csv.reader(f)
for row in reader:
if first_row:
try:
source_index = row.index('Source')
result_index = row.index('Result')
except ValueError:
# No header and thus not a valid manifest file.
raise CommandException(
'Missing headers in manifest file: %s' % self.manifest_path)
first_row = False
source = row[source_index]
result = row[result_index]
if result in ['OK', 'skip']:
# We're always guaranteed to take the last result of a specific
# source url.
self.manifest_filter[source] = result
except IOError:
raise CommandException('Could not parse %s' % self.manifest_path)
def WasSuccessful(self, src):
"""Returns whether the specified src url was marked as successful."""
return src in self.manifest_filter
def _CreateManifestFile(self):
"""Opens the manifest file and assigns it to the file pointer."""
try:
if ((not os.path.exists(self.manifest_path))
or (os.stat(self.manifest_path).st_size == 0)):
# Add headers to the new file.
with open(self.manifest_path, 'wb', 1) as f:
writer = csv.writer(f)
writer.writerow(['Source',
'Destination',
'Start',
'End',
'Md5',
'UploadId',
'Source Size',
'Bytes Transferred',
'Result',
'Description'])
except IOError:
raise CommandException('Could not create manifest file.')
def Set(self, url, key, value):
if value is None:
# In case we don't have any information to set we bail out here.
# This is so that we don't clobber existing information.
# To zero information pass '' instead of None.
return
if url in self.items:
self.items[url][key] = value
else:
self.items[url] = {key: value}
def Initialize(self, source_url, destination_url):
# Always use the source_url as the key for the item. This is unique.
self.Set(source_url, 'source_uri', source_url)
self.Set(source_url, 'destination_uri', destination_url)
self.Set(source_url, 'start_time', datetime.datetime.utcnow())
def SetResult(self, source_url, bytes_transferred, result,
              description=''):
    """Record the final outcome for *source_url*, flush it to the manifest
    file, and drop it from the in-memory item map."""
    updates = (('bytes', bytes_transferred),
               ('result', result),
               ('description', description),
               ('end_time', datetime.datetime.utcnow()))
    for field, value in updates:
        self.Set(source_url, field, value)
    self._WriteRowToManifestFile(source_url)
    self._RemoveItemFromManifest(source_url)
def _WriteRowToManifestFile(self, url):
    """Writes a manifest entry to the manifest file for the url argument.

    Serializes the tracked item for *url* as one CSV row, in the column
    order established by _CreateManifestFile. Optional fields (md5,
    upload_id, size, bytes) are emitted as '' when absent.
    """
    row_item = self.items[url]
    data = [
        str(row_item['source_uri'].encode(UTF8)),
        str(row_item['destination_uri'].encode(UTF8)),
        # Times are naive datetimes; 'Z' marks them as UTC in the output.
        '%sZ' % row_item['start_time'].isoformat(),
        '%sZ' % row_item['end_time'].isoformat(),
        row_item['md5'] if 'md5' in row_item else '',
        row_item['upload_id'] if 'upload_id' in row_item else '',
        str(row_item['size']) if 'size' in row_item else '',
        str(row_item['bytes']) if 'bytes' in row_item else '',
        row_item['result'],
        row_item['description'].encode(UTF8)]
    # Acquire a lock to prevent multiple threads writing to the same file at
    # the same time. This would cause a garbled mess in the manifest file.
    with self.lock:
        with open(self.manifest_path, 'a', 1) as f:  # 1 == line buffered
            writer = csv.writer(f)
            writer.writerow(data)
def _RemoveItemFromManifest(self, url):
# Remove the item from the dictionary since we're done with it and
# we don't want the dictionary to grow too large in memory for no good
# reason.
del self.items[url]
class ItemExistsError(Exception):
    """Raised for objects that are skipped because they already exist."""
class SkipUnsupportedObjectError(Exception):
    """Raised for objects skipped because their storage type is unsupported."""

    def __init__(self):
        super(SkipUnsupportedObjectError, self).__init__()
        # Subclasses overwrite this with the concrete unsupported type name.
        self.unsupported_type = 'Unknown'
class SkipGlacierError(SkipUnsupportedObjectError):
    """Exception for objects skipped because their storage class is GLACIER."""

    def __init__(self):
        super(SkipGlacierError, self).__init__()
        # Recorded so callers can report which storage class was skipped.
        self.unsupported_type = 'GLACIER'
def GetPathBeforeFinalDir(url):
    """Returns the path section before the final directory component of the URL.

    This handles cases for file system directories, bucket, and bucket
    subdirectories. Example: for gs://bucket/dir/ we'll return 'gs://bucket',
    and for file://dir we'll return file://

    Args:
      url: StorageUrl representing a filesystem directory, cloud bucket or
           bucket subdir.

    Returns:
      String name of above-described path, sans final path separator.
    """
    sep = url.delim
    if url.IsFileUrl():
        remainder = url.url_string[len('file://'):]
        if sep not in remainder:
            return 'file://'
        return 'file://%s' % remainder.rstrip(sep).rpartition(sep)[0]
    if url.IsBucket():
        return '%s://' % url.scheme
    # Else it names a bucket subdir.
    return url.url_string.rstrip(sep).rpartition(sep)[0]
def _DivideAndCeil(dividend, divisor):
"""Returns ceil(dividend / divisor).
Takes care to avoid the pitfalls of floating point arithmetic that could
otherwise yield the wrong result for large numbers.
Args:
dividend: Dividend for the operation.
divisor: Divisor for the operation.
Returns:
Quotient.
"""
quotient = dividend // divisor
if (dividend % divisor) != 0:
quotient += 1
return quotient
def _GetPartitionInfo(file_size, max_components, default_component_size):
"""Gets info about a file partition for parallel composite uploads.
Args:
file_size: The number of bytes in the file to be partitioned.
max_components: The maximum number of components that can be composed.
default_component_size: The size of a component, assuming that
max_components is infinite.
Returns:
The number of components in the partitioned file, and the size of each
component (except the last, which will have a different size iff
file_size != 0 (mod num_components)).
"""
# num_components = ceil(file_size / default_component_size)
num_components = _DivideAndCeil(file_size, default_component_size)
# num_components must be in the range [2, max_components]
num_components = max(min(num_components, max_components), 2)
# component_size = ceil(file_size / num_components)
component_size = _DivideAndCeil(file_size, num_components)
return (num_components, component_size)
def _DeleteObjectFn(cls, url_to_delete, thread_state=None):
    """Wrapper function to be used with command.Apply().

    Args:
      cls: Command instance used to resolve a cloud API handle.
      url_to_delete: URL of the object (with optional generation) to delete.
      thread_state: Per-thread state passed by Apply(); used to select the
        thread-local gsutil Cloud API instance.
    """
    gsutil_api = GetCloudApiInstance(cls, thread_state)
    gsutil_api.DeleteObject(
        url_to_delete.bucket_name, url_to_delete.object_name,
        generation=url_to_delete.generation, provider=url_to_delete.scheme)
def _ParseParallelUploadTrackerFile(tracker_file, tracker_file_lock):
    """Parse the tracker file from the last parallel composite upload attempt.

    If it exists, the tracker file is of the format described in
    _CreateParallelUploadTrackerFile. If the file doesn't exist or cannot be
    read, then the upload will start from the beginning.

    Args:
      tracker_file: The name of the file to parse.
      tracker_file_lock: Lock protecting access to the tracker file.

    Returns:
      random_prefix: A randomly-generated prefix to the name of the
                     temporary components.
      existing_objects: A list of ObjectFromTracker objects representing
                        the set of files that have already been uploaded.
    """

    def GenerateRandomPrefix():
        # Up to 10 decimal digits; only needs to distinguish this upload's
        # temporary components from other uploads'.
        return str(random.randint(1, (10 ** 10) - 1))

    existing_objects = []
    try:
        with tracker_file_lock:
            with open(tracker_file, 'r') as fp:
                lines = fp.readlines()
                lines = [line.strip() for line in lines]
                if not lines:
                    print('Parallel upload tracker file (%s) was invalid. '
                          'Restarting upload from scratch.' % tracker_file)
                    lines = [GenerateRandomPrefix()]
    except IOError as e:
        # We can't read the tracker file, so generate a new random prefix.
        lines = [GenerateRandomPrefix()]
        # Ignore non-existent file (happens first time an upload
        # is attempted on a file), but warn user for other errors.
        if e.errno != errno.ENOENT:
            # Will restart because we failed to read in the file.
            print('Couldn\'t read parallel upload tracker file (%s): %s. '
                  'Restarting upload from scratch.' % (tracker_file, e.strerror))

    # The first line contains the randomly-generated prefix.
    random_prefix = lines[0]

    # The remaining lines were written in pairs to describe a single component
    # in the form:
    #   object_name (without random prefix)
    #   generation
    # Newlines are used as the delimiter because only newlines and carriage
    # returns are invalid characters in object names, and users can specify
    # a custom prefix in the config file.
    i = 1
    while i < len(lines):
        (name, generation) = (lines[i], lines[i+1])
        if not generation:
            # Cover the '' case: an empty generation line means "none".
            generation = None
        existing_objects.append(ObjectFromTracker(name, generation))
        i += 2
    return (random_prefix, existing_objects)
def _AppendComponentTrackerToParallelUploadTrackerFile(tracker_file, component,
                                                       tracker_file_lock):
    """Appends info about the uploaded component to an existing tracker file.

    Follows the format described in _CreateParallelUploadTrackerFile.

    Args:
      tracker_file: Tracker file to append to.
      component: Component that was uploaded.
      tracker_file_lock: Thread and process-safe Lock for the tracker file.
    """
    new_lines = ['%s\n' % line for line in
                 _GetParallelUploadTrackerFileLinesForComponents([component])]
    with tracker_file_lock:
        with open(tracker_file, 'a') as tracker_fp:
            tracker_fp.writelines(new_lines)
def _CreateParallelUploadTrackerFile(tracker_file, random_prefix, components,
                                     tracker_file_lock):
    """Writes information about components that were successfully uploaded.

    This way the upload can be resumed at a later date. The tracker file has
    the format:
      random_prefix
      temp_object_1_name
      temp_object_1_generation
      .
      .
      .
      temp_object_N_name
      temp_object_N_generation
    where N is the number of components that have been successfully uploaded.

    Args:
      tracker_file: The name of the parallel upload tracker file.
      random_prefix: The randomly-generated prefix that was used for
                     uploading any existing components.
      components: A list of ObjectFromTracker objects that were uploaded.
      tracker_file_lock: The lock protecting access to the tracker file.

    Raises:
      Whatever RaiseUnwritableTrackerFileException raises when the tracker
      file cannot be written.
    """
    lines = [random_prefix]
    lines += _GetParallelUploadTrackerFileLinesForComponents(components)
    lines = [line + '\n' for line in lines]
    try:
        with tracker_file_lock:
            # Mode 'w' already truncates the file, so no separate
            # "clear" step (previously an extra open/close) is needed.
            with open(tracker_file, 'w') as f:
                f.writelines(lines)
    except IOError as e:
        RaiseUnwritableTrackerFileException(tracker_file, e.strerror)
def _GetParallelUploadTrackerFileLinesForComponents(components):
"""Return a list of the lines for use in a parallel upload tracker file.
The lines represent the given components, using the format as described in
_CreateParallelUploadTrackerFile.
Args:
components: A list of ObjectFromTracker objects that were uploaded.
Returns:
Lines describing components with their generation for outputting to the
tracker file.
"""
lines = []
for component in components:
generation = None
generation = component.generation
if not generation:
generation = ''
lines += [component.object_name, str(generation)]
return lines
def FilterExistingComponents(dst_args, existing_components, bucket_url,
                             gsutil_api):
    """Determines course of action for component objects.

    Given the list of all target objects based on partitioning the file and
    the list of objects that have already been uploaded successfully,
    this function determines which objects should be uploaded, which
    existing components are still valid, and which existing components should
    be deleted.

    Args:
      dst_args: The map of file_name -> PerformParallelUploadFileToObjectArgs
                calculated by partitioning the file.
      existing_components: A list of ObjectFromTracker objects that have been
                           uploaded in the past.
      bucket_url: CloudUrl of the bucket in which the components exist.
      gsutil_api: gsutil Cloud API instance to use for retrieving object
                  metadata.

    Returns:
      components_to_upload: List of components that need to be uploaded.
      uploaded_components: List of components that have already been
                           uploaded and are still valid.
      existing_objects_to_delete: List of components that have already
                                  been uploaded, but are no longer valid
                                  and are in a versioned bucket, and
                                  therefore should be deleted.
    """
    components_to_upload = []
    existing_component_names = [component.object_name
                                for component in existing_components]
    # Any target component with no previously-uploaded counterpart must be
    # uploaded from scratch.
    for component_name in dst_args:
        if component_name not in existing_component_names:
            components_to_upload.append(dst_args[component_name])

    objects_already_chosen = []

    # Don't reuse any temporary components whose MD5 doesn't match the current
    # MD5 of the corresponding part of the file. If the bucket is versioned,
    # also make sure that we delete the existing temporary version.
    existing_objects_to_delete = []
    uploaded_components = []
    for tracker_object in existing_components:
        if (tracker_object.object_name not in dst_args.keys()
            or tracker_object.object_name in objects_already_chosen):
            # This could happen if the component size has changed. This also
            # serves to handle object names that get duplicated in the tracker
            # file due to people doing things they shouldn't (e.g., overwriting
            # an existing temporary component in a versioned bucket).
            url = bucket_url.Clone()
            url.object_name = tracker_object.object_name
            url.generation = tracker_object.generation
            existing_objects_to_delete.append(url)
            continue

        dst_arg = dst_args[tracker_object.object_name]
        file_part = FilePart(dst_arg.filename, dst_arg.file_start,
                             dst_arg.file_length)
        # TODO: calculate MD5's in parallel when possible.
        content_md5 = CalculateB64EncodedMd5FromContents(file_part)

        try:
            # Get the MD5 of the currently-existing component.
            dst_url = dst_arg.dst_url
            dst_metadata = gsutil_api.GetObjectMetadata(
                dst_url.bucket_name, dst_url.object_name,
                generation=dst_url.generation, provider=dst_url.scheme,
                fields=['md5Hash', 'etag'])
            cloud_md5 = dst_metadata.md5Hash
        except Exception:  # pylint: disable=broad-except
            # We don't actually care what went wrong - we couldn't retrieve the
            # object to check the MD5, so just upload it again.
            cloud_md5 = None

        if cloud_md5 != content_md5:
            # Stale component: re-upload it, and delete the old version if
            # the bucket is versioned.
            components_to_upload.append(dst_arg)
            objects_already_chosen.append(tracker_object.object_name)
            if tracker_object.generation:
                # If the old object doesn't have a generation (i.e., it isn't
                # in a versioned bucket), then we will just overwrite it
                # anyway.
                invalid_component_with_generation = dst_arg.dst_url.Clone()
                invalid_component_with_generation.generation = tracker_object.generation
                existing_objects_to_delete.append(invalid_component_with_generation)
        else:
            # MD5 matches: reuse the already-uploaded component.
            url = dst_arg.dst_url.Clone()
            url.generation = tracker_object.generation
            uploaded_components.append(url)
            objects_already_chosen.append(tracker_object.object_name)

    if uploaded_components:
        logging.info('Found %d existing temporary components to reuse.',
                     len(uploaded_components))

    return (components_to_upload, uploaded_components,
            existing_objects_to_delete)
|
mnick/offlineimap | refs/heads/master | offlineimap/CustomConfig.py | 6 | # Copyright (C) 2003-2012 John Goerzen & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
try:
from ConfigParser import SafeConfigParser
except ImportError: #python3
from configparser import SafeConfigParser
from offlineimap.localeval import LocalEval
import os
class CustomConfigParser(SafeConfigParser):
    """SafeConfigParser extended with get*() variants that fall back to a
    caller-supplied default when an option is missing, plus offlineimap
    specific helpers."""

    def getdefault(self, section, option, default, *args, **kwargs):
        """Same as config.get, but returns the "default" option if there
        is no such option specified."""
        if not self.has_option(section, option):
            return default
        return self.get(section, option, *args, **kwargs)

    def getdefaultint(self, section, option, default, *args, **kwargs):
        """Like getint(), but returns *default* for a missing option."""
        if not self.has_option(section, option):
            return default
        return self.getint(section, option, *args, **kwargs)

    def getdefaultfloat(self, section, option, default, *args, **kwargs):
        """Like getfloat(), but returns *default* for a missing option."""
        if not self.has_option(section, option):
            return default
        return self.getfloat(section, option, *args, **kwargs)

    def getdefaultboolean(self, section, option, default, *args, **kwargs):
        """Like getboolean(), but returns *default* for a missing option."""
        if not self.has_option(section, option):
            return default
        return self.getboolean(section, option, *args, **kwargs)

    def getmetadatadir(self):
        """Return the metadata directory, creating it (mode 0700) if needed."""
        configured = self.getdefault("general", "metadata", "~/.offlineimap")
        metadatadir = os.path.expanduser(configured)
        if not os.path.exists(metadatadir):
            os.mkdir(metadatadir, 0o700)
        return metadatadir

    def getlocaleval(self):
        """Return a LocalEval for the configured pythonfile (or None)."""
        path = None
        if self.has_option("general", "pythonfile"):
            path = os.path.expanduser(self.get("general", "pythonfile"))
        return LocalEval(path)

    def getsectionlist(self, key):
        """Returns a list of sections that start with key + " ". That is,
        if key is "Account", returns all section names that start with
        "Account ", but strips off the "Account ". For instance, for
        "Account Test", returns "Test"."""
        prefix = key + ' '
        return [name[len(prefix):] for name in self.sections()
                if name.startswith(prefix)]

    def set_if_not_exists(self, section, option, value):
        """Set a value if it does not exist yet.

        This allows to set default if the user has not explicitly
        configured anything."""
        if self.has_option(section, option):
            return
        self.set(section, option, value)
def CustomConfigDefault():
    """Sentinel marking "no default was supplied".

    Just a constant that won't occur anywhere else; it lets the getconf*
    functions in ConfigHelperMixin derived classes distinguish "no default
    passed" from any real default value the user might provide.
    """
class ConfigHelperMixin:
    """Allow comfortable retrieving of config values pertaining to a section.

    If a class inherits from this cls:`ConfigHelperMixin`, it needs
    to provide 2 functions: meth:`getconfig` (returning a
    ConfigParser object) and meth:`getsection` (returning a string
    which represents the section to look up). All calls to getconf*
    will then return the configuration values for the ConfigParser
    object in the specific section."""

    def _confighelper_runner(self, option, default, defaultfunc, mainfunc):
        """Return the config value for getsection(), dispatching to
        *mainfunc* when no default was supplied and to *defaultfunc*
        otherwise."""
        # Note: '==' (not 'is') is kept deliberately to preserve the
        # original comparison semantics against the sentinel.
        if default == CustomConfigDefault:
            return mainfunc(self.getsection(), option)
        return defaultfunc(self.getsection(), option, default)

    def getconf(self, option, default=CustomConfigDefault):
        return self._confighelper_runner(
            option, default, self.getconfig().getdefault, self.getconfig().get)

    def getconfboolean(self, option, default=CustomConfigDefault):
        return self._confighelper_runner(
            option, default, self.getconfig().getdefaultboolean,
            self.getconfig().getboolean)

    def getconfint(self, option, default=CustomConfigDefault):
        return self._confighelper_runner(
            option, default, self.getconfig().getdefaultint,
            self.getconfig().getint)

    def getconffloat(self, option, default=CustomConfigDefault):
        return self._confighelper_runner(
            option, default, self.getconfig().getdefaultfloat,
            self.getconfig().getfloat)
|
rds0751/colinkers | refs/heads/master | env/Lib/site-packages/django/core/files/base.py | 59 | from __future__ import unicode_literals
import os
from io import BytesIO, StringIO, UnsupportedOperation
from django.core.files.utils import FileProxyMixin
from django.utils import six
from django.utils.encoding import (
force_bytes, force_str, force_text, python_2_unicode_compatible,
)
@python_2_unicode_compatible
class File(FileProxyMixin):
    """File-like wrapper around an underlying file object; attribute access
    is proxied to the wrapped object via FileProxyMixin."""

    # Default size, in bytes, of the chunks yielded by chunks().
    DEFAULT_CHUNK_SIZE = 64 * 2 ** 10

    def __init__(self, file, name=None):
        self.file = file
        if name is None:
            # Fall back to the wrapped object's own name (e.g. a file path).
            name = getattr(file, 'name', None)
        self.name = name
        if hasattr(file, 'mode'):
            # Remember the open mode so open() can reopen the file later.
            self.mode = file.mode

    def __str__(self):
        return force_text(self.name or '')

    def __repr__(self):
        return force_str("<%s: %s>" % (self.__class__.__name__, self or "None"))

    def __bool__(self):
        # A File is truthy iff it has a non-empty name.
        return bool(self.name)

    def __nonzero__(self):      # Python 2 compatibility
        return type(self).__bool__(self)

    def __len__(self):
        return self.size

    def _get_size_from_underlying_file(self):
        """Compute the size without caching: try the wrapped object's own
        size, then the filesystem, then seek-to-end as a last resort."""
        if hasattr(self.file, 'size'):
            return self.file.size
        if hasattr(self.file, 'name'):
            try:
                return os.path.getsize(self.file.name)
            except (OSError, TypeError):
                # Missing file or non-path name; fall through to seeking.
                pass
        if hasattr(self.file, 'tell') and hasattr(self.file, 'seek'):
            pos = self.file.tell()
            self.file.seek(0, os.SEEK_END)
            size = self.file.tell()
            # Restore the original position so reads are unaffected.
            self.file.seek(pos)
            return size
        raise AttributeError("Unable to determine the file's size.")

    def _get_size(self):
        # Cache the computed size; _set_size can override the cache.
        if hasattr(self, '_size'):
            return self._size
        self._size = self._get_size_from_underlying_file()
        return self._size

    def _set_size(self, size):
        self._size = size

    size = property(_get_size, _set_size)

    def chunks(self, chunk_size=None):
        """
        Read the file and yield chunks of ``chunk_size`` bytes (defaults to
        ``UploadedFile.DEFAULT_CHUNK_SIZE``).
        """
        if not chunk_size:
            chunk_size = self.DEFAULT_CHUNK_SIZE
        try:
            self.seek(0)
        except (AttributeError, UnsupportedOperation):
            # Non-seekable streams are read from their current position.
            pass
        while True:
            data = self.read(chunk_size)
            if not data:
                break
            yield data

    def multiple_chunks(self, chunk_size=None):
        """
        Returns ``True`` if you can expect multiple chunks.

        NB: If a particular file representation is in memory, subclasses should
        always return ``False`` -- there's no good reason to read from memory in
        chunks.
        """
        if not chunk_size:
            chunk_size = self.DEFAULT_CHUNK_SIZE
        return self.size > chunk_size

    def __iter__(self):
        # Iterate over this file-like object by newlines, reading chunkwise
        # and preserving newline characters in the yielded lines.
        buffer_ = None
        for chunk in self.chunks():
            for line in chunk.splitlines(True):
                if buffer_:
                    if endswith_cr(buffer_) and not equals_lf(line):
                        # Line split after a \r newline; yield buffer_.
                        yield buffer_
                        # Continue with line.
                    else:
                        # Line either split without a newline (line
                        # continues after buffer_) or with \r\n
                        # newline (line == b'\n').
                        line = buffer_ + line
                    # buffer_ handled, clear it.
                    buffer_ = None

                # If this is the end of a \n or \r\n line, yield.
                if endswith_lf(line):
                    yield line
                else:
                    # Line may continue into the next chunk; hold it back.
                    buffer_ = line

        if buffer_ is not None:
            yield buffer_

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.close()

    def open(self, mode=None):
        if not self.closed:
            # Already open: just rewind.
            self.seek(0)
        elif self.name and os.path.exists(self.name):
            self.file = open(self.name, mode or self.mode)
        else:
            raise ValueError("The file cannot be reopened.")

    def close(self):
        self.file.close()
@python_2_unicode_compatible
class ContentFile(File):
    """
    A File-like object that takes just raw content, rather than an actual file.
    """
    def __init__(self, content, name=None):
        if six.PY3:
            # Text content goes into a StringIO, binary into a BytesIO.
            if isinstance(content, six.text_type):
                stream_class = StringIO
            else:
                stream_class = BytesIO
        else:
            stream_class = BytesIO
            content = force_bytes(content)
        super(ContentFile, self).__init__(stream_class(content), name=name)
        self.size = len(content)

    def __str__(self):
        return 'Raw content'

    def __bool__(self):
        return True

    def __nonzero__(self):  # Python 2 compatibility
        return type(self).__bool__(self)

    def open(self, mode=None):
        # Content lives in memory; "reopening" just rewinds the stream.
        self.seek(0)

    def close(self):
        # Keep the in-memory stream usable after close().
        pass
def endswith_cr(line):
    """
    Return True if line (a text or byte string) ends with '\r'.
    """
    cr = '\r' if isinstance(line, six.text_type) else b'\r'
    return line.endswith(cr)
def endswith_lf(line):
    """
    Return True if line (a text or byte string) ends with '\n'.
    """
    lf = '\n' if isinstance(line, six.text_type) else b'\n'
    return line.endswith(lf)
def equals_lf(line):
    """
    Return True if line (a text or byte string) equals '\n'.
    """
    lf = '\n' if isinstance(line, six.text_type) else b'\n'
    return line == lf
|
OptoFidelity/cerbero | refs/heads/master | cerbero/ide/xcode/fwlib.py | 7 | #!/usr/bin/env python
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import tempfile
import shutil
from collections import defaultdict
from cerbero.config import Architecture
from cerbero.ide.pkgconfig import PkgConfig
from cerbero.utils import shell
from cerbero.utils import messages as m
class FrameworkLibrary(object):
    '''
    Combine several shared libraries into a single shared library to be used
    as a Framework.
    The complete list of shared libraries needed are guessed with pkg-config
    but full paths can be used too with use_pkgconfig=False
    '''

    def __init__(self, libname, install_name, libraries, arch):
        self.libname = libname
        self.install_name = install_name
        self.libraries = libraries
        self.arch = arch
        self.use_pkgconfig = True
        self.universal_archs = None

    def create(self):
        """Resolve the library list (via pkg-config if enabled) and build
        the framework library for the configured arch."""
        if self.arch == Architecture.X86:
            # clang/lipo expect the Apple arch spelling.
            self.arch = 'i386'
        if self.use_pkgconfig:
            libraries = self._libraries_paths(self.libraries)
        else:
            libraries = self.libraries

        self._create_framework_library(libraries)

    def _libraries_paths(self, libraries):
        """Map pkg-config package names to full paths of their libraries,
        taking the first matching file across the pkg-config libdirs."""
        pkgconfig = PkgConfig(libraries)
        libdirs = pkgconfig.libraries_dirs()
        libs = pkgconfig.libraries()
        libspaths = []
        for lib in libs:
            for libdir in libdirs:
                libpath = os.path.join(libdir, self._get_lib_file_name(lib))
                if not os.path.exists(libpath):
                    continue
                # realpath resolves symlinks (e.g. libfoo.dylib -> libfoo.1.dylib).
                libspaths.append(os.path.realpath(libpath))
                break
        return libspaths

    def _create_framework_library(self, libraries):
        # Bug fix: was "raise NotImplemented", which raises a TypeError
        # ("exceptions must derive from BaseException") instead of
        # signalling an abstract method.
        raise NotImplementedError

    def _get_lib_file_name(self, lib):
        return lib
class DynamicFrameworkLibrary(FrameworkLibrary):
    """Builds a dynamic framework library by re-exporting every input
    dylib into a single clang -dynamiclib link."""

    def _create_framework_library(self, libraries):
        reexports = ' '.join('-Wl,-reexport_library %s' % lib
                             for lib in libraries)
        shell.call('clang -dynamiclib -o %s -arch %s -install_name %s %s' %
                   (self.libname, self.arch, self.install_name, reexports))

    def _get_lib_file_name(self, lib):
        return 'lib%s.dylib' % lib
class StaticFrameworkLibrary(FrameworkLibrary):
    """Builds a static framework library by merging the object files of all
    input .a archives, per architecture, then lipo-ing the results."""

    def _get_lib_file_name(self, lib):
        return 'lib%s.a' % lib

    def _split_static_lib(self, lib, thin_arch=None):
        '''Splits the static lib @lib into its object files

        Splits the static lib @lib into its object files and returns
        a new temporary directory where the .o files should be found.

        if @thin_arch was provided, it considers the @lib to be a fat
        binary and takes its thin version for the @thin_arch specified
        before retrieving the object files.
        '''
        lib_tmpdir = tempfile.mkdtemp()
        shutil.copy(lib, lib_tmpdir)
        tmplib = os.path.join(lib_tmpdir, os.path.basename(lib))

        if thin_arch:  # should be a fat file, split only to the arch we want
            newname = '%s_%s' % (thin_arch, os.path.basename(lib))
            shell.call('lipo %s -thin %s -output %s' % (tmplib,
                       thin_arch, newname), lib_tmpdir)
            tmplib = os.path.join(lib_tmpdir, newname)

        shell.call('ar -x %s' % tmplib, lib_tmpdir)

        # object files with the same name in an archive are overwritten
        # when they are extracted. osx's ar does not support the N count
        # modifier so after extracting all the files we remove them from
        # the archive to extract those with duplicated names.
        # eg:
        # ar t libavcodec.a -> mlpdsp.o mlpdsp.o (2 objects with the same name)
        # ar d libavcodec.a mlpdsp.o (we remove the first one)
        # ar t libavcodec.a -> mlpdsp.o (we only have the second one now)
        files = shell.check_call('ar -t %s' % tmplib, lib_tmpdir).split('\n')
        # FIXME: We should use collections.Count but it's only available in
        # python 2.7+
        dups = defaultdict(int)
        for f in files:
            dups[f] += 1
        for f in dups:
            if dups[f] <= 1:
                continue
            for x in range(dups[f]):
                path = os.path.join(lib_tmpdir, f)
                new_path = os.path.join(lib_tmpdir, 'dup%d_' % x + f)
                # The duplicated overwrote the first one, so extract it again
                shell.call('ar -x %s %s' % (tmplib, f), lib_tmpdir)
                shutil.move(path, new_path)
                shell.call('ar -d %s %s' % (tmplib, f), lib_tmpdir)
        return lib_tmpdir

    def _check_duplicated_symbols(self, files, tmpdir):
        """Warn about exported (nm type 'T') symbols that are defined in
        more than one of *files*."""
        for f in files:
            syms = defaultdict(list)
            symbols = shell.check_call('nm -UA %s' % f, tmpdir).split('\n')
            # nm output is: test.o: 00000000 T _gzwrite
            # (filename, address, symbol type, symbols_name)
            for s in symbols:
                s = s.split(' ')
                if len(s) == 4 and s[2] == 'T':
                    syms[s[3]].append(s)
            dups = {}
            # NOTE(review): dict.iteritems() is Python 2 only; this class
            # will not run unmodified on Python 3.
            for k, v in syms.iteritems():
                if len(v) > 1:
                    dups[k] = v
            if dups:
                m.warning("The static library contains duplicated symbols")
                for k, v in dups.iteritems():
                    m.message(k)  # symbol name
                    for l in v:
                        m.message(" %s" % l[0])  # file
|
Vagab0nd/SiCKRAGE | refs/heads/master | lib3/cryptography/hazmat/backends/openssl/x448.py | 4 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.hazmat.backends.openssl.utils import _evp_pkey_derive
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.x448 import (
X448PrivateKey,
X448PublicKey,
)
_X448_KEY_SIZE = 56
@utils.register_interface(X448PublicKey)
class _X448PublicKey(object):
    def __init__(self, backend, evp_pkey):
        self._backend = backend
        self._evp_pkey = evp_pkey

    def public_bytes(self, encoding, format):
        """Serialize the key; Raw encoding and Raw format must be used
        together, otherwise defer to the backend serializer."""
        raw_encoding = encoding is serialization.Encoding.Raw
        raw_format = format is serialization.PublicFormat.Raw
        if raw_encoding or raw_format:
            if not (raw_encoding and raw_format):
                raise ValueError(
                    "When using Raw both encoding and format must be Raw"
                )
            return self._raw_public_bytes()
        return self._backend._public_key_bytes(
            encoding, format, self, self._evp_pkey, None
        )

    def _raw_public_bytes(self):
        """Extract the 56 raw public key bytes from the EVP_PKEY."""
        ffi = self._backend._ffi
        buf = ffi.new("unsigned char []", _X448_KEY_SIZE)
        buflen = ffi.new("size_t *", _X448_KEY_SIZE)
        res = self._backend._lib.EVP_PKEY_get_raw_public_key(
            self._evp_pkey, buf, buflen
        )
        self._backend.openssl_assert(res == 1)
        self._backend.openssl_assert(buflen[0] == _X448_KEY_SIZE)
        return ffi.buffer(buf, _X448_KEY_SIZE)[:]
@utils.register_interface(X448PrivateKey)
class _X448PrivateKey(object):
    def __init__(self, backend, evp_pkey):
        self._backend = backend
        self._evp_pkey = evp_pkey

    def public_key(self):
        """Derive the public half by extracting the raw public bytes."""
        ffi = self._backend._ffi
        buf = ffi.new("unsigned char []", _X448_KEY_SIZE)
        buflen = ffi.new("size_t *", _X448_KEY_SIZE)
        res = self._backend._lib.EVP_PKEY_get_raw_public_key(
            self._evp_pkey, buf, buflen
        )
        self._backend.openssl_assert(res == 1)
        self._backend.openssl_assert(buflen[0] == _X448_KEY_SIZE)
        return self._backend.x448_load_public_bytes(buf)

    def exchange(self, peer_public_key):
        """Perform the X448 key exchange against *peer_public_key*."""
        if not isinstance(peer_public_key, X448PublicKey):
            raise TypeError("peer_public_key must be X448PublicKey.")
        return _evp_pkey_derive(self._backend, self._evp_pkey, peer_public_key)

    def private_bytes(self, encoding, format, encryption_algorithm):
        """Serialize the key; Raw output requires Raw encoding, Raw
        private format, and no encryption."""
        raw_requested = (
            encoding is serialization.Encoding.Raw
            or format is serialization.PublicFormat.Raw
        )
        if raw_requested:
            raw_ok = (
                format is serialization.PrivateFormat.Raw
                and encoding is serialization.Encoding.Raw
                and isinstance(encryption_algorithm, serialization.NoEncryption)
            )
            if not raw_ok:
                raise ValueError(
                    "When using Raw both encoding and format must be Raw "
                    "and encryption_algorithm must be NoEncryption()"
                )
            return self._raw_private_bytes()
        return self._backend._private_key_bytes(
            encoding, format, encryption_algorithm, self, self._evp_pkey, None
        )

    def _raw_private_bytes(self):
        """Extract the 56 raw private key bytes from the EVP_PKEY."""
        ffi = self._backend._ffi
        buf = ffi.new("unsigned char []", _X448_KEY_SIZE)
        buflen = ffi.new("size_t *", _X448_KEY_SIZE)
        res = self._backend._lib.EVP_PKEY_get_raw_private_key(
            self._evp_pkey, buf, buflen
        )
        self._backend.openssl_assert(res == 1)
        self._backend.openssl_assert(buflen[0] == _X448_KEY_SIZE)
        return ffi.buffer(buf, _X448_KEY_SIZE)[:]
|
TonyJenkins/cfs2160-python | refs/heads/master | 04classes/bank_account.py | 1 | #!/usr/bin/env python3
# bank_account.py
#
# Simple Bank Account class example.
#
# AMJ
# 2017-04-01
class BankAccount:
    """A simple bank account with an optional overdraft facility.

    The balance is private; it can only change through deposit() and
    withdraw(), which silently ignore non-numeric or invalid amounts
    (matching the original best-effort behaviour).
    """

    def __init__(self, account_number, account_holder, has_overdraft):
        self.account_number = account_number
        self.account_holder = account_holder
        self.has_overdraft = has_overdraft
        self.__balance = 0.0

    @property
    def balance(self):
        """Current balance (read-only)."""
        return self.__balance

    def deposit(self, deposit_amount):
        """Add a positive amount to the balance; ignore anything else."""
        try:
            if deposit_amount > 0:
                # BUG FIX: previously added the undefined name `deposit`,
                # raising NameError (which `except TypeError` did not catch).
                self.__balance += deposit_amount
        except TypeError:
            # Non-numeric amount: best-effort no-op, as designed.
            pass

    def withdraw(self, withdraw_amount):
        """Withdraw a positive amount if covered by the balance or by
        the account's overdraft facility; otherwise do nothing."""
        try:
            # BUG FIX: the original tested `withdraw_amount >= balance`
            # (inverted) and referenced bare `has_overdraft` (NameError).
            if withdraw_amount > 0 and (
                    withdraw_amount <= self.__balance or self.has_overdraft):
                self.__balance -= withdraw_amount
        except TypeError:
            pass

    def __str__(self):
        return "Account: {:} Holder: {:} Balance: {:}".format(
            self.account_number, self.account_holder, self.balance)
|
RedHatQE/cinch | refs/heads/master | cinch/library/line_match.py | 2 | #!/usr/bin/env python
import re
from ansible.module_utils.basic import AnsibleModule
from os import path
def main():
    """Ansible module: report whether a given line (literal text or regex)
    is present in a file.

    Exit JSON fields:
      exists  - the file exists and is a regular file
      present - some line of the file matches
    """
    module = AnsibleModule(
        argument_spec={
            'file': {'required': True},
            'line': {'required': False},
            'method': {'choices': ['regex', 'simple'], 'default': 'simple'}
        }
    )
    file_path = module.params['file']
    # isfile() alone suffices: it is False for missing paths as well as for
    # directories, so the previous extra exists() check was redundant.
    if not path.isfile(file_path):
        module.exit_json(changed=False, exists=False, present=False)
    # Create the method that will do the matching
    if module.params['method'] == 'regex':
        expression = re.compile(module.params['line'])

        def matcher(text):
            return expression.search(text) is not None
    else:
        def matcher(text):
            return text == module.params['line']
    # Read the file, line by line, and check for matches
    with open(file_path, 'r') as reader:
        for line in reader:
            # BUG FIX: lines read from the file keep their trailing newline,
            # so a 'simple' equality match against the bare parameter could
            # never succeed (except on a final line with no newline).  Strip
            # the line ending before matching.
            if matcher(line.rstrip('\r\n')):
                module.exit_json(changed=False, exists=True, present=True)
    module.exit_json(changed=False, exists=True, present=False)


main()
|
tfroehlich82/EventGhost | refs/heads/master | _build/data/Py2ExeBootScript.py | 2 | # -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2020 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
"""
This script is executed by py2exe for the frozen application directly after
boot_common.py in the py2exe site package has been executed.
The drawback of the default boot_common.py is, that it directs sys.stderr
to a log file (that's fine so far) in the applications program folder (that's
bad for user accounts with limited priviliges).
So we redirect the sys.stderr to a log file in the applications data folder.
"""
import linecache
import sys
class StdErrReplacement(object):
    """File-like sys.stderr replacement for the frozen (py2exe) app.

    Appends everything written to stderr to Log.txt inside the
    application's %APPDATA% folder (writable for limited accounts, unlike
    the program folder), and — unless started with -q/-quiet — registers
    an atexit prompt offering to open the log in Notepad.
    """
    softspace = 0
    # _file: open log file handle, created lazily on the first write().
    _file = None
    # _error: exception raised while opening the log file, if any; once set,
    # no further open attempts are made.
    _error = None
    _logFilePath = None
    _displayMessage = True
    encoding = "mbcs"

    def write(self, text):
        """Append *text* to the log file, opening/creating it on first use."""
        # The whole setup branch runs at most once: afterwards either _file
        # or _error is set and the branch is skipped.
        if self._file is None and self._error is None:
            if self._logFilePath is None:
                import os
                prgName = os.path.splitext(os.path.basename(sys.executable))[0]  # NOQA
                prgAppDataPath = os.path.join(os.environ["APPDATA"], prgName)
                self._logFilePath = os.path.join(prgAppDataPath, "Log.txt")
                try:
                    if not os.path.exists(prgAppDataPath):
                        os.mkdir(prgAppDataPath)
                    self._file = open(self._logFilePath, 'a')
                except Exception, details:
                    # Could not open the log: remember why, and (unless quiet)
                    # show a message box when the process exits.
                    self._error = details
                    if "-q" not in sys.argv and "-quiet" not in sys.argv:  # NOQA
                        import atexit
                        import ctypes
                        atexit.register(
                            ctypes.windll.user32.MessageBoxA,
                            0,
                            "The logfile '%s' could not be opened:\n %s" % (
                                self._logFilePath,
                                details
                            ),
                            "Error occurred in EventGhost",
                            0
                        )
                else:
                    # Log opened successfully: arrange the "errors occurred"
                    # prompt for process exit (only shown if anything was
                    # actually written — see __DisplayMessage/_displayMessage).
                    if "-q" not in sys.argv and "-quiet" not in sys.argv:  # NOQA
                        import atexit
                        atexit.register(self.__DisplayMessage)
        if self._file is not None:
            self._file.write(text)
            # Flush per write so the log survives a hard crash.
            self._file.flush()

    def flush(self):
        """File-protocol flush; no-op until the log file exists."""
        if self._file is not None:
            self._file.flush()

    def __DisplayMessage(self):
        """atexit hook: ask the user whether to open the log in Notepad."""
        if not self._displayMessage:
            return
        import ctypes
        # MB_YESNO (4); IDYES == 6.
        result = ctypes.windll.user32.MessageBoxA(
            0,
            (
                'See the logfile "%s" for details.\n\n'
                "Do you want to open the file now?"
            ) % self._logFilePath,
            "Errors occurred in EventGhost",
            4
        )
        if result == 6:
            import subprocess
            subprocess.Popen('Notepad.exe "%s"' % self._logFilePath)
# Route all stderr output through the logging replacement above.
sys.stderr = StdErrReplacement()

# py2exe's boot_common.py disables linecache.getline(), which
# traceback.extract_stack() needs when an exception occurs in order to
# read the source lines embedded in the packaged python code.
# Re-enable it here.
linecache.getline = linecache.orig_getline

# Clean up the boot script's module namespace.
del linecache
del sys
del StdErrReplacement
|
lz1988/company-site | refs/heads/master | django/conf/locale/ar/formats.py | 234 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Locale formats for Arabic (ar).  Settings left commented out fall back
# to Django's global defaults.
DATE_FORMAT = 'j F، Y'
TIME_FORMAT = 'g:i:s A'
# DATETIME_FORMAT =
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd/m/Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
mapr/hue | refs/heads/hue-3.9.0-mapr | desktop/core/ext-py/Django-1.6.10/django/conf/locale/tr/formats.py | 118 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Locale formats for Turkish (tr).
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'd F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'd F'
SHORT_DATE_FORMAT = 'd M Y'
SHORT_DATETIME_FORMAT = 'd M Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # Pazartesi (Monday)

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y',  # '25/10/2006', '25/10/06'
    '%y-%m-%d',              # '06-10-25'
    # '%d %B %Y', '%d %b. %Y',  # '25 Ekim 2006', '25 Eki. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d/%m/%Y %H:%M:%S',     # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',  # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',        # '25/10/2006 14:30'
    '%d/%m/%Y',              # '25/10/2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
davidsminor/gaffer | refs/heads/master | python/GafferUI/SectionedCompoundPlugValueWidget.py | 5 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferUI
class _Section( GafferUI.CompoundPlugValueWidget ) :
    """A collapsible widget showing a named subset of a plug's children."""

    def __init__( self, plug, collapsed, label, summary, names, **kw ) :

        GafferUI.CompoundPlugValueWidget.__init__( self, plug, collapsed, label, summary, **kw )

        # Remember which child plugs belong in this section.
        self.__childNames = names

    def _childPlugs( self ) :

        parentPlug = self.getPlug()
        return [ parentPlug[childName] for childName in self.__childNames ]
class SectionedCompoundPlugValueWidget( GafferUI.PlugValueWidget ) :
    """Displays the children of a compound plug grouped into collapsible
    sections.  Each entry of *sections* is a dict with keys:

      "label"     : section title (required)
      "names"     : list of child plug names shown in the section (required)
      "collapsed" : initial collapsed state (default True)
      "summary"   : optional summary shown when collapsed (default None)
    """

    def __init__( self, plug, sections, **kw ) :

        self.__column = GafferUI.ListContainer( spacing = 8 )

        GafferUI.PlugValueWidget.__init__( self, self.__column, plug, **kw )

        # Widgets constructed inside this "with" block are automatically
        # parented to the column, one _Section per entry of *sections*.
        with self.__column :
            for section in sections :
                _Section(
                    self.getPlug(),
                    collapsed = section.get( "collapsed", True ),
                    label = section["label"],
                    summary = section.get( "summary", None ),
                    names = section["names"],
                )

    def hasLabel( self ) :

        # The section labels act as the label; no extra plug label needed.
        return True

    def childPlugValueWidget( self, childPlug, lazy=True ) :

        # Delegate to each section in turn until one owns the child plug.
        for section in self.__column :
            result = section.childPlugValueWidget( childPlug, lazy )
            if result is not None :
                return result

        return None

    def setReadOnly( self, readOnly ) :

        if readOnly == self.getReadOnly() :
            return

        GafferUI.PlugValueWidget.setReadOnly( self, readOnly )

        # Propagate to every section so the nested widgets update too.
        for section in self.__column :
            section.setReadOnly( readOnly )

    def _updateFromPlug( self ) :

        # The child widgets update themselves; nothing to do at this level.
        pass
|
ashhher3/invenio | refs/heads/master | modules/webstyle/lib/template.py | 32 | ## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio templating framework."""
from __future__ import nested_scopes
import os, sys, inspect, getopt, new, cgi
try:
# This tool can be run before Invenio is installed:
# invenio files might then not exist.
from invenio.config import \
CFG_WEBSTYLE_TEMPLATE_SKIN, \
CFG_PREFIX, \
CFG_WEBSTYLE_INSPECT_TEMPLATES
CFG_WEBSTYLE_PYLIBDIR = CFG_PREFIX + os.sep + 'lib' + os.sep + 'python'
except ImportError:
CFG_WEBSTYLE_PYLIBDIR = None
# List of deprecated template functions, keyed by module name; the value
# explains what replaces each deprecated function.
# Eg. {'webstyle': {'tmpl_records_format_other':"Replaced by .."}}
CFG_WEBSTYLE_DEPRECATED_FUNCTIONS = {'webstyle': \
    {'tmpl_records_format_other': "Replaced by " + \
     "websearch_templates.tmpl_detailed_record_metadata(..), " + \
     "websearch_templates.tmpl_detailed_record_references(..), " + \
     "websearch_templates.tmpl_detailed_record_statistics(..), " + \
     "webcomment_templates.tmpl_get_comments(..), " + \
     "webcomment_templates.tmpl_mini_review(..)," + \
     "websubmit_templates.tmpl_filelist(..) and " + \
     "HDFILE + HDACT + HDREF output formats",
     'detailed_record_container': "Replaced by " + \
     "detailed_record_container_top and " + \
     "detailed_record_container_bottom"},
    'websearch': \
    {'tmpl_detailed_record_citations': "Replaced by " + \
     "tmpl_detailed_record_citations_prologue" + \
     "tmpl_detailed_record_citations_epilogue" + \
     "tmpl_detailed_record_citations_citing_list" + \
     "tmpl_detailed_record_citations_citation_history" + \
     "tmpl_detailed_record_citations_cociting" + \
     "tmpl_detailed_record_citations_self_cited"}
    }
# List of deprecated template function parameters, keyed by module then
# function name; the value explains the replacement.
# Eg. {'webstyle': {'get_page':{'header': "replaced by 'title'"}}}
CFG_WEBSTYLE_DEPRECATED_PARAMETERS = {}
## Thanks to Python CookBook for this!
def enhance_method(module, klass, method_name, replacement):
    """Monkey-patch klass.method_name so that every call is routed through
    *replacement*, which receives (module, original_method, method_name)
    ahead of the normal call arguments.  Idempotent: already-wrapped
    methods (and non-methods) are left alone."""
    old_method = getattr(klass, method_name)
    try:
        if type(old_method) is not new.instancemethod or old_method.__name__ == 'new_method':
            ## not a method or Already wrapped
            return
    except AttributeError:
        # NOTE(review): raising a string is a long-deprecated Python 2
        # "string exception" — presumably meant to be a real Exception.
        raise '%s %s %s %s' % (module, klass, method_name, old_method)
    def new_method(*args, **kwds):
        return replacement(module, old_method, method_name, *args, **kwds)
    setattr(klass, method_name, new.instancemethod(new_method, None, klass))
def method_wrapper(module, old_method, method_name, self, *args, **kwds):
    """Replacement installed by enhance_method(): run the original template
    function and, when it returns a string, wrap the HTML output in a span
    that identifies the generating template function and its call
    arguments (used for template inspection/debugging)."""
    def shortener(text):
        # Abbreviate long argument reprs for the tooltip signature.
        if len(text) > 205:
            return text[:100] + ' ... ' + text[-100:]
        else:
            return text
    ret = old_method(self, *args, **kwds)
    if ret and type(ret) is str:
        # Build a human-readable "module_templates/function(args)" signature.
        params = ', '.join([shortener(repr(arg)) for arg in args] + ['%s=%s' % (item[0], shortener(repr(item[1]))) for item in kwds.items()])
        signature = '%s_templates/%s(%s)' % (module, method_name, params)
        signature_q = '%s_templates/%s' % (module, method_name)
        return '<span title="%(signature)s" style="border: thin solid red;"><!-- BEGIN TEMPLATE %(signature_q)s BEGIN TEMPLATE --><span style="color: red; font-size: xx-small; font-style: normal; font-family: monospace; float: both">*</span>%(result)s<!-- END TEMPLATE %(signature_q)s END TEMPLATE --></span>' % {
            'signature_q' : cgi.escape(signature_q),
            'signature' : cgi.escape(signature, True),
            'result' : ret
        }
    else:
        # Non-string results (or empty output) are passed through untouched.
        return ret
def load(module=''):
    """ Load and returns a template class, given a module name (like
        'websearch', 'webbasket',...). The module corresponding to
        the currently selected template model (see invenio.conf,
        variable CFG_WEBSTYLE_TEMPLATE_SKIN) is tried first. In case it does
        not exist, it returns the default template for that module.
    """
    local = {}
    # load the right template based on the CFG_WEBSTYLE_TEMPLATE_SKIN and
    # the specified module
    if CFG_WEBSTYLE_TEMPLATE_SKIN == "default":
        mymodule = __import__("invenio.%s_templates" % (module), local, local,
                              ["invenio.templates.%s" % (module)])
    else:
        try:
            # Try the skin-specific template module first...
            mymodule = __import__("invenio.%s_templates_%s" % (module, CFG_WEBSTYLE_TEMPLATE_SKIN), local, local,
                                  ["invenio.templates.%s_%s" % (module, CFG_WEBSTYLE_TEMPLATE_SKIN)])
        except ImportError:
            # ...falling back to the default template module.
            mymodule = __import__("invenio.%s_templates" % (module), local, local,
                                  ["invenio.templates.%s" % (module)])
    if CFG_WEBSTYLE_INSPECT_TEMPLATES:
        # Debug mode: wrap every tmpl_* function so its HTML output is
        # annotated with the generating function (see method_wrapper()).
        for method_name in dir(mymodule.Template):
            if method_name.startswith('tmpl_'):
                enhance_method(module, mymodule.Template, method_name, method_wrapper)
    return mymodule.Template()
# Functions to check that customized templates functions conform to
# the default templates functions
##
def check(default_base_dir=None, custom_base_dir=None):
    """
    Check that installed customized templates are conform to the
    default templates interfaces.

    default_base_dir - directory searched for the default templates
    custom_base_dir  - directory searched for the customized templates

    Result of the analysis is reported back in 'messages' object
    (see 'messages' structure description in print_messages(..) docstring)
    """
    messages = []
    if CFG_WEBSTYLE_PYLIBDIR is None:
        # Nothing to check, since Invenio has not been installed
        messages.append(('C', "Nothing to check. Run 'make install' first.",
                         '',
                         None,
                         0))
        return messages
    # Iterage over all customized templates
    for (default_template_path, custom_template_path) in \
            get_custom_templates(get_default_templates(default_base_dir), custom_base_dir):
        # Load the custom and default templates.  Their directories must be
        # on sys.path for __import__ to find them.
        default_tpl_path, default_tpl_name = os.path.split(default_template_path)
        if default_tpl_path not in sys.path:
            sys.path.append(default_tpl_path)
        custom_tpl_path, custom_tpl_name = os.path.split(custom_template_path)
        if custom_tpl_path not in sys.path:
            sys.path.append(custom_tpl_path)
        default_template = __import__(default_tpl_name[:-3],
                                      globals(),
                                      locals(),
                                      [''])
        custom_template = __import__(custom_tpl_name[:-3],
                                     globals(),
                                     locals(),
                                     [''])
        # Check if Template class is in the file
        classes = inspect.getmembers(custom_template, inspect.isclass)
        if 'Template' not in [possible_class[0] for possible_class in classes]:
            messages.append(('E', "'Template' class missing",
                             custom_template.__name__,
                             None,
                             0))
            continue
        # Check customized functions parameters
        for (default_function_name, default_function) in \
                inspect.getmembers(default_template.Template, inspect.isroutine):
            if custom_template.Template.__dict__.has_key(default_function_name):
                # Customized function exists
                custom_function = custom_template.Template.__dict__[default_function_name]
                (deft_args, deft_varargs, deft_varkw, deft_defaults) = \
                    inspect.getargspec(default_function.im_func)
                (cust_args, cust_varargs, cust_varkw, cust_defaults) = \
                    inspect.getargspec(custom_function)
                # Reverse both lists so that args pair up with their default
                # values (defaults apply to the *last* arguments).
                deft_args.reverse()
                if deft_defaults is not None:
                    deft_defaults_list = list(deft_defaults)
                    deft_defaults_list.reverse()
                else:
                    deft_defaults_list = []
                cust_args.reverse()
                if cust_defaults is not None:
                    cust_defaults_list = list(cust_defaults)
                    cust_defaults_list.reverse()
                else:
                    cust_defaults_list = []
                arg_errors = False
                # Check for presence of missing parameters in custom template
                for deft_arg in deft_args:
                    if deft_arg not in cust_args:
                        arg_errors = True
                        messages.append(('E', "missing '%s' parameter" % \
                                         deft_arg,
                                         custom_tpl_name,
                                         default_function_name,
                                         inspect.getsourcelines(custom_function)[1]))
                # Check for presence of additional parameters in custom template
                for cust_arg in cust_args:
                    if cust_arg not in deft_args:
                        arg_errors = True
                        messages.append(('E', "unknown parameter '%s'" % \
                                         cust_arg,
                                         custom_tpl_name,
                                         custom_function.__name__,
                                         inspect.getsourcelines(custom_function)[1]))
                        # If parameter is deprecated, report it
                        module_name = default_tpl_name.split("_")[0]
                        if CFG_WEBSTYLE_DEPRECATED_PARAMETERS.has_key(module_name) and \
                           CFG_WEBSTYLE_DEPRECATED_PARAMETERS[module_name].has_key(default_function_name) and \
                           CFG_WEBSTYLE_DEPRECATED_PARAMETERS[module_name][default_function_name].has_key(cust_arg):
                            messages.append(('C', CFG_WEBSTYLE_DEPRECATED_PARAMETERS[module_name][default_function_name][cust_arg],
                                             custom_tpl_name,
                                             custom_function.__name__,
                                             inspect.getsourcelines(custom_function)[1]))
                # Check for same ordering of parameters.
                # Only raise warning if previous parameter tests did
                # not generate errors
                if not arg_errors:
                    for cust_arg, deft_arg in map(None, cust_args, deft_args):
                        if deft_arg != cust_arg:
                            arg_errors = True
                            messages.append(('W', "order of parameters is not respected",
                                             custom_tpl_name,
                                             custom_function.__name__,
                                             inspect.getsourcelines(custom_function)[1]))
                            break
                # Check for equality of default parameters values
                # Only raise warning if previous parameter tests did
                # not generate errors or warnings
                if not arg_errors:
                    i = 0
                    for cust_default, deft_default in \
                            map(None, cust_defaults_list, deft_defaults_list):
                        if deft_default != cust_default:
                            messages.append(('W', "default value for parameter '%s' is not respected" % \
                                             cust_args[i],
                                             custom_tpl_name,
                                             default_function_name,
                                             inspect.getsourcelines(custom_function)[1]))
                        i += 1
            else:
                # Function is not in custom template. Generate warning?
                pass
        # Check for presence of additional functions in custom template
        for (custom_function_name, custom_function) in \
                inspect.getmembers(custom_template.Template, inspect.isroutine):
            if not default_template.Template.__dict__.has_key(custom_function_name):
                messages.append(('W', "unknown function",
                                 custom_tpl_name,
                                 custom_function_name,
                                 inspect.getsourcelines(custom_function)[1]))
                # If the function was deprecated, report it
                module_name = default_tpl_name.split("_")[0]
                if CFG_WEBSTYLE_DEPRECATED_FUNCTIONS.has_key(module_name) and \
                   CFG_WEBSTYLE_DEPRECATED_FUNCTIONS[module_name].has_key(custom_function_name):
                    messages.append(('C', CFG_WEBSTYLE_DEPRECATED_FUNCTIONS[module_name][custom_function_name],
                                     custom_tpl_name,
                                     custom_function_name,
                                     inspect.getsourcelines(custom_function)[1]))
    return messages
# Utility functions
##
def get_default_templates(base_dir=None):
    """
    Returns the paths to all default Invenio templates
    (files named *_templates.py).

    base_dir - path to where templates should be recursively searched
    """
    # If base_dir is not specified we assume that this template.py
    # file is located in modules/webstyle/lib, which allows
    # us to guess where base Invenio modules dir is.
    # Note that by luck it also works if file is installed
    # in /lib/python/invenio/
    if base_dir is None:
        # Retrieve path to Invenio 'modules' dir
        this_pathname = os.path.abspath(sys.argv[0])
        #this_pathname = inspect.getsourcefile(get_default_templates)
        this_dir, this_name = os.path.split(this_pathname)
        base_dir = this_dir + os.sep + os.pardir + \
                   os.sep + os.pardir
    else:
        base_dir = os.path.abspath(base_dir)
    templates_path = []
    # Walk the tree collecting every *_templates.py file.
    for (dirpath, dirnames, filenames) in os.walk(base_dir):
        for filename in filenames:
            if filename.endswith("_templates.py"):
                templates_path.append(os.path.join(dirpath, filename))
    return templates_path
def get_custom_templates(default_templates_paths, base_dir=None):
    """
    Returns the paths to customized templates among the given list of
    templates paths, as (default_path, custom_path) pairs.

    base_dir - optional directory in which customized templates are searched
    """
    customized = []
    for default in default_templates_paths:
        # Look each custom template up once instead of twice: the original
        # comprehension called get_custom_template() both in the filter and
        # in the result expression, doubling the filesystem checks.
        custom = get_custom_template(default, base_dir)
        if custom is not None:
            customized.append((default, custom))
    return customized
def get_custom_template(default_template_path, base_dir=None):
    """
    Returns the path to the customized template of the default
    template given as parameter. Returns None if customized does not
    exist.
    """
    # Only the filename matters; the default template's directory is not
    # used when locating the customization.
    template_filename = os.path.split(default_template_path)[1]
    if base_dir is not None:
        skin_dir = os.path.abspath(base_dir)
    else:
        skin_dir = CFG_WEBSTYLE_PYLIBDIR + os.sep + "invenio"
    # foo_templates.py -> foo_templates_<skin>.py
    skin_filename = template_filename[:-3] + '_' + \
                    CFG_WEBSTYLE_TEMPLATE_SKIN + '.py'
    custom_path = skin_dir + os.sep + skin_filename
    if os.path.exists(custom_path):
        return custom_path
    return None
def print_messages(messages,
                   verbose=2):
    """
    Report errors and warnings to user.
    messages - list of tuples (type, message, template, function, line)
               where: - type : One of the strings:
                                 - 'E': Error
                                 - 'W': Warning
                                 - 'C': Comment
                      - message : The string message
                      - template : template name where message occurred
                      - function : function name where message occurred
                      - line : line number where message occurred
    verbose - int specifying the verbosity of the output.
                  0 - summary only
                  1 - summary + errors
                  2 - summary + errors + warnings
                  3 - summary + errors + warnings + comments
    """
    last_template = '' # Remember last considered template in order to
                       # print separator between templates
    for message in messages:
        # NOTE(review): 'F' (fatal?) is tested here but never produced by
        # check() — presumably reserved; confirm before relying on it.
        if message[0] == 'F' and verbose >= 0 or \
           message[0] == 'E' and verbose >= 1 or \
           message[0] == 'W' and verbose >= 2 or \
           message[0] == 'C' and verbose >= 3:
            # Print separator if we have moved to another template
            if last_template != message[2]:
                print "************* Template %s" % message[2]
                last_template = message[2]
            print '%s:%s:%s%s' % \
                  (message[0],
                   message[4],
                   # message[2].endswith('.py') and message[2][:-3] or \
                   # message[2],
                   message[3] and ("%s(): " % message[3]) or ' ',
                   message[1])
    # Print summary
    if verbose >= 0:
        nb_errors = len([message for message in messages \
                         if message[0] == 'E'])
        nb_warnings = len([message for message in messages \
                           if message[0] == 'W'])
        nb_comments = len([message for message in messages \
                           if message[0] == 'C'])
        # Any message at all (even a comment) counts as a failure.
        if len(messages) > 0:
            print '\nFAILED'
        else:
            print '\nOK'
        print "%i error%s, %i warning%s, %i comment%s." % \
              (nb_errors, nb_errors > 1 and 's' or '',
               nb_warnings, nb_warnings > 1 and 's' or '',
               nb_comments, nb_comments > 1 and 's' or '')
def usage(exitcode=1):
    """
    Print usage of the template checking utility and exit with *exitcode*.
    """
    print """Usage: python templates.py --check-custom-templates [options]
Options:
  -v, --verbose                Verbose level (0=min, 2=default, 3=max).
  -d, --default-templates-dir  path to a directory with the default
                               template(s) (default: Invenio install
                               dir if run from Invenio install dir, or
                               Invenio source if run from Invenio sources)
  -c, --custom-templates-dir   path to a directory with your custom
                               template(s) (default: Invenio install dir)
  -h, --help                   Prints this help
Check that your custom templates are synchronized with default Invenio templates.
Examples: $ python templates.py --check-custom-templates
          $ python templates.py --check-custom-templates -c~/webstyle_template_ithaca.py
"""
    sys.exit(exitcode)
# Command-line entry point: compare the installed custom templates against
# the default ones and report differences (see check()/print_messages()).
if __name__ == "__main__" and \
       '--check-custom-templates' in sys.argv:
    default_base_dir = None
    custom_base_dir = None
    verbose = 2
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hv:d:c:",
                                   ["help",
                                    "verbose=",
                                    "default-templates-dir=",
                                    "custom-templates-dir=",
                                    "check-custom-templates"])
    except getopt.GetoptError, err:
        usage(1)
    try:
        for opt in opts:
            if opt[0] in ["-h", "--help"]:
                usage()
            elif opt[0] in ["-v", "--verbose"]:
                # NOTE(review): getopt yields strings, so verbose stays a
                # str here while print_messages() compares it to ints —
                # works loosely under Python 2 comparison rules; verify.
                verbose = opt[1]
            elif opt[0] in ["-d", "--default-templates-dir"]:
                default_base_dir = opt[1]
            elif opt[0] in ["-c", "--custom-templates-dir"]:
                custom_base_dir = opt[1]
    except StandardError, e:
        usage(1)
    messages_ = check(default_base_dir, custom_base_dir)
    print_messages(messages_, verbose=verbose)
|
PRJosh/kernel_msm | refs/heads/caf_LA.BF.1.1.1_rb1.26 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys

# Make the perf trace helper library importable.
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import syscall_name

usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

# Optional single argument: a pid (numeric) or a comm name (anything else).
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

# Nested auto-vivifying dict: syscalls[comm][pid][syscall_id] -> count.
syscalls = autodict()
def trace_begin():
    # Called by perf before event processing starts.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf after the last event; emit the summary.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Per-event handler: count one syscall entry, honouring the optional
    # comm/pid filter given on the command line.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        # First occurrence: the autodict leaf is not an int yet.
        syscalls[common_comm][common_pid][id] = 1
# Print the per-comm/per-pid syscall histogram accumulated in `syscalls`.
# Python 2 idioms throughout: the trailing comma on each print suppresses
# the automatic newline (each format string embeds its own "\n"),
# iteritems() is the py2 dict iterator, and the sort key uses py2-only
# tuple-parameter unpacking in the lambda to order by count descending.
def print_syscall_totals():
    if for_comm is not None:
	    print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
	    print "\nsyscall events by comm/pid:\n\n",
    # Header row followed by a separator line.
    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "----------"),
    comm_keys = syscalls.keys()
    for comm in comm_keys:
	    pid_keys = syscalls[comm].keys()
	    for pid in pid_keys:
		    print "\n%s [%d]\n" % (comm, pid),
		    id_keys = syscalls[comm][pid].keys()
		    # Sort by (count, id) descending so hottest syscalls lead.
		    for id, val in sorted(syscalls[comm][pid].iteritems(), \
			    key = lambda(k, v): (v, k), reverse = True):
			    print " %-38s %10d\n" % (syscall_name(id), val),
|
nesdis/djongo | refs/heads/master | tests/django_tests/tests/v21/tests/check_framework/urls/include_contains_tuple.py | 94 | from django.urls import include, path
# Fixture for Django's system-check framework tests: include() is
# deliberately handed a list containing a plain 2-tuple instead of
# path()/re_path() objects (per the filename, include_contains_tuple),
# so the urlconf check has a malformed pattern to flag.  Do not "fix"
# the tuple here.
urlpatterns = [
    path('', include([(r'^tuple/$', lambda x: x)])),
]
|
mj10777/QGIS | refs/heads/master | tests/code_layout/acceptable_missing_doc.py | 10 | # -*- coding: utf-8 -*-
"""
***************************************************************************
acceptable_missing_doc.py
---------------------
Date : March 2016
Copyright : (C) 2016 by Stéphane Brunner
Email : stephane dot brunner at camptocamp dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Stéphane Brunner'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Stéphane Brunner'
# NOTE(review): everything below duplicates the header above — a second
# coding cookie (ineffective here as a mid-file comment), a bare string
# describing the module, and a second set of dunder assignments.  The
# later assignments win, so __date__ and __copyright__ take the values
# below at import time.
# -*- coding: utf-8 -*-
"""
The list of acceptable documentation missing
"""
__author__ = 'Stéphane Brunner'
__date__ = '18/03/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
ACCEPTABLE_MISSING_DOCS = {
"CharacterWidget": ['updateFontMerging(bool enable)'],
"CloughTocherInterpolator": ['setTriangulation(NormVecDecorator *tin)'],
"DualEdgeTriangulation": ['DualEdgeTriangulation(int nop, Triangulation *decorator)', 'setDecorator(Triangulation *d)'],
"HalfEdge": ['HalfEdge(int dual, int next, int point, bool mbreak, bool forced)'],
"NormVecDecorator": ['NormVecDecorator(Triangulation *tin)'],
"ParametricLine": ['add(ParametricLine *pl)=0', 'calcFirstDer(float t, Vector3D *v)=0', 'calcPoint(float t, QgsPoint *p)=0', 'calcSecDer(float t, Vector3D *v)=0', 'changeDirection()=0', 'getControlPoint(int number) const =0', 'getControlPoly() const =0', 'getDegree() const =0', 'getParent() const =0', 'remove(int i)=0', 'setControlPoly(QVector< QgsPoint *> *cp)=0', 'setParent(ParametricLine *paral)=0'],
"QgisInterface": ['actionAbout()=0', 'actionAddAllToOverview()=0', 'actionAddFeature()=0', 'actionAddOgrLayer()=0', 'actionAddPart()=0', 'actionAddPgLayer()=0', 'actionAddRasterLayer()=0', 'actionAddRing()=0', 'actionAddToOverview()=0', 'actionAddWmsLayer()=0', 'actionAllEdits()=0', 'actionCancelAllEdits()=0', 'actionCancelEdits()=0', 'actionCheckQgisVersion()=0', 'actionCopyFeatures()=0', 'actionCopyLayerStyle()=0', 'actionCustomProjection()=0', 'actionCutFeatures()=0', 'actionDeletePart()=0', 'actionDeleteRing()=0', 'actionDeleteSelected()=0', 'actionDuplicateLayer()=0', 'actionExit()=0', 'actionHelpContents()=0', 'actionHideAllLayers()=0', 'actionHideSelectedLayers()=0', 'actionLayerProperties()=0', 'actionLayerSaveAs()=0', 'actionManagePlugins()=0', 'actionMoveFeature()=0', 'actionNewProject()=0', 'actionNewVectorLayer()=0', 'actionOpenFieldCalculator()=0', 'actionOpenProject()=0', 'actionOpenTable()=0', 'actionOptions()=0', 'actionPasteFeatures()=0', 'actionPasteLayerStyle()=0', 'actionPluginListSeparator()=0', 'actionProjectProperties()=0', 'actionQgisHomePage()=0', 'actionRemoveAllFromOverview()=0', 'actionRollbackAllEdits()=0', 'actionRollbackEdits()=0', 'actionSaveActiveLayerEdits()=0', 'actionSaveAllEdits()=0', 'actionSaveEdits()=0', 'actionSaveMapAsImage()=0', 'actionSaveProject()=0', 'actionSaveProjectAs()=0', 'actionShowAllLayers()=0', 'actionShowPythonDialog()=0', 'actionShowSelectedLayers()=0', 'actionSimplifyFeature()=0', 'actionSplitFeatures()=0', 'actionSplitParts()=0', 'actionToggleEditing()=0', 'actionToggleFullScreen()=0', 'actionVertexTool()=0', 'advancedDigitizeToolBar()=0', 'attributesToolBar()=0', 'databaseMenu()=0', 'databaseToolBar()=0', 'digitizeToolBar()=0', 'editMenu()=0', 'fileToolBar()=0', 'firstRightStandardMenu()=0', 'helpMenu()=0', 'helpToolBar()=0', 'layerMenu()=0', 'layerToolBar()=0', 'layerTreeView()=0', 'mapNavToolToolBar()=0', 'newLayerMenu()=0', 'pluginManagerInterface()=0', 'pluginMenu()=0', 'pluginToolBar()=0', 
'rasterMenu()=0', 'rasterToolBar()=0', 'settingsMenu()=0', 'shapeDigitizeToolBar()=0', 'vectorMenu()=0', 'vectorToolBar()=0', 'viewMenu()=0', 'webMenu()=0', 'webToolBar()=0', 'windowMenu()=0'],
"QgisPlugin": ['name()'],
"QgisVisitor": ['QgisVisitor(QList< QgsFeatureId > &list)'],
"QgsAbstractFeatureIteratorFromSource": ['QgsAbstractFeatureIteratorFromSource(T *source, bool ownSource, const QgsFeatureRequest &request)'],
"QgsAbstractFeatureSource": ['iteratorClosed(QgsAbstractFeatureIterator *it)', 'iteratorOpened(QgsAbstractFeatureIterator *it)'],
"QgsAbstractGeometry": ['QgsAbstractGeometry(const QgsAbstractGeometry &geom)'],
"QgsAbstractLabelProvider": ['Flag'],
"QgsAction": ['ActionType'],
"QgsActionMenu": ['ActionType', 'reinit()'],
"QgsAdvancedDigitizingCanvasItem": ['QgsAdvancedDigitizingCanvasItem(QgsMapCanvas *canvas, QgsAdvancedDigitizingDockWidget *cadDockWidget)'],
"QgsAlignRaster": ['gridOffset() const', 'setGridOffset(QPointF offset)'],
"QgsApplication": ['QgsApplication(int &argc, char **argv, bool GUIenabled, const QString &profileFolder=QString(), const QString &platformName="desktop")'],
"QgsAspectFilter": ['QgsAspectFilter(const QString &inputFile, const QString &outputFile, const QString &outputFormat)'],
"QgsAttributeDialog": ['attributeForm()', 'feature()'],
"QgsAttributeEditorContext": ['FormMode', 'QgsAttributeEditorContext(const QgsAttributeEditorContext &parentContext, FormMode formMode)', 'QgsAttributeEditorContext(const QgsAttributeEditorContext &parentContext, const QgsRelation &relation, RelationMode relationMode, FormMode widgetMode)', 'parentContext() const'],
"QgsAttributeEditorElement": ['AttributeEditorType'],
"QgsAttributeForm": ['QgsAttributeForm(QgsVectorLayer *vl, const QgsFeature &feature=QgsFeature(), const QgsAttributeEditorContext &context=QgsAttributeEditorContext(), QWidget *parent=nullptr)', 'feature()'],
"QgsAttributeFormInterface": ['QgsAttributeFormInterface(QgsAttributeForm *form)', 'acceptChanges(const QgsFeature &feature)', 'feature()', 'featureChanged()', 'form()', 'initForm()'],
"QgsAttributeFormLegacyInterface": ['QgsAttributeFormLegacyInterface(const QString &function, const QString &pyFormName, QgsAttributeForm *form)'],
"QgsAttributeTableAction": ['execute()', 'featureForm()'],
"QgsAttributeTableDelegate": ['setFeatureSelectionModel(QgsFeatureSelectionModel *featureSelectionModel)'],
"QgsAttributeTableFilterModel": ['fidToIndexList(QgsFeatureId fid)', 'mapFromMaster(const QModelIndex &sourceIndex) const', 'mapToMaster(const QModelIndex &proxyIndex) const'],
"QgsAttributeTableMapLayerAction": ['QgsAttributeTableMapLayerAction(const QString &name, QgsDualView *dualView, QgsMapLayerAction *action, const QModelIndex &fieldIdx)', 'execute()'],
"QgsAttributeTableModel": ['Role', 'finished()', 'idToIndex(QgsFeatureId id) const', 'idToIndexList(QgsFeatureId id) const'],
"QgsAttributeTableView": ['_q_selectRow(int row)', 'finished()', 'repaintRequested()', 'repaintRequested(const QModelIndexList &indexes)', 'selectRow(int row)', 'setModel(QgsAttributeTableFilterModel *filterModel)'],
"QgsAttributeTypeLoadDialog": ['QgsAttributeTypeLoadDialog(QgsVectorLayer *vl)'],
"QgsAuthCertInfo": ['trustCacheRebuilt()'],
"QgsAuthMethodConfig": ['setMethod(const QString &method)', 'setUri(const QString &uri)'],
"QgsBlurWidget": ['QgsBlurWidget(QWidget *parent=nullptr)', 'create()'],
"QgsBrightnessContrastFilter": ['QgsBrightnessContrastFilter(QgsRasterInterface *input=nullptr)', 'brightness() const', 'contrast() const', 'setBrightness(int brightness)', 'setContrast(int contrast)'],
"QgsBrowserModel": ['ItemDataRole', 'QgsBrowserModel(QObject *parent=nullptr)', 'beginInsertItems(QgsDataItem *parent, int first, int last)', 'beginRemoveItems(QgsDataItem *parent, int first, int last)', 'connectItem(QgsDataItem *item)', 'dataItem(const QModelIndex &idx) const', 'endInsertItems()', 'endRemoveItems()', 'findItem(QgsDataItem *item, QgsDataItem *parent=nullptr) const', 'itemDataChanged(QgsDataItem *item)', 'itemStateChanged(QgsDataItem *item, QgsDataItem::State oldState)', 'removeRootItems()', 'updateProjectHome()'],
"QgsBrowserTreeView": ['hasExpandedDescendant(const QModelIndex &index) const', 'setSettingsSection(const QString §ion)'],
"QgsBrowserWatcher": ['QgsBrowserWatcher(QgsDataItem *item)', 'finished(QgsDataItem *item, const QVector< QgsDataItem *> &items)', 'item() const'],
"QgsBrushStyleComboBox": ['QgsBrushStyleComboBox(QWidget *parent=nullptr)', 'brushStyle() const', 'iconForBrush(Qt::BrushStyle style)', 'setBrushStyle(Qt::BrushStyle style)'],
"QgsBusyIndicatorDialog": ['message() const', 'setMessage(const QString &message)'],
"QgsCacheIndexFeatureId": ['QgsCacheIndexFeatureId(QgsVectorLayerCache *)'],
"QgsCategorizedSymbolRenderer": ['QgsCategorizedSymbolRenderer(const QString &attrName=QString(), const QgsCategoryList &categories=QgsCategoryList())', 'addCategory(const QgsRendererCategory &category)', 'categories() const', 'classAttribute() const', 'deleteAllCategories()', 'deleteCategory(int catIndex)', 'rebuildHash()', 'setClassAttribute(const QString &attr)', 'sortByLabel(Qt::SortOrder order=Qt::AscendingOrder)', 'sortByValue(Qt::SortOrder order=Qt::AscendingOrder)', 'updateCategoryLabel(int catIndex, const QString &label)', 'updateCategorySymbol(int catIndex, QgsSymbol *symbol)', 'updateCategoryValue(int catIndex, const QVariant &value)'],
"QgsCategorizedSymbolRendererWidget": ['QgsCategorizedSymbolRendererWidget(QgsVectorLayer *layer, QgsStyle *style, QgsFeatureRenderer *renderer)', 'addCategories()', 'addCategory()', 'categoriesDoubleClicked(const QModelIndex &idx)', 'categoryColumnChanged(const QString &field)', 'changeCategorizedSymbol()', 'changeCategorySymbol()', 'create(QgsVectorLayer *layer, QgsStyle *style, QgsFeatureRenderer *renderer)', 'deleteAllCategories()', 'deleteCategories()', 'populateCategories()', 'rowsMoved()', 'selectedCategoryList()', 'showSymbolLevels()', 'updateCategorizedSymbolIcon()', 'updateUiFromRenderer()'],
"QgsCentroidFillSymbolLayer": ['create(const QgsStringMap &properties=QgsStringMap())', 'createFromSld(QDomElement &element)', 'pointOnSurface() const', 'setPointOnSurface(bool pointOnSurface)'],
"QgsCharacterSelectorDialog": ['QgsCharacterSelectorDialog(QWidget *parent=nullptr, Qt::WindowFlags fl=QgsGuiUtils::ModalDialogFlags)', 'selectCharacter(bool *gotChar, const QFont &font, const QString &style)'],
"QgsCheckBoxConfigDlg": ['QgsCheckBoxConfigDlg(QgsVectorLayer *vl, int fieldIdx, QWidget *parent=nullptr)'],
"QgsCheckboxWidgetFactory": ['QgsCheckboxWidgetFactory(const QString &name)'],
"QgsCheckboxWidgetWrapper": ['QgsCheckboxWidgetWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor=nullptr, QWidget *parent=nullptr)'],
"QgsClassificationWidgetWrapper": ['QgsClassificationWidgetWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor=nullptr, QWidget *parent=nullptr)'],
"QgsClassificationWidgetWrapperFactory": ['QgsClassificationWidgetWrapperFactory(const QString &name)'],
"QgsClipToMinMaxEnhancement": ['QgsClipToMinMaxEnhancement(Qgis::DataType, double, double)'],
"QgsClipper": ['trimPolygon(QPolygonF &pts, const QgsRectangle &clipRect)'],
"QgsCodeEditor": ['foldingVisible()', 'getMonospaceFont()', 'isFixedPitch(const QFont &font)', 'marginVisible()'],
"QgsCollapsibleGroupBox": ['QgsCollapsibleGroupBox(QWidget *parent=nullptr, QgsSettings *settings=nullptr)', 'QgsCollapsibleGroupBox(const QString &title, QWidget *parent=nullptr, QgsSettings *settings=nullptr)', 'init()', 'saveCheckedState()', 'saveCollapsedState()', 'saveKey() const', 'setSettings(QgsSettings *settings)'],
"QgsCollapsibleGroupBoxBasic": ['QgsCollapsibleGroupBoxBasic(QWidget *parent=nullptr)', 'QgsCollapsibleGroupBoxBasic(const QString &title, QWidget *parent=nullptr)', 'checkClicked(bool ckd)', 'checkToggled(bool ckd)', 'clearModifiers()', 'init()', 'titleRect() const', 'toggleCollapsed()', 'updateStyle()'],
"QgsColorBrewerPalette": ['listSchemeColors(const QString &schemeName, int colors)', 'listSchemeVariants(const QString &schemeName)', 'listSchemes()'],
"QgsColorEffectWidget": ['QgsColorEffectWidget(QWidget *parent=nullptr)', 'create()'],
"QgsColorSwatchDelegate": ['QgsColorSwatchDelegate(QWidget *parent=nullptr)'],
"QgsColorWidgetFactory": ['QgsColorWidgetFactory(const QString &name)'],
"QgsColorWidgetWrapper": ['QgsColorWidgetWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor=nullptr, QWidget *parent=nullptr)'],
"QgsCompoundCurve": ['QgsCompoundCurve(const QgsCompoundCurve &curve)'],
"QgsConditionalLayerStyles": ['rowStyles()'],
"QgsConditionalStyle": ['QgsConditionalStyle(const QString &rule)', 'QgsConditionalStyle(const QgsConditionalStyle &other)'],
"QgsConnectionPoolGroup": ['QgsConnectionPoolGroup(const QString &ci)', 'initTimer(QObject *parent)', 'invalidateConnections()', 'onConnectionExpired()', 'release(T conn)'],
"QgsConstWkbPtr": ['QgsConstWkbPtr(const unsigned char *p, int size)', 'operator const unsigned char *() const', 'operator+=(int n)', 'operator-=(int n)', 'operator>>(char &v) const', 'operator>>(double &v) const', 'operator>>(float &r) const', 'operator>>(int &v) const', 'operator>>(unsigned int &v) const'],
"QgsContrastEnhancement": ['QgsContrastEnhancement(Qgis::DataType datatype=Qgis::Byte)', 'QgsContrastEnhancement(const QgsContrastEnhancement &ce)', 'contrastEnhancementAlgorithm() const', 'readXml(const QDomElement &elem)', 'writeXml(QDomDocument &doc, QDomElement &parentElem) const'],
"QgsContrastEnhancementFunction": ['QgsContrastEnhancementFunction(Qgis::DataType, double, double)', 'QgsContrastEnhancementFunction(const QgsContrastEnhancementFunction &f)'],
"QgsCptCityAllRampsItem": ['QgsCptCityAllRampsItem(QgsCptCityDataItem *parent, const QString &name, const QVector< QgsCptCityDataItem *> &items)'],
"QgsCptCityArchive": ['QgsCptCityArchive(const QString &archiveName=DEFAULT_CPTCITY_ARCHIVE, const QString &baseDir=QString())', 'archiveName() const', 'archiveRegistry()', 'baseDir() const', 'baseDir(QString archiveName)', 'clearArchives()', 'copyingFileName(const QString &dirName) const', 'copyingInfo(const QString &fileName)', 'defaultArchive()', 'defaultBaseDir()', 'descFileName(const QString &dirName) const', 'description(const QString &fileName)', 'findFileName(const QString &target, const QString &startDir, const QString &baseDir)', 'initArchive(const QString &archiveName, const QString &archiveBaseDir)', 'initArchives(bool loadAll=false)', 'initDefaultArchive()', 'isEmpty()', 'rootItems() const', 'selectionItems() const', 'setBaseDir(const QString &dirName)'],
"QgsCptCityBrowserModel": ['QgsCptCityBrowserModel(QObject *parent=nullptr, QgsCptCityArchive *archive=QgsCptCityArchive::defaultArchive(), ViewType Type=Authors)', 'ViewType', 'addRootItems()', 'beginInsertItems(QgsCptCityDataItem *parent, int first, int last)', 'beginRemoveItems(QgsCptCityDataItem *parent, int first, int last)', 'connectItem(QgsCptCityDataItem *item)', 'endInsertItems()', 'endRemoveItems()', 'findItem(QgsCptCityDataItem *item, QgsCptCityDataItem *parent=nullptr) const', 'refresh(const QModelIndex &index=QModelIndex())', 'refresh(const QString &path)', 'reload()', 'removeRootItems()'],
"QgsCptCityCollectionItem": ['QgsCptCityCollectionItem(QgsCptCityDataItem *parent, const QString &name, const QString &path)', 'addChild(QgsCptCityDataItem *item)', 'childrenRamps(bool recursive)', 'setPopulated()'],
"QgsCptCityColorRamp": ['cloneGradientRamp() const', 'copy(const QgsCptCityColorRamp *other)', 'copyingFileName() const', 'copyingInfo() const', 'create(const QgsStringMap &properties=QgsStringMap())', 'descFileName() const', 'fileLoaded() const', 'fileName() const', 'hasMultiStops() const', 'loadFile()', 'loadPalette()', 'schemeName() const', 'setName(const QString &schemeName, const QString &variantName=QString(), const QStringList &variantList=QStringList())', 'setSchemeName(const QString &schemeName)', 'setVariantList(const QStringList &variantList)', 'setVariantName(const QString &variantName)', 'variantList() const', 'variantName() const'],
"QgsCptCityColorRampItem": ['QgsCptCityColorRampItem(QgsCptCityDataItem *parent, const QString &name, const QString &path, const QString &variantName=QString(), bool initialize=false)', 'QgsCptCityColorRampItem(QgsCptCityDataItem *parent, const QString &name, const QString &path, const QStringList &variantList, bool initialize=false)', 'init()', 'ramp() const'],
"QgsCptCityDataItem": ['QgsCptCityDataItem(QgsCptCityDataItem::Type type, QgsCptCityDataItem *parent, const QString &name, const QString &path)', 'Type', 'acceptDrop()', 'actions()', 'addChildItem(QgsCptCityDataItem *child, bool refresh=false)', 'beginInsertItems(QgsCptCityDataItem *parent, int first, int last)', 'beginRemoveItems(QgsCptCityDataItem *parent, int first, int last)', 'children() const', 'createChildren()', 'deleteChildItem(QgsCptCityDataItem *child)', 'endInsertItems()', 'endRemoveItems()', 'equal(const QgsCptCityDataItem *other)', 'findItem(QVector< QgsCptCityDataItem *> items, QgsCptCityDataItem *item)', 'handleDrop(const QMimeData *, Qt::DropAction)', 'hasChildren()', 'icon()', 'icon(QSize size)', 'info() const', 'isPopulated()', 'isValid()', 'leafCount() const', 'name() const', 'paramWidget()', 'parent() const', 'path() const', 'populate()', 'refresh()', 'removeChildItem(QgsCptCityDataItem *child)', 'rowCount()', 'setIcon(const QIcon &icon)', 'setParent(QgsCptCityDataItem *parent)', 'setToolTip(const QString &msg)', 'shortInfo() const', 'toolTip() const', 'type() const'],
"QgsCptCityDirectoryItem": ['QgsCptCityDirectoryItem(QgsCptCityDataItem *parent, const QString &name, const QString &path)', 'dataItem(QgsCptCityDataItem *parent, const QString &name, const QString &path)', 'dirEntries() const', 'rampsMap()'],
"QgsCptCitySelectionItem": ['QgsCptCitySelectionItem(QgsCptCityDataItem *parent, const QString &name, const QString &path)', 'parseXml()', 'selectionsList() const'],
"QgsCredentials": ['get(const QString &realm, QString &username, QString &password, const QString &message=QString())', 'getMasterPassword(QString &password, bool stored=false)', 'put(const QString &realm, const QString &username, const QString &password)'],
"QgsCurvePolygon": ['QgsCurvePolygon(const QgsCurvePolygon &p)', 'exteriorRing() const', 'interiorRing(int i) const', 'numInteriorRings() const'],
"QgsDartMeasurement": ['QgsDartMeasurement(const QString &name, Type type, const QString &value)', 'Type', 'send() const', 'toString() const'],
"QgsDashSpaceDialog": ['dashDotVector() const'],
"QgsDataCollectionItem": ['QgsDataCollectionItem(QgsDataItem *parent, const QString &name, const QString &path=QString())', 'addChild(QgsDataItem *item)', 'iconDataCollection()', 'iconDir()'],
"QgsDataDefinedRotationDialog": ['QgsDataDefinedRotationDialog(const QList< QgsSymbol *> &symbolList, QgsVectorLayer *layer)'],
"QgsDataDefinedSizeDialog": ['QgsDataDefinedSizeDialog(const QList< QgsSymbol *> &symbolList, QgsVectorLayer *layer)'],
"QgsDataDefinedValueDialog": ['dataDefinedChanged()'],
"QgsDataDefinedWidthDialog": ['QgsDataDefinedWidthDialog(const QList< QgsSymbol *> &symbolList, QgsVectorLayer *layer)'],
"QgsDataItem": ['Capability', 'State', 'Type', 'beginInsertItems(QgsDataItem *parent, int first, int last)', 'beginRemoveItems(QgsDataItem *parent, int first, int last)', 'capabilities2() const', 'children() const', 'childrenCreated()', 'dataChanged(QgsDataItem *item)', 'deleteLater(QVector< QgsDataItem *> &items)', 'endInsertItems()', 'endRemoveItems()', 'findItem(QVector< QgsDataItem *> items, QgsDataItem *item)', 'hasChildren()', 'icon()', 'paramWidget()', 'path() const', 'populate(bool foreground=false)', 'populate(const QVector< QgsDataItem *> &children)', 'refresh()', 'rowCount()', 'setIcon(const QIcon &icon)', 'setIconName(const QString &iconName)', 'setPath(const QString &path)', 'setToolTip(const QString &msg)', 'stateChanged(QgsDataItem *item, QgsDataItem::State oldState)', 'toolTip() const', 'type() const'],
"QgsDataProvider": ['DataCapability'],
"QgsDataSourceUri": ['SslMode'],
"QgsDateTimeEdit": ['allowNull() const'],
"QgsDateTimeEditConfig": ['QgsDateTimeEditConfig(QgsVectorLayer *vl, int fieldIdx, QWidget *parent=nullptr)'],
"QgsDateTimeEditFactory": ['QgsDateTimeEditFactory(const QString &name)'],
"QgsDateTimeEditWrapper": ['QgsDateTimeEditWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor, QWidget *parent=nullptr)'],
"QgsDefaultRasterLayerLegend": ['QgsDefaultRasterLayerLegend(QgsRasterLayer *rl)'],
"QgsDefaultVectorLayerLegend": ['QgsDefaultVectorLayerLegend(QgsVectorLayer *vl)'],
"QgsDerivativeFilter": ['QgsDerivativeFilter(const QString &inputFile, const QString &outputFile, const QString &outputFormat)'],
"QgsDetailedItemData": ['category() const', 'detail() const', 'icon() const', 'isCheckable() const', 'isChecked() const', 'isEnabled() const', 'isRenderedAsWidget() const', 'setCategory(const QString &category)', 'setCheckable(const bool flag)', 'setChecked(const bool flag)', 'setDetail(const QString &detail)', 'setEnabled(bool flag)', 'setIcon(const QPixmap &icon)', 'setTitle(const QString &title)', 'title() const'],
"QgsDetailedItemDelegate": ['horizontalSpacing() const', 'setHorizontalSpacing(int value)', 'setVerticalSpacing(int value)', 'verticalSpacing() const'],
"QgsDetailedItemWidget": ['setChecked(bool flag)', 'setData(const QgsDetailedItemData &data)'],
"QgsDiagram": ['QgsDiagram(const QgsDiagram &other)', 'clearCache()'],
"QgsDiagramLayerSettings": ['Placement'],
"QgsDiagramRenderer": ['QgsDiagramRenderer(const QgsDiagramRenderer &other)', 'diagram() const', 'rendererName() const =0', 'setDiagram(QgsDiagram *d)'],
"QgsDiagramSettings": ['LabelPlacementMethod'],
"QgsDial": ['setMaximum(const QVariant &max)', 'setMinimum(const QVariant &min)', 'setSingleStep(const QVariant &step)', 'setValue(const QVariant &value)', 'valueChanged(const QVariant &)', 'variantValue() const'],
"QgsDirectoryItem": ['Column', 'QgsDirectoryItem(QgsDataItem *parent, const QString &name, const QString &path)', 'dirPath() const', 'directoryChanged()', 'init()'],
"QgsDirectoryParamWidget": ['QgsDirectoryParamWidget(const QString &path, QWidget *parent=nullptr)', 'showHideColumn()'],
"QgsDrawSourceWidget": ['QgsDrawSourceWidget(QWidget *parent=nullptr)', 'create()'],
"QgsDualView": ['openConditionalStyles()'],
"QgsDummyConfigDlg": ['QgsDummyConfigDlg(QgsVectorLayer *vl, int fieldIdx, QWidget *parent, const QString &description)'],
"QgsDxfExport": ['QgsDxfExport(const QgsDxfExport &dxfExport)', 'SymbologyExport'],
"QgsDxfPaintDevice": ['QgsDxfPaintDevice(QgsDxfExport *dxf)', 'setDrawingSize(QSizeF size)', 'setLayer(const QString &layer)', 'setOutputSize(const QRectF &r)', 'setShift(QPointF shift)'],
"QgsDxfPaintEngine": ['QgsDxfPaintEngine(const QgsDxfPaintDevice *dxfDevice, QgsDxfExport *dxf)', 'layer() const', 'setLayer(const QString &layer)', 'setShift(QPointF shift)'],
"QgsEditorWidgetRegistry": ['createSearchWidget(const QString &widgetId, QgsVectorLayer *vl, int fieldIdx, const QVariantMap &config, QWidget *parent, const QgsAttributeEditorContext &context=QgsAttributeEditorContext())'],
"QgsEffectDrawModeComboBox": ['QgsEffectDrawModeComboBox(QWidget *parent SIP_TRANSFERTHIS=nullptr)'],
"QgsEffectStack": ['QgsEffectStack(const QgsEffectStack &other)'],
"QgsEllipseSymbolLayer": ['create(const QgsStringMap &properties=QgsStringMap())', 'createFromSld(QDomElement &element)', 'setStrokeStyle(Qt::PenStyle strokeStyle)', 'setStrokeWidth(double w)', 'setStrokeWidthMapUnitScale(const QgsMapUnitScale &scale)', 'setSymbolHeight(double h)', 'setSymbolHeightMapUnitScale(const QgsMapUnitScale &scale)', 'setSymbolName(const QString &name)', 'setSymbolWidth(double w)', 'setSymbolWidthMapUnitScale(const QgsMapUnitScale &scale)', 'strokeStyle() const', 'strokeWidth() const', 'strokeWidthMapUnitScale() const', 'symbolHeight() const', 'symbolHeightMapUnitScale() const', 'symbolName() const', 'symbolWidth() const', 'symbolWidthMapUnitScale() const'],
"QgsEncodingFileDialog": ['pbnCancelAll_clicked()', 'saveUsedEncoding()'],
"QgsEnumerationWidgetFactory": ['QgsEnumerationWidgetFactory(const QString &name)'],
"QgsEnumerationWidgetWrapper": ['QgsEnumerationWidgetWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor=nullptr, QWidget *parent=nullptr)'],
"QgsErrorItem": ['QgsErrorItem(QgsDataItem *parent, const QString &error, const QString &path)'],
"QgsErrorMessage": ['file() const', 'function() const', 'line() const', 'message() const', 'tag() const'],
"QgsExpression": ['BuiltinFunctions()', 'Functions()', 'SpatialOperator'],
"QgsExpressionBuilderDialog": ['QgsExpressionBuilderDialog(QgsVectorLayer *layer, const QString &startText=QString(), QWidget *parent SIP_TRANSFERTHIS=nullptr, const QString &key="generic", const QgsExpressionContext &context=QgsExpressionContext())', 'expressionText()', 'setExpressionText(const QString &text)'],
"QgsExpressionBuilderWidget": ['isExpressionValid()', 'loadFieldNames(const QgsFields &fields)'],
"QgsExpressionFieldBuffer": ['expressions() const'],
"QgsExpressionHighlighter": ['QgsExpressionHighlighter(QTextDocument *parent=nullptr)', 'addFields(const QStringList &fieldList)'],
"QgsExpressionItem": ['ItemType', 'QgsExpressionItem(const QString &label, const QString &expressionText, QgsExpressionItem::ItemType itemType=ExpressionNode)', 'QgsExpressionItem(const QString &label, const QString &expressionText, const QString &helpText, QgsExpressionItem::ItemType itemType=ExpressionNode)', 'getExpressionText() const'],
"QgsExternalResourceWidget": ['DocumentViewerContent', 'setDocumentPath(const QVariant &documentPath)'],
"QgsExternalResourceWidgetFactory": ['QgsExternalResourceWidgetFactory(const QString &name)'],
"QgsExternalResourceWidgetWrapper": ['QgsExternalResourceWidgetWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor=nullptr, QWidget *parent=nullptr)'],
"QgsFeatureIterator": ['close()', 'nextFeature(QgsFeature &f)', 'rewind()'],
"QgsFeatureIteratorDataStream": ['readNextEntry()'],
"QgsFeatureListModel": ['Role', 'displayExpression() const', 'featureByIndex(const QModelIndex &index, QgsFeature &feat)', 'fidToIdx(const QgsFeatureId fid) const', 'fidToIndexList(QgsFeatureId fid)', 'idxToFid(const QModelIndex &index) const', 'layerCache()', 'mapFromMaster(const QModelIndex &sourceIndex) const', 'mapSelectionFromMaster(const QItemSelection &selection) const', 'mapSelectionToMaster(const QItemSelection &selection) const', 'mapToMaster(const QModelIndex &proxyIndex) const', 'masterModel()', 'setSourceModel(QgsAttributeTableFilterModel *sourceModel)'],
"QgsFeatureListView": ['repaintRequested()', 'repaintRequested(const QModelIndexList &indexes)'],
"QgsFeatureListViewDelegate": ['Element', 'QgsFeatureListViewDelegate(QgsFeatureListModel *listModel, QObject *parent=nullptr)', 'editButtonClicked(QModelIndex &index)', 'positionToElement(QPoint pos)', 'setCurrentFeatureEdited(bool state)', 'setEditSelectionModel(QItemSelectionModel *editSelectionModel)', 'setFeatureSelectionModel(QgsFeatureSelectionModel *featureSelectionModel)'],
"QgsFeatureModel": ['fidToIndex(QgsFeatureId fid)=0'],
"QgsFeatureRenderer": ['QgsFeatureRenderer(const QString &type)', 'setUsingSymbolLevels(bool usingSymbolLevels)', 'type() const', 'usingSymbolLevels() const'],
"QgsFeatureRequest": ['Flag', 'flags() const'],
"QgsFeatureSelectionModel": ['QgsFeatureSelectionModel(QAbstractItemModel *model, QgsFeatureModel *featureModel, QgsIFeatureSelectionManager *featureSelectionHandler, QObject *parent)', 'setFeatureSelectionManager(QgsIFeatureSelectionManager *featureSelectionManager)'],
"QgsFieldComboBox": ['indexChanged(int i)'],
"QgsFieldExpressionWidget": ['currentFieldChanged()', 'isExpressionValid(const QString &expressionStr)', 'setLeftHandButtonStyle(bool isLeft)'],
"QgsFieldValidator": ['QgsFieldValidator(QObject *parent, const QgsField &field, const QString &defaultValue, const QString &dateFormat="yyyy-MM-dd")', 'dateFormat() const'],
"QgsFields": ['FieldOrigin'],
"QgsFillSymbol": ['QgsFillSymbol(const QgsSymbolLayerList &layers=QgsSymbolLayerList())', 'renderPolygon(const QPolygonF &points, QList< QPolygonF > *rings, const QgsFeature *f, QgsRenderContext &context, int layer=-1, bool selected=false)', 'setAngle(double angle)'],
"QgsFillSymbolLayer": ['QgsFillSymbolLayer(bool locked=false)', 'angle() const', 'renderPolygon(const QPolygonF &points, QList< QPolygonF > *rings, QgsSymbolRenderContext &context)=0', 'setAngle(double angle)'],
"QgsFontMarkerSymbolLayer": ['QgsFontMarkerSymbolLayer(const QString &fontFamily=DEFAULT_FONTMARKER_FONT, QChar chr=DEFAULT_FONTMARKER_CHR, double pointSize=DEFAULT_FONTMARKER_SIZE, const QColor &color=DEFAULT_FONTMARKER_COLOR, double angle=DEFAULT_FONTMARKER_ANGLE)', 'character() const', 'create(const QgsStringMap &properties=QgsStringMap())', 'createFromSld(QDomElement &element)', 'fontFamily() const', 'setCharacter(QChar ch)', 'setFontFamily(const QString &family)'],
"QgsFontMarkerSymbolLayerWidget": ['setAngle(double angle)', 'setCharacter(QChar chr)', 'setColor(const QColor &color)', 'setFontFamily(const QFont &font)', 'setSize(double size)'],
"QgsGenericFeatureSelectionManager": ['QgsGenericFeatureSelectionManager(QObject *parent=nullptr)', 'QgsGenericFeatureSelectionManager(const QgsFeatureIds &initialSelection, QObject *parent=nullptr)'],
"QgsGeometry::Error": ['Error(const QString &m)', 'Error(const QString &m, const QgsPointXY &p)', 'hasWhere()', 'what()', 'where()'],
"QgsGeometryCollection": ['QgsGeometryCollection(const QgsGeometryCollection &c)'],
"QgsGeometryEngine": ['QgsGeometryEngine(const QgsAbstractGeometry *geometry)', 'area(QString *errorMsg=nullptr) const =0', 'buffer(double distance, int segments, QString *errorMsg=nullptr) const =0', 'buffer(double distance, int segments, int endCapStyle, int joinStyle, double miterLimit, QString *errorMsg=nullptr) const =0', 'envelope(QString *errorMsg=nullptr) const =0', 'interpolate(double distance, QString *errorMsg=nullptr) const =0', 'isEmpty(QString *errorMsg) const =0', 'isValid(QString *errorMsg=nullptr) const =0', 'length(QString *errorMsg=nullptr) const =0', 'offsetCurve(double distance, int segments, int joinStyle, double miterLimit, QString *errorMsg=nullptr) const =0', 'simplify(double tolerance, QString *errorMsg=nullptr) const =0'],
"QgsGeometryGeneratorSymbolLayer": ['create(const QgsStringMap &properties)'],
"QgsGeometryRubberBand": ['IconType', 'QgsGeometryRubberBand(QgsMapCanvas *mapCanvas, QgsWkbTypes::GeometryType geomType=QgsWkbTypes::LineGeometry)'],
"QgsGeometryValidator": ['addError(const QgsGeometry::Error &)', 'errorFound(const QgsGeometry::Error &)', 'stop()'],
"QgsGeos": ['coordSeqPoint(const GEOSCoordSequence *cs, int i, bool hasZ, bool hasM)', 'fromGeosPolygon(const GEOSGeometry *geos)', 'getGEOSHandler()'],
"QgsGlowEffect": ['QgsGlowEffect(const QgsGlowEffect &other)'],
"QgsGlowWidget": ['QgsGlowWidget(QWidget *parent=nullptr)', 'create()'],
"QgsGml": ['QgsGml(const QString &typeName, const QString &geometryAttribute, const QgsFields &fields)', 'dataReadProgress(int progress)', 'totalStepsUpdate(int totalSteps)'],
"QgsGmlFeatureClass": ['QgsGmlFeatureClass(const QString &name, const QString &path)', 'fieldIndex(const QString &name)', 'fields()', 'geometryAttributes()', 'path() const'],
"QgsGpsConnection": ['Status', 'nmeaSentenceReceived(const QString &substring)', 'stateChanged(const QgsGpsInformation &info)'],
"QgsGpsConnectionRegistry": ['connectionList() const'],
"QgsGpsDetector": ['QgsGpsDetector(const QString &portName)', 'advance()', 'availablePorts()', 'connDestroyed(QObject *)', 'detected(QgsGpsConnection *)', 'detected(const QgsGpsInformation &)', 'detectionFailed()'],
"QgsGpsdConnection": ['QgsGpsdConnection(const QString &host, qint16 port, const QString &device)'],
"QgsGradientFillSymbolLayer": ['GradientColorType', 'GradientCoordinateMode', 'GradientSpread', 'GradientType', 'QgsGradientFillSymbolLayer(const QColor &color=DEFAULT_SIMPLEFILL_COLOR, const QColor &color2=Qt::white, GradientColorType gradientColorType=SimpleTwoColor, GradientType gradientType=Linear, GradientCoordinateMode coordinateMode=Feature, GradientSpread gradientSpread=Pad)', 'create(const QgsStringMap &properties=QgsStringMap())', 'offset() const', 'offsetMapUnitScale() const', 'offsetUnit() const', 'referencePoint1() const', 'referencePoint1IsCentroid() const', 'referencePoint2() const', 'referencePoint2IsCentroid() const', 'setColor2(const QColor &color2)', 'setCoordinateMode(GradientCoordinateMode coordinateMode)', 'setGradientColorType(GradientColorType gradientColorType)', 'setGradientSpread(GradientSpread gradientSpread)', 'setGradientType(GradientType gradientType)', 'setOffsetMapUnitScale(const QgsMapUnitScale &scale)'],
"QgsGradientFillSymbolLayerWidget": ['setColor(const QColor &color)', 'setColor2(const QColor &color)', 'setCoordinateMode(int index)', 'setGradientSpread(int index)', 'setGradientType(int index)'],
"QgsGraduatedSymbolRenderer": ['GraduatedMethod', 'Mode', 'QgsGraduatedSymbolRenderer(const QString &attrName=QString(), const QgsRangeList &ranges=QgsRangeList())', 'addClass(QgsSymbol *symbol)', 'classAttribute() const', 'deleteAllClasses()', 'deleteClass(int idx)', 'mode() const', 'ranges() const', 'setClassAttribute(const QString &attr)', 'setMode(Mode mode)', 'sortByLabel(Qt::SortOrder order=Qt::AscendingOrder)', 'sortByValue(Qt::SortOrder order=Qt::AscendingOrder)', 'updateRangeLabel(int rangeIndex, const QString &label)', 'updateRangeLowerValue(int rangeIndex, double value)', 'updateRangeSymbol(int rangeIndex, QgsSymbol *symbol)', 'updateRangeUpperValue(int rangeIndex, double value)'],
"QgsGraduatedSymbolRendererWidget": ['QgsGraduatedSymbolRendererWidget(QgsVectorLayer *layer, QgsStyle *style, QgsFeatureRenderer *renderer)', 'changeCurrentValue(QStandardItem *item)', 'changeGraduatedSymbol()', 'changeRange(int rangeIdx)', 'changeRangeSymbol(int rangeIdx)', 'changeSelectedSymbols()', 'classifyGraduated()', 'connectUpdateHandlers()', 'create(QgsVectorLayer *layer, QgsStyle *style, QgsFeatureRenderer *renderer)', 'disconnectUpdateHandlers()', 'findSymbolForRange(double lowerBound, double upperBound, const QgsRangeList &ranges) const', 'graduatedColumnChanged(const QString &field)', 'labelFormatChanged()', 'modelDataChanged()', 'rangesClicked(const QModelIndex &idx)', 'rangesDoubleClicked(const QModelIndex &idx)', 'reapplyColorRamp()', 'reapplySizes()', 'refreshRanges(bool reset=false)', 'rowsMoved()', 'rowsOrdered()', 'selectedRanges()', 'showSymbolLevels()', 'updateGraduatedSymbolIcon()', 'updateUiFromRenderer(bool updateCount=true)'],
"QgsGroupBoxCollapseButton": ['QgsGroupBoxCollapseButton(QWidget *parent=nullptr)', 'altDown() const', 'setAltDown(bool updown)', 'setShiftDown(bool shiftdown)', 'shiftDown() const'],
"QgsHeatmapRenderer": ['convertFromRenderer(const QgsFeatureRenderer *renderer)'],
"QgsHiddenWidgetFactory": ['QgsHiddenWidgetFactory(const QString &name)'],
"QgsHiddenWidgetWrapper": ['QgsHiddenWidgetWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor=nullptr, QWidget *parent=nullptr)'],
"QgsHillshadeFilter": ['QgsHillshadeFilter(const QString &inputFile, const QString &outputFile, const QString &outputFormat, double lightAzimuth=300, double lightAngle=40)', 'lightAngle() const', 'lightAzimuth() const', 'setLightAngle(float angle)', 'setLightAzimuth(float azimuth)'],
"QgsHueSaturationFilter": ['GrayscaleMode', 'QgsHueSaturationFilter(QgsRasterInterface *input=nullptr)', 'colorizeColor() const', 'colorizeOn() const', 'colorizeStrength() const', 'grayscaleMode() const', 'saturation() const', 'setColorizeColor(const QColor &colorizeColor)', 'setColorizeOn(bool colorizeOn)', 'setColorizeStrength(int colorizeStrength)', 'setGrayscaleMode(QgsHueSaturationFilter::GrayscaleMode grayscaleMode)', 'setSaturation(int saturation)'],
"QgsIFeatureSelectionManager": ['QgsIFeatureSelectionManager(QObject *parent)'],
"QgsIdentifyMenu": ['MenuLevel', 'allowMultipleReturn()', 'execWithSingleResult()', 'maxFeatureDisplay()', 'maxLayerDisplay()', 'resultsIfExternalAction()', 'showFeatureActions()'],
"QgsImageFillSymbolLayer": ['applyDataDefinedSettings(QgsSymbolRenderContext &context)', 'setStrokeWidthMapUnitScale(const QgsMapUnitScale &scale)', 'strokeWidthMapUnitScale() const'],
"QgsInterpolator": ['QgsInterpolator(const QList< QgsInterpolator::LayerData > &layerData)'],
"QgsLUDialog": ['QgsLUDialog(QWidget *parent=nullptr, Qt::WindowFlags fl=QgsGuiUtils::ModalDialogFlags)', 'lowerValue() const', 'setLowerValue(const QString &val)', 'setUpperValue(const QString &val)', 'upperValue() const'],
"QgsLabelCandidate": ['QgsLabelCandidate(const QRectF &r, double c)'],
"QgsLabelPosition": ['QgsLabelPosition(QgsFeatureId id, double r, const QVector< QgsPointXY > &corners, const QgsRectangle &rect, double w, double h, const QString &layer, const QString &labeltext, const QFont &labelfont, bool upside_down, bool diagram=false, bool pinned=false, const QString &providerId=QString())'],
"QgsLabelSorter": ['QgsLabelSorter(const QgsMapSettings &mapSettings)', 'operator()(pal::LabelPosition *lp1, pal::LabelPosition *lp2) const'],
"QgsLabelingEngine": ['processProvider(QgsAbstractLabelProvider *provider, QgsRenderContext &context, pal::Pal &p)'],
"QgsLayerItem": ['LayerType', 'QgsLayerItem(QgsDataItem *parent, const QString &name, const QString &path, const QString &uri, LayerType layerType, const QString &providerKey)', 'iconDefault()', 'iconLine()', 'iconPoint()', 'iconPolygon()', 'iconRaster()', 'iconTable()'],
"QgsLayerPropertiesWidget": ['changeLayer(QgsSymbolLayer *)', 'changed()', 'emitSignalChanged()', 'layerTypeChanged()', 'populateLayerTypes()', 'updateSymbolLayerWidget(QgsSymbolLayer *layer)'],
"QgsLayerTreeGroup": ['QgsLayerTreeGroup(const QgsLayerTreeGroup &other)', 'nodeVisibilityChanged(QgsLayerTreeNode *node)'],
"QgsLayerTreeLayer": ['QgsLayerTreeLayer(QgsMapLayer *layer)', 'QgsLayerTreeLayer(const QgsLayerTreeLayer &other)', 'attachToLayer()', 'layer() const', 'layerId() const'],
"QgsLayerTreeMapCanvasBridge": ['autoSetupOnFirstLayer() const', 'mapCanvas() const', 'rootGroup() const'],
"QgsLayerTreeModel": ['Flag', 'addLegendToLayer(QgsLayerTreeLayer *nodeL)', 'connectToLayer(QgsLayerTreeLayer *nodeLayer)', 'connectToLayers(QgsLayerTreeGroup *parentGroup)', 'connectToRootNode()', 'disconnectFromLayer(QgsLayerTreeLayer *nodeLayer)', 'disconnectFromLayers(QgsLayerTreeGroup *parentGroup)', 'disconnectFromRootNode()', 'iconGroup()', 'indexOfParentLayerTreeNode(QgsLayerTreeNode *parentNode) const', 'invalidateLegendMapBasedData()', 'layerLegendChanged()', 'layerNeedsUpdate()', 'legendCleanup()', 'legendEmbeddedInParent(QgsLayerTreeLayer *nodeLayer) const', 'legendIconEmbeddedInParent(QgsLayerTreeLayer *nodeLayer) const', 'legendInvalidateMapBasedData()', 'legendNodeData(QgsLayerTreeModelLegendNode *node, int role) const', 'legendNodeDataChanged()', 'legendNodeFlags(QgsLayerTreeModelLegendNode *node) const', 'legendNodeIndex(int row, int column, QgsLayerTreeModelLegendNode *node) const', 'legendNodeRowCount(QgsLayerTreeModelLegendNode *node) const', 'legendParent(QgsLayerTreeModelLegendNode *legendNode) const', 'legendRootIndex(int row, int column, QgsLayerTreeLayer *nL) const', 'legendRootRowCount(QgsLayerTreeLayer *nL) const', 'nodeAddedChildren(QgsLayerTreeNode *node, int indexFrom, int indexTo)', 'nodeCustomPropertyChanged(QgsLayerTreeNode *node, const QString &key)', 'nodeLayerLoaded()', 'nodeLayerWillBeUnloaded()', 'nodeRemovedChildren()', 'nodeVisibilityChanged(QgsLayerTreeNode *node)', 'nodeWillAddChildren(QgsLayerTreeNode *node, int indexFrom, int indexTo)', 'nodeWillRemoveChildren(QgsLayerTreeNode *node, int indexFrom, int indexTo)', 'removeLegendFromLayer(QgsLayerTreeLayer *nodeLayer)'],
"QgsLayerTreeModelLegendNode": ['LegendNodeRoles', 'isEmbeddedInParent() const', 'isScaleOK(double scale) const', 'setEmbeddedInParent(bool embedded)', 'setUserLabel(const QString &userLabel)', 'userLabel() const'],
"QgsLayerTreeNode": ['QgsLayerTreeNode(const QgsLayerTreeNode &other)'],
"QgsLayerTreeRegistryBridge": ['groupRemovedChildren()', 'groupWillRemoveChildren(QgsLayerTreeNode *node, int indexFrom, int indexTo)', 'isEnabled() const', 'layersAdded(const QList< QgsMapLayer *> &layers)', 'layersWillBeRemoved(const QStringList &layerIds)', 'newLayersVisible() const', 'removeLayersFromRegistry(const QStringList &layerIds)', 'setEnabled(bool enabled)', 'setNewLayersVisible(bool enabled)'],
"QgsLayerTreeView": ['layerForIndex(const QModelIndex &index) const', 'modelRowsInserted(const QModelIndex &index, int start, int end)', 'modelRowsRemoved()', 'onCurrentChanged()', 'onExpandedChanged(QgsLayerTreeNode *node, bool expanded)', 'onModelReset()', 'updateExpandedStateFromNode(QgsLayerTreeNode *node)', 'updateExpandedStateToNode(const QModelIndex &index)'],
"QgsLayerTreeViewDefaultActions": ['QgsLayerTreeViewDefaultActions(QgsLayerTreeView *view)', 'actionAddGroup(QObject *parent=nullptr)', 'actionGroupSelected(QObject *parent=nullptr)', 'actionRemoveGroupOrLayer(QObject *parent=nullptr)', 'actionRenameGroupOrLayer(QObject *parent=nullptr)', 'actionShowFeatureCount(QObject *parent=nullptr)', 'actionShowInOverview(QObject *parent=nullptr)', 'actionZoomToGroup(QgsMapCanvas *canvas, QObject *parent=nullptr)', 'actionZoomToLayer(QgsMapCanvas *canvas, QObject *parent=nullptr)', 'addGroup()', 'groupSelected()', 'removeGroupOrLayer()', 'renameGroupOrLayer()', 'showFeatureCount()', 'showInOverview()', 'uniqueGroupName(QgsLayerTreeGroup *parentGroup)', 'zoomToGroup()', 'zoomToGroup(QgsMapCanvas *canvas)', 'zoomToLayer()', 'zoomToLayer(QgsMapCanvas *canvas)', 'zoomToLayers(QgsMapCanvas *canvas, const QList< QgsMapLayer *> &layers)'],
"QgsLegendRenderer": ['nodeLegendStyle(QgsLayerTreeNode *node, QgsLayerTreeModel *model)', 'setNodeLegendStyle(QgsLayerTreeNode *node, QgsLegendStyle::Style style)'],
"QgsLegendSettings": ['boxSpace() const', 'columnCount() const', 'columnSpace() const', 'dpi() const', 'equalColumnWidth() const', 'fontColor() const', 'lineSpacing() const', 'mmPerMapUnit() const', 'setBoxSpace(double s)', 'setColumnCount(int c)', 'setColumnSpace(double s)', 'setDpi(int dpi)', 'setEqualColumnWidth(bool s)', 'setFontColor(const QColor &c)', 'setLineSpacing(double s)', 'setMmPerMapUnit(double mmPerMapUnit)', 'setSplitLayer(bool s)', 'setStyle(QgsLegendStyle::Style s, const QgsLegendStyle &style)', 'setSymbolSize(QSizeF s)', 'setTitle(const QString &t)', 'setUseAdvancedEffects(bool use)', 'setWmsLegendSize(QSizeF s)', 'setWrapChar(const QString &t)', 'splitLayer() const', 'symbolSize() const', 'title() const', 'useAdvancedEffects() const', 'wmsLegendSize() const', 'wrapChar() const'],
"QgsLegendStyle": ['Style', 'margin(Side side)', 'readXml(const QDomElement &elem, const QDomDocument &doc)', 'setMargin(Side side, double margin)', 'writeXml(const QString &name, QDomElement &elem, QDomDocument &doc) const'],
"QgsLegendSymbolItem": ['QgsLegendSymbolItem(const QgsLegendSymbolItem &other)'],
"QgsLinePatternFillSymbolLayer": ['create(const QgsStringMap &properties=QgsStringMap())', 'createFromSld(QDomElement &element)', 'distanceMapUnitScale() const', 'lineAngle() const', 'lineWidth() const', 'lineWidthMapUnitScale() const', 'offset() const', 'offsetMapUnitScale() const', 'ogrFeatureStyleWidth(double widthScaleFactor) const', 'setDistanceMapUnitScale(const QgsMapUnitScale &scale)', 'setLineAngle(double a)', 'setLineWidth(double w)', 'setLineWidthMapUnitScale(const QgsMapUnitScale &scale)', 'setOffset(double offset)', 'setOffsetMapUnitScale(const QgsMapUnitScale &scale)'],
"QgsLineSymbol": ['QgsLineSymbol(const QgsSymbolLayerList &layers=QgsSymbolLayerList())', 'renderPolyline(const QPolygonF &points, const QgsFeature *f, QgsRenderContext &context, int layer=-1, bool selected=false)', 'setWidth(double width)', 'width() const'],
"QgsLineSymbolLayer": ['QgsLineSymbolLayer(bool locked=false)', 'offset() const', 'offsetMapUnitScale() const', 'renderPolygonStroke(const QPolygonF &points, QList< QPolygonF > *rings, QgsSymbolRenderContext &context)', 'renderPolyline(const QPolygonF &points, QgsSymbolRenderContext &context)=0', 'setOffset(double offset)', 'setOffsetMapUnitScale(const QgsMapUnitScale &scale)', 'setWidth(double width)', 'setWidthMapUnitScale(const QgsMapUnitScale &scale)', 'width() const', 'widthMapUnitScale() const'],
"QgsLinearMinMaxEnhancement": ['QgsLinearMinMaxEnhancement(Qgis::DataType, double, double)'],
"QgsLinearMinMaxEnhancementWithClip": ['QgsLinearMinMaxEnhancementWithClip(Qgis::DataType, double, double)'],
"QgsLinearlyInterpolatedDiagramRenderer": ['classificationAttributeExpression() const', 'classificationAttributeIsExpression() const', 'lowerSize() const', 'lowerValue() const', 'setClassificationAttributeExpression(const QString &expression)', 'setClassificationAttributeIsExpression(bool isExpression)', 'setDiagramSettings(const QgsDiagramSettings &s)', 'setLowerSize(QSizeF s)', 'setLowerValue(double val)', 'setUpperSize(QSizeF s)', 'setUpperValue(double val)', 'upperSize() const', 'upperValue() const'],
"QgsLongLongValidator": ['QgsLongLongValidator(QObject *parent)', 'QgsLongLongValidator(qint64 bottom, qint64 top, QObject *parent)', 'bottom() const', 'setBottom(qint64 bottom)', 'setRange(qint64 bottom, qint64 top)', 'setTop(qint64 top)', 'top() const'],
"QgsManageConnectionsDialog": ['Mode', 'Type', 'clearSelection()', 'doExportImport()', 'selectAll()', 'selectionChanged()'],
"QgsMapCanvas": ['clearExtentHistory()', 'setCurrentLayer(QgsMapLayer *layer)'],
"QgsMapCanvasSnappingUtils": ['QgsMapCanvasSnappingUtils(QgsMapCanvas *canvas, QObject *parent=nullptr)'],
"QgsMapLayer": ['readSld(const QDomNode &node, QString &errorMessage)'],
"QgsMapLayerAction": ['Target'],
"QgsMapLayerComboBox": ['indexChanged(int i)', 'rowsChanged()'],
"QgsMapLayerLegendUtils": ['hasLegendNodeOrder(QgsLayerTreeLayer *nodeLayer)', 'hasLegendNodeUserLabel(QgsLayerTreeLayer *nodeLayer, int originalIndex)', 'legendNodeOrder(QgsLayerTreeLayer *nodeLayer)', 'legendNodeUserLabel(QgsLayerTreeLayer *nodeLayer, int originalIndex)', 'setLegendNodeOrder(QgsLayerTreeLayer *nodeLayer, const QList< int > &order)', 'setLegendNodeUserLabel(QgsLayerTreeLayer *nodeLayer, int originalIndex, const QString &newLabel)'],
"QgsMapLayerModel": ['addLayers(const QList< QgsMapLayer *> &layers)', 'removeLayers(const QStringList &layerIds)'],
"QgsMapLayerProxyModel": ['Filter', 'filters() const'],
"QgsMapLayerRenderer": ['QgsMapLayerRenderer(const QString &layerID)'],
"QgsMapOverviewCanvas": ['QgsMapOverviewCanvas(QWidget *parent=nullptr, QgsMapCanvas *mapCanvas=nullptr)', 'enableAntiAliasing(bool flag)', 'mapRenderingFinished()', 'updateFullExtent()'],
"QgsMapRendererCustomPainterJob": ['QgsMapRendererCustomPainterJob(const QgsMapSettings &settings, QPainter *painter)'],
"QgsMapRendererJob": ['QgsMapRendererJob(const QgsMapSettings &settings)'],
"QgsMapRendererParallelJob": ['QgsMapRendererParallelJob(const QgsMapSettings &settings)'],
"QgsMapRendererQImageJob": ['QgsMapRendererQImageJob(const QgsMapSettings &settings)'],
"QgsMapRendererSequentialJob": ['QgsMapRendererSequentialJob(const QgsMapSettings &settings)', 'internalFinished()'],
"QgsMapSettings": ['mapToPixel() const', 'readXml(QDomNode &node)', 'updateDerived()', 'writeXml(QDomNode &node, QDomDocument &doc)'],
"QgsMapToPixel": ['toMapCoordinates(int x, int y) const', 'toMapPoint(double x, double y) const', 'transform() const', 'transform(QgsPointXY *p) const'],
"QgsMapToolAdvancedDigitizing": ['cadDockWidget() const'],
"QgsMapToolEdit": ['QgsMapToolEdit(QgsMapCanvas *canvas)', 'createGeometryRubberBand(QgsWkbTypes::GeometryType geometryType=QgsWkbTypes::LineGeometry, bool alternativeBand=false) const'],
"QgsMapToolIdentify": ['IdentifyMode', 'Type', 'changedRasterResults(QList< QgsMapToolIdentify::IdentifyResult > &)', 'formatChanged(QgsRasterLayer *layer)', 'identifyMessage(const QString &)', 'identifyProgress(int, int)', 'identifyRasterLayer(QList< QgsMapToolIdentify::IdentifyResult > *results, QgsRasterLayer *layer, QgsPointXY point, const QgsRectangle &viewExtent, double mapUnitsPerPixel)', 'identifyVectorLayer(QList< QgsMapToolIdentify::IdentifyResult > *results, QgsVectorLayer *layer, const QgsPointXY &point)'],
"QgsMapToolIdentifyFeature": ['featureIdentified(QgsFeatureId)', 'featureIdentified(const QgsFeature &)'],
"QgsMarkerLineSymbolLayer": ['QgsMarkerLineSymbolLayer(bool rotateMarker=DEFAULT_MARKERLINE_ROTATE, double interval=DEFAULT_MARKERLINE_INTERVAL)', 'intervalMapUnitScale() const', 'markerAngle(const QPolygonF &points, bool isRing, int vertex)', 'renderPolylineCentral(const QPolygonF &points, QgsSymbolRenderContext &context)', 'renderPolylineInterval(const QPolygonF &points, QgsSymbolRenderContext &context)', 'renderPolylineVertex(const QPolygonF &points, QgsSymbolRenderContext &context, Placement placement=Vertex)', 'setIntervalMapUnitScale(const QgsMapUnitScale &scale)'],
"QgsMarkerLineSymbolLayerWidget": ['setInterval(double val)', 'setOffsetAlongLine(double val)'],
"QgsMarkerSymbol": ['QgsMarkerSymbol(const QgsSymbolLayerList &layers=QgsSymbolLayerList())', 'renderPoint(QPointF point, const QgsFeature *f, QgsRenderContext &context, int layer=-1, bool selected=false)', 'scaleMethod()', 'setScaleMethod(QgsSymbol::ScaleMethod scaleMethod)'],
"QgsMasterPasswordResetDialog": ['QgsMasterPasswordResetDialog(QWidget *parent=nullptr)', 'requestMasterPasswordReset(QString *newpass, QString *oldpass, bool *keepbackup)'],
"QgsMessageBar": ['currentItem()'],
"QgsMessageBarItem": ['setDuration(int duration)', 'setIcon(const QIcon &icon)', 'setLevel(Qgis::MessageLevel level)', 'setText(const QString &text)', 'setTitle(const QString &title)', 'setWidget(QWidget *widget)'],
"QgsMessageLog": ['messageReceived(bool received)', 'messageReceived(const QString &message, const QString &tag, Qgis::MessageLevel level)'],
"QgsMessageLogConsole": ['logMessage(const QString &message, const QString &tag, Qgis::MessageLevel level)'],
"QgsMessageViewer": ['QgsMessageViewer(QWidget *parent=nullptr, Qt::WindowFlags fl=QgsGuiUtils::ModalDialogFlags, bool deleteOnClose=true)', 'checkBoxState()', 'setCheckBoxQgsSettingsLabel(const QString &label)', 'setCheckBoxState(Qt::CheckState state)', 'setCheckBoxText(const QString &text)', 'setCheckBoxVisible(bool visible)', 'setMessageAsHtml(const QString &msg)', 'setMessageAsPlainText(const QString &msg)'],
"QgsMimeDataUtils": ['decodeUriList(const QMimeData *data)', 'encodeUriList(const UriList &layers)', 'isUriList(const QMimeData *data)'],
"QgsMultiBandColorRenderer": ['QgsMultiBandColorRenderer(QgsRasterInterface *input, int redBand, int greenBand, int blueBand, QgsContrastEnhancement *redEnhancement=nullptr, QgsContrastEnhancement *greenEnhancement=nullptr, QgsContrastEnhancement *blueEnhancement=nullptr)', 'blueBand() const', 'blueContrastEnhancement() const', 'create(const QDomElement &elem, QgsRasterInterface *input)', 'greenBand() const', 'greenContrastEnhancement() const', 'redBand() const', 'redContrastEnhancement() const', 'setBlueBand(int band)', 'setGreenBand(int band)', 'setRedBand(int band)'],
"QgsMultiBandColorRendererWidget": ['QgsMultiBandColorRendererWidget(QgsRasterLayer *layer, const QgsRectangle &extent=QgsRectangle())', 'create(QgsRasterLayer *layer, const QgsRectangle &extent)', 'setFromRenderer(const QgsRasterRenderer *r)'],
"QgsMultiRenderChecker": ['setControlPathPrefix(const QString &prefix)'],
"QgsNetworkAccessManager": ['QgsNetworkAccessManager(QObject *parent=nullptr)', 'requestAboutToBeCreated(QNetworkAccessManager::Operation, const QNetworkRequest &, QIODevice *)', 'requestCreated(QNetworkReply *)', 'requestTimedOut(QNetworkReply *)'],
"QgsNewMemoryLayerDialog": ['QgsNewMemoryLayerDialog(QWidget *parent=nullptr, Qt::WindowFlags fl=QgsGuiUtils::ModalDialogFlags)'],
"QgsNewNameDialog": ['fullNames(const QString &name, const QStringList &extensions)', 'highlightText(const QString &text)', 'matching(const QStringList &newNames, const QStringList &existingNames, Qt::CaseSensitivity cs=Qt::CaseSensitive)', 'nameChanged()'],
"QgsNewVectorLayerDialog": ['QgsNewVectorLayerDialog(QWidget *parent=nullptr, Qt::WindowFlags fl=QgsGuiUtils::ModalDialogFlags)'],
"QgsNineCellFilter": ['cellSizeX() const', 'cellSizeY() const', 'inputNodataValue() const', 'outputNodataValue() const', 'setCellSizeX(double size)', 'setCellSizeY(double size)', 'setInputNodataValue(double value)', 'setOutputNodataValue(double value)', 'setZFactor(double factor)', 'zFactor() const'],
"QgsOWSSourceSelect": ['addWmsListItem(const QDomElement &el, int row, int column)', 'addWmsListRow(const QDomElement &item, int row)', 'enableLayersForCrs(QTreeWidgetItem *item)'],
"QgsOfflineEditing": ['ProgressMode'],
"QgsOptionsDialogBase": ['setSettings(QgsSettings *settings)', 'updateWindowTitle()', 'warnAboutMissingObjects()'],
"QgsPaintEffect": ['QgsPaintEffect(const QgsPaintEffect &other)'],
"QgsPaintEffectWidget": ['QgsPaintEffectWidget(QWidget *parent=nullptr)'],
"QgsPaintEngineHack": ['fixEngineFlags(QPaintEngine *engine)', 'fixFlags()'],
"QgsPalLayerSettings": ['DirectionSymbols', 'MultiLineAlign', 'QgsPalLayerSettings(const QgsPalLayerSettings &s)', 'QuadrantPosition', 'UpsideDownLabels', 'calculateLabelSize(const QFontMetricsF *fm, QString text, double &labelX, double &labelY, QgsFeature *f=nullptr, QgsRenderContext *context=nullptr)'],
"QgsPalettedRasterRenderer": ['create(const QDomElement &elem, QgsRasterInterface *input)'],
"QgsPalettedRendererWidget": ['QgsPalettedRendererWidget(QgsRasterLayer *layer, const QgsRectangle &extent=QgsRectangle())', 'create(QgsRasterLayer *layer, const QgsRectangle &extent)', 'setFromRenderer(const QgsRasterRenderer *r)'],
"QgsPenCapStyleComboBox": ['QgsPenCapStyleComboBox(QWidget *parent=nullptr)', 'penCapStyle() const', 'setPenCapStyle(Qt::PenCapStyle style)'],
"QgsPenJoinStyleComboBox": ['QgsPenJoinStyleComboBox(QWidget *parent=nullptr)', 'penJoinStyle() const', 'setPenJoinStyle(Qt::PenJoinStyle style)'],
"QgsPenStyleComboBox": ['QgsPenStyleComboBox(QWidget *parent=nullptr)', 'iconForPen(Qt::PenStyle style)', 'penStyle() const', 'setPenStyle(Qt::PenStyle style)'],
"QgsPixmapLabel": ['setPixmap(const QPixmap &)'],
"QgsPluginLayer": ['QgsPluginLayer(const QString &layerType, const QString &layerName=QString())'],
"QgsPluginLayerType": ['QgsPluginLayerType(const QString &name)', 'name()'],
"QgsPointDisplacementRendererWidget": ['QgsPointDisplacementRendererWidget(QgsVectorLayer *layer, QgsStyle *style, QgsFeatureRenderer *renderer)', 'create(QgsVectorLayer *layer, QgsStyle *style, QgsFeatureRenderer *renderer)'],
"QgsPointLocator": ['destroyIndex()', 'rebuildIndex(int maxFeaturesToIndex=-1)'],
"QgsPointLocator_Stream": ['QgsPointLocator_Stream(const QLinkedList< RTree::Data *> &dataList)'],
"QgsPointLocator_VisitorEdgesInRect": ['QgsPointLocator_VisitorEdgesInRect(QgsPointLocator *pl, QgsPointLocator::MatchList &lst, const QgsRectangle &srcRect, QgsPointLocator::MatchFilter *filter=nullptr)'],
"QgsPointLocator_VisitorNearestEdge": ['QgsPointLocator_VisitorNearestEdge(QgsPointLocator *pl, QgsPointLocator::Match &m, const QgsPointXY &srcPoint, QgsPointLocator::MatchFilter *filter=nullptr)'],
"QgsPointLocator_VisitorNearestVertex": ['QgsPointLocator_VisitorNearestVertex(QgsPointLocator *pl, QgsPointLocator::Match &m, const QgsPointXY &srcPoint, QgsPointLocator::MatchFilter *filter=nullptr)'],
"QgsPointPatternFillSymbolLayer": ['create(const QgsStringMap &properties=QgsStringMap())', 'createFromSld(QDomElement &element)', 'displacementX() const', 'displacementXMapUnitScale() const', 'displacementY() const', 'displacementYMapUnitScale() const', 'distanceX() const', 'distanceXMapUnitScale() const', 'distanceY() const', 'distanceYMapUnitScale() const', 'setDisplacementX(double d)', 'setDisplacementXMapUnitScale(const QgsMapUnitScale &scale)', 'setDisplacementY(double d)', 'setDisplacementYMapUnitScale(const QgsMapUnitScale &scale)', 'setDistanceX(double d)', 'setDistanceXMapUnitScale(const QgsMapUnitScale &scale)', 'setDistanceY(double d)', 'setDistanceYMapUnitScale(const QgsMapUnitScale &scale)'],
"QgsPreviewEffect": ['PreviewMode', 'QgsPreviewEffect(QObject *parent)'],
"QgsProject": ['readBoolEntry(const QString &scope, const QString &key, bool def=false, bool *ok=nullptr) const', 'readDoubleEntry(const QString &scope, const QString &key, double def=0, bool *ok=nullptr) const', 'readEntry(const QString &scope, const QString &key, const QString &def=QString(), bool *ok=nullptr) const', 'readNumEntry(const QString &scope, const QString &key, int def=0, bool *ok=nullptr) const', 'relationManager() const'],
"QgsProjectFileTransform": ['convertRasterProperties(QDomDocument &doc, QDomNode &parentNode, QDomElement &rasterPropertiesElem, QgsRasterLayer *rlayer)', 'updateRevision(const QgsProjectVersion &version)'],
"QgsProjectVersion": ['QgsProjectVersion(const QString &string)', 'QgsProjectVersion(int major, int minor, int sub, const QString &name="")', 'majorVersion()', 'minorVersion()', 'subVersion()', 'text()'],
"QgsProviderMetadata": ['QgsProviderMetadata(const QString &_key, const QString &_description, const QString &_library)'],
"QgsProviderRegistry": ['registerGuis(QWidget *widget)'],
"QgsPythonRunner": ['evalCommand(QString command, QString &result)=0', 'runCommand(QString command, QString messageOnError=QString())=0'],
"QgsQueryBuilder": ['clear()', 'setDatasourceDescription(const QString &uri)', 'setSql(const QString &sqlStatement)', 'sql()'],
"QgsRangeConfigDlg": ['QgsRangeConfigDlg(QgsVectorLayer *vl, int fieldIdx, QWidget *parent)', 'rangeWidgetChanged(int index)'],
"QgsRangeWidgetFactory": ['QgsRangeWidgetFactory(const QString &name)'],
"QgsRangeWidgetWrapper": ['QgsRangeWidgetWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor, QWidget *parent=nullptr)'],
"QgsRaster": ['ColorInterpretation', 'IdentifyFormat', 'RasterBuildPyramids', 'RasterProgressType', 'RasterPyramidsFormat'],
"QgsRasterBandStats": ['Stats'],
"QgsRasterBlock": ['applyNoDataValues(const QgsRasterRangeList &rangeList)', 'dataTypeSize() const', 'toString() const', 'typeSize(int dataType)'],
"QgsRasterCalcNode": ['QgsRasterCalcNode(Operator op, QgsRasterCalcNode *left, QgsRasterCalcNode *right)', 'QgsRasterCalcNode(QgsRasterMatrix *matrix)', 'QgsRasterCalcNode(const QString &rasterName)', 'QgsRasterCalcNode(double number)', 'parseRasterCalcString(const QString &str, QString &parserErrorMsg)', 'setLeft(QgsRasterCalcNode *left)', 'setRight(QgsRasterCalcNode *right)', 'type() const'],
"QgsRasterChecker": ['report()'],
"QgsRasterDataProvider": ['QgsRasterDataProvider(const QString &uri)', 'colorInterpretationName(int bandNo) const', 'colorName(int colorInterpretation) const', 'colorTable(int bandNo) const', 'identifyFormatFromName(const QString &formatName)', 'identifyFormatLabel(QgsRaster::IdentifyFormat format)', 'identifyFormatName(QgsRaster::IdentifyFormat format)', 'identifyFormatToCapability(QgsRaster::IdentifyFormat format)', 'setUserNoDataValue(int bandNo, const QgsRasterRangeList &noData)'],
"QgsRasterDrawer": ['QgsRasterDrawer(QgsRasterIterator *iterator)'],
"QgsRasterFileWriter": ['Mode', 'QgsRasterFileWriter(const QString &outputUrl)', 'WriterError', 'buildPyramidsFlag() const', 'createOptions() const', 'maxTileHeight() const', 'maxTileWidth() const', 'outputFormat() const', 'outputProviderKey() const', 'pyramidsConfigOptions() const', 'pyramidsFormat() const', 'pyramidsList() const', 'pyramidsResampling() const', 'setBuildPyramidsFlag(QgsRaster::RasterBuildPyramids f)', 'setCreateOptions(const QStringList &list)', 'setMaxTileHeight(int h)', 'setMaxTileWidth(int w)', 'setOutputFormat(const QString &format)', 'setOutputProviderKey(const QString &key)', 'setPyramidsConfigOptions(const QStringList &list)', 'setPyramidsFormat(QgsRaster::RasterPyramidsFormat f)', 'setPyramidsList(const QList< int > &list)', 'setPyramidsResampling(const QString &str)', 'setTiledMode(bool t)', 'tiledMode() const'],
"QgsRasterFillSymbolLayer": ['FillCoordinateMode', 'QgsRasterFillSymbolLayer(const QString &imageFilePath=QString())', 'create(const QgsStringMap &properties=QgsStringMap())'],
"QgsRasterFormatSaveOptionsWidget": ['QgsRasterFormatSaveOptionsWidget(QWidget *parent SIP_TRANSFERTHIS=nullptr, const QString &format="GTiff", QgsRasterFormatSaveOptionsWidget::Type type=Default, const QString &provider="gdal")', 'Type', 'apply()', 'optionsChanged()'],
"QgsRasterInterface": ['QgsRasterInterface(QgsRasterInterface *input=nullptr)', 'dataTypeSize(int bandNo)', 'yBlockSize() const', 'ySize() const'],
"QgsRasterIterator": ['QgsRasterIterator(QgsRasterInterface *input)', 'input() const', 'maximumTileHeight() const', 'maximumTileWidth() const', 'setMaximumTileHeight(int h)', 'setMaximumTileWidth(int w)', 'stopRasterRead(int bandNumber)'],
"QgsRasterLayer": ['brightnessFilter() const', 'hueSaturationFilter() const', 'isValidRasterFileName(const QString &fileNameQString)', 'renderer() const', 'showStatusMessage(const QString &message)'],
"QgsRasterLayerRenderer": ['QgsRasterLayerRenderer(QgsRasterLayer *layer, QgsRenderContext &rendererContext)'],
"QgsRasterLayerSaveAsDialog": ['CrsState', 'Mode', 'ResolutionState', 'addToCanvas() const', 'buildPyramidsFlag() const', 'createOptions() const', 'hideFormat()', 'hideOutput()', 'maximumTileSizeX() const', 'maximumTileSizeY() const', 'mode() const', 'nColumns() const', 'nRows() const', 'noData() const', 'outputCrs()', 'outputFileName() const', 'outputFormat() const', 'outputRectangle() const', 'pyramidsConfigOptions() const', 'pyramidsFormat() const', 'pyramidsList() const', 'pyramidsResamplingMethod() const', 'tileMode() const', 'xResolution() const', 'yResolution() const'],
"QgsRasterMatrix": ['OneArgOperator', 'QgsRasterMatrix(const QgsRasterMatrix &m)', 'TwoArgOperator', 'acosinus()', 'asinus()', 'atangens()', 'changeSign()', 'cosinus()', 'divide(const QgsRasterMatrix &other)', 'equal(const QgsRasterMatrix &other)', 'greaterEqual(const QgsRasterMatrix &other)', 'greaterThan(const QgsRasterMatrix &other)', 'lesserEqual(const QgsRasterMatrix &other)', 'lesserThan(const QgsRasterMatrix &other)', 'log()', 'log10()', 'logicalAnd(const QgsRasterMatrix &other)', 'logicalOr(const QgsRasterMatrix &other)', 'multiply(const QgsRasterMatrix &other)', 'nColumns() const', 'nRows() const', 'nodataValue() const', 'notEqual(const QgsRasterMatrix &other)', 'number() const', 'power(const QgsRasterMatrix &other)', 'setData(int cols, int rows, double *data, double nodataValue)', 'setNodataValue(double d)', 'sinus()', 'squareRoot()', 'tangens()'],
"QgsRasterMinMaxWidget": ['setBands(const QList< int > &bands)'],
"QgsRasterNuller": ['QgsRasterNuller(QgsRasterInterface *input=nullptr)', 'noData(int bandNo) const', 'setNoData(int bandNo, const QgsRasterRangeList &noData)'],
"QgsRasterPipe": ['QgsRasterPipe(const QgsRasterPipe &pipe)', 'Role', 'at(int idx) const', 'brightnessFilter() const', 'hueSaturationFilter() const', 'last() const', 'nuller() const', 'projector() const', 'provider() const', 'renderer() const', 'resampleFilter() const', 'size() const'],
"QgsRasterProjector": ['precision() const', 'precisionLabel(Precision precision)', 'setPrecision(Precision precision)'],
"QgsRasterPyramidsOptionsWidget": ['apply()', 'checkAllLevels(bool checked)', 'configOptions() const', 'createOptionsWidget()', 'overviewList() const', 'overviewListChanged()', 'pyramidsFormat() const', 'resamplingMethod() const', 'setRasterFileName(const QString &file)', 'setRasterLayer(QgsRasterLayer *rasterLayer)', 'someValueChanged()'],
"QgsRasterRange": ['max() const', 'min() const', 'setMax(double max)', 'setMin(double min)'],
"QgsRasterRenderer": ['alphaBand() const', 'rasterTransparency() const', 'setAlphaBand(int band)', 'setRasterTransparency(QgsRasterTransparency *t)', 'type() const', 'usesTransparency() const'],
"QgsRasterRendererRegistry": ['entries() const', 'insert(const QgsRasterRendererRegistryEntry &entry)', 'insertWidgetFunction(const QString &rendererName, QgsRasterRendererWidgetCreateFunc func)', 'rendererData(const QString &rendererName, QgsRasterRendererRegistryEntry &data) const', 'renderersList() const'],
"QgsRasterRendererWidget": ['QgsRasterRendererWidget(QgsRasterLayer *layer, const QgsRectangle &extent)', 'max(int index=0)', 'min(int index=0)', 'rasterLayer() const', 'renderer()=0', 'selectedBand(int index=0)', 'setMax(const QString &value, int index=0)', 'setMin(const QString &value, int index=0)', 'setRasterLayer(QgsRasterLayer *layer)', 'setStdDev(const QString &value)', 'stdDev()'],
"QgsRasterResampleFilter": ['QgsRasterResampleFilter(QgsRasterInterface *input=nullptr)', 'maxOversampling() const', 'setMaxOversampling(double os)', 'zoomedInResampler() const', 'zoomedOutResampler() const'],
"QgsRasterResampler": ['resample(const QImage &srcImage, QImage &dstImage)=0'],
"QgsRasterShader": ['QgsRasterShader(double minimumValue=0.0, double maximumValue=255.0)', 'rasterShaderFunction()', 'rasterShaderFunction() const'],
"QgsRasterShaderFunction": ['QgsRasterShaderFunction(double minimumValue=0.0, double maximumValue=255.0)', 'legendSymbologyItems(QList< QPair< QString, QColor > > &symbolItems) const', 'minimumMaximumRange() const'],
"QgsRelationEditorWidget": ['setEditorContext(const QgsAttributeEditorContext &context)', 'setFeature(const QgsFeature &feature)', 'setRelationFeature(const QgsRelation &relation, const QgsFeature &feature)'],
"QgsRelationReferenceConfigDlg": ['QgsRelationReferenceConfigDlg(QgsVectorLayer *vl, int fieldIdx, QWidget *parent)'],
"QgsRelationReferenceFactory": ['QgsRelationReferenceFactory(const QString &name, QgsMapCanvas *canvas, QgsMessageBar *messageBar)'],
"QgsRelationReferenceWidget": ['CanvasExtent', 'QgsRelationReferenceWidget(QWidget *parent)', 'foreignKeyChanged(const QVariant &)', 'init()', 'setAllowMapIdentification(bool allowMapIdentification)', 'setEditorContext(const QgsAttributeEditorContext &context, QgsMapCanvas *canvas, QgsMessageBar *messageBar)', 'setEmbedForm(bool display)', 'setOpenFormButtonVisible(bool openFormButtonVisible)', 'setReadOnlySelector(bool readOnly)', 'setRelation(const QgsRelation &relation, bool allowNullValue)', 'setRelationEditable(bool editable)'],
"QgsRelief": ['QgsRelief(const QString &inputFile, const QString &outputFile, const QString &outputFormat)', 'addReliefColorClass(const QgsRelief::ReliefColor &color)', 'clearReliefColors()', 'reliefColors() const', 'setReliefColors(const QList< QgsRelief::ReliefColor > &c)', 'setZFactor(double factor)', 'zFactor() const'],
"QgsRenderChecker": ['controlImagePath() const', 'elapsedTime()', 'matchPercent()', 'matchTarget()', 'mismatchCount()', 'report()', 'setControlPathSuffix(const QString &name)', 'setElapsedTimeTarget(int target)', 'setRenderedImage(const QString &imageFileName)'],
"QgsRenderContext": ['QgsRenderContext(const QgsRenderContext &rh)', 'drawEditingInformation() const', 'extent() const', 'forceVectorOutput() const', 'mapToPixel() const', 'renderingStopped() const', 'selectionColor() const', 'setDrawEditingInformation(bool b)', 'setExtent(const QgsRectangle &extent)', 'setForceVectorOutput(bool force)', 'setMapToPixel(const QgsMapToPixel &mtp)', 'setRenderingStopped(bool stopped)', 'setSelectionColor(const QColor &color)', 'setUseRenderingOptimization(bool enabled)', 'setVectorSimplifyMethod(const QgsVectorSimplifyMethod &simplifyMethod)'],
"QgsRendererAbstractMetadata": ['QgsRendererAbstractMetadata(const QString &name, const QString &visibleName, const QIcon &icon=QIcon())', 'createRendererFromSld(QDomElement &elem, QgsWkbTypes::GeometryType geomType)', 'icon() const', 'name() const', 'setIcon(const QIcon &icon)', 'visibleName() const'],
"QgsRendererCategory": ['dump() const', 'label() const', 'setLabel(const QString &label)', 'setSymbol(QgsSymbol *s)', 'setValue(const QVariant &value)', 'swap(QgsRendererCategory &other)', 'symbol() const', 'toSld(QDomDocument &doc, QDomElement &element, QgsStringMap props) const', 'value() const'],
"QgsRendererRange": ['QgsRendererRange(const QgsRendererRange &range)', 'QgsRendererRange(double lowerValue, double upperValue, QgsSymbol *symbol, const QString &label, bool render=true)', 'dump() const', 'label() const', 'lowerValue() const', 'operator<(const QgsRendererRange &other) const', 'renderState() const', 'setLabel(const QString &label)', 'setLowerValue(double lowerValue)', 'setRenderState(bool render)', 'setSymbol(QgsSymbol *s)', 'setUpperValue(double upperValue)', 'swap(QgsRendererRange &other)', 'symbol() const', 'upperValue() const'],
"QgsRendererRangeLabelFormat": ['QgsRendererRangeLabelFormat(const QString &format, int precision=4, bool trimTrailingZeroes=false)', 'format() const', 'formatNumber(double value) const', 'labelForRange(const QgsRendererRange &range) const', 'precision() const', 'saveToDomElement(QDomElement &element)', 'setFormat(const QString &format)', 'setFromDomElement(QDomElement &element)', 'setPrecision(int precision)', 'setTrimTrailingZeroes(bool trimTrailingZeroes)', 'trimTrailingZeroes() const'],
"QgsRendererRulePropsDialog": ['buildExpression()', 'rule()', 'testFilter()'],
"QgsRendererWidget": ['QgsRendererWidget(QgsVectorLayer *layer, QgsStyle *style)', 'contextMenuViewCategories(QPoint p)', 'copy()', 'paste()', 'refreshSymbolView()'],
"QgsRuggednessFilter": ['QgsRuggednessFilter(const QString &inputFile, const QString &outputFile, const QString &outputFormat)'],
"QgsRuleBasedLabelProvider": ['QgsRuleBasedLabelProvider(const QgsRuleBasedLabeling &rules, QgsVectorLayer *layer, bool withFeatureLoop=true)'],
"QgsRuleBasedLabeling": ['rootRule()', 'rootRule() const'],
"QgsRuleBasedRenderer": ['FeatureFlags', 'createFromSld(QDomElement &element, QgsWkbTypes::GeometryType geomType)', 'rootRule()'],
"QgsRuleBasedRenderer::Rule": ['dependsOnScale() const', 'initFilter()', 'label() const', 'save(QDomDocument &doc, QgsSymbolMap &symbolMap) const', 'setLabel(const QString &label)', 'symbol()', 'toSld(QDomDocument &doc, QDomElement &element, QgsStringMap props) const'],
"QgsRuleBasedRendererModel": ['clearFeatureCounts()', 'finishedAddingRules()', 'insertRule(const QModelIndex &parent, int before, QgsRuleBasedRenderer::Rule *newrule)', 'removeRule(const QModelIndex &index)', 'ruleForIndex(const QModelIndex &index) const', 'updateRule(const QModelIndex &index)', 'updateRule(const QModelIndex &parent, int row)', 'willAddRules(const QModelIndex &parent, int count)'],
"QgsRuleBasedRendererWidget": ['QgsRuleBasedRendererWidget(QgsVectorLayer *layer, QgsStyle *style, QgsFeatureRenderer *renderer)', 'addRule()', 'clearFeatureCounts()', 'countFeatures()', 'create(QgsVectorLayer *layer, QgsStyle *style, QgsFeatureRenderer *renderer)', 'currentRule()', 'currentRuleChanged(const QModelIndex &current=QModelIndex(), const QModelIndex &previous=QModelIndex())', 'editRule()', 'editRule(const QModelIndex &index)', 'refineRule(int type)', 'refineRuleCategories()', 'refineRuleRanges()', 'refineRuleScales()', 'refineRuleScalesGui(const QModelIndexList &index)', 'removeRule()', 'restoreSectionWidths()', 'saveSectionWidth(int section, int oldSize, int newSize)', 'selectedRules()', 'selectedRulesChanged()', 'setRenderingOrder()'],
"QgsRunProcess": ['create(const QString &action, bool capture)', 'dialogGone()', 'processError(QProcess::ProcessError)', 'processExit(int, QProcess::ExitStatus)', 'stderrAvailable()', 'stdoutAvailable()'],
"QgsSVGFillSymbolLayer": ['QgsSVGFillSymbolLayer(const QByteArray &svgData, double width=20, double rotation=0.0)', 'create(const QgsStringMap &properties=QgsStringMap())', 'createFromSld(QDomElement &element)', 'patternWidth() const', 'patternWidthMapUnitScale() const', 'setPatternWidth(double width)', 'setPatternWidthMapUnitScale(const QgsMapUnitScale &scale)', 'setSvgFilePath(const QString &svgPath)', 'setSvgFillColor(const QColor &c)', 'setSvgStrokeColor(const QColor &c)', 'setSvgStrokeWidth(double w)', 'setSvgStrokeWidthMapUnitScale(const QgsMapUnitScale &scale)', 'svgFilePath() const', 'svgFillColor() const', 'svgStrokeColor() const', 'svgStrokeWidth() const', 'svgStrokeWidthMapUnitScale() const'],
"QgsSVGFillSymbolLayerWidget": ['insertIcons()'],
"QgsScopeLogger": ['QgsScopeLogger(const char *file, const char *func, int line)'],
"QgsSearchQueryBuilder": ['loadQuery()', 'saveQuery()'],
"QgsShadowEffectWidget": ['QgsShadowEffectWidget(QWidget *parent=nullptr)', 'create()'],
"QgsShapeburstFillSymbolLayer": ['QgsShapeburstFillSymbolLayer(const QColor &color=DEFAULT_SIMPLEFILL_COLOR, const QColor &color2=Qt::white, ShapeburstColorType colorType=SimpleTwoColor, int blurRadius=0, bool useWholeShape=true, double maxDistance=5)', 'ShapeburstColorType', 'create(const QgsStringMap &properties=QgsStringMap())', 'distanceMapUnitScale() const', 'offsetMapUnitScale() const', 'setDistanceMapUnitScale(const QgsMapUnitScale &scale)', 'setOffsetMapUnitScale(const QgsMapUnitScale &scale)'],
"QgsShapeburstFillSymbolLayerWidget": ['setColor(const QColor &color)', 'setColor2(const QColor &color)'],
"QgsSimpleFillSymbolLayer": ['QgsSimpleFillSymbolLayer(const QColor &color=DEFAULT_SIMPLEFILL_COLOR, Qt::BrushStyle style=DEFAULT_SIMPLEFILL_STYLE, const QColor &strokeColor=DEFAULT_SIMPLEFILL_BORDERCOLOR, Qt::PenStyle strokeStyle=DEFAULT_SIMPLEFILL_BORDERSTYLE, double strokeWidth=DEFAULT_SIMPLEFILL_BORDERWIDTH, Qt::PenJoinStyle penJoinStyle=DEFAULT_SIMPLEFILL_JOINSTYLE)', 'brushStyle() const', 'create(const QgsStringMap &properties=QgsStringMap())', 'createFromSld(QDomElement &element)', 'offset()', 'offsetMapUnitScale() const', 'penJoinStyle() const', 'setBrushStyle(Qt::BrushStyle style)', 'setOffset(QPointF offset)', 'setOffsetMapUnitScale(const QgsMapUnitScale &scale)', 'setPenJoinStyle(Qt::PenJoinStyle style)', 'setStrokeStyle(Qt::PenStyle strokeStyle)', 'setStrokeWidth(double strokeWidth)', 'setStrokeWidthMapUnitScale(const QgsMapUnitScale &scale)', 'strokeStyle() const', 'strokeWidth() const', 'strokeWidthMapUnitScale() const'],
"QgsSimpleFillSymbolLayerWidget": ['setColor(const QColor &color)', 'setStrokeColor(const QColor &color)'],
"QgsSimpleLineSymbolLayer": ['QgsSimpleLineSymbolLayer(const QColor &color=DEFAULT_SIMPLELINE_COLOR, double width=DEFAULT_SIMPLELINE_WIDTH, Qt::PenStyle penStyle=DEFAULT_SIMPLELINE_PENSTYLE)', 'create(const QgsStringMap &properties=QgsStringMap())', 'createFromSld(QDomElement &element)', 'customDashPatternMapUnitScale() const', 'customDashVector() const', 'drawInsidePolygon() const', 'penCapStyle() const', 'penJoinStyle() const', 'penStyle() const', 'setCustomDashPatternMapUnitScale(const QgsMapUnitScale &scale)', 'setCustomDashVector(const QVector< qreal > &vector)', 'setDrawInsidePolygon(bool drawInsidePolygon)', 'setPenCapStyle(Qt::PenCapStyle style)', 'setPenJoinStyle(Qt::PenJoinStyle style)', 'setPenStyle(Qt::PenStyle style)', 'setUseCustomDashPattern(bool b)', 'useCustomDashPattern() const'],
"QgsSimpleLineSymbolLayerWidget": ['updatePatternIcon()'],
"QgsSimpleMarkerSymbolLayerWidget": ['setColorFill(const QColor &color)', 'setColorStroke(const QColor &color)'],
"QgsSimplifyMethod": ['MethodType'],
"QgsSingleBandColorDataRenderer": ['QgsSingleBandColorDataRenderer(QgsRasterInterface *input, int band)', 'create(const QDomElement &elem, QgsRasterInterface *input)'],
"QgsSingleBandGrayRenderer": ['Gradient', 'QgsSingleBandGrayRenderer(QgsRasterInterface *input, int grayBand)', 'contrastEnhancement() const', 'create(const QDomElement &elem, QgsRasterInterface *input)', 'gradient() const', 'grayBand() const', 'setGradient(Gradient gradient)', 'setGrayBand(int band)'],
"QgsSingleBandGrayRendererWidget": ['QgsSingleBandGrayRendererWidget(QgsRasterLayer *layer, const QgsRectangle &extent=QgsRectangle())', 'create(QgsRasterLayer *layer, const QgsRectangle &extent)', 'setFromRenderer(const QgsRasterRenderer *r)'],
"QgsSingleBandPseudoColorRenderer": ['classificationMax() const', 'classificationMin() const', 'create(const QDomElement &elem, QgsRasterInterface *input)', 'setClassificationMax(double max)', 'setClassificationMin(double min)'],
"QgsSingleBandPseudoColorRendererWidget": ['QgsSingleBandPseudoColorRendererWidget(QgsRasterLayer *layer, const QgsRectangle &extent=QgsRectangle())', 'create(QgsRasterLayer *layer, const QgsRectangle &extent)', 'setFromRenderer(const QgsRasterRenderer *r)'],
"QgsSingleCategoryDiagramRenderer": ['setDiagramSettings(const QgsDiagramSettings &s)'],
"QgsSingleSymbolRenderer": ['QgsSingleSymbolRenderer(QgsSymbol *symbol)', 'createFromSld(QDomElement &element, QgsWkbTypes::GeometryType geomType)', 'setSymbol(QgsSymbol *s)', 'symbol() const'],
"QgsSingleSymbolRendererWidget": ['QgsSingleSymbolRendererWidget(QgsVectorLayer *layer, QgsStyle *style, QgsFeatureRenderer *renderer)', 'create(QgsVectorLayer *layer, QgsStyle *style, QgsFeatureRenderer *renderer)'],
"QgsSlider": ['setMaximum(const QVariant &max)', 'setMinimum(const QVariant &min)', 'setSingleStep(const QVariant &step)', 'setValue(const QVariant &value)', 'valueChanged(const QVariant &)', 'variantValue() const'],
"QgsSlopeFilter": ['QgsSlopeFilter(const QString &inputFile, const QString &outputFile, const QString &outputFormat)'],
"QgsSmartGroupCondition": ['QgsSmartGroupCondition(int id, QWidget *parent=nullptr)', 'destruct()', 'removed(int)'],
"QgsSmartGroupEditorDialog": ['QgsSmartGroupEditorDialog(QgsStyle *style, QWidget *parent=nullptr)'],
"QgsSnappingUtils": ['IndexingStrategy', 'mapSettings() const', 'snapToMap(const QgsPointXY &pointMap, QgsPointLocator::MatchFilter *filter=nullptr)'],
"QgsSpatialIndexCopyVisitor": ['QgsSpatialIndexCopyVisitor(SpatialIndex::ISpatialIndex *newIndex)'],
"QgsSpatialIndexData": ['QgsSpatialIndexData(const QgsSpatialIndexData &other)', 'initTree(IDataStream *inputStream=nullptr)'],
"QgsStyleExportImportDialog": ['Mode', 'QgsStyleExportImportDialog(QgsStyle *style, QWidget *parent=nullptr, Mode mode=Export)', 'browse()', 'doExportImport()', 'importTypeChanged(int)'],
"QgsStyleGroupSelectionDialog": ['QgsStyleGroupSelectionDialog(QgsStyle *style, QWidget *parent=nullptr)'],
"QgsStyleManagerDialog": ['QgsStyleManagerDialog(QgsStyle *style, QWidget *parent SIP_TRANSFERTHIS=nullptr)', 'addColorRamp(QAction *action)', 'addItem()', 'currentItemName()', 'currentItemType()', 'editColorRamp()', 'editItem()', 'editSymbol()', 'exportItems()', 'exportItemsPNG()', 'exportItemsSVG()', 'exportSelectedItemsImages(const QString &dir, const QString &format, QSize size)', 'groupChanged(const QModelIndex &)', 'groupRenamed(QStandardItem *)', 'importItems()', 'itemChanged(QStandardItem *item)', 'removeColorRamp()', 'removeItem()', 'removeSymbol()'],
"QgsSublayersDialog": ['ProviderType'],
"QgsSvgMarkerSymbolLayer": ['create(const QgsStringMap &properties=QgsStringMap())', 'createFromSld(QDomElement &element)', 'setStrokeWidth(double w)', 'setStrokeWidthMapUnitScale(const QgsMapUnitScale &scale)', 'strokeWidth() const', 'strokeWidthMapUnitScale() const'],
"QgsSvgMarkerSymbolLayerWidget": ['populateList()', 'setGuiForSvg(const QgsSvgMarkerSymbolLayer *layer)'],
"QgsSvgSelectorGroupsModel": ['QgsSvgSelectorGroupsModel(QObject *parent)'],
"QgsSvgSelectorWidget": ['currentSvgPath() const', 'populateList()', 'svgSelected(const QString &path)'],
"QgsSymbol": ['QgsSymbol(SymbolType type, const QgsSymbolLayerList &layers)', 'color() const', 'dump() const', 'layer() const', 'mapUnitScale() const', 'setColor(const QColor &color)', 'setMapUnitScale(const QgsMapUnitScale &scale)', 'toSld(QDomDocument &doc, QDomElement &element, QgsStringMap props) const', 'type() const'],
"QgsSymbolLayer": ['QgsSymbolLayer(QgsSymbol::SymbolType type, bool locked=false)', 'drawPreviewIcon(QgsSymbolRenderContext &context, QSize size)=0', 'isLocked() const', 'mapUnitScale() const', 'ogrFeatureStyle(double mmScaleFactor, double mapUnitScaleFactor) const', 'renderingPass() const', 'setLocked(bool locked)', 'setMapUnitScale(const QgsMapUnitScale &scale)', 'setRenderingPass(int renderingPass)', 'startRender(QgsSymbolRenderContext &context)=0', 'stopRender(QgsSymbolRenderContext &context)=0', 'toSld(QDomDocument &doc, QDomElement &element, const QgsStringMap &props) const', 'type() const'],
"QgsSymbolLayerAbstractMetadata": ['QgsSymbolLayerAbstractMetadata(const QString &name, const QString &visibleName, QgsSymbol::SymbolType type)', 'name() const', 'type() const', 'visibleName() const'],
"QgsSymbolLayerUtils": ['clearSymbolMap(QgsSymbolMap &symbols)', 'convertPolygonSymbolizerToPointMarker(QDomElement &element, QgsSymbolLayerList &layerList)', 'createDisplacementElement(QDomDocument &doc, QDomElement &element, QPointF offset)', 'createFillLayerFromSld(QDomElement &element)', 'createFunctionElement(QDomDocument &doc, QDomElement &element, const QString &function)', 'createGeometryElement(QDomDocument &doc, QDomElement &element, const QString &geomFunc)', 'createLineLayerFromSld(QDomElement &element)', 'createMarkerLayerFromSld(QDomElement &element)', 'createOnlineResourceElement(QDomDocument &doc, QDomElement &element, const QString &path, const QString &format)', 'createOpacityElement(QDomDocument &doc, QDomElement &element, const QString &alphaFunc)', 'createRotationElement(QDomDocument &doc, QDomElement &element, const QString &rotationFunc)', 'createSvgParameterElement(QDomDocument &doc, const QString &name, const QString &value)', 'createSymbolLayerListFromSld(QDomElement &element, QgsWkbTypes::GeometryType geomType, QgsSymbolLayerList &layers)', 'createVendorOptionElement(QDomDocument &doc, const QString &name, const QString &value)', 'decodeBlendMode(const QString &s)', 'decodeBrushStyle(const QString &str)', 'decodeColor(const QString &str)', 'decodeMapUnitScale(const QString &str)', 'decodePenCapStyle(const QString &str)', 'decodePenJoinStyle(const QString &str)', 'decodePenStyle(const QString &str)', 'decodeRealVector(const QString &s)', 'decodeScaleMethod(const QString &str)', 'decodeSldAlpha(const QString &str)', 'decodeSldBrushStyle(const QString &str)', 'decodeSldFontStyle(const QString &str)', 'decodeSldFontWeight(const QString &str)', 'decodeSldLineCapStyle(const QString &str)', 'decodeSldLineJoinStyle(const QString &str)', 'decodeSldRealVector(const QString &s)', 'displacementFromSldElement(QDomElement &element, QPointF &offset)', 'drawStippledBackground(QPainter *painter, QRect rect)', 'encodeBrushStyle(Qt::BrushStyle style)', 
'encodeColor(const QColor &color)', 'encodeMapUnitScale(const QgsMapUnitScale &mapUnitScale)', 'encodePenCapStyle(Qt::PenCapStyle style)', 'encodePenJoinStyle(Qt::PenJoinStyle style)', 'encodePenStyle(Qt::PenStyle style)', 'encodeRealVector(const QVector< qreal > &v)', 'encodeScaleMethod(QgsSymbol::ScaleMethod scaleMethod)', 'encodeSldAlpha(int alpha)', 'encodeSldBrushStyle(Qt::BrushStyle style)', 'encodeSldFontStyle(QFont::Style style)', 'encodeSldFontWeight(int weight)', 'encodeSldLineCapStyle(Qt::PenCapStyle style)', 'encodeSldLineJoinStyle(Qt::PenJoinStyle style)', 'encodeSldRealVector(const QVector< qreal > &v)', 'externalGraphicFromSld(QDomElement &element, QString &path, QString &mime, QColor &color, double &size)', 'externalGraphicToSld(QDomDocument &doc, QDomElement &element, const QString &path, const QString &mime, const QColor &color, double size=-1)', 'externalMarkerFromSld(QDomElement &element, QString &path, QString &format, int &markIndex, QColor &color, double &size)', 'externalMarkerToSld(QDomDocument &doc, QDomElement &element, const QString &path, const QString &format, int *markIndex=nullptr, const QColor &color=QColor(), double size=-1)', 'fillFromSld(QDomElement &element, Qt::BrushStyle &brushStyle, QColor &color)', 'fillToSld(QDomDocument &doc, QDomElement &element, Qt::BrushStyle brushStyle, const QColor &color=QColor())', 'functionFromSldElement(QDomElement &element, QString &function)', 'geometryFromSldElement(QDomElement &element, QString &geomFunc)', 'getSvgParameterList(QDomElement &element)', 'getVendorOptionList(QDomElement &element)', 'hasExternalGraphic(QDomElement &element)', 'hasWellKnownMark(QDomElement &element)', 'labelTextToSld(QDomDocument &doc, QDomElement &element, const QString &label, const QFont &font, const QColor &color=QColor(), double size=-1)', 'lineFromSld(QDomElement &element, Qt::PenStyle &penStyle, QColor &color, double &width, Qt::PenJoinStyle *penJoinStyle=nullptr, Qt::PenCapStyle *penCapStyle=nullptr, QVector< qreal > *customDashPattern=nullptr, double *dashOffset=nullptr)', 'needEllipseMarker(QDomElement &element)', 'needFontMarker(QDomElement &element)', 'needLinePatternFill(QDomElement &element)', 'needMarkerLine(QDomElement &element)', 'needPointPatternFill(QDomElement &element)', 'needSvgFill(QDomElement &element)', 'needSvgMarker(QDomElement &element)', 'onlineResourceFromSldElement(QDomElement &element, QString &path, QString &format)', 'opacityFromSldElement(QDomElement &element, QString &alphaFunc)', 'parseProperties(QDomElement &element)', 'rotationFromSldElement(QDomElement &element, QString &rotationFunc)', 'saveProperties(QgsStringMap props, QDomDocument &doc, QDomElement &element)', 'wellKnownMarkerToSld(QDomDocument &doc, QDomElement &element, const QString &name, const QColor &color, const QColor &strokeColor, Qt::PenStyle strokeStyle, double strokeWidth=-1, double size=-1)'],
"QgsSymbolLayerWidget": ['setSymbolLayer(QgsSymbolLayer *layer)=0', 'symbolLayer()=0', 'updateDataDefinedProperty()'],
"QgsSymbolLevelItem": ['QgsSymbolLevelItem(QgsSymbol *symbol, int layer)', 'layer()', 'symbol()'],
"QgsSymbolLevelsDialog": ['setForceOrderingEnabled(bool enabled)'],
"QgsSymbolRenderContext": ['mapUnitScale() const', 'outputLineWidth(double width) const', 'outputPixelSize(double size) const', 'selected() const', 'setFeature(const QgsFeature *f)', 'setMapUnitScale(const QgsMapUnitScale &scale)', 'setSelected(bool selected)'],
"QgsSymbolSelectorDialog": ['addLayer()', 'currentLayer()', 'layerChanged()', 'loadSymbol()', 'lockLayer()', 'moveLayerByOffset(int offset)', 'moveLayerDown()', 'moveLayerUp()', 'removeLayer()', 'setWidget(QWidget *widget)', 'symbolModified()', 'updateLayerPreview()', 'updateLockButton()', 'updatePreview()', 'updateUi()'],
"QgsSymbolsListWidget": ['addSymbolToStyle()', 'changed()', 'clipFeaturesToggled(bool checked)', 'openStyleManager()', 'saveSymbol()', 'setLineWidth(double width)', 'setMarkerAngle(double angle)', 'setMarkerSize(double size)', 'setSymbolColor(const QColor &color)', 'setSymbolFromStyle(const QModelIndex &index)', 'symbolAddedToStyle(const QString &name, QgsSymbol *symbol)', 'updateDataDefinedLineWidth()', 'updateDataDefinedMarkerAngle()', 'updateDataDefinedMarkerSize()'],
"QgsTextDiagram": ['Orientation', 'Shape'],
"QgsTextEditConfigDlg": ['QgsTextEditConfigDlg(QgsVectorLayer *vl, int fieldIdx, QWidget *parent=nullptr)'],
"QgsTextEditWidgetFactory": ['QgsTextEditWidgetFactory(const QString &name)'],
"QgsTextEditWrapper": ['QgsTextEditWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor=nullptr, QWidget *parent=nullptr)'],
"QgsTotalCurvatureFilter": ['QgsTotalCurvatureFilter(const QString &inputFile, const QString &outputFile, const QString &outputFormat)'],
"QgsTransaction": ['QgsTransaction(const QString &connString)'],
"QgsTransformWidget": ['QgsTransformWidget(QWidget *parent=nullptr)', 'create()'],
"QgsUniqueValueWidgetFactory": ['QgsUniqueValueWidgetFactory(const QString &name)'],
"QgsUniqueValuesConfigDlg": ['QgsUniqueValuesConfigDlg(QgsVectorLayer *vl, int fieldIdx, QWidget *parent=nullptr)'],
"QgsUniqueValuesWidgetWrapper": ['QgsUniqueValuesWidgetWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor=nullptr, QWidget *parent=nullptr)'],
"QgsUnitSelectionWidget": ['changed()'],
"QgsUuidWidgetFactory": ['QgsUuidWidgetFactory(const QString &name)'],
"QgsUuidWidgetWrapper": ['QgsUuidWidgetWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor=nullptr, QWidget *parent=nullptr)'],
"QgsValueMapConfigDlg": ['QgsValueMapConfigDlg(QgsVectorLayer *vl, int fieldIdx, QWidget *parent)', 'updateMap(const QMap< QString, QVariant > &map, bool insertNull)'],
"QgsValueMapWidgetFactory": ['QgsValueMapWidgetFactory(const QString &name)'],
"QgsValueMapWidgetWrapper": ['QgsValueMapWidgetWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor=nullptr, QWidget *parent=nullptr)'],
"QgsValueRelationConfigDlg": ['QgsValueRelationConfigDlg(QgsVectorLayer *vl, int fieldIdx, QWidget *parent=nullptr)', 'editExpression()'],
"QgsValueRelationSearchWidgetWrapper": ['value() const'],
"QgsValueRelationWidgetFactory": ['QgsValueRelationWidgetFactory(const QString &name)'],
"QgsValueRelationWidgetWrapper": ['QgsValueRelationWidgetWrapper(QgsVectorLayer *vl, int fieldIdx, QWidget *editor=nullptr, QWidget *parent=nullptr)'],
"QgsVectorDataProvider": ['convertValue(QVariant::Type type, const QString &value)'],
"QgsVectorFieldSymbolLayer": ['AngleOrientation', 'AngleUnits', 'VectorFieldType', 'angleOrientation() const', 'angleUnits() const', 'create(const QgsStringMap &properties=QgsStringMap())', 'createFromSld(QDomElement &element)', 'distanceMapUnitScale() const', 'scale() const', 'setAngleOrientation(AngleOrientation orientation)', 'setAngleUnits(AngleUnits units)', 'setDistanceMapUnitScale(const QgsMapUnitScale &scale)', 'setScale(double s)', 'setVectorFieldType(VectorFieldType type)', 'setXAttribute(const QString &attribute)', 'setYAttribute(const QString &attribute)', 'vectorFieldType() const', 'xAttribute() const', 'yAttribute() const'],
"QgsVectorFileWriter": ['OptionType', 'SymbologyExport', 'WriterError', 'driverMetadata(const QString &driverName, MetaData &driverMetadata)', 'setSymbologyExport(QgsVectorFileWriter::SymbologyExport symExport)', 'symbologyExport() const'],
"QgsVectorFileWriter::BoolOption": ['BoolOption(const QString &docString, bool defaultValue)'],
"QgsVectorFileWriter::HiddenOption": ['HiddenOption(const QString &value)'],
"QgsVectorFileWriter::IntOption": ['IntOption(const QString &docString, int defaultValue)'],
"QgsVectorFileWriter::Option": ['Option(const QString &docString, QgsVectorFileWriter::OptionType type)'],
"QgsVectorFileWriter::SetOption": ['SetOption(const QString &docString, const QStringList &values, const QString &defaultValue, bool allowNone=false)'],
"QgsVectorFileWriter::StringOption": ['StringOption(const QString &docString, const QString &defaultValue=QString())'],
"QgsVectorLayer": ['diagramLayerSettings() const', 'diagramRenderer() const', 'setDiagramLayerSettings(const QgsDiagramLayerSettings &s)', 'vectorJoins() const'],
"QgsVectorLayerCache": ['QgsVectorLayerCache(QgsVectorLayer *layer, int cacheSize, QObject *parent=nullptr)'],
"QgsVectorLayerEditBuffer": ['QgsVectorLayerEditBuffer(QgsVectorLayer *layer)', 'attributeAdded(int idx)', 'attributeDeleted(int idx)', 'attributeValueChanged(QgsFeatureId fid, int idx, const QVariant &)', 'committedAttributeValuesChanges(const QString &layerId, const QgsChangedAttributesMap &changedAttributesValues)', 'committedAttributesAdded(const QString &layerId, const QList< QgsField > &addedAttributes)', 'committedFeaturesAdded(const QString &layerId, const QgsFeatureList &addedFeatures)', 'committedFeaturesRemoved(const QString &layerId, const QgsFeatureIds &deletedFeatureIds)', 'committedGeometriesChanges(const QString &layerId, const QgsGeometryMap &changedGeometries)', 'featureAdded(QgsFeatureId fid)', 'featureDeleted(QgsFeatureId fid)', 'undoIndexChanged(int index)', 'updateFields(QgsFields &fields)', 'updateLayerFields()'],
"QgsVectorLayerEditPassthrough": ['QgsVectorLayerEditPassthrough(QgsVectorLayer *layer)'],
"QgsVectorLayerEditUtils": ['QgsVectorLayerEditUtils(QgsVectorLayer *layer)'],
"QgsVectorLayerFeatureIterator": ['QgsVectorLayerFeatureIterator(QgsVectorLayerFeatureSource *source, bool ownSource, const QgsFeatureRequest &request)'],
"QgsVectorLayerJoinBuffer": ['QgsVectorLayerJoinBuffer(QgsVectorLayer *layer=nullptr)', 'vectorJoins() const'],
"QgsVectorLayerRenderer": ['QgsVectorLayerRenderer(QgsVectorLayer *layer, QgsRenderContext &context)'],
"QgsVectorLayerSelectionManager": ['QgsVectorLayerSelectionManager(QgsVectorLayer *layer, QObject *parent=nullptr)'],
"QgsVertexId": ['QgsVertexId(int _part=-1, int _ring=-1, int _vertex=-1, VertexType _type=SegmentVertex)', 'VertexType', 'isValid(const QgsAbstractGeometry *geom) const', 'partEqual(QgsVertexId o) const', 'ringEqual(QgsVertexId o) const', 'vertexEqual(QgsVertexId o) const'],
"QgsVertexMarker": ['QgsVertexMarker(QgsMapCanvas *mapCanvas)', 'setCenter(const QgsPointXY &point)', 'setIconSize(int iconSize)', 'setIconType(int iconType)', 'setPenWidth(int width)'],
"QgsWkbException": ['QgsWkbException(QString const &what)'],
"QgsWkbPtr": ['QgsWkbPtr(unsigned char *p, int size)', 'operator unsigned char *() const', 'operator+=(int n)', 'operator>>(QgsWkbTypes::Type &v) const', 'operator>>(char &v) const', 'operator>>(double &v) const', 'operator>>(float &r) const', 'operator>>(int &v) const', 'operator>>(unsigned int &v) const'],
"QgsXmlUtils": ['readRectangle(const QDomElement &element)', 'writeRectangle(const QgsRectangle &rect, QDomDocument &doc)'],
"QgsZipItem": ['QgsZipItem(QgsDataItem *parent, const QString &name, const QString &filePath, const QString &path)', 'QgsZipItem(QgsDataItem *parent, const QString &name, const QString &path)', 'getZipFileList()', 'iconZip()', 'vsiPrefix(const QString &uri)'],
"TriDecorator": ['TriDecorator(Triangulation *t)'],
"pal::CostCalculator": ['setPolygonCandidatesCost(int nblp, QList< LabelPosition * > &lPos, RTree< pal::FeaturePart *, double, 2, double > *obstacles, double bbx[4], double bby[4])'],
"pal::FeaturePart": ['FeaturePart(const FeaturePart &other)', 'addSizePenalty(int nbp, QList< LabelPosition *> &lPos, double bbx[4], double bby[4])', 'getLabelDistance() const', 'getLabelHeight() const', 'getLabelWidth() const'],
"pal::GeomFunction": ['cross_product(double x1, double y1, double x2, double y2, double x3, double y3)', 'dist_euc2d(double x1, double y1, double x2, double y2)', 'dist_euc2d_sq(double x1, double y1, double x2, double y2)', 'findLineCircleIntersection(double cx, double cy, double radius, double x1, double y1, double x2, double y2, double &xRes, double &yRes)'],
"pal::LabelInfo": ['LabelInfo(int num, double height, double maxinangle=20.0, double maxoutangle=-20.0)'],
"pal::LabelPosition": ['countFullOverlapCallback(LabelPosition *lp, void *ctx)', 'countOverlapCallback(LabelPosition *lp, void *ctx)', 'getHeight() const', 'getNextPart() const', 'getNumOverlaps() const', 'getPartId() const', 'getProblemFeatureId() const', 'getQuadrant() const', 'getReversed() const', 'getUpsideDown() const', 'getWidth() const', 'insertIntoIndex(RTree< LabelPosition *, double, 2, double > *index)', 'isInConflictMultiPart(LabelPosition *lp)', 'isInConflictSinglePart(LabelPosition *lp)', 'polygonObstacleCallback(pal::FeaturePart *obstacle, void *ctx)', 'removeFromIndex(RTree< LabelPosition *, double, 2, double > *index)', 'removeOverlapCallback(LabelPosition *lp, void *ctx)', 'resetNumOverlaps()', 'setNextPart(LabelPosition *next)', 'setPartId(int id)'],
"pal::Layer": ['LabelMode', 'UpsideDownLabels', 'displayAll() const'],
"pal::Pal": ['FnIsCanceled)(void *ctx)', 'solveProblem(Problem *prob, bool displayAll)'],
"pal::PointSet": ['PointSet(const PointSet &ps)', 'PointSet(double x, double y)', 'PointSet(int nbPoints, double *x, double *y)', 'compute_chull_bbox()', 'createGeosGeom() const', 'deleteCoords()', 'extractShape(int nbPtSh, int imin, int imax, int fps, int fpe, double fptx, double fpty)', 'getBoundingBox(double min[2], double max[2]) const', 'getCentroid(double &px, double &py, bool forceInside=false) const', 'getGeosType() const', 'getNumPoints() const', 'invalidateGeos()', 'preparedGeom() const'],
"pal::PolygonCostCalculator": ['PolygonCostCalculator(LabelPosition *lp)', 'getCost()', 'getLabel()', 'update(pal::PointSet *pset)'],
"pal::PriorityQueue": ['decreaseKey(int key)', 'downheap(int id)', 'getBest()', 'getId(int key)', 'getSize()', 'getSizeByPos()', 'insert(int key, double p)', 'isIn(int key)', 'print()', 'remove(int key)', 'setPriority(int key, double new_p)', 'sort()', 'upheap(int key)'],
"pal::Problem": ['compareLabelArea(pal::LabelPosition *l1, pal::LabelPosition *l2)', 'compute_feature_cost(SubPart *part, int feat_id, int label_id, int *nbOverlap)', 'compute_subsolution_cost(SubPart *part, int *s, int *nbOverlap)', 'getFeatureCandidate(int fi, int ci)', 'getFeatureCandidateCount(int i)', 'getNumFeatures()', 'getSolution(bool returnInactive)', 'getStats()', 'init_sol_falp()', 'initialization()', 'popmusic_tabu(SubPart *part)', 'reduce()', 'subPart(int r, int featseed, int *isIn)'],
"pal::Util": ['unmulti(const GEOSGeometry *the_geom)'],
"QgsGeometryLineIntersectionCheck": ["QgsGeometryLineIntersectionCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)", "factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()"],
"QgsGeometryDangleCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryDangleCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometryDegeneratePolygonCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryDegeneratePolygonCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometryCheckerUtils": ["lineIntersections(const QgsLineString *line1, const QgsLineString *line2, double tol)", "filter1DTypes(QgsAbstractGeometry *geom)", "canDeleteVertex(const QgsAbstractGeometry *geom, int iPart, int iRing)", "polygonRings(const QgsPolygon *polygon)", "sharedEdgeLength(const QgsAbstractGeometry *geom1, const QgsAbstractGeometry *geom2, double tol)", "pointOnLine(const QgsPoint &p, const QgsLineString *line, double tol, bool excludeExtremities=false)", "getGeomPart(QgsAbstractGeometry *geom, int partIdx)", "getGeomPart(const QgsAbstractGeometry *geom, int partIdx)", "createGeomEngine(const QgsAbstractGeometry *geometry, double tolerance)"],
"QgsGeometryContainedCheckError": ["QgsGeometryContainedCheckError(const QgsGeometryCheck *check, const QgsGeometryCheckerUtils::LayerFeature &layerFeature, const QgsPointXY &errorLocation, const QgsGeometryCheckerUtils::LayerFeature &containingFeature)", "containingFeature() const"],
"QgsGeometryHoleCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryHoleCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometryDuplicateCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryDuplicateCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometrySelfContactCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometrySelfContactCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometryTypeCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryTypeCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration, int allowedTypes)"],
"QgsGeometryDuplicateCheckError": ["QgsGeometryDuplicateCheckError(const QgsGeometryCheck *check, const QgsGeometryCheckerUtils::LayerFeature &layerFeature, const QgsPointXY &errorLocation, const QMap< QString, QgsFeaturePool * > &featurePools, const QMap< QString, QList< QgsFeatureId >> &duplicates)", "duplicates() const"],
"QgsGeometryLineLayerIntersectionCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryLineLayerIntersectionCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometrySegmentLengthCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometrySegmentLengthCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometryTypeCheckError": ["QgsGeometryTypeCheckError(const QgsSingleGeometryCheck *check, const QgsGeometry &geometry, const QgsGeometry &errorLocation, QgsWkbTypes::Type flatType)"],
"QgsGeometryAngleCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryAngleCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometrySliverPolygonCheck": ["factoryDescription()", "factoryId()", "QgsGeometrySliverPolygonCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometryAreaCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryAreaCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometryChecker": [],
"QgsGeometryFollowBoundariesCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryFollowBoundariesCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration, QgsVectorLayer *checkLayer)"],
"QgsGeometryMultipartCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryMultipartCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometrySelfIntersectionCheckError": ["QgsGeometrySelfIntersectionCheckError(const QgsSingleGeometryCheck *check, const QgsGeometry &geometry, const QgsGeometry &errorLocation, QgsVertexId vertexId, const QgsGeometryUtils::SelfIntersection &intersection)", "intersection() const"],
"QgsGeometryGapCheckError": ["QgsGeometrySelfIntersectionCheckError(const QgsSingleGeometryCheck *check, const QgsGeometry &geometry, const QgsGeometry &errorLocation, QgsVertexId vertexId, const QgsGeometryUtils::SelfIntersection &intersection)", "intersection() const"],
"QgsGeometryPointCoveredByLineCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryPointCoveredByLineCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometrySelfIntersectionCheck": ["QgsGeometrySelfIntersectionCheck(const QgsGeometryCheckContext *context, const QVariantMap &configuration=QVariantMap())", "ResolutionMethod"],
"QgsGeometryContainedCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryContainedCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometryPointInPolygonCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryPointInPolygonCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometryDuplicateNodesCheck": ["factoryDescription()", "factoryId()", "factoryCompatibleGeometryTypes()", "factoryIsCompatible(QgsVectorLayer *layer)", "ResolutionMethod", "factoryCheckType()", "QgsGeometryDuplicateNodesCheck(QgsGeometryCheckContext *context, const QVariantMap &configuration)"],
"QgsGeometryChecker": ["getContext() const", "featurePools() const", "setMergeAttributeIndices(const QMap< QString, int > &mergeAttributeIndices)", "progressValue(int value)", "getChecks() const", "errorAdded(QgsGeometryCheckError *error)", "QgsGeometryChecker(const QList< QgsGeometryCheck * > &checks, QgsGeometryCheckContext *context, const QMap< QString, QgsFeaturePool * > &featurePools)", "fixError(QgsGeometryCheckError *error, int method, bool triggerRepaint=false)", "errorUpdated(QgsGeometryCheckError *error, bool statusChanged)", "execute(int *totalSteps=nullptr)", "getMessages() const"]
}
# Whitelist of class names which are allowed to lack an "added in version"
# note in their API documentation.  Presumably consumed via membership tests
# by the surrounding documentation-coverage checker -- TODO confirm against
# the caller.
# NOTE(review): duplicate entries ("QgsLegendModel", "QgsPoint") removed;
# membership semantics are unchanged.
ACCEPTABLE_MISSING_ADDED_NOTE = [
    "QgsDerivativeFilter",
    "QgsLayerTreeEmbeddedConfigWidget",
    "QgsCptCityDirectoryItem",
    "QgsDataDefinedRotationDialog",
    "QgsAttributeTypeLoadDialog",
    "QgsProject",
    "QgsDirectoryParamWidget",
    "QgsLegendModel",
    "QgsLayerTreeNode",
    "QgsSlopeFilter",
    "QgsPointLocator_VisitorArea",
    "pal::InternalException::Full",
    "QgsPluginLayer",
    "QgsGeometryValidator",
    "QgsLineSymbol",
    "QgsFeatureListViewDelegate",
    "QgsHighlight",
    "pal::Sol",
    "QgsServer",
    "QgsOSMXmlImport",
    "QgsDiagramLabelFeature",
    "QgsPanelWidgetStack",
    "QgsMapUnitScaleDialog",
    "QgsDoubleSpinBox",
    "QgsSingleBandPseudoColorRenderer",
    "QgsRasterLayerSaveAsDialog",
    "QgsSvgSelectorDialog",
    "QgsRendererRulePropsWidget",
    "QgsErrorItem",
    "QgsCheckboxWidgetFactory",
    "QgsComposerLayerItem",
    "Triangulation",
    "QgsGlowWidget",
    "QgsVectorFileWriter::Option",
    "QgsColorSwatchDelegate",
    "QgsTextEditWrapper",
    "QgsCaseInsensitiveLexerSQL",
    "QgsServerInterface",
    "QgsAuthMethodPlugins",
    "QgsMapSettings",
    "QgsLayerDefinition",
    "pal::InternalException::WrongGeometry",
    "QgsRasterDrawer",
    "QgsOgcUtils",
    "QgsMapLayerStyleManagerWidget",
    "QgsCoordinateTransformCache",
    "QgsRangeWidgetWrapper",
    "QgsFeatureIteratorDataStream",
    "QgsSimpleLineSymbolLayerWidget",
    "QgsContrastEnhancement",
    "QgsHillshadeFilter",
    "QgsCachedFeatureIterator",
    "QgsAuthCrypto",
    "QgsAuthSslErrorsDialog",
    "QgsRendererRegistry",
    "QgsExpression",
    "Node",
    "QgsFeatureListModel",
    "QgsAuthMethodMetadata",
    "QgsComposerScaleBar",
    "QgsExpression::Function",
    "QgsMultiBandColorRendererWidget",
    "QgsFeatureRequest::OrderByClause",
    "QgsColorWidgetAction",
    "Bezier3D",
    "QgsSQLStatement::NodeJoin",
    "QgsPoint",
    "QgsSLConnect",
    "QgsConditionalStyle",
    "QgsExpression::NodeList",
    "QgsSingleSymbolRendererWidget",
    "QgsDbFilterProxyModel",
    "QgsRasterShaderFunction",
    "QgsBrushStyleComboBox",
    "QgsSVGFillSymbolLayer",
    "QgsPluginLayerRegistry",
    "pal::PalException::UnknownFeature",
    "QgsMapLayerActionRegistry",
    "QgsMapToolIdentify",
    "QgsTableWidgetItem",
    "QgsFontMarkerSymbolLayerWidget",
    "QgsLabel",
    "QgsColorSwatchGridAction",
    "QgsRelation",
    "QgsVectorGradientColorRampDialog",
    "QgsSQLStatement::NodeBetweenOperator",
    "QgsScaleBarStyle",
    "QgsMapToolIdentifyFeature",
    "QgsMultiBandColorRenderer",
    "QgsGraphBuilderInterface",
    "QgsComposerHtml",
    "QgsRasterPyramidsOptionsWidget",
    "QgsColorRampShader",
    "QgsMapUnitScale",
    "pal::Util",
    "QgsGpsdConnection",
    "pal::PointSet",
    "QgsDateTimeEditWrapper",
    "NormVecDecorator",
    "Qgis",
    "QgsDetailedItemWidget",
    "QgsDataDefinedSizeDialog",
    "QgsDxfExport",
    "QgsIdentifyMenu",
    "QgsRendererRange",
    "QgsDiagramSettings",
    "QgsDataItemProviderRegistry",
    "QgsSymbolSelectorDialog",
    "QgsComposerFrame",
    "QgsFontMarkerSymbolLayer",
    "QgsSpinBox",
    "QgsAction",
    "QgsSQLComposerDialog::SQLValidatorCallback",
    "QgsGpsDetector",
    "QgsExpressionHighlighter",
    "QgsMapToPixelSimplifier",
    "QgsVectorColorBrewerColorRampDialog",
    "QgsCptCityAllRampsItem",
    "QgsOSMWay",
    "QgsLabelingEngineInterface",
    "QgsEditorConfigWidget",
    "QgsDataProvider",
    "QgsLayerTreeView",
    "QgsActionManager",
    "QgsSQLStatement::NodeColumnRef",
    "QgsLocaleNumC",
    "QgsRasterChecker",
    "QgsNumericScaleBarStyle",
    "QgsVectorLayerUndoCommandDeleteFeature",
    "QgsTextEditConfigDlg",
    "QgsOgcUtilsExprToFilter",
    "QgsOSMNode",
    "QgsAuthSslConfigWidget",
    "QgsPixmapLabel",
    "QgsAuthConfigIdEdit",
    "QgsVectorLayerTools",
    "QgsBlendModeComboBox",
    "QgsConstWkbSimplifierPtr",
    "QgsRelief",
    "QgsFeature",
    "QgsProjectBadLayerHandler",
    "QgsCptCityCollectionItem",
    "QgsGridFileWriter",
    "QgsVectorLayerUndoCommandDeleteAttribute",
    "QgsWebView",
    "QgsAbstractCacheIndex",
    "QgsIFeatureSelectionManager",
    "QgsInvertedPolygonRenderer",
    "QgsSQLComposerDialog",
    "QgsNetworkAccessManager",
    "QgsLayerPropertiesWidget",
    "QgsRasterRendererRegistry",
    "QgsRasterRendererWidget",
    "pal::PalException::FeatureExists",
    "QgsAttributeTableFilterModel",
    "QgsComposerView",
    "QgsVectorFileWriter::SetOption",
    "QgsLinearMinMaxEnhancementWithClip",
    "QgsMapCanvas::CanvasProperties",
    "QgsLabelFeature",
    "QgsSymbolLayer",
    "QgsMapRendererJob",
    "QgsQueryBuilder",
    "QgsSQLStatement::Visitor",
    "QgsCentroidFillSymbolLayer",
    "QgsTreeWidgetItem",
    "QgsSvgMarkerSymbolLayerWidget",
    "QgsSingleCategoryDiagramRenderer",
    "QgsSurface",
    "QgsOgcUtilsSQLStatementToFilter",
    "QgsPalettedRendererWidget",
    "QgsLabelSearchTree",
    "QgsSQLStatement::NodeTableDef",
    "QgsComposerUtils",
    "QgsRasterCalcNode",
    "QgsTextEditWidgetFactory",
    "QgsUniqueValueWidgetFactory",
    "QgsMapLayerAction",
    "QgsNumericSortTreeWidgetItem",
    "QgsPkiBundle",
    "QgsVectorLayerLabelProvider",
    "QgsTextAnnotationItem",
    "QgsPluginManagerInterface",
    "QgsFeatureListView",
    "QgsOSMDownload",
    "QgsAdvancedDigitizingDockWidget::CadConstraint",
    "QgsSimpleMarkerSymbolLayerWidget",
    "QgsTolerance",
    "QgsRasterLayer",
    "QgsComposerObject",
    "QgsMapHitTest",
    "QgsLegendRenderer",
    "QgsColorWidgetFactory",
    "QgsWidgetWrapper",
    "QgsRangeWidgetFactory",
    "QgsPointCompare",
    "QgsComposerAttributeTableCompareV2",
    "QgsCptCityColorRamp",
    "QgsAttributeEditorContext",
    "QgsSymbologyConversion",
    "QgsAttributeEditorRelation",
    "QgsPointDisplacementRendererWidget",
    "QgsOSMDatabase",
    "QgsAbstractGeometrySimplifier",
    "QgsComposerMapItem",
    "QgsProviderExtentCalcEvent",
    "pal::PolygonCostCalculator",
    "pal::Pal",
    "QgsMapRendererCustomPainterJob",
    "QgsExternalResourceWidget",
    "QgsRasterShader",
    "QgsOverlayAnalyzer",
    "QgsComposerLegend",
    "QgsCachedFeatureWriterIterator",
    "QgsComposerTextTable",
    "QgsNineCellFilter",
    "QgsSymbolLayerAbstractMetadata",
    "QgsPointLocator",
    "QgsAuthCertInfo",
    "QgsAttributeTableMapLayerAction",
    "QgsFeatureSelectionDlg",
    "QgsAuthConfigEdit",
    "QgsAuthMethodConfig",
    "QgsVectorLayerUndoCommandAddFeature",
    "QgsAuthConfigEditor",
    "QgsCharacterSelectorDialog",
    "QgsSpatialIndex",
    "QgsAbstractFeatureIterator",
    "QgsMapCanvasMap",
    "QgsShapeburstFillSymbolLayer",
    "QgsVectorLayerJoinBuffer",
    "QgsTextLabelFeature",
    "QgsPkiConfigBundle",
    "QgsCredentialsNone",
    "QgsPenCapStyleComboBox",
    "QgsAuthConfigSelect",
    "QgsFeatureRequest",
    "QgsExpression::NodeInOperator",
    "QgsTotalCurvatureFilter",
    "QgsGeometry",
    "QgsDummyConfigDlg",
    "QgsBrowserModel",
    "QgsRenderContext",
    "QgsSingleBandGrayRendererWidget",
    "QgsAttributeEditorElement",
    "QgsAbstractLabelProvider",
    "QgsSQLStatement::NodeInOperator",
    "QgsComposerGroupItem",
    "QgsLineSymbolLayer",
    "QgsSublayersDialog",
    "QgsMapOverviewCanvas",
    "QgsRasterRenderer",
    "QgsRelationReferenceFactory",
    "QgsContrastEnhancementFunction",
    "QgsMarkerSymbolLayer",
    "QgsGroupWmsDataDialog",
    "QgsVectorLayerInterruptionCheckerDuringCountSymbolFeatures",
    "QgsSymbolRenderContext",
    "QgsValueRelationSearchWidgetWrapper",
    "DualEdgeTriangulation",
    "QgsRasterFileWriter",
    "QgsGraphDirector",
    "QgsTransectSample",
    "QgsFeatureRenderer",
    "QgsExpressionBuilderDialog",
    "QgsMapLayer",
    "QgsOptionsDialogBase",
    "QgsGraphArc",
    "QgsMapCanvasLayer",
    "QgsErrorMessage",
    "QgsAuthImportIdentityDialog",
    "QgsExpressionItem",
    "pal::LabelInfo",
    "QgsStatisticalSummary",
    "pal::PalException::ValueNotInRange",
    "QgsRendererAbstractMetadata",
    "QgsSQLStatement::NodeLiteral",
    "QgsEnumerationWidgetWrapper",
    "pal::PalStat",
    "QgsRuleBasedRendererWidget",
    "QgsOfflineEditing",
    "GEOSGeomScopedPtr",
    "QgsIDWInterpolator",
    "QgsPointLocator_VisitorEdgesInRect",
    "QgsEffectStack",
    "QgsRasterTransparencyWidget",
    "QgsRasterPipe",
    "QgsSvgSelectorWidget",
    "QgsEditorWidgetFactory",
    "QgsExpressionContextScope",
    "QgsVectorLayerSimpleLabeling",
    "QgsLayerTreeEmbeddedWidgetRegistry",
    "QgsOSMTags",
    "QgsScaleUtils",
    "QgsDataCollectionItem",
    "QgsFeatureSelectionModel",
    "QgsSimpleMarkerSymbolLayer",
    "QgsRasterProjector",
    "QgsGradientFillSymbolLayer",
    "QgsAuthSslConfigDialog",
    "QgsEditorWidgetWrapper",
    "QgsDrawSourceWidget",
    "QgsOWSSourceSelect",
    "QgsException",
    "QgsAttributeTableDelegate",
    "QgsVectorLayerDiagramProvider",
    "QgsVectorLayerUndoCommandAddAttribute",
    "QgsDialog",
    "QgsAuthSslImportDialog",
    "QgsVectorColorRamp",
    "QgsCustomLayerOrderWidget",
    "QgsStyleGroupSelectionDialog",
    "QgsAdvancedDigitizingCanvasItem",
    "QgsComposerItemGroup",
    "pal::CostCalculator",
    "QgsAuthCertTrustPolicyComboBox",
    "QgsExtentGroupBox",
    "QgsValueRelationConfigDlg",
    "QgsVectorLayerImport",
    "QgsPenJoinStyleComboBox",
    "pal::InternalException::NoLabelPosition",
    "QgsDateTimeStatisticalSummary",
    "QgsNewVectorLayerDialog",
    "QgsLinearMinMaxEnhancement",
    "QgsComposerModel",
    "QgsSymbolLayerUtils",
    "QgsConstWkbPtr",
    "QgsLongLongValidator",
    "QgsSimpleFillSymbolLayerWidget",
    "QgsCptCityArchive",
    "QgsComposerArrow",
    "QgsAttributeFormLegacyInterface",
    "QgsProjectItem",
    "QgsMapToolEmitPoint",
    "QgsClassificationWidgetWrapperFactory",
    "QgsCollapsibleGroupBox",
    "Qgs25DRendererWidget",
    "QgsExpression::NodeColumnRef",
    "QgsComposition",
    "QgsMapCanvas",
    "QgsLayerTreeEmbeddedWidgetProvider",
    "QgsPaintEffect",
    "QgsHiddenWidgetFactory",
    "QgsLinePatternFillSymbolLayerWidget",
    "QgsComposerLegendItem",
    "QgsQtLocationConnection",
    "QgsUnitSelectionWidget",
    "QgsMapTip",
    "QgsGraphBuilder",
    "QgsMarkerSymbol",
    "QgsDiagramRenderer",
    "QgsExpression::NodeLiteral",
    "QgsNewGeoPackageLayerDialog",
    "pal::InternalException::UnknownGeometry",
    "QgsVectorColorBrewerColorRamp",
    "QgsProperty",
    "QgsDoubleBoxScaleBarStyle",
    "QgsPanelWidgetWrapper",
    "QgsBusyIndicatorDialog",
    "QgsSymbolSelectorWidget",
    "QgsPieDiagram",
    "QgsInternalGeometryEngine",
    "QgsMasterPasswordResetDialog",
    "QgsVectorFileWriter::StringOption",
    "QgsComposerItem",
    "QgsSvgAnnotationItem",
    "QgsOSMElement",
    "QgsSvgCacheEntry",
    "QgsCptCityBrowserModel",
    "QgsPointLocator_VisitorNearestVertex",
    "QgsCheckboxWidgetWrapper",
    "QgsAuthServersEditor",
    "QgsGradientFillSymbolLayerWidget",
    "QgsNetworkDiskCache",
    "QgsFillSymbolLayer",
    "QgsRendererRulePropsDialog",
    "QgsSymbolLevelsDialog",
    "QWebPage",
    "QgsPointLocator_VisitorNearestEdge",
    "QgsStringStatisticalSummary",
    "QgsLabelComponent",
    "QgsDateTimeEditFactory",
    "CharacterWidget",
    "QgsAdvancedDigitizingDockWidget",
    "QgsColorEffectWidget",
    "QgsCsException",
    "QgsMapLayerStyleManager",
    "QgsZipItem",
    "QgsComposerMultiFrameCommand",
    "QgsAttributeEditorField",
    "QgsMessageOutput",
    "QgsSQLStatement::NodeCast",
    "QgsCoordinateReferenceSystem",
    "QgsLayerTreeModelLegendNode",
    "QgsScaleVisibilityDialog",
    "QgsTransactionGroup",
    "QgsDistanceArea",
    "QgsLayerTreeGroup",
    "QgsMapToolPan",
    "QgsVectorLayerFeatureIterator",
    "QgsDxfPaintEngine",
    "QgsSvgMarkerSymbolLayer",
    "QgsCategorizedSymbolRendererWidget",
    "QgsAuthConfigSslServer",
    "QgsBrightnessContrastFilter",
    "QgsMultiRenderChecker",
    "QgsLabelSorter",
    "QgsComposerShape",
    "QgsLayerItem",
    "QgsAuthCertEditors",
    "QgsMapRendererParallelJob",
    "QgsRasterCalculator",
    "QgsStyle",
    "QgsLegacyHelpers",
    "QgsRuleBasedRenderer",
    "QgsDrawSourceEffect",
    "QgsLayerTreeLayer",
    "pal::PalException::NotImplemented",
    "QgsDxfLabelProvider",
    "QgsCptCitySelectionItem",
    "QgsRendererCategory",
    "QgsDataItem",
    "QgsComposerAttributeTableColumnModelV2",
    "QgsSQLStatement::NodeSelect",
    "QgsSymbolsListWidget",
    "QgsGraduatedSymbolRenderer",
    "QgsRasterPyramid",
    "QgsAnimatedIcon",
    "QgsDataItemProvider",
    "QgsScaleRangeWidget",
    "QgsEncodingFileDialog",
    "QgsProviderCountCalcEvent",
    "QgsDxfPaintDevice",
    "QgsRasterInterface",
    "QgsExternalResourceWidgetFactory",
    "QgsAuthIdentitiesEditor",
    "QgsValueMapConfigDlg",
    "QgsAbstractFeatureIteratorFromSource",
    "QgsDirectoryItem",
    "QgsRelationEditorWidget",
    "QgsColorBrewerPalette",
    "QgsTextDiagram",
    "QgsRuleBasedRendererModel",
    "QgsExpression::NodeFunction",
    "QgsAuthCertInfoDialog",
    "QgsColorWidgetWrapper",
    "QgsPenStyleComboBox",
    "QgsNewHttpConnection",
    "QgsExpression::NodeCondition",
    "QgsValueMapWidgetWrapper",
    "QgsZonalStatistics",
    "QgsRasterRange",
    "QgsSymbol",
    "QgsLegendInterface",
    "QgsTopologyPreservingSimplifier",
    "QgsInterpolator",
    "QgsVirtualLayerDefinition::SourceLayer",
    "QgsScaleComboBox",
    "QgsMessageLogConsole",
    "QgsMessageOutputConsole",
    "QgsImageOperation",
    "QgsRasterIdentifyResult",
    "QgsMapThemeCollection::PresetRecord",
    "QgsRunProcess",
    "QgsRasterIterator",
    "QgsExpression::StaticFunction",
    "QgsProjectionSelector",
    "QgsProjectVersion",
    "QgsFieldValidator",
    "QgsRasterFillSymbolLayerWidget",
    "QgsUserInputDockWidget",
    "QgsLabelAttributes",
    "QgsAuthCertUtils",
    "QgsSymbolLayerRegistry",
    "QgsSlider",
    "QgsColorSchemeList",
    "QgsLayerTreeModel",
    "QgsCategorizedSymbolRenderer",
    "QgsLinearlyInterpolatedDiagramRenderer",
    "QgsOwsConnection",
    "QgsGenericProjectionSelector",
    "QgsColorSchemeModel",
    "QgsBlurWidget",
    "QgsSQLStatement::NodeColumnSorted",
    "QgsNewMemoryLayerDialog",
    "QgsFileDropEdit",
    "QgsRelationReferenceConfigDlg",
    "QgsComposerMergeCommand",
    "QgsApplication",
    "QgsDateTimeEdit",
    "QgsDxfRuleBasedLabelProvider",
    "Line3D",
    "QgsAuthGuiUtils",
    "QgsVectorLayerRendererInterruptionChecker",
    "pal::Problem",
    "QgsComposerStyleItem",
    "QgsValueMapSearchWidgetWrapper",
    "QgsPalettedRasterRenderer",
    "QgsArcProperter",
    "QgsWkbException",
    "QgsRuntimeProfiler",
    "QgsSvgSelectorGroupsModel",
    "QgsSingleBandGrayRenderer",
    "QgsCubicRasterResampler",
    "QgsEllipseSymbolLayerWidget",
    "QgsField",
    "QgsCheckBoxConfigDlg",
    "ParametricLine",
    "QgsDiagram",
    "QgsCredentialsConsole",
    "QgsLogger",
    "QgsEllipseSymbolLayer",
    "QgsMapServiceException",
    "pal::Feats",
    "QgsArrowSymbolLayerWidget",
    "QgsVectorLayerEditBuffer",
    "QgsEditFormConfig",
    "QgsEnumerationWidgetFactory",
    "QgsGraphVertex",
    "QgsVector",
    "QgsComposerTable",
    "QgsDataDefinedWidthDialog",
    "QgsRectangle",
    "QgsSQLStatement::Node",
    "QgsTransaction",
    "QgsComposerTableColumn",
    "QgsMarkerLineSymbolLayerWidget",
    "QgsHttpTransaction",
    "QgsImageFillSymbolLayer",
    "QgsBilinearRasterResampler",
    "QgsNetworkReplyParser",
    "QgsRenderChecker",
    "QgsFeatureIterator",
    "pal::LabelPosition",
    "QgsExpressionSelectionDialog",
    "QgsAuthImportCertDialog",
    "QgsSymbolLayerWidget",
    "QgsColorDialog",
    "QgsLabelPosition",
    "QgsDiagramLayerSettings",
    "QgsAnnotationItem",
    "QgsStyleExportImportDialog",
    "QgsHeatmapRendererWidget",
    "QgsLabelCandidate",
    "QgsLegendSettings",
    "QgsColorRampComboBox",
    "QgisPlugin",
    "QgsRasterResampleFilter",
    "QgsMapLayerStyle",
    "QgsCoordinateTransform",
    "QgsSizeScaleWidget",
    "QgsPluginLayerType",
    "QgsClipper",
    "pal::GeomFunction",
    "QgsMapToolZoom",
    "QgsLUDialog",
    "QgsScopeLogger",
    "QgsMapMouseEvent",
    "QgsMapToolEdit",
    "QgsTicksScaleBarStyle",
    "QgsFavouritesItem",
    "QgsMapRendererQImageJob",
    "QgsGraphAnalyzer",
    "QgsMessageLog",
    "QgsExpressionContext",
    "QgsSingleSymbolRenderer",
    "QgsMapRenderer",
    "QgsPointLocator_Stream",
    "QgsGroupBoxCollapseButton",
    "QgsCredentials",
    "QgsSnappingUtils",
    "QgsDistanceArcProperter",
    "QgsVectorLayerEditUtils",
    "QgsHistogramDiagram",
    "QgsVectorFileWriter",
    "QgsDateTimeEditConfig",
    "QgsUuidWidgetFactory",
    "QgsMapRendererSequentialJob",
    "QgsLinePatternFillSymbolLayer",
    "QgsPointSample",
    "QgsObjectCustomProperties",
    "QgsRelationReferenceWidgetWrapper",
    "QgsMapToolAdvancedDigitizing",
    "QgsValueRelationWidgetFactory",
    "QgsGraduatedSymbolRendererWidget",
    "QgsComposerItemCommand",
    "QgsBrowserWatcher",
    "pal::PalException",
    "QgsIndexedFeature",
    "QgsPropertyKey",
    "QgsMessageViewer",
    "QgsShadowEffectWidget",
    "QgsEditorWidgetRegistry",
    "QgsColorSwatchGrid",
    "QgsDataDefinedValueDialog",
    "QgsGmlStreamingParser::LayerProperties",
    "QgsSmartGroupCondition",
    "QgsAuthEditorWidgets",
    "QgsFileWidget",
    "QgsFeatureModel",
    "QgsExpressionItemSearchProxy",
    "QgsRuleBasedLabeling",
    "QgsRendererMetadata",
    "QgsExpressionBuilderWidget",
    "pal::Layer",
    "QgsSingleBandColorDataRenderer",
    "QgsMapCanvasItem",
    "QgsGmlFeatureClass",
    "QgsVectorFieldSymbolLayer",
    "HalfEdge",
    "QgsAuthMethodRegistry",
    "QgsGraph",
    "QgsSingleBoxScaleBarStyle",
    "pal::InternalException",
    "QgsPalLayerSettings",
    "TriangleInterpolator",
    "QgsCptCityColorRampItem",
    "QgsPaintEngineHack",
    "QgsLineVectorLayerDirector",
    "QgsSVGFillSymbolLayerWidget",
    "QgsGradientStop",
    "QgsDualView",
    "QgsSpatialIndexCopyVisitor",
    "QgsVectorGradientColorRamp",
    "QgsNMEAConnection",
    "QgsAttributeDialog",
    "QgsProjectBadLayerGuiHandler",
    "QgsComposerMouseHandles",
    "QgsAuthCertManager",
    "QgsGeometryCache",
    "QgsAccessControlFilter",
    "QgsVectorDataProvider",
    "QgsSQLStatement::RecursiveVisitor",
    "QgsAuthTrustedCAsDialog",
    "pal::FeaturePart",
    "QgsError",
    "QgsComposerTableSortColumnsProxyModel",
    "QgsRasterHistogramWidget",
    "QgsOSMNodeIterator",
    "pal::PalException::LayerExists",
    "QgsMessageBar",
    "QgsUuidWidgetWrapper",
    "QgsExpression::Node",
    "QgsUniqueValuesWidgetWrapper",
    "pal::InternalException::Empty",
    "QgisVisitor",
    "QgsVectorLayerUndoCommandChangeAttribute",
    "QgsFormAnnotationItem",
    "QgsConnectionPool",
    "QgsGpsConnectionRegistry",
    "QgsExpression::NodeBinaryOperator",
    "QgsAttributeEditor",
    "QgsSingleBandPseudoColorRendererWidget",
    "QgsVectorLayerUndoCommandChangeGeometry",
    "QgsGenericFeatureSelectionManager",
    "QgsExternalResourceConfigDlg",
    "QgsWkbPtr",
    "QgsSymbolLayerMetadata",
    "QgsDiagramInterpolationSettings",
    "QgsRasterBandStats",
    "QgsSQLStatement::NodeBinaryOperator",
    "QgsPanelWidget",
    "QgsRasterResampler",
    "QgsRelationManager",
    "QgsGeometry::Error",
    "QgsMessageLogViewer",
    "QgsVectorLayer",
    "QgsSpatialIndexData",
    "QgsComposerRuler",
    "QgsPointPatternFillSymbolLayerWidget",
    "QgsCptCityDataItem",
    "QgsRandomColors",
    "QgsGeometryAnalyzer",
    "QgsVectorRandomColorRamp",
    "QgsComposerMultiFrame",
    "QgsValueMapWidgetFactory",
    "QgsXmlUtils",
    "QgsAlignRaster",
    "QgsVectorFileWriter::BoolOption",
    "QgsPalLabeling",
    "QgsRendererPropertiesDialog",
    "QgsMessageBarItem",
    "QgsRasterMinMaxWidget",
    "QgsServerFilter",
    "QgsVectorRandomColorRampDialog",
    "QgsAttributeTableAction",
    "QgsMarkerLineSymbolLayer",
    "QgsHtmlAnnotationItem",
    "QgsVectorLayerFeatureSource",
    "QgsAtlasComposition",
    "QgsSQLComposerDialog::TableSelectedCallback",
    "QgsConnectionPoolGroup",
    "QgisInterface",
    "QgsFillSymbol",
    "QgsPointPatternFillSymbolLayer",
    "QgsComposerMap",
    "QgsSourceSelectItemDelegate",
    "LinTriangleInterpolator",
    "QgsDefaultSearchWidgetWrapper",
    "QgsAttributes",
    "QgsSQLStatementCollectTableNames",
    "QgsComposerSymbolItem",
    "QgsLayerDefinition::DependencySorter",
    "QWebFrame",
    "QgsRaster",
    "QgsDial",
    "QgsRubberBand",
    "QgsConditionalLayerStyles",
    "QgsCacheIndexFeatureId",
    "QgsCptCityColorRampDialog",
    "QgsFilterLineEdit",
    "QgsNetworkContentFetcher",
    "QgsComposerPicture",
    "QgsExpression::WhenThen",
    "QgsSimpleLineSymbolLayer",
    "QgsClipToMinMaxEnhancement",
    "QgsAddRemoveMultiFrameCommand",
    "QgsFields",
    "QgsGeometryGeneratorSymbolLayerWidget",
    "pal::PalException::UnknownLayer",
    "QgsAttributeTableModel",
    "QgsMapLayerRenderer",
    "QgsDetailedItemData",
    "QgsMapTool",
    "QgsVectorLayerUndoCommand",
    "QgsComposerMultiFrameMergeCommand",
    "QgsRequestHandler",
    "QgsAddRemoveItemCommand",
    "QgsAuthManager",
    "QgsGroupUngroupItemsCommand",
    "QgsPaperItem",
    "QgsSQLStatement::NodeList",
    "QgsSvgCache",
    "QgsDatumTransformDialog",
    "QgsFieldExpressionWidget",
    "QgsCollapsibleGroupBoxBasic",
    "QgsHiddenWidgetWrapper",
    "QgsCredentialDialog",
    "QgsScaleCalculator",
    "QgsVectorFileWriter::HiddenOption",
    "QgsVectorLayerEditPassthrough",
    "QgsSymbolLevelItem",
    "QgsVertexMarker",
    "QgsRasterFormatSaveOptionsWidget",
    "QgsValueRelationWidgetWrapper",
    "QgsComposerAttributeTableCompare",
    "QgsRuggednessFilter",
    "QgsTrackedVectorLayerTools",
    "QgsLayerTreeMapCanvasBridge",
    "QgsMapToolCapture",
    "QgsSearchWidgetWrapper",
    "QgsDetailedItemDelegate",
    "QgsRasterMatrix",
    "QgsGpsConnection",
    "QgsMapCanvasTracer",
    "QgsComposerAttributeTable",
    "QgsSQLStatement::NodeSelectedColumn",
    "QgsGeos",
    "QgsRelationWidgetWrapper",
    "QgsColorButton",
    "QgsSvgSelectorListModel",
    "QgsGmlSchema",
    "QgsErrorDialog",
    "QgsComposerTableSortColumnsProxyModelV2",
    "QgsSourceSelectDialog",
    "QgsMapToPixel",
    "Vector3D",
    "QgsGeometryGeneratorSymbolLayer",
    "QgsAttributeEditorContainer",
    "pal::PriorityQueue",
    "QgsLegendStyle",
    "QgsRasterDataProvider",
    "QgsProjectBadLayerDefaultHandler",
    "QgsPropertyValue",
    "QgsVectorFileWriter::IntOption",
    "QgsRasterBlock",
    "QgsRasterNuller",
    "QgsPythonRunner",
    "QgsSQLStatement::NodeFunction",
    "QgsSQLStatement::NodeUnaryOperator",
    "QgsSearchQueryBuilder",
    "QgsOSMWayIterator",
    "QgsVirtualLayerDefinitionUtils",
    "QgsAttributeForm",
    "QgsVirtualLayerDefinition",
    "QgsAspectFilter",
    "QgsManageConnectionsDialog",
    "QgsRasterTransparency",
    "QgsCapabilitiesCache",
    "Qgs25DRenderer",
    "QgsMapCanvasRendererSync",
    "QgsRendererRasterPropertiesWidget",
    "QgsVectorLayerCache",
    "QgsAuthConfigUriEdit",
    "QgsProjectFileTransform",
    "QgsFontUtils",
    "QgsLayerTreeRegistryBridge",
    "QgsPreviewEffect",
    "QgsRasterHistogram",
    "QgsComposerEffect",
    "QgsRuleBasedLabelProvider",
    "QgsRangeConfigDlg",
    "QgsShapeburstFillSymbolLayerWidget",
    "QgsSimpleFillSymbolLayer",
    "QgsVectorFieldSymbolLayerWidget",
    "QgsClassificationWidgetWrapper",
    "CloughTocherInterpolator",
    "QgsUniqueValuesConfigDlg",
    "QgsLabelingEngine",
    "QgsMimeDataUtils",
    "QgsTransformWidget",
    "QgsExpression::NodeUnaryOperator",
    "QgsStyleManagerDialog",
    "QgsAttributeTableView",
    "QgsAttributeFormInterface",
    "QgsFeatureStore",
    "QgsExternalResourceWidgetWrapper",
    "TriDecorator",
    "QgsAuthMethodEdit",
    "QgsRelation::FieldPair",
    "QgsTINInterpolator",
    "QgsScaleWidget",
    "QgsDataItemProviderFromPlugin",
    "QgsExpression::Visitor",
    "QgsGeometryRubberBand",
    "QgsComposerAttributeTableV2",
    "QgsCentroidFillSymbolLayerWidget",
    "QgsRendererWidget",
    "QgsComposerLabel",
    "QgsRuleBasedRenderer::Rule",
    "QgsMapRendererCache",
    "QgsDataSourceUri",
    "QgsVectorLayerSelectionManager",
    "QgsPaperGrid",
    "QgsDashSpaceDialog",
    "QgsPointDisplacementRenderer",
    "QgsSmartGroupEditorDialog",
    "QgsMapToolTouch",
    "QgsComposerRasterSymbolItem",
    "QgsComposerAttributeTableColumnModel",
    "QgsGml",
    "QgsAuthAuthoritiesEditor",
    "QgsHueSaturationFilter",
    "QgsRelationReferenceWidget",
    "QWebSettings",
    "QgsPointLocator_DumpTree",
    "QgsProviderRegistry",
    "QgsProviderMetadata",
    "QgsApplyDialog",
    "QgsRuleBasedLabeling::Rule",
    "QgsActionMenu",
    "QgsAuthMethod",
    "QgsDartMeasurement",
    "QgsExpressionNode::NodeList",
    "QgsExpressionFunction",
    "QgsExpressionNode",
    "QgsExpressionNodeBinaryOperator",
    "QgsExpressionNodeColumnRef",
    "QgsExpressionNodeCondition",
    "QgsExpressionNodeFunction",
    "QgsExpressionNodeInOperator",
    "QgsExpressionNodeLiteral",
    "QgsExpressionNodeUnaryOperator",
    "QgsStaticExpressionFunction",
    "QgsExpressionNodeCondition::WhenThen",
    "QgsGeometryChecker",
    "QgsGeometryGapCheckError",
    "QgsGeometrySelfIntersectionCheckError",
    "QgsGeometryDangleCheck",
    "QgsGeometryCheckerUtils::LayerFeatures::iterator",
    "QgsGeometrySelfIntersectionCheck",
    "QgsGeometryDegeneratePolygonCheck",
    "QgsGeometryIsValidCheckError",
    "QgsGeometryPointInPolygonCheck",
    "QgsGeometryDuplicateCheckError",
    "QgsGeometryOverlapCheckError",
    "QgsGeometryFollowBoundariesCheck",
    "QgsGeometryContainedCheck",
    "QgsGeometryDuplicateNodesCheck",
    "QgsGeometrySegmentLengthCheck",
    "QgsGeometryGapCheck",
    "QgsGeometryCheckFactoryT",
    "QgsGeometryHoleCheck",
    "QgsGeometryDuplicateCheck",
    "QgsGeometryCheckerUtils::LayerFeature",
    "QgsGeometryCheckerUtils::LayerFeatures",
    "QgsGeometrySelfContactCheck",
    "QgsGeometryOverlapCheck",
    "QgsGeometryAngleCheck",
    "QgsGeometryPointCoveredByLineCheck",
    "QgsGeometryLineIntersectionCheck",
    "QgsGeometryTypeCheckError",
    "QgsGeometryLineLayerIntersectionCheck",
    "QgsGeometrySliverPolygonCheck",
    "QgsGeometryContainedCheckError",
    "QgsGeometryAreaCheck",
    "QgsGeometryTypeCheck",
    "QgsGeometryMultipartCheck"
]
# Classes that are currently allowed to lack a "brief" description in the API
# documentation.  Remove an entry from this list once its docs are completed.
ACCEPTABLE_MISSING_BRIEF = [
    "QgsBrushStyleComboBox",
    "QgsHiddenWidgetFactory",
    "QgsLabelCandidate",
    "QgsIDWInterpolator",
    "QgsFeatureSelectionModel",
    "QgsColorWidgetFactory",
    "QgsFieldValidator",
    "QgsPointCompare",
    "QgsSvgSelectorWidget",
    "QgsGeometryGeneratorSymbolLayer",
    "QgsSurface",
    "QgsSvgAnnotationItem",
    "QgsCptCityColorRampDialog",
    "QgsRangeConfigDlg",
    "QgsAttributeFormInterface",
    "QgsExpression::NodeUnaryOperator",
    "QgsSymbolLayerWidget",
    "pal::PriorityQueue",
    "QgsVectorLayerEditUtils",
    "QgsArcProperter",
    "QgsSimpleMarkerSymbolLayerWidget",
    "QgsBrowserWatcher",
    "QgsRandomColors",
    "QgsVectorLayerEditPassthrough",
    "QgsDial",
    "QgsVectorColorBrewerColorRampDialog",
    "QgsFontMarkerSymbolLayerWidget",
    "QgsSymbolLevelItem",
    "QgsGroupBoxCollapseButton",
    "QgsVectorFieldSymbolLayerWidget",
    "QgsCentroidFillSymbolLayerWidget",
    "QgsEnumerationWidgetWrapper",
    "QgsError",
    "QgsRendererRangeLabelFormat",
    "QgsMimeDataUtils",
    "QgsRangeWidgetFactory",
    "QgsCptCityArchive",
    "QgsRasterRendererWidget",
    "QgsGmlSchema",
    "HalfEdge",
    "QgsDateTimeEditFactory",
    "QgsVectorFileWriter::BoolOption",
    "QgsRasterFillSymbolLayerWidget",
    "QgsVectorRandomColorRampDialog",
    "QgsSymbolRenderContext",
    "QgsErrorDialog",
    "QgsExpressionHighlighter",
    "QgsExpression::NodeLiteral",
    "pal::CostCalculator",
    "QgsFillSymbolLayer",
    "QgsMultiBandColorRendererWidget",
    "QgsRuleBasedLabeling::Rule",
    "QgsSpatialIndexCopyVisitor",
    "QgsSVGFillSymbolLayerWidget",
    "QgsDataDefinedWidthDialog",
    "QgsShapeburstFillSymbolLayer",
    "QgsLegacyHelpers",
    "QgsLineSymbolLayer",
    "QgsWkbPtr",
    "QgsSymbolLayer",
    "QgsVectorFileWriter::StringOption",
    "QgsSymbolLevelsDialog",
    "QgsPenJoinStyleComboBox",
    "QgsValueRelationWidgetFactory",
    "QgsGlowWidget",
    "QgsDummyConfigDlg",
    "QgsExpression::NodeFunction",
    "QgsSvgSelectorGroupsModel",
    "QgsAttributeTypeLoadDialog",
    "QgsDirectoryParamWidget",
    "QgsCategorizedSymbolRenderer",
    "QgsQtLocationConnection",
    "QgsPropertyKey",
    "QgsRuntimeProfiler",
    "QgsVectorFileWriter::Option",
    "QgsSymbol",
    "QgsRendererRange",
    "QgsRasterCalcNode",
    "QgsMessageBarItem",
    "QgsVectorFileWriter::SetOption",
    "QgsCacheIndexFeatureId",
    "QgsRasterProjector",
    "QgsPropertyValue",
    "QgsAttributeTableFilterModel",
    "QgsSingleSymbolRendererWidget",
    "QgsValueMapConfigDlg",
    "QgsSmartGroupCondition",
    "QgsMarkerLineSymbolLayerWidget",
    "QgsExpression::NodeList",
    "QgsSymbolSelectorDialog",
    "QgsPalLayerSettings",
    "QgsTextEditConfigDlg",
    "QgsWkbException",
    "QgsSingleBandPseudoColorRendererWidget",
    "QgsRuleBasedLabeling",
    "QgsDxfExport",
    "pal::GeomFunction",
    "QgsRasterLayerSaveAsDialog",
    "QgsStyle",
    "QgsSizeScaleWidget",
    "QgsSymbolsListWidget",
    "QgsFontMarkerSymbolLayer",
    "QgsLUDialog",
    "QgsLegendInterface",
    "QgsSublayersDialog",
    "QgsDrawSourceWidget",
    "QgsSingleBandGrayRendererWidget",
    "QgsRelationEditorWidget",
    "QgsFeatureSelectionDlg",
    "QgsDataDefinedRotationDialog",
    "QgsRendererPropertiesDialog",
    "QgsDistanceArcProperter",
    "QgsComposerLayerItem",
    "QgsRelationReferenceFactory",
    "QgsLongLongValidator",
    "QgsExpression::WhenThen",
    "QgsVectorFileWriter::IntOption",
    "QgsUniqueValueWidgetFactory",
    "QgsRelationReferenceWidget",
    "QgsSLConnect",
    "pal::LabelPosition",
    "Node",
    "QgsRendererRulePropsDialog",
    "Qgs25DRendererWidget",
    "QgsPalLabeling",
    "QgsTextDiagram",
    "QgsMapToolCapture",
    "QgsConstWkbSimplifierPtr",
    "QgsTextEditWidgetFactory",
    "QgsNewVectorLayerDialog",
    "QgsLogger",
    "CharacterWidget",
    "QgsPointDisplacementRendererWidget",
    "QgsProjectFileTransform",
    "QgsExpression::NodeInOperator",
    "QgsLocaleNumC",
    "QgsDatumTransformDialog",
    "QgsColorRampComboBox",
    "QgsGeometryValidator",
    "QgsValueRelationConfigDlg",
    "QgsComposerSymbolItem",
    "QgsScaleRangeWidget",
    "QgsPieDiagram",
    "QgsVectorGradientColorRampDialog",
    "QgsPluginManagerInterface",
    "QgsAttributeTableMapLayerAction",
    "QgsConstWkbPtr",
    "QgsStyleExportImportDialog",
    "QgsBrowserModel",
    "QgsUniqueValuesConfigDlg",
    "QgsStyleGroupSelectionDialog",
    "QgsScaleVisibilityDialog",
    "QgsSpatialIndex",
    "QgsFeatureModel",
    "QgsSvgMarkerSymbolLayerWidget",
    "QgsFeatureListModel",
    "QgsDataDefinedSizeDialog",
    "QgsColorEffectWidget",
    "QgsComposerStyleItem",
    "QgsWebPage",
    "QgsRelationReferenceConfigDlg",
    "QgsVectorLayerEditBuffer",
    "QgsGraduatedSymbolRendererWidget",
    "QgsSimpleLineSymbolLayer",
    "QgsSingleSymbolRenderer",
    "QgsComposerHtml",
    "QgisInterface",
    "QgsRuleBasedLabelProvider",
    "QgsPointPatternFillSymbolLayer",
    "QgsGradientFillSymbolLayer",
    "QgsLinearlyInterpolatedDiagramRenderer",
    "QgsGradientFillSymbolLayerWidget",
    "QgsSlider",
    "QgsPointPatternFillSymbolLayerWidget",
    "QgsAttributeForm",
    "pal::Sol",
    "QgsCptCityColorRamp",
    "QgsComposerMultiFrameCommand",
    "QgsSimpleLineSymbolLayerWidget",
    "QgsValueMapWidgetFactory",
    "QgsRelation",
    "QgsInvertedPolygonRenderer",
    "QgsExpression::Node",
    "QgsTransformWidget",
    "QgsGroupWmsDataDialog",
    "QgsColorBrewerPalette",
    "LinTriangleInterpolator",
    "QgsFontUtils",
    "QgsDxfPaintEngine",
    "QgsPenStyleComboBox",
    "QgsRendererRulePropsWidget",
    "QgsSimpleFillSymbolLayer",
    "QgsExpression::NodeCondition",
    "QgsClassificationWidgetWrapperFactory",
    "QgsClassificationWidgetWrapper",
    "QgsErrorMessage",
    "QgsRelationWidgetWrapper",
    "Qgs25DRenderer",
    "QgsTrackedVectorLayerTools",
    "QgsSymbolLayerUtils",
    "QgsComposerRasterSymbolItem",
    "QgsPoint",
    "QgsGeometryGeneratorSymbolLayerWidget",
    "QgsVectorLayerFeatureIterator",
    "QgsFeatureRenderer",
    "QgsRasterMinMaxWidget",
    "QgsDateTimeEditConfig",
    "QgsSvgCacheEntry",
    "QgsShapeburstFillSymbolLayerWidget",
    "QgsMapLayerConfigWidgetFactory",
    "QgsManageConnectionsDialog",
    "QgsSvgSelectorListModel",
    "QgsMarkerLineSymbolLayer",
    "QgsScopeLogger",
    "QgsExpression::NodeColumnRef",
    "QgsCheckBoxConfigDlg",
    "QgsDockWidget",
    "QgsUuidWidgetFactory",
    "QgsFeatureListViewDelegate",
    "QgsOfflineEditing",
    "QgsLabelPosition",
    "QgsEnumerationWidgetFactory",
    "QgsLinePatternFillSymbolLayerWidget",
    "QgsSvgSelectorDialog",
    "QgsGeometryCache",
    "QgsRuleBasedRendererWidget",
    "QgsScaleUtils",
    "QgsMarkerSymbol",
    "QgsPalettedRendererWidget",
    "QgsPenCapStyleComboBox",
    "QgsVectorFileWriter::HiddenOption",
    "QgsExternalResourceWidgetFactory",
    "QgsComposerGroupItem",
    "QgsAttributeTableAction",
    "QgsEditFormConfig",
    "QgsCategorizedSymbolRendererWidget",
    "QgsNewMemoryLayerDialog",
    "QgsEllipseSymbolLayerWidget",
    "QgsExpression::NodeBinaryOperator",
    "QgsCentroidFillSymbolLayer",
    "DualEdgeTriangulation",
    "QgsLineSymbol",
    "QgsHillshadeFilter",
    "QgsServerInterface",
    "QgsLayerPropertiesWidget",
    "QgsLinePatternFillSymbolLayer",
    "QgsAttributeDialog",
    "QgsGeometry::Error",
    "QgsRasterMatrix",
    "QgsComposerEffect",
    "QgsArrowSymbolLayerWidget",
    "QgsFillSymbol",
    "QgsVectorLayerSelectionManager",
    "pal::PointSet",
    "QgsSimpleFillSymbolLayerWidget",
    "ParametricLine",
    "QgsGraduatedSymbolRenderer",
    "QgsExternalResourceConfigDlg",
    "QgsHistogramDiagram",
    "QgsBlurWidget",
    "QgsShadowEffectWidget",
    "QgsRendererRasterPropertiesWidget",
    "QgsVectorColorBrewerColorRamp",
    "QgsTransactionGroup",
    "pal::Util",
    "QgsDartMeasurement",
    "QgsSvgMarkerSymbolLayer",
    "QgsAlignRaster",
    "QgsCheckboxWidgetFactory",
    "QgsAddRemoveMultiFrameCommand",
    "QgsCptCityBrowserModel",
    "QgsSmartGroupEditorDialog",
    "QgsHeatmapRendererWidget",
    "QgsStyleManagerDialog",
    "QgsGeometrySelfIntersectionCheckError",
    "QgsGeometryDangleCheck",
    "QgsGeometrySelfIntersectionCheck",
    "QgsGeometryDegeneratePolygonCheck",
    "QgsGeometryPointInPolygonCheck",
    "QgsGeometryDuplicateCheckError",
    "QgsGeometryFollowBoundariesCheck",
    "QgsGeometryContainedCheck",
    "QgsGeometryDuplicateNodesCheck",
    "QgsGeometrySegmentLengthCheck",
    "QgsGeometryHoleCheck",
    "QgsGeometryDuplicateCheck",
    "QgsGeometrySelfContactCheck",
    "QgsGeometryAngleCheck",
    "QgsGeometryPointCoveredByLineCheck",
    "QgsGeometryLineIntersectionCheck",
    "QgsGeometryTypeCheckError",
    "QgsGeometryLineLayerIntersectionCheck",
    "QgsGeometrySliverPolygonCheck",
    "QgsGeometryContainedCheckError",
    "QgsGeometryAreaCheck",
    "QgsGeometryTypeCheck",
    "QgsGeometryMultipartCheck",
]
if __name__ == '__main__':
    # Dump the mapping with sorted keys and sorted value lists so the output
    # is stable and can be pasted back into this file verbatim.
    for key in sorted(ACCEPTABLE_MISSING_DOCS):
        print(' "{}": {},'.format(key, sorted(ACCEPTABLE_MISSING_DOCS[key])))
|
hottwaj/django | refs/heads/master | django/db/backends/oracle/features.py | 356 | from django.db.backends.base.features import BaseDatabaseFeatures
from django.db.utils import InterfaceError
# pytz is an optional dependency; its presence is recorded on the module so
# feature flags below can test for it.
try:
    import pytz
except ImportError:
    pytz = None
class DatabaseFeatures(BaseDatabaseFeatures):
    """Feature flags describing what the Oracle backend supports."""

    empty_fetchmany_value = ()
    interprets_empty_strings_as_nulls = True
    uses_savepoints = True
    has_select_for_update = True
    has_select_for_update_nowait = True
    can_return_id_from_insert = True
    allow_sliced_subqueries = False
    supports_subqueries_in_group_by = False
    supports_transactions = True
    supports_timezones = False
    # Time zone conversions need the zoneinfo database, provided by pytz.
    has_zoneinfo_database = pytz is not None
    supports_bitwise_or = False
    has_native_duration_field = True
    can_defer_constraint_checks = True
    supports_partially_nullable_unique_constraints = False
    truncates_names = True
    has_bulk_insert = True
    supports_tablespaces = True
    supports_sequence_reset = False
    can_introspect_default = False    # Pending implementation by an interested person.
    can_introspect_max_length = False
    can_introspect_time_field = False
    atomic_transactions = False
    supports_combined_alters = False
    nulls_order_largest = True
    requires_literal_defaults = True
    closed_cursor_error_class = InterfaceError
    bare_select_suffix = " FROM DUAL"
    uppercases_column_names = True
    # select for update with limit can be achieved on Oracle, but not with the current backend.
    supports_select_for_update_with_limit = False

    def introspected_boolean_field_type(self, field=None, created_separately=False):
        """
        Some versions of Oracle -- we've seen this on 11.2.0.1 and suspect
        it goes back -- have a weird bug where, when an integer column is
        added to an existing table with a default, its precision is later
        reported on introspection as 0, regardless of the real precision.
        For Django introspection, this means that such columns are reported
        as IntegerField even if they are really BigIntegerField or BooleanField.

        The bug is solved in Oracle 11.2.0.2 and up.
        """
        # NOTE(review): this is a lexicographic string comparison, not a
        # numeric version comparison -- e.g. '11.2.0.10' < '11.2.0.2' is True.
        # Presumably acceptable for the version range in question; verify
        # against the format of connection.oracle_full_version.
        if self.connection.oracle_full_version < '11.2.0.2' and field and field.has_default() and created_separately:
            return 'IntegerField'
        return super(DatabaseFeatures, self).introspected_boolean_field_type(field, created_separately)
|
dfunckt/django | refs/heads/master | tests/auth_tests/test_hashers.py | 8 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipUnless
from django.conf.global_settings import PASSWORD_HASHERS
from django.contrib.auth.hashers import (
UNUSABLE_PASSWORD_PREFIX, UNUSABLE_PASSWORD_SUFFIX_LENGTH,
BasePasswordHasher, PBKDF2PasswordHasher, PBKDF2SHA1PasswordHasher,
check_password, get_hasher, identify_hasher, is_password_usable,
make_password,
)
from django.test import SimpleTestCase, mock
from django.test.utils import override_settings
from django.utils.encoding import force_bytes
# Optional hashing backends: each of these may be absent; the corresponding
# module-level name is set to None so tests can be skipped conditionally.
try:
    import crypt
except ImportError:
    crypt = None
else:
    # On some platforms (e.g. OpenBSD), crypt.crypt() always return None.
    if crypt.crypt('', '') is None:
        crypt = None

try:
    import bcrypt
except ImportError:
    bcrypt = None

try:
    import argon2
except ImportError:
    argon2 = None
class PBKDF2SingleIterationHasher(PBKDF2PasswordHasher):
    # Deliberately minimal-work hasher used by the upgrade tests in this file.
    iterations = 1
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
class TestUtilsHashPass(SimpleTestCase):
    """Tests for django.contrib.auth.hashers: encoding, verification,
    hasher identification, and the automatic password-upgrade machinery."""

    def test_simple(self):
        encoded = make_password('lètmein')
        self.assertTrue(encoded.startswith('pbkdf2_sha256$'))
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        # Blank passwords
        blank_encoded = make_password('')
        self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    def test_pbkdf2(self):
        encoded = make_password('lètmein', 'seasalt', 'pbkdf2_sha256')
        self.assertEqual(encoded, 'pbkdf2_sha256$36000$seasalt$mEUPPFJkT/xtwDU8rB7Q+puHRZnR07WRjerTkt/3HI0=')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "pbkdf2_sha256")
        # Blank passwords
        blank_encoded = make_password('', 'seasalt', 'pbkdf2_sha256')
        self.assertTrue(blank_encoded.startswith('pbkdf2_sha256$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
    def test_sha1(self):
        encoded = make_password('lètmein', 'seasalt', 'sha1')
        self.assertEqual(encoded, 'sha1$seasalt$cff36ea83f5706ce9aa7454e63e431fc726b2dc8')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "sha1")
        # Blank passwords
        blank_encoded = make_password('', 'seasalt', 'sha1')
        self.assertTrue(blank_encoded.startswith('sha1$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.MD5PasswordHasher'])
    def test_md5(self):
        encoded = make_password('lètmein', 'seasalt', 'md5')
        self.assertEqual(encoded, 'md5$seasalt$3f86d0d3d465b7b458c231bf3555c0e3')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "md5")
        # Blank passwords
        blank_encoded = make_password('', 'seasalt', 'md5')
        self.assertTrue(blank_encoded.startswith('md5$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.UnsaltedMD5PasswordHasher'])
    def test_unsalted_md5(self):
        encoded = make_password('lètmein', '', 'unsalted_md5')
        self.assertEqual(encoded, '88a434c88cca4e900f7874cd98123f43')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_md5")
        # Alternate unsalted syntax
        alt_encoded = "md5$$%s" % encoded
        self.assertTrue(is_password_usable(alt_encoded))
        self.assertTrue(check_password('lètmein', alt_encoded))
        self.assertFalse(check_password('lètmeinz', alt_encoded))
        # Blank passwords
        blank_encoded = make_password('', '', 'unsalted_md5')
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher'])
    def test_unsalted_sha1(self):
        encoded = make_password('lètmein', '', 'unsalted_sha1')
        self.assertEqual(encoded, 'sha1$$6d138ca3ae545631b3abd71a4f076ce759c5700b')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "unsalted_sha1")
        # Raw SHA1 isn't acceptable
        alt_encoded = encoded[6:]
        self.assertFalse(check_password('lètmein', alt_encoded))
        # Blank passwords
        blank_encoded = make_password('', '', 'unsalted_sha1')
        self.assertTrue(blank_encoded.startswith('sha1$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @skipUnless(crypt, "no crypt module to generate password.")
    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.CryptPasswordHasher'])
    def test_crypt(self):
        encoded = make_password('lètmei', 'ab', 'crypt')
        self.assertEqual(encoded, 'crypt$$ab1Hv2Lg7ltQo')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(check_password('lètmei', encoded))
        self.assertFalse(check_password('lètmeiz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "crypt")
        # Blank passwords
        blank_encoded = make_password('', 'ab', 'crypt')
        self.assertTrue(blank_encoded.startswith('crypt$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @skipUnless(bcrypt, "bcrypt not installed")
    def test_bcrypt_sha256(self):
        encoded = make_password('lètmein', hasher='bcrypt_sha256')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(encoded.startswith('bcrypt_sha256$'))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt_sha256")
        # password truncation no longer works
        password = (
            'VSK0UYV6FFQVZ0KG88DYN9WADAADZO1CTSIVDJUNZSUML6IBX7LN7ZS3R5'
            'JGB3RGZ7VI7G7DJQ9NI8BQFSRPTG6UWTTVESA5ZPUN'
        )
        encoded = make_password(password, hasher='bcrypt_sha256')
        self.assertTrue(check_password(password, encoded))
        self.assertFalse(check_password(password[:72], encoded))
        # Blank passwords
        blank_encoded = make_password('', hasher='bcrypt_sha256')
        self.assertTrue(blank_encoded.startswith('bcrypt_sha256$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @skipUnless(bcrypt, "bcrypt not installed")
    def test_bcrypt(self):
        encoded = make_password('lètmein', hasher='bcrypt')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(encoded.startswith('bcrypt$'))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, "bcrypt")
        # Blank passwords
        blank_encoded = make_password('', hasher='bcrypt')
        self.assertTrue(blank_encoded.startswith('bcrypt$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))

    @skipUnless(bcrypt, "bcrypt not installed")
    def test_bcrypt_upgrade(self):
        """A password encoded with fewer rounds is re-encoded (via the setter
        callback) once the hasher's configured rounds increase."""
        hasher = get_hasher('bcrypt')
        self.assertEqual('bcrypt', hasher.algorithm)
        self.assertNotEqual(hasher.rounds, 4)
        old_rounds = hasher.rounds
        try:
            # Generate a password with 4 rounds.
            hasher.rounds = 4
            encoded = make_password('letmein', hasher='bcrypt')
            rounds = hasher.safe_summary(encoded)['work factor']
            self.assertEqual(rounds, '04')
            state = {'upgraded': False}

            def setter(password):
                state['upgraded'] = True

            # No upgrade is triggered.
            self.assertTrue(check_password('letmein', encoded, setter, 'bcrypt'))
            self.assertFalse(state['upgraded'])
            # Revert to the old rounds count and ...
            hasher.rounds = old_rounds
            # ... check if the password would get updated to the new count.
            self.assertTrue(check_password('letmein', encoded, setter, 'bcrypt'))
            self.assertTrue(state['upgraded'])
        finally:
            hasher.rounds = old_rounds

    @skipUnless(bcrypt, "bcrypt not installed")
    def test_bcrypt_harden_runtime(self):
        """On a wrong password, harden_runtime() re-encodes enough times to
        mask the timing difference between old and new work factors."""
        hasher = get_hasher('bcrypt')
        self.assertEqual('bcrypt', hasher.algorithm)
        with mock.patch.object(hasher, 'rounds', 4):
            encoded = make_password('letmein', hasher='bcrypt')
        with mock.patch.object(hasher, 'rounds', 6), \
                mock.patch.object(hasher, 'encode', side_effect=hasher.encode):
            hasher.harden_runtime('wrong_password', encoded)
            # Increasing rounds from 4 to 6 means an increase of 4 in workload,
            # therefore hardening should run 3 times to make the timing the
            # same (the original encode() call already ran once).
            self.assertEqual(hasher.encode.call_count, 3)
            # Get the original salt (includes the original workload factor)
            algorithm, data = encoded.split('$', 1)
            expected_call = (('wrong_password', force_bytes(data[:29])),)
            self.assertEqual(hasher.encode.call_args_list, [expected_call] * 3)

    def test_unusable(self):
        encoded = make_password(None)
        self.assertEqual(len(encoded), len(UNUSABLE_PASSWORD_PREFIX) + UNUSABLE_PASSWORD_SUFFIX_LENGTH)
        self.assertFalse(is_password_usable(encoded))
        self.assertFalse(check_password(None, encoded))
        self.assertFalse(check_password(encoded, encoded))
        self.assertFalse(check_password(UNUSABLE_PASSWORD_PREFIX, encoded))
        self.assertFalse(check_password('', encoded))
        self.assertFalse(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        with self.assertRaises(ValueError):
            identify_hasher(encoded)
        # Assert that the unusable passwords actually contain a random part.
        # This might fail one day due to a hash collision.
        self.assertNotEqual(encoded, make_password(None), "Random password collision?")

    def test_unspecified_password(self):
        """
        Makes sure specifying no plain password with a valid encoded password
        returns `False`.
        """
        self.assertFalse(check_password(None, make_password('lètmein')))

    def test_bad_algorithm(self):
        with self.assertRaises(ValueError):
            make_password('lètmein', hasher='lolcat')
        with self.assertRaises(ValueError):
            identify_hasher('lolcat$salt$hash')

    def test_bad_encoded(self):
        self.assertFalse(is_password_usable('lètmein_badencoded'))
        self.assertFalse(is_password_usable(''))

    def test_low_level_pbkdf2(self):
        hasher = PBKDF2PasswordHasher()
        encoded = hasher.encode('lètmein', 'seasalt2')
        self.assertEqual(encoded, 'pbkdf2_sha256$36000$seasalt2$QkIBVCvGmTmyjPJ5yox2y/jQB8isvgUNK98FxOU1UYo=')
        self.assertTrue(hasher.verify('lètmein', encoded))

    def test_low_level_pbkdf2_sha1(self):
        hasher = PBKDF2SHA1PasswordHasher()
        encoded = hasher.encode('lètmein', 'seasalt2')
        self.assertEqual(encoded, 'pbkdf2_sha1$36000$seasalt2$GoU+9AubJ/xRkO0WD1Xf3WPxWfE=')
        self.assertTrue(hasher.verify('lètmein', encoded))

    @override_settings(
        PASSWORD_HASHERS=[
            'django.contrib.auth.hashers.PBKDF2PasswordHasher',
            'django.contrib.auth.hashers.SHA1PasswordHasher',
            'django.contrib.auth.hashers.MD5PasswordHasher',
        ],
    )
    def test_upgrade(self):
        """Passwords stored with a non-preferred hasher are re-encoded with
        the default hasher when checked."""
        self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
        for algo in ('sha1', 'md5'):
            encoded = make_password('lètmein', hasher=algo)
            state = {'upgraded': False}

            def setter(password):
                state['upgraded'] = True
            self.assertTrue(check_password('lètmein', encoded, setter))
            self.assertTrue(state['upgraded'])

    def test_no_upgrade(self):
        encoded = make_password('lètmein')
        state = {'upgraded': False}

        def setter():
            state['upgraded'] = True
        self.assertFalse(check_password('WRONG', encoded, setter))
        self.assertFalse(state['upgraded'])

    @override_settings(
        PASSWORD_HASHERS=[
            'django.contrib.auth.hashers.PBKDF2PasswordHasher',
            'django.contrib.auth.hashers.SHA1PasswordHasher',
            'django.contrib.auth.hashers.MD5PasswordHasher',
        ],
    )
    def test_no_upgrade_on_incorrect_pass(self):
        self.assertEqual('pbkdf2_sha256', get_hasher('default').algorithm)
        for algo in ('sha1', 'md5'):
            encoded = make_password('lètmein', hasher=algo)
            state = {'upgraded': False}

            def setter():
                state['upgraded'] = True
            self.assertFalse(check_password('WRONG', encoded, setter))
            self.assertFalse(state['upgraded'])

    def test_pbkdf2_upgrade(self):
        """A PBKDF2 hash with a lower iteration count is upgraded once the
        hasher's configured iterations increase."""
        hasher = get_hasher('default')
        self.assertEqual('pbkdf2_sha256', hasher.algorithm)
        self.assertNotEqual(hasher.iterations, 1)
        old_iterations = hasher.iterations
        try:
            # Generate a password with 1 iteration.
            hasher.iterations = 1
            encoded = make_password('letmein')
            algo, iterations, salt, hash = encoded.split('$', 3)
            self.assertEqual(iterations, '1')
            state = {'upgraded': False}

            def setter(password):
                state['upgraded'] = True

            # No upgrade is triggered
            self.assertTrue(check_password('letmein', encoded, setter))
            self.assertFalse(state['upgraded'])
            # Revert to the old iteration count and ...
            hasher.iterations = old_iterations
            # ... check if the password would get updated to the new iteration count.
            self.assertTrue(check_password('letmein', encoded, setter))
            self.assertTrue(state['upgraded'])
        finally:
            hasher.iterations = old_iterations

    def test_pbkdf2_harden_runtime(self):
        """On a wrong password, harden_runtime() makes up the iteration
        difference between the stored hash and the current configuration."""
        hasher = get_hasher('default')
        self.assertEqual('pbkdf2_sha256', hasher.algorithm)
        with mock.patch.object(hasher, 'iterations', 1):
            encoded = make_password('letmein')
        with mock.patch.object(hasher, 'iterations', 6), \
                mock.patch.object(hasher, 'encode', side_effect=hasher.encode):
            hasher.harden_runtime('wrong_password', encoded)
            # Encode should get called once ...
            self.assertEqual(hasher.encode.call_count, 1)
            # ... with the original salt and 5 iterations.
            algorithm, iterations, salt, hash = encoded.split('$', 3)
            expected_call = (('wrong_password', salt, 5),)
            self.assertEqual(hasher.encode.call_args, expected_call)

    def test_pbkdf2_upgrade_new_hasher(self):
        hasher = get_hasher('default')
        self.assertEqual('pbkdf2_sha256', hasher.algorithm)
        self.assertNotEqual(hasher.iterations, 1)
        state = {'upgraded': False}

        def setter(password):
            state['upgraded'] = True

        with self.settings(PASSWORD_HASHERS=[
                'auth_tests.test_hashers.PBKDF2SingleIterationHasher']):
            encoded = make_password('letmein')
            algo, iterations, salt, hash = encoded.split('$', 3)
            self.assertEqual(iterations, '1')
            # No upgrade is triggered
            self.assertTrue(check_password('letmein', encoded, setter))
            self.assertFalse(state['upgraded'])
        # Revert to the old iteration count and check if the password would get
        # updated to the new iteration count.
        with self.settings(PASSWORD_HASHERS=[
                'django.contrib.auth.hashers.PBKDF2PasswordHasher',
                'auth_tests.test_hashers.PBKDF2SingleIterationHasher']):
            self.assertTrue(check_password('letmein', encoded, setter))
            self.assertTrue(state['upgraded'])

    def test_check_password_calls_harden_runtime(self):
        hasher = get_hasher('default')
        encoded = make_password('letmein')
        with mock.patch.object(hasher, 'harden_runtime'), \
                mock.patch.object(hasher, 'must_update', return_value=True):
            # Correct password supplied, no hardening needed
            check_password('letmein', encoded)
            self.assertEqual(hasher.harden_runtime.call_count, 0)
            # Wrong password supplied, hardening needed
            check_password('wrong_password', encoded)
            self.assertEqual(hasher.harden_runtime.call_count, 1)

    def test_load_library_no_algorithm(self):
        with self.assertRaises(ValueError) as e:
            BasePasswordHasher()._load_library()
        self.assertEqual("Hasher 'BasePasswordHasher' doesn't specify a library attribute", str(e.exception))

    def test_load_library_importerror(self):
        PlainHasher = type(str('PlainHasher'), (BasePasswordHasher,), {'algorithm': 'plain', 'library': 'plain'})
        # Python 3 adds quotes around module name
        msg = "Couldn't load 'PlainHasher' algorithm library: No module named '?plain'?"
        with self.assertRaisesRegex(ValueError, msg):
            PlainHasher()._load_library()
@skipUnless(argon2, "argon2-cffi not installed")
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
class TestUtilsHashPassArgon2(SimpleTestCase):
    """Argon2-specific hasher tests; skipped when argon2-cffi is absent."""

    def test_argon2(self):
        encoded = make_password('lètmein', hasher='argon2')
        self.assertTrue(is_password_usable(encoded))
        self.assertTrue(encoded.startswith('argon2$'))
        self.assertTrue(check_password('lètmein', encoded))
        self.assertFalse(check_password('lètmeinz', encoded))
        self.assertEqual(identify_hasher(encoded).algorithm, 'argon2')
        # Blank passwords
        blank_encoded = make_password('', hasher='argon2')
        self.assertTrue(blank_encoded.startswith('argon2$'))
        self.assertTrue(is_password_usable(blank_encoded))
        self.assertTrue(check_password('', blank_encoded))
        self.assertFalse(check_password(' ', blank_encoded))
        # Old hashes without version attribute
        encoded = (
            'argon2$argon2i$m=8,t=1,p=1$c29tZXNhbHQ$gwQOXSNhxiOxPOA0+PY10P9QFO'
            '4NAYysnqRt1GSQLE55m+2GYDt9FEjPMHhP2Cuf0nOEXXMocVrsJAtNSsKyfg'
        )
        self.assertTrue(check_password('secret', encoded))
        self.assertFalse(check_password('wrong', encoded))

    def test_argon2_upgrade(self):
        # Each tunable parameter triggers an upgrade when its configured
        # value differs from the stored hash's value.
        self._test_argon2_upgrade('time_cost', 'time cost', 1)
        self._test_argon2_upgrade('memory_cost', 'memory cost', 16)
        self._test_argon2_upgrade('parallelism', 'parallelism', 1)

    def test_argon2_version_upgrade(self):
        """A hash produced by an older argon2 version is re-encoded on a
        successful check even when the cost parameters are unchanged."""
        hasher = get_hasher('argon2')
        state = {'upgraded': False}
        encoded = (
            'argon2$argon2i$m=8,t=1,p=1$c29tZXNhbHQ$gwQOXSNhxiOxPOA0+PY10P9QFO'
            '4NAYysnqRt1GSQLE55m+2GYDt9FEjPMHhP2Cuf0nOEXXMocVrsJAtNSsKyfg'
        )

        def setter(password):
            state['upgraded'] = True

        old_m = hasher.memory_cost
        old_t = hasher.time_cost
        old_p = hasher.parallelism
        try:
            hasher.memory_cost = 8
            hasher.time_cost = 1
            hasher.parallelism = 1
            self.assertTrue(check_password('secret', encoded, setter, 'argon2'))
            self.assertTrue(state['upgraded'])
        finally:
            hasher.memory_cost = old_m
            hasher.time_cost = old_t
            hasher.parallelism = old_p

    def _test_argon2_upgrade(self, attr, summary_key, new_value):
        """Shared driver: encode with `attr` set to `new_value`, then restore
        the original value and verify an upgrade is triggered."""
        hasher = get_hasher('argon2')
        self.assertEqual('argon2', hasher.algorithm)
        self.assertNotEqual(getattr(hasher, attr), new_value)
        old_value = getattr(hasher, attr)
        try:
            # Generate hash with attr set to 1
            setattr(hasher, attr, new_value)
            encoded = make_password('letmein', hasher='argon2')
            attr_value = hasher.safe_summary(encoded)[summary_key]
            self.assertEqual(attr_value, new_value)
            state = {'upgraded': False}

            def setter(password):
                state['upgraded'] = True

            # No upgrade is triggered.
            self.assertTrue(check_password('letmein', encoded, setter, 'argon2'))
            self.assertFalse(state['upgraded'])
            # Revert to the old rounds count and ...
            setattr(hasher, attr, old_value)
            # ... check if the password would get updated to the new count.
            self.assertTrue(check_password('letmein', encoded, setter, 'argon2'))
            self.assertTrue(state['upgraded'])
        finally:
            setattr(hasher, attr, old_value)
|
blindroot/django | refs/heads/master | tests/forms_tests/field_tests/test_floatfield.py | 38 | from __future__ import unicode_literals
from django.forms import FloatField, NumberInput, ValidationError
from django.test import SimpleTestCase
from django.utils import formats, translation
from . import FormFieldAssertionsMixin
class FloatFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
def test_floatfield_1(self):
f = FloatField()
self.assertWidgetRendersTo(f, '<input step="any" type="number" name="f" id="id_f" required />')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
self.assertEqual(1.0, f.clean('1'))
self.assertIsInstance(f.clean('1'), float)
self.assertEqual(23.0, f.clean('23'))
self.assertEqual(3.1400000000000001, f.clean('3.14'))
self.assertEqual(3.1400000000000001, f.clean(3.14))
self.assertEqual(42.0, f.clean(42))
with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
f.clean('a')
self.assertEqual(1.0, f.clean('1.0 '))
self.assertEqual(1.0, f.clean(' 1.0'))
self.assertEqual(1.0, f.clean(' 1.0 '))
with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
f.clean('1.0a')
self.assertIsNone(f.max_value)
self.assertIsNone(f.min_value)
with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
f.clean('Infinity')
with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
f.clean('NaN')
with self.assertRaisesMessage(ValidationError, "'Enter a number.'"):
f.clean('-Inf')
def test_floatfield_2(self):
f = FloatField(required=False)
self.assertIsNone(f.clean(''))
self.assertIsNone(f.clean(None))
self.assertEqual(1.0, f.clean('1'))
self.assertIsNone(f.max_value)
self.assertIsNone(f.min_value)
def test_floatfield_3(self):
f = FloatField(max_value=1.5, min_value=0.5)
self.assertWidgetRendersTo(
f,
'<input step="any" name="f" min="0.5" max="1.5" type="number" id="id_f" required />',
)
with self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 1.5.'"):
f.clean('1.6')
with self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 0.5.'"):
f.clean('0.4')
self.assertEqual(1.5, f.clean('1.5'))
self.assertEqual(0.5, f.clean('0.5'))
self.assertEqual(f.max_value, 1.5)
self.assertEqual(f.min_value, 0.5)
def test_floatfield_widget_attrs(self):
f = FloatField(widget=NumberInput(attrs={'step': 0.01, 'max': 1.0, 'min': 0.0}))
self.assertWidgetRendersTo(
f,
'<input step="0.01" name="f" min="0.0" max="1.0" type="number" id="id_f" required />',
)
def test_floatfield_localized(self):
    """
    A localized FloatField's widget renders to a text input without any
    number input specific attributes.
    """
    localized_field = FloatField(localize=True)
    expected_html = '<input id="id_f" name="f" type="text" required />'
    self.assertWidgetRendersTo(localized_field, expected_html)
def test_floatfield_changed(self):
    """has_changed() compares against the (possibly localized) raw input."""
    field = FloatField()
    value = 4.35
    self.assertFalse(field.has_changed(value, '4.3500'))
    with translation.override('fr'), self.settings(USE_L10N=True):
        localized_field = FloatField(localize=True)
        localized_value = formats.localize_input(value)  # -> '4,35' in French
        self.assertFalse(localized_field.has_changed(value, localized_value))
|
willingc/pythondotorg | refs/heads/master | blogs/views.py | 10 | from django.conf import settings
from django.views.generic import TemplateView
from .models import BlogEntry, Translation, Contributor
class BlogHome(TemplateView):
    """Landing page for the blogs section.

    Shows the most recent entry prominently, the next five entries below
    it, plus all translations and contributors.
    """
    template_name = 'blogs/index.html'

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        recent = BlogEntry.objects.order_by('-pub_date')[:6]
        newest = None
        remainder = []
        if recent:
            # Newest entry is featured; the rest are listed separately.
            newest = recent[0]
            remainder = recent[1:]
        context['latest_entry'] = newest
        context['entries'] = remainder
        context['translations'] = Translation.objects.all()
        context['contributors'] = Contributor.objects.all()
        context['blog_url'] = settings.PYTHON_BLOG_URL
        return context
|
frappe/frappe | refs/heads/develop | frappe/custom/doctype/property_setter/property_setter.py | 1 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
from frappe import _
from frappe.model.document import Document
not_allowed_fieldtype_change = ['naming_series']
class PropertySetter(Document):
    """Controller for Property Setter records.

    A Property Setter stores one customised property override for a DocType
    (doctype_or_field == "DocType") or for one of its fields/rows.  Inserting
    a new setter supersedes any previous setter targeting the same property.
    """

    def autoname(self):
        # Name pattern: "<doctype>-<field|row|'main'>-<property>"; 'main' is
        # used when the override targets the DocType itself rather than a field.
        self.name = '{doctype}-{field}-{property}'.format(
            doctype = self.doc_type,
            field = self.field_name or self.row_name or 'main',
            property = self.property
        )

    def validate(self):
        self.validate_fieldtype_change()
        if self.is_new():
            # A freshly inserted setter replaces older setters for the same
            # doctype/property (and field, when one is given).
            delete_property_setter(self.doc_type, self.property, self.field_name)
        # clear cache
        frappe.clear_cache(doctype = self.doc_type)

    def validate_fieldtype_change(self):
        # Changing the fieldtype of protected fields (e.g. naming_series)
        # would break document naming, so it is rejected outright.
        if self.field_name in not_allowed_fieldtype_change and \
            self.property == 'fieldtype':
            frappe.throw(_("Field type cannot be changed for {0}").format(self.field_name))

    def get_property_list(self, dt):
        # Fields of `dt` that can carry overrides; layout-only and table
        # fieldtypes are excluded, as are unnamed fields.
        return frappe.db.get_all('DocField',
            fields=['fieldname', 'label', 'fieldtype'],
            filters={
                'parent': dt,
                'fieldtype': ['not in', ('Section Break', 'Column Break', 'HTML', 'Read Only', 'Fold') + frappe.model.table_fields],
                'fieldname': ['!=', '']
            },
            order_by='label asc',
            as_dict=1
        )

    def get_setup_data(self):
        # Data bundle consumed by the Property Setter UI: all doctype names
        # plus the overridable properties of DocType and DocField.
        return {
            'doctypes': [d[0] for d in frappe.db.sql("select name from tabDocType")],
            'dt_properties': self.get_property_list('DocType'),
            'df_properties': self.get_property_list('DocField')
        }

    def get_field_ids(self):
        # All fields of the targeted doctype (name, fieldtype, label, fieldname).
        return frappe.db.sql("select name, fieldtype, label, fieldname from tabDocField where parent=%s", self.doc_type, as_dict = 1)

    def get_defaults(self):
        # Default (un-overridden) values: the DocType row itself when no field
        # is targeted, otherwise the matching DocField row.
        if not self.field_name:
            return frappe.db.sql("select * from `tabDocType` where name=%s", self.doc_type, as_dict = 1)[0]
        else:
            return frappe.db.sql("select * from `tabDocField` where fieldname=%s and parent=%s",
                (self.field_name, self.doc_type), as_dict = 1)[0]

    def on_update(self):
        # During patches the full field validation is skipped.
        if frappe.flags.in_patch:
            self.flags.validate_fields_for_doctype = False
        if not self.flags.ignore_validate and self.flags.validate_fields_for_doctype:
            from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype
            validate_fields_for_doctype(self.doc_type)
def make_property_setter(doctype, fieldname, property, value, property_type, for_doctype = False,
    validate_fields_for_doctype=True):
    """Create and insert a Property Setter document.

    WARNING: Ignores Permissions.
    """
    target = "DocType" if for_doctype else "DocField"
    doc = {
        "doctype": "Property Setter",
        "doctype_or_field": target,
        "doc_type": doctype,
        "field_name": fieldname,
        "property": property,
        "value": value,
        "property_type": property_type
    }
    property_setter = frappe.get_doc(doc)
    property_setter.flags.ignore_permissions = True
    property_setter.flags.validate_fields_for_doctype = validate_fields_for_doctype
    property_setter.insert()
    return property_setter
def delete_property_setter(doc_type, property, field_name=None):
    """delete other property setters on this, if this is new"""
    filters = {'doc_type': doc_type, 'property': property}
    if field_name:
        filters['field_name'] = field_name
    frappe.db.delete('Property Setter', filters)
|
AndroidOpenDevelopment/android_external_chromium_org | refs/heads/lp | chrome/common/extensions/docs/server2/link_converter.py | 96 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script converts old-style <a> links to API docs to the new $ref links.
# See reference_resolver.py for more info on the format of $ref links.
import optparse
import os
import re
from docs_server_utils import SanitizeAPIName
def _ReadFile(filename):
with open(filename) as f:
return f.read()
def _WriteFile(filename, contents):
with open(filename, 'w') as f:
f.write(contents)
def _Replace(matches, filename):
title = matches.group(3)
if matches.group(2).count('#') != 1:
return '<a%shref=%s>%s</a>' % (matches.group(1),
matches.group(2),
title)
clean = (matches.group(2).replace('\\', '')
.replace("'", '')
.replace('"', '')
.replace('/', ''))
page, link = clean.split('#')
if not page:
page = '%s.html' % SanitizeAPIName(filename.rsplit(os.sep, 1)[-1])
if (not link.startswith('property-') and
not link.startswith('type-') and
not link.startswith('method-') and
not link.startswith('event-')):
return '<a%shref=%s>%s</a>' % (matches.group(1),
matches.group(2),
title)
link = re.sub('^(property|type|method|event)-', '', link).replace('-', '.')
page = page.replace('.html', '.').replace('_', '.')
if matches.group(1) == ' ':
padding = ''
else:
padding = matches.group(1)
if link in title:
return '%s$(ref:%s%s)' % (padding, page, link)
else:
return '%s$(ref:%s%s %s)' % (padding, page, link, title)
def _ConvertFile(filename, use_stdout):
    """Rewrite all <a href=...#anchor> links in |filename| as $ref links.

    The result is printed to stdout when |use_stdout| is true, otherwise the
    file is rewritten in place.
    """
    regex = re.compile(r'<a(.*?)href=(.*?)>(.*?)</a>', flags=re.DOTALL)
    contents = _ReadFile(filename)
    contents = re.sub(regex,
                      lambda m: _Replace(m, filename),
                      contents)
    # The extension.lastError API was renamed to runtime.lastError.
    contents = contents.replace('$(ref:extension.lastError)',
                                '$(ref:runtime.lastError)')
    if use_stdout:
        # BUGFIX: was the Python-2-only statement `print contents`;
        # `print(contents)` behaves identically on Python 2 and 3.
        print(contents)
    else:
        _WriteFile(filename, contents)
if __name__ == '__main__':
    parser = optparse.OptionParser(
        description='Converts <a> links to $ref links.',
        usage='usage: %prog [option] <directory>')
    parser.add_option('-f', '--file', default='',
                      help='Convert links in single file.')
    parser.add_option('-o', '--out', action='store_true', default=False,
                      help='Write to stdout.')
    # NOTE: a duplicate, unused `regex = re.compile(...)` was removed here;
    # _ConvertFile compiles its own copy of the pattern.
    opts, argv = parser.parse_args()
    if opts.file:
        # Single-file mode.
        _ConvertFile(opts.file, opts.out)
    else:
        # Directory mode: exactly one positional argument is required.
        if len(argv) != 1:
            parser.print_usage()
            exit(0)
        for root, dirs, files in os.walk(argv[0]):
            for name in files:
                _ConvertFile(os.path.join(root, name), opts.out)
|
MartinRiese/python_koans | refs/heads/master | python2/koans/about_none.py | 16 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutNil in the Ruby Koans
#
from runner.koan import *
class AboutNone(Koan):
    """Koan exercises about Python's None singleton."""

    def test_none_is_an_object(self):
        "Unlike NULL in a lot of languages"
        self.assertEqual(True, isinstance(None, object))

    def test_none_is_universal(self):
        "There is only one None"
        # Identity comparison: every occurrence of None is the same object.
        self.assertEqual(True, None is None)

    def test_what_exception_do_you_get_when_calling_nonexistent_methods(self):
        """
        What is the Exception that is thrown when you call a method that does
        not exist?

        Hint: launch python command console and try the code in the
        block below.

        Don't worry about what 'try' and 'except' do, we'll talk about
        this later
        """
        try:
            None.some_method_none_does_not_know_about()
        except Exception as ex:
            # What exception has been caught?
            #
            # Need a recap on how to evaluate __class__ attributes?
            # https://github.com/gregmalcolm/python_koans/wiki/Class-Attribute
            self.assertEqual(AttributeError, ex.__class__)

            # What message was attached to the exception?
            # (HINT: replace __ with part of the error message.)
            self.assertMatch("'NoneType' object has no attribute 'some_method_none_does_not_know_about'", ex.args[0])

    def test_none_is_distinct(self):
        """
        None is distinct from other things which are False.
        """
        # NOTE(review): `is not` against literals is intentional here (teaching
        # identity vs equality) but presumably emits a SyntaxWarning on newer
        # Python 3 versions — confirm against the koans' supported versions.
        self.assertEqual(True, None is not 0)
        self.assertEqual(True, None is not False)
|
DIRACGrid/DIRAC | refs/heads/integration | src/DIRAC/Core/Utilities/ExecutorDispatcher.py | 2 | """ Used by the executors for dispatching events (IIUC)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.ReturnValues import isReturnStructure
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.FrameworkSystem.Client.MonitoringClient import MonitoringClient
from DIRAC.MonitoringSystem.Client.MonitoringReporter import MonitoringReporter
class ExecutorState(object):
    """Thread-safe bookkeeping of which executor is running which task.

    Tracks, per executor id: the executor types it serves, its task capacity
    and the set of tasks currently sent to it.  All mutating operations are
    serialised with a single lock (now held via ``with`` instead of the
    previous manual acquire/try/finally/release pattern — same behavior,
    idiomatic and exception-safe by construction).
    """

    def __init__(self, log=False):
        # Fall back to the global DIRAC logger when no logger is supplied.
        if log:
            self.__log = log
        else:
            self.__log = gLogger
        self.__lock = threading.Lock()
        self.__typeToId = {}    # executor type -> set of executor ids
        self.__maxTasks = {}    # executor id -> max concurrent tasks
        self.__execTasks = {}   # executor id -> set of task ids in execution
        self.__taskInExec = {}  # task id -> executor id

    def _internals(self):
        """Return a snapshot of the internal state (debugging aid)."""
        return {'type2id': dict(self.__typeToId),
                'maxTasks': dict(self.__maxTasks),
                'execTasks': dict(self.__execTasks),
                'tasksInExec': dict(self.__taskInExec),
                'locked': self.__lock.locked()}  # pylint: disable=no-member

    def addExecutor(self, eId, eTypes, maxTasks=1):
        """Register executor *eId* serving type(s) *eTypes* with capacity *maxTasks*."""
        with self.__lock:
            self.__maxTasks[eId] = max(1, maxTasks)
            if eId not in self.__execTasks:
                self.__execTasks[eId] = set()
            if not isinstance(eTypes, (list, tuple)):
                eTypes = [eTypes]
            for eType in eTypes:
                if eType not in self.__typeToId:
                    self.__typeToId[eType] = set()
                self.__typeToId[eType].add(eId)

    def removeExecutor(self, eId):
        """Deregister executor *eId* and return the list of tasks it was executing."""
        with self.__lock:
            tasks = []
            for eType in self.__typeToId:
                if eId in self.__typeToId[eType]:
                    self.__typeToId[eType].remove(eId)
            for taskId in self.__execTasks[eId]:
                self.__taskInExec.pop(taskId)
                tasks.append(taskId)
            self.__execTasks.pop(eId)
            self.__maxTasks.pop(eId)
            return tasks

    def getTasksForExecutor(self, eId):
        """Return a copy of the task set of *eId* (empty set if unknown)."""
        try:
            return set(self.__execTasks[eId])
        except KeyError:
            return set()

    def full(self, eId):
        """True when *eId* has no free slots (unknown executors count as full)."""
        try:
            return len(self.__execTasks[eId]) >= self.__maxTasks[eId]
        except KeyError:
            return True

    def freeSlots(self, eId):
        """Number of additional tasks *eId* can accept (0 if unknown)."""
        try:
            return self.__maxTasks[eId] - len(self.__execTasks[eId])
        except KeyError:
            return 0

    def getFreeExecutors(self, eType):
        """Map executor id -> free slots for executors of *eType* with capacity left."""
        execs = {}
        try:
            eids = self.__typeToId[eType]
        except KeyError:
            return execs
        try:
            for eid in eids:
                freeSlots = self.freeSlots(eid)
                if freeSlots:
                    execs[eid] = freeSlots
        except RuntimeError:
            # Set may mutate concurrently; a partial answer is acceptable here.
            pass
        return execs

    def getIdleExecutor(self, eType):
        """Return the executor of *eType* with the most free slots, or None."""
        idleId = None
        maxFreeSlots = 0
        try:
            for eId in self.__typeToId[eType]:
                freeSlots = self.freeSlots(eId)
                if freeSlots > maxFreeSlots:
                    maxFreeSlots = freeSlots
                    idleId = eId
        except KeyError:
            pass
        return idleId

    def addTask(self, eId, taskId):
        """Record that *taskId* was sent to *eId*; return its task count (0 on error)."""
        with self.__lock:
            try:
                self.__taskInExec[taskId] = eId
                self.__execTasks[eId].add(taskId)
                return len(self.__execTasks[eId])
            except KeyError:
                return 0

    def getExecutorOfTask(self, taskId):
        """Return the executor id running *taskId*, or None."""
        try:
            return self.__taskInExec[taskId]
        except KeyError:
            return None

    def removeTask(self, taskId, eId=None):
        """Forget *taskId* (optionally pinned to *eId*); True if it was tracked."""
        with self.__lock:
            try:
                if eId is None:
                    eId = self.__taskInExec[taskId]
                self.__execTasks[eId].remove(taskId)
                self.__taskInExec.pop(taskId)
                return True
            except KeyError:
                return False
class ExecutorQueues(object):
    """Per-executor-type FIFO queues of tasks waiting to be sent.

    A task may be queued for at most one executor type at a time.  The lock
    is now held via ``with`` blocks: behavior is unchanged, but ``popTask``
    no longer leaks the lock if an unexpected exception fires between its
    former manual acquire() and release() calls.
    """

    def __init__(self, log=False):
        # Fall back to the global DIRAC logger when no logger is supplied.
        if log:
            self.__log = log
        else:
            self.__log = gLogger
        self.__lock = threading.Lock()
        self.__queues = {}       # executor type -> list of task ids (FIFO)
        self.__lastUse = {}      # executor type -> timestamp of last activity
        self.__taskInQueue = {}  # task id -> executor type it is queued for

    def _internals(self):
        """Return a snapshot of the internal state (debugging aid)."""
        return {'queues': dict(self.__queues),
                'lastUse': dict(self.__lastUse),
                'taskInQueue': dict(self.__taskInQueue),
                'locked': self.__lock.locked()}  # pylint: disable=no-member

    def getExecutorList(self):
        """Return the executor types that have (or had) a queue."""
        return [eType for eType in self.__queues]

    def pushTask(self, eType, taskId, ahead=False):
        """Queue *taskId* for *eType*; return the queue length (0 on conflict).

        A task already queued for a different type is rejected; re-pushing to
        the same type is a no-op that returns the current queue length.
        """
        self.__log.verbose("Pushing task %s into waiting queue for executor %s" % (taskId, eType))
        with self.__lock:
            if taskId in self.__taskInQueue:
                if self.__taskInQueue[taskId] != eType:
                    errMsg = "Task %s cannot be queued because it's already queued for %s" % (taskId,
                                                                                             self.__taskInQueue[taskId])
                    self.__log.fatal(errMsg)
                    return 0
                return len(self.__queues[eType])
            if eType not in self.__queues:
                self.__queues[eType] = []
                self.__lastUse[eType] = time.time()
            if ahead:
                self.__queues[eType].insert(0, taskId)
            else:
                self.__queues[eType].append(taskId)
            self.__taskInQueue[taskId] = eType
            return len(self.__queues[eType])

    def popTask(self, eTypes):
        """Pop the oldest task for the first non-empty type in *eTypes*.

        Returns (taskId, eType) or None when all queues are empty.
        """
        if not isinstance(eTypes, (list, tuple)):
            eTypes = [eTypes]
        popped = None
        with self.__lock:
            for eType in eTypes:
                try:
                    taskId = self.__queues[eType].pop(0)
                    del self.__taskInQueue[taskId]
                except (IndexError, KeyError):
                    continue
                popped = (taskId, eType)
                break
        if popped is None:
            return None
        taskId, eType = popped
        # Bookkeeping and logging stay outside the lock (as before).
        self.__lastUse[eType] = time.time()
        self.__log.verbose("Popped task %s from executor %s waiting queue" % (taskId, eType))
        return popped

    def getState(self):
        """Return a copy of every queue: executor type -> list of task ids."""
        with self.__lock:
            qInfo = {}
            for qName in self.__queues:
                qInfo[qName] = list(self.__queues[qName])
        return qInfo

    def deleteTask(self, taskId):
        """Remove *taskId* from whatever queue holds it; True on success."""
        self.__log.verbose("Deleting task %s from waiting queues" % taskId)
        with self.__lock:
            try:
                eType = self.__taskInQueue[taskId]
                del self.__taskInQueue[taskId]
                self.__lastUse[eType] = time.time()
            except KeyError:
                return False
            try:
                iPos = self.__queues[eType].index(taskId)
            except ValueError:
                return False
            del self.__queues[eType][iPos]
            return True

    def waitingTasks(self, eType):
        """Return how many tasks are queued for *eType*."""
        with self.__lock:
            try:
                return len(self.__queues[eType])
            except KeyError:
                return 0
class ExecutorDispatcherCallbacks(object):
    """Default callback set for ExecutorDispatcher.

    Users plug real behavior in via ExecutorDispatcher.setCallbacks();
    the mandatory hooks default to S_ERROR so a missing override is
    surfaced explicitly, while the optional notification hooks default
    to S_OK (no-op).
    """

    def cbDispatch(self, taskId, taskObj, pathExecuted):
        # Mandatory: decide the next executor type for a task.
        return S_ERROR("No dispatch callback defined")

    def cbSendTask(self, taskId, taskObj, eId, eType):
        # Mandatory: deliver the task to executor eId.
        return S_ERROR("No send task callback defined")

    def cbDisconectExecutor(self, eId):
        # Mandatory: drop the connection to executor eId.
        return S_ERROR("No disconnect callback defined")

    def cbTaskError(self, taskId, taskObj, errorMsg):
        # Mandatory: handle a task that failed permanently.
        return S_ERROR("No error callback defined")

    def cbTaskProcessed(self, taskId, taskObj, eType):
        # Optional notification: an executor finished processing a task.
        return S_OK()

    def cbTaskFreeze(self, taskId, taskObj, eType):
        # Optional notification: an executor requested to freeze a task.
        return S_OK()
class ExecutorDispatcher(object):
    """Central broker routing tasks through a chain of executor modules.

    Life cycle of a task: ``addTask`` registers it; the dispatch callback
    decides the next executor type; the task is queued per type and sent to
    an idle executor of that type; ``taskProcessed``/``freezeTask``/
    ``retryTask`` report back and the task is re-dispatched until the
    callback returns no type.  Problematic tasks are parked in a "freezer"
    and retried later (up to 10 freezes).

    Fixes applied to the original implementation:
      * ``__getNextExecutor`` caught ``IndexError`` around a dict lookup
        that raises ``KeyError`` — the guard never fired.
      * ``removeExecutor`` fell through after a ``KeyError`` and used a
        stale/undefined ``eTask``; it now skips the removed task.
      * ``__freezeTask`` assigned ``eTask.frozenMessage`` although the
        attribute declared on ``ETask`` is ``frozenMsg``.
    Manual lock acquire/release patterns were converted to ``with`` blocks
    (behaviorally identical, exception-safe by construction).
    """

    class ETask(object):
        """Per-task bookkeeping record."""

        def __init__(self, taskId, taskObj):
            self.taskId = taskId
            self.taskObj = taskObj
            self.pathExecuted = []  # executor types that already handled the task
            self.freezeTime = 60    # seconds the task must stay frozen
            self.frozenTime = 0     # accumulated time spent frozen
            self.frozenSince = 0    # timestamp of the last freeze
            self.frozenCount = 0    # number of times the task was frozen
            self.frozenMsg = False  # reason for the last freeze
            self.eType = False      # executor type currently assigned
            self.sendTime = 0       # timestamp of the last send to an executor
            self.retries = 0

        def __repr__(self):
            rS = "<ETask %s" % self.taskId
            if self.eType:
                rS += " eType=%s>" % self.eType
            else:
                rS += ">"
            return rS

    def __init__(self, monitor=None):
        """
        :param monitor: object used for monitoring.
            Either a :py:class`DIRAC.FrameworkSystem.Client.MonitoringClient.MonitoringClient` or a
            :py:class`DIRAC.MonitoringSystem.Client.MonitoringReporter.MonitoringReporter`
        """
        self.__idMap = {}      # executor id -> list of executor types it serves
        self.__execTypes = {}  # executor type -> number of connected executors
        self.__executorsLock = threading.Lock()
        self.__tasksLock = threading.Lock()
        self.__freezerLock = threading.Lock()
        self.__tasks = {}      # task id -> ETask
        self.__log = gLogger.getSubLogger("ExecMind")
        self.__taskFreezer = []
        self.__queues = ExecutorQueues(self.__log)
        self.__states = ExecutorState(self.__log)
        self.__cbHolder = ExecutorDispatcherCallbacks()
        self.__monitor = None
        if isinstance(monitor, MonitoringClient):
            self.__monitor = monitor
        elif isinstance(monitor, MonitoringReporter):
            self.__monitoringReporter = monitor
        gThreadScheduler.addPeriodicTask(60, self.__doPeriodicStuff)
        # If a task is frozen too many times, send error or forget task?
        self.__failedOnTooFrozen = True
        # If a task fails to properly dispatch, freeze or forget task?
        self.__freezeOnFailedDispatch = True
        # If a task needs to go to an executor that has not connected. Freeze or forget the task?
        self.__freezeOnUnknownExecutor = True
        if self.__monitor:
            self.__monitor.registerActivity("executors", "Executor reactors connected",
                                            "Executors", "executors", self.__monitor.OP_MEAN, 300)
            self.__monitor.registerActivity("tasks", "Tasks processed",
                                            "Executors", "tasks", self.__monitor.OP_RATE, 300)
            self.__monitor.registerActivity("taskTime", "Task processing time",
                                            "Executors", "seconds", self.__monitor.OP_MEAN, 300)

    def setFailedOnTooFrozen(self, value):
        # See __init__: error out (True) or forget (False) over-frozen tasks.
        self.__failedOnTooFrozen = value

    def setFreezeOnFailedDispatch(self, value):
        # See __init__: freeze (True) or forget (False) tasks that fail dispatch.
        self.__freezeOnFailedDispatch = value

    def setFreezeOnUnknownExecutor(self, value):
        # See __init__: freeze (True) or forget (False) tasks for unseen types.
        self.__freezeOnUnknownExecutor = value

    def _internals(self):
        """Return a snapshot of the internal state (debugging aid)."""
        return {'idMap': dict(self.__idMap),
                'execTypes': dict(self.__execTypes),
                'tasks': sorted(self.__tasks),
                'freezer': list(self.__taskFreezer),
                'queues': self.__queues._internals(),
                'states': self.__states._internals(),
                'locked': {'exec': self.__executorsLock.locked(),  # pylint: disable=no-member
                           'tasks': self.__tasksLock.locked(),  # pylint: disable=no-member
                           'freezer': self.__freezerLock.locked()},  # pylint: disable=no-member
                }

    def setCallbacks(self, callbacksObj):
        """Install the user callback object (must subclass ExecutorDispatcherCallbacks)."""
        if not isinstance(callbacksObj, ExecutorDispatcherCallbacks):
            return S_ERROR("Callbacks object does not inherit from ExecutorDispatcherCallbacks")
        self.__cbHolder = callbacksObj
        return S_OK()

    def __doPeriodicStuff(self):
        # Periodic maintenance: defrost due tasks, refill executors, report marks.
        self.__unfreezeTasks()
        for eType in self.__execTypes:
            self.__fillExecutors(eType)
        if not self.__monitor:
            return
        eTypes = self.__execTypes
        for eType in eTypes:
            try:
                self.__monitor.addMark("executors-%s" % eType, self.__execTypes[eType])
            except KeyError:
                pass
        self.__monitor.addMark("executors", len(self.__idMap))

    def addExecutor(self, eId, eTypes, maxTasks=1):
        """Register a connected executor and try to feed it queued tasks."""
        self.__log.verbose("Adding new %s executor to the pool %s" % (eId, ", ".join(eTypes)))
        with self.__executorsLock:
            if eId in self.__idMap:
                return
            if not isinstance(eTypes, (list, tuple)):
                eTypes = [eTypes]
            self.__idMap[eId] = list(eTypes)
            self.__states.addExecutor(eId, eTypes, maxTasks)
            for eType in eTypes:
                if eType not in self.__execTypes:
                    self.__execTypes[eType] = 0
                    if self.__monitor:
                        self.__monitor.registerActivity("executors-%s" % eType, "%s executor modules connected" % eType,
                                                        "Executors", "executors", self.__monitor.OP_MEAN, 300)
                        self.__monitor.registerActivity("tasks-%s" % eType, "Tasks processed by %s" % eType,
                                                        "Executors", "tasks", self.__monitor.OP_RATE, 300)
                        self.__monitor.registerActivity("taskTime-%s" % eType, "Task processing time for %s" % eType,
                                                        "Executors", "seconds", self.__monitor.OP_MEAN, 300)
                self.__execTypes[eType] += 1
        for eType in eTypes:
            self.__fillExecutors(eType)

    def removeExecutor(self, eId):
        """Deregister an executor; requeue or re-dispatch its in-flight tasks."""
        self.__log.verbose("Removing executor %s" % eId)
        with self.__executorsLock:
            if eId not in self.__idMap:
                return
            eTypes = self.__idMap.pop(eId)
            for eType in eTypes:
                self.__execTypes[eType] -= 1
            tasksInExec = self.__states.removeExecutor(eId)
            for taskId in tasksInExec:
                try:
                    eTask = self.__tasks[taskId]
                except KeyError:
                    # Task already removed; skip it.  (BUGFIX: previously fell
                    # through and reused a stale/undefined eTask.)
                    continue
                if eTask.eType:
                    self.__queues.pushTask(eTask.eType, taskId, ahead=True)
                else:
                    self.__dispatchTask(taskId)
        try:
            self.__cbHolder.cbDisconectExecutor(eId)
        except Exception:
            self.__log.exception("Exception while disconnecting agent %s" % eId)
        for eType in eTypes:
            self.__fillExecutors(eType)

    def __freezeTask(self, taskId, errMsg, eType=False, freezeTime=60):
        """Park a task in the freezer; fail it after 10 freezes.

        Returns True when the task was frozen, False otherwise.
        """
        self.__log.verbose("Freezing task %s" % taskId)
        isFrozen = False
        with self.__freezerLock:
            if taskId in self.__taskFreezer:
                return False
            try:
                eTask = self.__tasks[taskId]
            except KeyError:
                return False
            eTask.freezeTime = freezeTime
            # BUGFIX: was `eTask.frozenMessage`, a typo that never updated the
            # frozenMsg attribute declared on ETask.
            eTask.frozenMsg = errMsg
            eTask.frozenSince = time.time()
            eTask.frozenCount += 1
            eTask.eType = eType
            if eTask.frozenCount < 10:
                self.__taskFreezer.append(taskId)
                isFrozen = True
        if not isFrozen:
            # Too many freezes: give up on the task.
            self.removeTask(taskId)
            if self.__failedOnTooFrozen:
                self.__cbHolder.cbTaskError(taskId, eTask.taskObj, "Retried more than 10 times. Last error: %s" % errMsg)
            return False
        return True

    def __isFrozen(self, taskId):
        return taskId in self.__taskFreezer

    def __removeFromFreezer(self, taskId):
        """Take a task out of the freezer, accumulating its frozen time."""
        with self.__freezerLock:
            try:
                iP = self.__taskFreezer.index(taskId)
            except ValueError:
                return False
            self.__taskFreezer.pop(iP)
            try:
                eTask = self.__tasks[taskId]
            except KeyError:
                return False
            eTask.frozenTime += time.time() - eTask.frozenSince
        return True

    def __unfreezeTasks(self, eType=False):
        """Re-dispatch frozen tasks whose freeze time has elapsed.

        When *eType* is given, only tasks waiting for that type are defrosted.
        """
        iP = 0
        while iP < len(self.__taskFreezer):
            with self.__freezerLock:
                try:
                    taskId = self.__taskFreezer[iP]
                except IndexError:
                    return
                try:
                    eTask = self.__tasks[taskId]
                except KeyError:
                    self.__log.notice("Removing task %s from the freezer. Somebody has removed the task" % taskId)
                    self.__taskFreezer.pop(iP)
                    continue
                # Current taskId/eTask is the one to defrost
                if eType and eType != eTask.eType:
                    iP += 1
                    continue
                if time.time() - eTask.frozenSince < eTask.freezeTime:
                    iP += 1
                    continue
                self.__taskFreezer.pop(iP)
            # Out of the lock zone to minimize zone of exclusion
            eTask.frozenTime += time.time() - eTask.frozenSince
            self.__log.verbose("Unfreezed task %s" % taskId)
            self.__dispatchTask(taskId, defrozeIfNeeded=False)

    def __addTaskIfNew(self, taskId, taskObj):
        """Register the task if unknown; return True when it was newly added."""
        with self.__tasksLock:
            if taskId in self.__tasks:
                self.__log.verbose("Task %s was already known" % taskId)
                return False
            self.__tasks[taskId] = ExecutorDispatcher.ETask(taskId, taskObj)
            self.__log.verbose("Added task %s" % taskId)
            return True

    def getTask(self, taskId):
        """Return the stored task object, or None when unknown."""
        try:
            return self.__tasks[taskId].taskObj
        except KeyError:
            return None

    def __dispatchTask(self, taskId, defrozeIfNeeded=True):
        """Ask the dispatch callback for the next executor type and queue the task."""
        self.__log.verbose("Dispatching task %s" % taskId)
        # If task already in executor skip
        if self.__states.getExecutorOfTask(taskId):
            return S_OK()
        self.__removeFromFreezer(taskId)
        result = self.__getNextExecutor(taskId)
        if not result['OK']:
            self.__log.warn("Error while calling dispatch callback: %s" % result['Message'])
            if self.__freezeOnFailedDispatch:
                if self.__freezeTask(taskId, result['Message']):
                    return S_OK()
                return result
            taskObj = self.getTask(taskId)
            self.removeTask(taskId)
            self.__cbHolder.cbTaskError(taskId, taskObj, "Could not dispatch task: %s" % result['Message'])
            return S_ERROR("Could not add task. Dispatching task failed")
        eType = result['Value']
        if not eType:
            # No next executor type: the task chain is complete.
            self.__log.verbose("No more executors for task %s" % taskId)
            return self.removeTask(taskId)
        self.__log.verbose("Next executor type is %s for task %s" % (eType, taskId))
        if eType not in self.__execTypes:
            if self.__freezeOnUnknownExecutor:
                self.__log.verbose("Executor type %s has not connected. Freezing task %s" % (eType, taskId))
                self.__freezeTask(taskId, "Unknown executor %s type" % eType,
                                  eType=eType, freezeTime=0)
                return S_OK()
            self.__log.verbose("Executor type %s has not connected. Forgetting task %s" % (eType, taskId))
            return self.removeTask(taskId)
        self.__queues.pushTask(eType, taskId)
        self.__fillExecutors(eType, defrozeIfNeeded=defrozeIfNeeded)
        return S_OK()

    def __taskProcessedCallback(self, taskId, taskObj, eType):
        """Invoke cbTaskProcessed, shielding the dispatcher from bad callbacks."""
        try:
            result = self.__cbHolder.cbTaskProcessed(taskId, taskObj, eType)
        except Exception:
            self.__log.exception("Exception while calling taskDone callback")
            return S_ERROR("Exception while calling taskDone callback")
        if not isReturnStructure(result):
            errMsg = "taskDone callback did not return a S_OK/S_ERROR structure"
            self.__log.fatal(errMsg)
            return S_ERROR(errMsg)
        return result

    def __taskFreezeCallback(self, taskId, taskObj, eType):
        """Invoke cbTaskFreeze, shielding the dispatcher from bad callbacks."""
        try:
            result = self.__cbHolder.cbTaskFreeze(taskId, taskObj, eType)
        except Exception:
            self.__log.exception("Exception while calling taskFreeze callback")
            return S_ERROR("Exception while calling taskFreeze callback")
        if not isReturnStructure(result):
            errMsg = "taskFreeze callback did not return a S_OK/S_ERROR structure"
            self.__log.fatal(errMsg)
            return S_ERROR(errMsg)
        return result

    def __getNextExecutor(self, taskId):
        """Ask the dispatch callback which executor type handles the task next."""
        try:
            eTask = self.__tasks[taskId]
        except KeyError:
            # BUGFIX: was `except IndexError`, which a dict lookup never raises.
            msg = "Task %s was deleted prematurely while being dispatched" % taskId
            self.__log.error("Task was deleted prematurely while being dispatched", "%s" % taskId)
            return S_ERROR(msg)
        try:
            result = self.__cbHolder.cbDispatch(taskId, eTask.taskObj, tuple(eTask.pathExecuted))
        except Exception:
            self.__log.exception("Exception while calling dispatch callback")
            return S_ERROR("Exception while calling dispatch callback")
        if not isReturnStructure(result):
            errMsg = "Dispatch callback did not return a S_OK/S_ERROR structure"
            self.__log.fatal(errMsg)
            return S_ERROR(errMsg)
        # Assign the next executor type to the task
        if result['OK']:
            eTask.eType = result['Value']
        return result

    def getTaskIds(self):
        """Return the ids of all known tasks."""
        return list(self.__tasks)

    def getExecutorsConnected(self):
        """Return a copy of executor type -> connected-executor count."""
        return dict(self.__execTypes)

    def addTask(self, taskId, taskObj):
        """Add a task and dispatch it (re-adding just defrosts pending tasks)."""
        if not self.__addTaskIfNew(taskId, taskObj):
            self.__unfreezeTasks()
            return S_OK()
        return self.__dispatchTask(taskId)

    def removeTask(self, taskId):
        """Forget a task everywhere (tasks, queues, states, freezer)."""
        try:
            self.__tasks.pop(taskId)
        except KeyError:
            self.__log.verbose("Task %s is already removed" % taskId)
            return S_OK()
        self.__log.verbose("Removing task %s" % taskId)
        eId = self.__states.getExecutorOfTask(taskId)
        self.__queues.deleteTask(taskId)
        self.__states.removeTask(taskId)
        with self.__freezerLock:
            try:
                self.__taskFreezer.pop(self.__taskFreezer.index(taskId))
            except (KeyError, ValueError):
                pass
        if eId:
            # Send task to executor if idle
            return self.__sendTaskToExecutor(eId, checkIdle=True)
        return S_OK()

    def __taskReceived(self, taskId, eId):
        """Validate a task report from executor eId; return S_OK(eType) or S_OK()."""
        try:
            eTask = self.__tasks[taskId]
        except KeyError:
            errMsg = "Task %s is not known" % taskId
            self.__log.error("Task is not known", "%s" % taskId)
            return S_ERROR(errMsg)
        if not self.__states.removeTask(taskId, eId):
            self.__log.info("Executor %s says it's processed task %s but it didn't have it" % (eId, taskId))
            return S_OK()
        if eTask.eType not in self.__idMap[eId]:
            errMsg = "Executor type invalid for %s. Redoing task %s" % (eId, taskId)
            self.__log.error("Executor type invalid. Redoing task", "Type %s, Task %s" % (eId, taskId))
            self.removeExecutor(eId)
            self.__dispatchTask(taskId)
            return S_ERROR(errMsg)
        if self.__monitor:
            tTime = time.time() - self.__tasks[taskId].sendTime
            self.__monitor.addMark("taskTime-%s" % eTask.eType, tTime)
            self.__monitor.addMark("taskTime", tTime)
            self.__monitor.addMark("tasks-%s" % eTask.eType, 1)
            self.__monitor.addMark("tasks", 1)
        return S_OK(eTask.eType)

    def freezeTask(self, eId, taskId, freezeTime, taskObj=False):
        """Handle an executor's request to freeze a task for *freezeTime* seconds."""
        result = self.__taskReceived(taskId, eId)
        if not result['OK']:
            return result
        eType = result['Value']
        # Executor didn't have the task.
        if not eType:
            # Fill the executor
            self.__sendTaskToExecutor(eId)
            return S_OK()
        if not taskObj:
            taskObj = self.__tasks[taskId].taskObj
        result = self.__taskFreezeCallback(taskId, taskObj, eType)
        if not result['OK']:
            # Fill the executor
            self.__sendTaskToExecutor(eId)
            return result
        try:
            self.__tasks[taskId].taskObj = taskObj
        except KeyError:
            self.__log.error("Task seems to have been removed while being processed!", "%s" % taskId)
            self.__sendTaskToExecutor(eId, eType)
            return S_OK()
        self.__freezeTask(taskId, "Freeze request by %s executor" % eType,
                          eType=eType, freezeTime=freezeTime)
        self.__sendTaskToExecutor(eId, eType)
        return S_OK()

    def taskProcessed(self, eId, taskId, taskObj=False):
        """Handle an executor's report that it finished processing a task."""
        result = self.__taskReceived(taskId, eId)
        if not result['OK']:
            return result
        eType = result['Value']
        # Executor didn't have the task.
        if not eType:
            # Fill the executor
            self.__sendTaskToExecutor(eId)
            return S_OK()
        # Call the done callback
        if not taskObj:
            taskObj = self.__tasks[taskId].taskObj
        result = self.__taskProcessedCallback(taskId, taskObj, eType)
        if not result['OK']:
            # Fill the executor
            self.__sendTaskToExecutor(eId)
            # Remove the task
            self.removeTask(taskId)
            return result
        # Up until here it's an executor error. From now on it can be a task error
        try:
            self.__tasks[taskId].taskObj = taskObj
            self.__tasks[taskId].pathExecuted.append(eType)
        except KeyError:
            self.__log.error("Task seems to have been removed while being processed!", "%s" % taskId)
            self.__sendTaskToExecutor(eId, eType)
            return S_OK()
        self.__log.verbose("Executor %s processed task %s" % (eId, taskId))
        result = self.__dispatchTask(taskId)
        self.__sendTaskToExecutor(eId, eType)
        return result

    def retryTask(self, eId, taskId):
        """Handle an executor's report that the task must be retried."""
        if taskId not in self.__tasks:
            errMsg = "Task %s is not known" % taskId
            self.__log.error("Task is not known", "%s" % taskId)
            return S_ERROR(errMsg)
        if not self.__states.removeTask(taskId, eId):
            self.__log.info("Executor %s says it's processed task %s but it didn't have it" % (eId, taskId))
            self.__sendTaskToExecutor(eId)
            return S_OK()
        self.__log.verbose("Executor %s did NOT process task %s, retrying" % (eId, taskId))
        try:
            self.__tasks[taskId].retries += 1
        except KeyError:
            self.__log.error("Task seems to have been removed while waiting for retry!", "%s" % taskId)
            return S_OK()
        return self.__dispatchTask(taskId)

    def __fillExecutors(self, eType, defrozeIfNeeded=True):
        """Feed queued tasks to idle executors of *eType* until none are idle."""
        if defrozeIfNeeded:
            self.__log.verbose("Unfreezing tasks for %s" % eType)
            self.__unfreezeTasks(eType)
        self.__log.verbose("Filling %s executors" % eType)
        eId = self.__states.getIdleExecutor(eType)
        while eId:
            result = self.__sendTaskToExecutor(eId, eType)
            if not result['OK']:
                self.__log.error("Could not send task to executor", "%s" % result['Message'])
            else:
                if not result['Value']:
                    # No more tasks for eType
                    break
                self.__log.verbose("Task %s was sent to %s" % (result['Value'], eId))
            eId = self.__states.getIdleExecutor(eType)
        self.__log.verbose("No more idle executors for %s" % eType)

    def __sendTaskToExecutor(self, eId, eTypes=False, checkIdle=False):
        """Pop a waiting task for one of *eTypes* and send it to executor eId.

        Returns S_OK(taskId) on success, S_OK() when nothing was waiting.
        """
        if checkIdle and self.__states.freeSlots(eId) == 0:
            return S_OK()
        try:
            searchTypes = list(reversed(self.__idMap[eId]))
        except KeyError:
            self.__log.verbose("Executor %s invalid/disconnected" % eId)
            return S_ERROR("Invalid executor")
        if eTypes:
            # Prioritize the requested types by moving them to the end
            # (the queues are searched in the resulting order).
            if not isinstance(eTypes, (list, tuple)):
                eTypes = [eTypes]
            for eType in reversed(eTypes):
                try:
                    searchTypes.remove(eType)
                except ValueError:
                    pass
                searchTypes.append(eType)
        pData = self.__queues.popTask(searchTypes)
        if pData is None:
            self.__log.verbose("No more tasks for %s" % eTypes)
            return S_OK()
        taskId, eType = pData
        self.__log.verbose("Sending task %s to %s=%s" % (taskId, eType, eId))
        self.__states.addTask(eId, taskId)
        result = self.__msgTaskToExecutor(taskId, eId, eType)
        if not result['OK']:
            # Delivery failed: requeue ahead and undo the state change.
            self.__queues.pushTask(eType, taskId, ahead=True)
            self.__states.removeTask(taskId)
            return result
        return S_OK(taskId)

    def __msgTaskToExecutor(self, taskId, eId, eType):
        """Deliver the task via the send callback, recording the send time."""
        try:
            self.__tasks[taskId].sendTime = time.time()
        except KeyError:
            return S_ERROR("Task %s has been deleted" % taskId)
        try:
            result = self.__cbHolder.cbSendTask(taskId, self.__tasks[taskId].taskObj, eId, eType)
        except Exception:
            self.__log.exception("Exception while sending task to executor")
            return S_ERROR("Exception while sending task to executor")
        if isReturnStructure(result):
            return result
        errMsg = "Send task callback did not send back an S_OK/S_ERROR structure"
        self.__log.fatal(errMsg)
        return S_ERROR(errMsg)
|
somchaisomph/RPI.GPIO.TH | refs/heads/master | gadgets/th_gpio.py | 1 | import atexit
import RPi.GPIO as GPIO ## Import GPIO library
def singleton(cls):
    """Class decorator ensuring a single shared instance of *cls*.

    The first call constructs the instance; subsequent calls return the
    cached instance.  BUGFIX/generalization: the wrapper now forwards
    ``*args``/``**kwargs`` to the constructor — the original accepted no
    arguments, so decorating any class whose ``__init__`` takes parameters
    raised TypeError.  Arguments of calls after the first are ignored.
    """
    instances = {}

    def getinstance(*args, **kwargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]
    return getinstance
'''
TH_GPIO is implemented as a singleton to guarantee that only one
instance is managing the GPIO pins at any given time.
'''
@singleton
class TH_GPIO():
    """Singleton facade over RPi.GPIO using BOARD pin numbering.

    Tracks every pin it has configured (pin number -> None for plain pins,
    or a SOFT_PWM wrapper for PWM pins) so that all of them can be cleaned
    up exactly once when the program exits.
    """

    def __init__(self):
        GPIO.setmode(GPIO.BOARD)  # Use board pin numbering
        self._gpios = {}
        # To make sure every GPIO pin will be cleaned up at the end of program.
        atexit.register(self.all_done)

    def enable_pin(self, pin_number, mode='in'):
        """Configure one pin as 'in' or 'out'; no-op if already enabled."""
        if pin_number not in self._gpios:
            mode = mode.lower()
            if mode == 'out':
                GPIO.setup(pin_number, GPIO.OUT)
            elif mode == 'in':
                GPIO.setup(pin_number, GPIO.IN)
            self._gpios[pin_number] = None

    def enable_pins(self, pin_list=None, mode='in'):
        """Configure several pins at once with the same mode.

        BUGFIX: the original used a mutable default argument (pin_list=[]).
        """
        mode = mode.lower()
        for p in (pin_list or []):
            if p not in self._gpios:
                if mode == 'out':
                    GPIO.setup(p, GPIO.OUT)
                elif mode == 'in':
                    GPIO.setup(p, GPIO.IN)
                self._gpios[p] = None

    def send(self, pin_number, state=True):
        """Drive an enabled output pin high (True) or low (False)."""
        if pin_number in self._gpios:
            GPIO.output(pin_number, state)

    def read(self, pin_number):
        """Return the level of an enabled input pin, or None if not enabled."""
        res = None
        if pin_number in self._gpios:
            res = GPIO.input(pin_number)
        return res

    def pwm_create(self, pin_number, freq=100):
        """Set up PWM output on a fresh pin; returns the SOFT_PWM wrapper.

        Returns None when the pin was already enabled (as before).
        """
        p = None
        if pin_number not in self._gpios:
            GPIO.setup(pin_number, GPIO.OUT)
            p = SOFT_PWM(GPIO.PWM(pin_number, freq), freq)
            self._gpios[pin_number] = p
        return p

    def disable_pin(self, pin_number):
        """Stop any PWM on the pin, clean it up and forget it."""
        if pin_number in self._gpios:
            if self._gpios[pin_number] is not None:
                # It is a PWM pin; it should be stopped before cleanup.
                self._gpios[pin_number].stop()
            GPIO.cleanup(pin_number)
            del self._gpios[pin_number]

    def disable_pins(self, pin_list):
        """Disable every pin in pin_list.

        BUGFIX: the original loop referenced the undefined name
        ``pin_number`` (NameError on first call) and skipped the
        membership check; it now simply delegates to disable_pin.
        """
        for p in pin_list:
            self.disable_pin(p)

    def all_done(self):
        """Stop all PWM, clean up every tracked pin and reset the registry."""
        if len(self._gpios) > 0:
            for p in self._gpios:
                if self._gpios[p] is not None:
                    self._gpios[p].stop()
                GPIO.cleanup(p)
            self._gpios.clear()
class SOFT_PWM():
    """Thin convenience wrapper around an RPi.GPIO software PWM channel."""

    def __init__(self, pwm, freq):
        self.pwm = pwm
        self.freq = freq
        # Start output immediately at a 1% duty cycle.
        self.pwm.start(1)

    def stop(self):
        """Halt the PWM output."""
        self.pwm.stop()

    def change(self, dc):
        """Apply a new duty cycle (percent)."""
        self.pwm.ChangeDutyCycle(dc)

    def set_freq(self, freq):
        """Apply a new frequency (Hz) and remember it."""
        self.pwm.ChangeFrequency(freq)
        self.freq = freq
|
PanDAWMS/panda-bigmon-lsst | refs/heads/wenaus | lsst/settings/__init__.py | 2 | """
Settings package to replace standard django project settings.py.
It separates
A) base.py: general project configuration, should not change this one
B) config.py: machine-specific environment config with sensible defaults
C) local.py: settings to override machine-specific environment config
"""
# Load base configuration for the whole application
from core.common.settings.base import *
from lsst.settings.base import *
# Load dev env config
from core.common.settings.config import *
from lsst.settings.config import *
# Load any settings for local development
try:
from core.common.settings.local import *
except ImportError:
pass
try:
from lsst.settings.local import *
except ImportError:
pass
import os
os.environ['NLS_LANG'] = 'AMERICAN_AMERICA.WE8ISO8859P1'
|
notriddle/servo | refs/heads/master | tests/wpt/web-platform-tests/webdriver/tests/element_send_keys/events.py | 26 | import pytest
from tests.support.asserts import (
assert_element_has_focus,
assert_events_equal,
assert_success,
)
from tests.support.inline import inline
from . import map_files_to_multiline_text
@pytest.fixture
def tracked_events():
    """The DOM events recorded on the element while keys are sent."""
    return "blur change focus input keydown keypress keyup".split()
def element_send_keys(session, element, text):
    """POST the WebDriver Element Send Keys command for *element*."""
    endpoint = "/session/{session_id}/element/{element_id}/value".format(
        session_id=session.session_id, element_id=element.id)
    return session.transport.send("POST", endpoint, {"text": text})
def test_file_upload(session, create_files, add_event_listeners, tracked_events):
    # Selecting files through <input type=file> must fire exactly the
    # "input" and "change" events, in that order -- no key events.
    expected_events = [
        "input",
        "change",
    ]

    files = create_files(["foo", "bar"])
    session.url = inline("<input type=file multiple>")
    element = session.find.css("input", all=False)
    add_event_listeners(element, tracked_events)

    response = element_send_keys(session, element, map_files_to_multiline_text(files))
    assert_success(response)

    assert_events_equal(session, expected_events)
@pytest.mark.parametrize("tag", ["input", "textarea"])
def test_form_control_send_text(session, add_event_listeners, tracked_events, tag):
    # Sending "foo" must first focus the element, then fire the
    # keydown/keypress/input/keyup cycle once per character (3 times).
    expected_events = [
        "focus",
        "keydown",
        "keypress",
        "input",
        "keyup",
        "keydown",
        "keypress",
        "input",
        "keyup",
        "keydown",
        "keypress",
        "input",
        "keyup",
    ]

    session.url = inline("<%s>" % tag)
    element = session.find.css(tag, all=False)
    add_event_listeners(element, tracked_events)

    response = element_send_keys(session, element, "foo")
    assert_success(response)

    assert_events_equal(session, expected_events)
@pytest.mark.parametrize("tag", ["input", "textarea"])
def test_not_blurred(session, tag):
    # Even an empty keys string must focus the target element and
    # must not blur it afterwards.
    session.url = inline("<%s>" % tag)
    element = session.find.css(tag, all=False)

    response = element_send_keys(session, element, "")
    assert_success(response)

    assert_element_has_focus(element)
|
cheery/essence | refs/heads/master | richtext.py | 1 | from argon import rgba
from box import Box
# Shared colour palette (RGBA); alpha defaults to opaque when omitted.
blackish = rgba(0x00, 0x00, 0x00, 0x40)  # translucent black background
whiteish = rgba(0xf6, 0xf3, 0xe8)        # warm off-white text colour
blueish = rgba(0x00, 0xf3, 0xe8, 0x80)   # translucent cyan caret/selection
def clamp(low, high, value):
    """Return *value* limited to the inclusive range [low, high]."""
    bounded_below = max(low, value)
    return min(high, bounded_below)
class Segment(object):
    """A single word together with its rendered pixel width."""

    def __init__(self, width, text):
        self.width = width
        self.text = text
def line_break(width, words, space_width):
    """
    Break *words* (Segment objects) into lines at most *width* pixels wide.

    Dynamic programming over break points: table[j] holds the best
    (penalty, start_index) for the first j words, where the penalty of a
    line is the squared leftover space (a least-squares raggedness
    measure). Returns a list of lines, each a list of Segment objects.
    """
    table = [(0.0, 0)]
    for stop in range(1, len(words)+1):
        start = stop - 1
        c = words[start].width
        # Candidate: a line holding only words[start:stop].
        best = (width-c)**2.0 + table[start][0], start
        start -= 1
        # Try extending the line leftwards while it still fits.
        while start >= 0 and c <= width:
            c += words[start].width + space_width
            p = (width-c)**2.0 + table[start][0]
            if p <= best[0] and c <= width:
                best = p, start
            start -= 1
        table.append(best)
    # Walk the table backwards to recover the chosen lines.
    lines = []
    j = len(words)
    while j > 0:
        _, i = table[j]
        lines.append(words[i:j])
        j = i
    lines.reverse()
    return lines
def line_offsets(lines):
    """Yield the starting character offset of each line, then the final end."""
    total = 0
    yield total
    for line in lines:
        chars = sum(len(word.text) for word in line)
        spaces = max(0, len(line) - 1)
        # +1 accounts for the separator consumed at the line break.
        total += chars + spaces + 1
        yield total
class Paragraph(Box):
    """
    An editable, word-wrapped block of text with mouse selection and a caret.

    Positions are character offsets into *text*. head/tail model the
    selection: head is the caret end, tail the anchor end.
    Note: this file uses Python 2 syntax (tuple parameter in pick_offset).
    """
    def __init__(self, font, text, tags, head=0, tail=0):
        self.font = font
        self.text = text
        self.tags = tags
        # NOTE(review): head/tail are shifted by fixed constants (11/40);
        # presumably caller-specific initial positions -- confirm intent.
        self.head = head + 11
        self.tail = tail + 40
        Box.__init__(self)
        self.width = 300
        self.line_height = font.height * 1.2
        self.dragging = False

    def update(self):
        """Recompute wrapping and cached offsets for the current text."""
        font = self.font
        space_width = font.measure(' ')[-1]
        self.lines = line_break(self.width, [
            Segment(font.measure(word)[-1], word)
            for word in self.text.split(' ')
        ], space_width)
        self.height = self.line_height * len(self.lines)
        # bases[i] is the character offset where line i starts.
        self.bases = list(line_offsets(self.lines))
        # offsets[i] is the pixel x-position of character i.
        self.offsets = font.measure(self.text)

    def getline(self, offset):
        """Return the index of the line containing character *offset*."""
        for j, base in enumerate(reversed(self.bases), 1):
            if base <= offset:
                return len(self.bases) - j
        return len(self.bases) - j

    def getlox(self, offset):
        """Return (line index, pixel x within that line) for *offset*."""
        line = self.getline(offset)
        base = self.bases[line]
        return line, self.offsets[offset] - self.offsets[base]

    @property
    def start(self):
        # Selection start: the smaller of caret and anchor.
        return min(self.head, self.tail)

    @property
    def stop(self):
        # Selection end: the larger of caret and anchor.
        return max(self.head, self.tail)

    def textgeometry(self, argon):
        """Yield one ((x, y), text, color, font) tuple per wrapped line."""
        x, y = self.left, self.top + self.font.baseline
        for line in self.lines:
            text = ' '.join(word.text for word in line)
            yield (x, y), text, whiteish, self.font
            y += self.line_height

    def selgeometry(self, argon, start, stop):
        """
        Build highlight rectangles for the selection [start, stop): one
        rectangle on a single line, two for adjacent lines, otherwise
        head + tail rectangles plus a full-width middle band.
        """
        x0, y = self.left, self.top
        x1 = x0 + self.width
        l0, o0 = self.getlox(start)
        l1, o1 = self.getlox(stop)
        x2, x3 = x0+o0, x0+o1
        if l0 == l1:
            rect = x2, y + self.line_height * l0, x3-x2, self.line_height
            return [(rect, blueish, argon.plain)]
        elif l0+1 == l1:
            rect0 = x2, y + self.line_height * l0, x1-x2, self.line_height
            rect1 = x0, y + self.line_height * l1, x3-x0, self.line_height
            return [
                (rect0, blueish, argon.plain),
                (rect1, blueish, argon.plain)
            ]
        else:
            rect0 = x2, y + self.line_height * l0, x1-x2, self.line_height
            rect1 = x0, y + self.line_height * l1, x3-x0, self.line_height
            rect2 = x0, y + self.line_height * (l0+1), x1-x0, self.line_height*(l1-l0-1)
            return [
                (rect0, blueish, argon.plain),
                (rect1, blueish, argon.plain),
                (rect2, blueish, argon.plain)
            ]

    def render(self, argon):
        """Draw background, caret, text and selection via the argon renderer."""
        self.update()
        font = self.font
        x, y = self.left, self.top + font.baseline
        l, o = self.getlox(self.head)
        argon.render([
            (self.rect, blackish, argon.plain),
            # 2px-wide caret at the head position.
            ((x+o-1, self.top+self.line_height*l, 2, self.line_height), blueish, argon.plain),
        ] + list(self.textgeometry(argon))
          + list(self.selgeometry(argon, self.start, self.stop))
        )

    def pick_offset(self, (x, y)):
        """Return the character offset nearest to the mouse position (x, y)."""
        # Clamp to a valid line, then scan that line's characters for the
        # boundary closest to the click's x coordinate.
        line = clamp(0, len(self.lines)-1, int((y - self.top) / self.line_height))
        base = self.bases[line]
        x = (x - self.left)
        best = base, abs(x)
        for i in range(base, self.bases[line+1]):
            if len(self.offsets) <= i:
                continue
            o = abs(self.offsets[i] - self.offsets[base] - x)
            if o <= best[1]:
                best = i, o
        return best[0]

    def mousedown(self, buttons, pos):
        # Begin a drag-selection: collapse the selection to the click point.
        self.head = self.tail = self.pick_offset(pos)
        self.dragging = True

    def mouseup(self, buttons, pos):
        self.head = self.pick_offset(pos)
        self.dragging = False

    def mousemotion(self, pos, vel):
        # Extend the selection while the mouse button is held down.
        if self.dragging:
            self.head = self.pick_offset(pos)

    def replace(self, text, start, stop):
        """Splice *text* over the character range [start, stop)."""
        self.text = self.text[:start] + text + self.text[stop:]

    def keydown(self, name, mod, text):
        """Basic editing: backspace/delete and literal text insertion."""
        if name in ('backspace', 'delete') and self.start < self.stop:
            # An active selection is replaced (deleting replaces with '').
            self.replace(text, self.start, self.stop)
            self.head = self.tail = self.start
        elif name == 'backspace':
            last = clamp(0, len(self.text), self.head-1)
            self.replace('', last, self.head)
            self.head = self.tail = last
        elif name == 'delete':
            nxt = clamp(0, len(self.text), self.head+1)
            self.replace('', self.head, nxt)
            self.head = self.tail = self.head
        elif len(text) > 0:
            self.replace(text, self.start, self.stop)
            self.head = self.tail = self.start + len(text)
|
w1r0x/ansible | refs/heads/devel | lib/ansible/plugins/callback/default.py | 11 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
class CallbackModule(CallbackBase):

    '''
    This is the default callback interface, which simply prints messages
    to stdout when new callback events are received.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'default'

    def _handle_exception(self, result):
        """Display a captured task exception and strip it from *result*.

        Extracted helper: v2_runner_on_failed and v2_playbook_item_on_failed
        previously carried two verbatim copies of this logic.
        """
        if 'exception' in result:
            if self._display.verbosity < 3:
                # extract just the actual error message from the exception text
                error = result['exception'].strip().split('\n')[-1]
                msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
            else:
                msg = "An exception occurred during task execution. The full traceback is:\n" + result['exception']

            self._display.display(msg, color=C.COLOR_ERROR)

            # finally, remove the exception from the result so it's not shown every time
            del result['exception']

    def _run_is_verbose(self, result):
        """True when the full result dictionary should be dumped."""
        return ((self._display.verbosity > 0 or '_ansible_verbose_always' in result._result)
                and '_ansible_verbose_override' not in result._result)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        self._handle_exception(result._result)

        if result._task.loop and 'results' in result._result:
            self._process_items(result)
        else:
            if delegated_vars:
                self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
            else:
                self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)

        if result._task.ignore_errors:
            self._display.display("...ignoring", color=C.COLOR_SKIP)

    def v2_runner_on_ok(self, result):
        self._clean_results(result._result, result._task.action)
        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        if result._task.action == 'include':
            # 'include' results are reported via v2_playbook_on_include.
            return
        elif result._result.get('changed', False):
            if delegated_vars:
                msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
            else:
                msg = "changed: [%s]" % result._host.get_name()
            color = C.COLOR_CHANGED
        else:
            if delegated_vars:
                msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
            else:
                msg = "ok: [%s]" % result._host.get_name()
            color = C.COLOR_OK

        if result._task.loop and 'results' in result._result:
            self._process_items(result)
        else:
            if self._run_is_verbose(result):
                msg += " => %s" % (self._dump_results(result._result),)
            self._display.display(msg, color=color)

        self._handle_warnings(result._result)

    def v2_runner_on_skipped(self, result):
        if C.DISPLAY_SKIPPED_HOSTS:
            if result._task.loop and 'results' in result._result:
                self._process_items(result)
            else:
                msg = "skipping: [%s]" % result._host.get_name()
                if self._run_is_verbose(result):
                    msg += " => %s" % self._dump_results(result._result)
                self._display.display(msg, color=C.COLOR_SKIP)

    def v2_runner_on_unreachable(self, result):
        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        if delegated_vars:
            self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
        else:
            self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)

    def v2_playbook_on_no_hosts_matched(self):
        self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)

    def v2_playbook_on_no_hosts_remaining(self):
        self._display.banner("NO MORE HOSTS LEFT")

    def v2_playbook_on_task_start(self, task, is_conditional):
        self._display.banner("TASK [%s]" % task.get_name().strip())
        if self._display.verbosity > 2:
            path = task.get_path()
            if path:
                self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)

    def v2_playbook_on_cleanup_task_start(self, task):
        self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())

    def v2_playbook_on_handler_task_start(self, task):
        self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())

    def v2_playbook_on_play_start(self, play):
        name = play.get_name().strip()
        if not name:
            msg = "PLAY"
        else:
            msg = "PLAY [%s]" % name

        self._display.banner(msg)

    def v2_on_file_diff(self, result):
        if result._task.loop and 'results' in result._result:
            for res in result._result['results']:
                if 'diff' in res and res['diff']:
                    self._display.display(self._get_diff(res['diff']))
        elif 'diff' in result._result and result._result['diff']:
            self._display.display(self._get_diff(result._result['diff']))

    def v2_playbook_item_on_ok(self, result):
        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        if result._task.action == 'include':
            return
        elif result._result.get('changed', False):
            if delegated_vars:
                msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
            else:
                msg = "changed: [%s]" % result._host.get_name()
            color = C.COLOR_CHANGED
        else:
            if delegated_vars:
                msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
            else:
                msg = "ok: [%s]" % result._host.get_name()
            color = C.COLOR_OK

        msg += " => (item=%s)" % (result._result['item'],)

        if self._run_is_verbose(result):
            msg += " => %s" % self._dump_results(result._result)
        self._display.display(msg, color=color)

    def v2_playbook_item_on_failed(self, result):
        delegated_vars = result._result.get('_ansible_delegated_vars', None)
        self._handle_exception(result._result)

        if delegated_vars:
            self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR)
        else:
            self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR)

        self._handle_warnings(result._result)

    def v2_playbook_item_on_skipped(self, result):
        msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), result._result['item'])
        if self._run_is_verbose(result):
            msg += " => %s" % self._dump_results(result._result)
        self._display.display(msg, color=C.COLOR_SKIP)

    def v2_playbook_on_include(self, included_file):
        msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
        # Fix: a dead local `color = C.COLOR_SKIP` was removed; the constant
        # was already passed directly below.
        self._display.display(msg, color=C.COLOR_SKIP)

    def v2_playbook_on_stats(self, stats):
        self._display.banner("PLAY RECAP")

        hosts = sorted(stats.processed.keys())
        for h in hosts:
            t = stats.summarize(h)

            self._display.display(u"%s : %s %s %s %s" % (
                hostcolor(h, t),
                colorize(u'ok', t['ok'], C.COLOR_OK),
                colorize(u'changed', t['changed'], C.COLOR_CHANGED),
                colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
                colorize(u'failed', t['failures'], C.COLOR_ERROR)),
                screen_only=True
            )

            self._display.display(u"%s : %s %s %s %s" % (
                hostcolor(h, t, False),
                colorize(u'ok', t['ok'], None),
                colorize(u'changed', t['changed'], None),
                colorize(u'unreachable', t['unreachable'], None),
                colorize(u'failed', t['failures'], None)),
                log_only=True
            )

        self._display.display("", screen_only=True)
|
nikitos/npui | refs/heads/master | netprofile/netprofile/common/magic.py | 3 | #!/usr/bin/env python
'''
Python bindings for libmagic
'''
import ctypes
from ctypes import *
from ctypes.util import find_library
def _init():
    """
    Loads the shared library through ctypes and returns a library
    L{ctypes.CDLL} instance, or None if libmagic cannot be found.
    """
    dll = None
    lib = find_library('magic') or find_library('magic1')
    if lib:
        dll = ctypes.cdll.LoadLibrary(lib)

    # Following shamelessly copied from python-magic
    if (not dll) or (not dll._name):
        import sys
        lib_map = {
            'darwin' : (
                '/opt/local/lib/libmagic.dylib',
                '/usr/local/lib/libmagic.dylib',
                '/usr/local/Cellar/libmagic/5.10/lib/libmagic.dylib',
            ),
            'win32' : (
                'magic1.dll',
            )
        }
        for libpath in lib_map.get(sys.platform, ()):
            try:
                dll = ctypes.cdll.LoadLibrary(libpath)
            except OSError:
                dll = None
            else:
                # Bug fix: stop at the first path that loads. Previously the
                # loop continued, and a later missing path clobbered a
                # successful load with None.
                break

    return dll
# The loaded libmagic shared library (or None if unavailable).
_libraries = {}
_libraries['magic'] = _init()

# Flag constants for open and setflags
MAGIC_NONE = NONE = 0
MAGIC_DEBUG = DEBUG = 1
MAGIC_SYMLINK = SYMLINK = 2
MAGIC_COMPRESS = COMPRESS = 4
MAGIC_DEVICES = DEVICES = 8
MAGIC_MIME_TYPE = MIME_TYPE = 16
MAGIC_CONTINUE = CONTINUE = 32
MAGIC_CHECK = CHECK = 64
MAGIC_PRESERVE_ATIME = PRESERVE_ATIME = 128
MAGIC_RAW = RAW = 256
MAGIC_ERROR = ERROR = 512
MAGIC_MIME_ENCODING = MIME_ENCODING = 1024
MAGIC_MIME = MIME = 1040
MAGIC_APPLE = APPLE = 2048

MAGIC_NO_CHECK_COMPRESS = NO_CHECK_COMPRESS = 4096
MAGIC_NO_CHECK_TAR = NO_CHECK_TAR = 8192
MAGIC_NO_CHECK_SOFT = NO_CHECK_SOFT = 16384
MAGIC_NO_CHECK_APPTYPE = NO_CHECK_APPTYPE = 32768
MAGIC_NO_CHECK_ELF = NO_CHECK_ELF = 65536
MAGIC_NO_CHECK_TEXT = NO_CHECK_TEXT = 131072
MAGIC_NO_CHECK_CDF = NO_CHECK_CDF = 262144
MAGIC_NO_CHECK_TOKENS = NO_CHECK_TOKENS = 1048576
MAGIC_NO_CHECK_ENCODING = NO_CHECK_ENCODING = 2097152
MAGIC_NO_CHECK_BUILTIN = NO_CHECK_BUILTIN = 4173824


# Opaque struct behind the libmagic magic_t cookie. Its fields are never
# accessed from Python, so the layout is deliberately left empty.
class magic_set(Structure):
    pass
magic_set._fields_ = []
magic_t = POINTER(magic_set)

# ctypes prototypes for the libmagic C API; restype/argtypes mirror magic(3).
_open = _libraries['magic'].magic_open
_open.restype = magic_t
_open.argtypes = [c_int]

_close = _libraries['magic'].magic_close
_close.restype = None
_close.argtypes = [magic_t]

_file = _libraries['magic'].magic_file
_file.restype = c_char_p
_file.argtypes = [magic_t, c_char_p]

_descriptor = _libraries['magic'].magic_descriptor
_descriptor.restype = c_char_p
_descriptor.argtypes = [magic_t, c_int]

_buffer = _libraries['magic'].magic_buffer
_buffer.restype = c_char_p
_buffer.argtypes = [magic_t, c_void_p, c_size_t]

_error = _libraries['magic'].magic_error
_error.restype = c_char_p
_error.argtypes = [magic_t]

_setflags = _libraries['magic'].magic_setflags
_setflags.restype = c_int
_setflags.argtypes = [magic_t, c_int]

_load = _libraries['magic'].magic_load
_load.restype = c_int
_load.argtypes = [magic_t, c_char_p]

_compile = _libraries['magic'].magic_compile
_compile.restype = c_int
_compile.argtypes = [magic_t, c_char_p]

_check = _libraries['magic'].magic_check
_check.restype = c_int
_check.argtypes = [magic_t, c_char_p]

_list = _libraries['magic'].magic_list
_list.restype = c_int
_list.argtypes = [magic_t, c_char_p]

_errno = _libraries['magic'].magic_errno
_errno.restype = c_int
_errno.argtypes = [magic_t]
class Magic(object):
    """Object-oriented wrapper around a libmagic magic_t cookie."""

    def __init__(self, ms):
        # ms is the magic_t cookie obtained from magic_open().
        self._magic_t = ms

    def close(self):
        """
        Closes the magic database and deallocates any resources used.
        """
        _close(self._magic_t)

    def file(self, filename):
        """
        Returns a textual description of the contents of the argument passed
        as a filename or None if an error occurred and the MAGIC_ERROR flag
        is set. A call to errno() will return the numeric error code.
        """
        # Python 3 path: encode the filename and decode the result. Fall
        # back to the raw call on Python 2 (bytes() rejects an encoding
        # argument -> TypeError), when libmagic returns NULL (str(None, ...)
        # -> TypeError) or when the result is not valid UTF-8.
        # Narrowed from a bare `except:` so unrelated errors surface.
        try:
            bi = bytes(filename, 'utf-8')
            return str(_file(self._magic_t, bi), 'utf-8')
        except (TypeError, UnicodeDecodeError):
            return _file(self._magic_t, filename)

    def descriptor(self, fd):
        """
        Like the file method, but the argument is a file descriptor.
        """
        return _descriptor(self._magic_t, fd)

    def buffer(self, buf):
        """
        Returns a textual description of the contents of the argument passed
        as a buffer or None if an error occurred and the MAGIC_ERROR flag
        is set. A call to errno() will return the numeric error code.
        """
        # Same Python 3 / Python 2 / NULL-result fallback as file().
        try:
            return str(_buffer(self._magic_t, buf, len(buf)), 'utf-8')
        except (TypeError, UnicodeDecodeError):
            return _buffer(self._magic_t, buf, len(buf))

    def error(self):
        """
        Returns a textual explanation of the last error or None
        if there was no error.
        """
        # Same Python 3 / Python 2 / no-error (NULL) fallback as file().
        try:
            return str(_error(self._magic_t), 'utf-8')
        except (TypeError, UnicodeDecodeError):
            return _error(self._magic_t)

    def setflags(self, flags):
        """
        Set flags on the magic object which determine how magic checking behaves;
        a bitwise OR of the flags described in libmagic(3), but without the MAGIC_
        prefix.

        Returns -1 on systems that don't support utime(2) or utimes(2)
        when PRESERVE_ATIME is set.
        """
        return _setflags(self._magic_t, flags)

    def load(self, filename=None):
        """
        Must be called to load entries in the colon separated list of database files
        passed as argument or the default database file if no argument before
        any magic queries can be performed.

        Returns 0 on success and -1 on failure.
        """
        return _load(self._magic_t, filename)

    def compile(self, dbs):
        """
        Compile entries in the colon separated list of database files
        passed as argument or the default database file if no argument.
        Returns 0 on success and -1 on failure.
        The compiled files created are named from the basename(1) of each file
        argument with ".mgc" appended to it.
        """
        return _compile(self._magic_t, dbs)

    def check(self, dbs):
        """
        Check the validity of entries in the colon separated list of
        database files passed as argument or the default database file
        if no argument.
        Returns 0 on success and -1 on failure.
        """
        return _check(self._magic_t, dbs)

    def list(self, dbs):
        """
        Check the validity of entries in the colon separated list of
        database files passed as argument or the default database file
        if no argument.
        Returns 0 on success and -1 on failure.
        """
        return _list(self._magic_t, dbs)

    def errno(self):
        """
        Returns a numeric error code. If return value is 0, an internal
        magic error occurred. If return value is non-zero, the value is
        an OS error code. Use the errno module or os.strerror() can be used
        to provide detailed error information.
        """
        return _errno(self._magic_t)
def open(flags):
    """
    Returns a magic object on success and None on failure.
    Flags argument as for setflags.
    """
    cookie = _open(flags)
    return Magic(cookie)
|
spiceweasel/studyit-flashcard | refs/heads/master | book/models.py | 1 | # studyit-flashcard is a django based flashcard system designed for studying on the go.
# Copyright (C) 2013 Scott Jacovidis
#
# This file is part of studyit-flashcard.
#
# Studyit-flashcard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Studyit-flashcard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with studyit-flashcard. If not, see <http://www.gnu.org/licenses/gpl-2.0.html>.
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class BookCategory(models.Model):
    """A book category; categories form a tree via the *parent* link."""
    parent = models.ForeignKey('BookCategory', null=True, blank=True)
    name = models.CharField(max_length=30)
    # Per-instance cache for book_count(); None means "not computed yet".
    _book_count = None

    def __unicode__(self):
        return self.full_name()

    def full_name(self):
        """Return the '>'-joined path from the root category down to this one."""
        if self.parent:
            return self.parent.full_name() + '>' + self.name
        return self.name

    def book_count(self):
        """Count the books in this category, cached after the first call.

        Fix: comparison to None now uses `is None` (PEP 8) instead of `==`.
        """
        if self._book_count is None:
            self._book_count = self.book_set.all().count()
        return self._book_count

    def children(self):
        """Return a queryset of the direct child categories."""
        return BookCategory.objects.filter(parent=self.id)
class Book(models.Model):
    """A textbook, keyed by ISBN and browsable by category."""
    isbn = models.CharField(primary_key=True, max_length=13)
    title = models.CharField(max_length=100, db_index=True)
    edition = models.CharField(max_length=25)
    authors = models.CharField(max_length=100, db_index=True)
    publisher = models.CharField(max_length=100, db_index=True)
    publication_date = models.DateField()
    category = models.ManyToManyField(BookCategory)

    def __unicode__(self):
        return self.title
class Chapter(models.Model):
    """A numbered chapter of a Book, ordered by its number."""
    book = models.ForeignKey(Book)
    number = models.SmallIntegerField()
    title = models.CharField(max_length=100, db_index=True)

    def __unicode__(self):
        return self.title

    class Meta:
        ordering = ('number',)
class Section(models.Model):
    """A numbered section within a Chapter; title and page are optional."""
    chapter = models.ForeignKey(Chapter)
    number = models.SmallIntegerField()
    title = models.CharField(max_length=125, blank=True, default="")
    page = models.IntegerField(blank=True, default=None, null=True)

    def __unicode__(self):
        return "%i" % self.number

    class Meta:
        ordering = ('number',)
|
grlee77/nipype | refs/heads/master | nipype/interfaces/slicer/filtering/arithmetic.py | 15 | # -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class MultiplyScalarVolumesInputSpec(CommandLineInputSpec):
    # Autogenerated SEM input spec; argstr maps each trait to a CLI argument.
    inputVolume1 = File(position=-3, desc="Input volume 1", exists=True, argstr="%s")
    inputVolume2 = File(position=-2, desc="Input volume 2", exists=True, argstr="%s")
    outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Volume1 * Volume2", argstr="%s")
    order = traits.Enum("0", "1", "2", "3", desc="Interpolation order if two images are in different coordinate frames or have different sampling.", argstr="--order %s")
class MultiplyScalarVolumesOutputSpec(TraitedSpec):
    # Autogenerated SEM output spec.
    outputVolume = File(position=-1, desc="Volume1 * Volume2", exists=True)
class MultiplyScalarVolumes(SEMLikeCommandLine):
    """title: Multiply Scalar Volumes

category: Filtering.Arithmetic

description: Multiplies two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions.

version: 0.1.0.$Revision: 8595 $(alpha)

documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Multiply

contributor: Bill Lorensen (GE)

acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.

"""

    # NOTE: autogenerated wrapper for the Slicer CLI module -- regenerate
    # rather than hand-edit.
    input_spec = MultiplyScalarVolumesInputSpec
    output_spec = MultiplyScalarVolumesOutputSpec
    _cmd = "MultiplyScalarVolumes "
    _outputs_filenames = {'outputVolume':'outputVolume.nii'}
class MaskScalarVolumeInputSpec(CommandLineInputSpec):
    # Autogenerated SEM input spec; argstr maps each trait to a CLI argument.
    InputVolume = File(position=-3, desc="Input volume to be masked", exists=True, argstr="%s")
    MaskVolume = File(position=-2, desc="Label volume containing the mask", exists=True, argstr="%s")
    OutputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output volume: Input Volume masked by label value from Mask Volume", argstr="%s")
    label = traits.Int(desc="Label value in the Mask Volume to use as the mask", argstr="--label %d")
    replace = traits.Int(desc="Value to use for the output volume outside of the mask", argstr="--replace %d")
class MaskScalarVolumeOutputSpec(TraitedSpec):
    # Autogenerated SEM output spec.
    OutputVolume = File(position=-1, desc="Output volume: Input Volume masked by label value from Mask Volume", exists=True)
class MaskScalarVolume(SEMLikeCommandLine):
    """title: Mask Scalar Volume

category: Filtering.Arithmetic

description: Masks two images. The output image is set to 0 everywhere except where the chosen label from the mask volume is present, at which point it will retain it's original values. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions.

version: 0.1.0.$Revision: 8595 $(alpha)

documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Mask

contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH)

acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.

"""

    # NOTE: autogenerated wrapper for the Slicer CLI module -- regenerate
    # rather than hand-edit.
    input_spec = MaskScalarVolumeInputSpec
    output_spec = MaskScalarVolumeOutputSpec
    _cmd = "MaskScalarVolume "
    _outputs_filenames = {'OutputVolume':'OutputVolume.nii'}
class SubtractScalarVolumesInputSpec(CommandLineInputSpec):
    # Autogenerated SEM input spec; argstr maps each trait to a CLI argument.
    inputVolume1 = File(position=-3, desc="Input volume 1", exists=True, argstr="%s")
    inputVolume2 = File(position=-2, desc="Input volume 2", exists=True, argstr="%s")
    outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Volume1 - Volume2", argstr="%s")
    order = traits.Enum("0", "1", "2", "3", desc="Interpolation order if two images are in different coordinate frames or have different sampling.", argstr="--order %s")
class SubtractScalarVolumesOutputSpec(TraitedSpec):
    # Autogenerated SEM output spec.
    outputVolume = File(position=-1, desc="Volume1 - Volume2", exists=True)
class SubtractScalarVolumes(SEMLikeCommandLine):
    """title: Subtract Scalar Volumes

category: Filtering.Arithmetic

description: Subtracts two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions.

version: 0.1.0.$Revision: 19608 $(alpha)

documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Subtract

contributor: Bill Lorensen (GE)

acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.

"""

    # NOTE: autogenerated wrapper for the Slicer CLI module -- regenerate
    # rather than hand-edit.
    input_spec = SubtractScalarVolumesInputSpec
    output_spec = SubtractScalarVolumesOutputSpec
    _cmd = "SubtractScalarVolumes "
    _outputs_filenames = {'outputVolume':'outputVolume.nii'}
class AddScalarVolumesInputSpec(CommandLineInputSpec):
    # Auto-generated input spec for the Slicer "AddScalarVolumes" CLI.
    # Negative `position` values place the volumes as trailing positional args.
    inputVolume1 = File(position=-3, desc="Input volume 1", exists=True, argstr="%s")
    inputVolume2 = File(position=-2, desc="Input volume 2", exists=True, argstr="%s")
    # Either a bool (auto-generate the filename) or an explicit output path.
    outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Volume1 + Volume2", argstr="%s")
    order = traits.Enum("0", "1", "2", "3", desc="Interpolation order if two images are in different coordinate frames or have different sampling.", argstr="--order %s")


class AddScalarVolumesOutputSpec(TraitedSpec):
    # File produced by the CLI run (must exist after execution).
    outputVolume = File(position=-1, desc="Volume1 + Volume2", exists=True)


class AddScalarVolumes(SEMLikeCommandLine):
    """title: Add Scalar Volumes
category: Filtering.Arithmetic
description: Adds two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions.
version: 0.1.0.$Revision: 19608 $(alpha)
documentation-url: http://slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Add
contributor: Bill Lorensen (GE)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""

    # Wire the specs to the external "AddScalarVolumes" executable.
    input_spec = AddScalarVolumesInputSpec
    output_spec = AddScalarVolumesOutputSpec
    _cmd = "AddScalarVolumes "
    _outputs_filenames = {'outputVolume':'outputVolume.nii'}
class CastScalarVolumeInputSpec(CommandLineInputSpec):
    # Auto-generated input spec for the Slicer "CastScalarVolume" CLI.
    InputVolume = File(position=-2, desc="Input volume, the volume to cast.", exists=True, argstr="%s")
    # Either a bool (auto-generate the filename) or an explicit output path.
    OutputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output volume, cast to the new type.", argstr="%s")
    type = traits.Enum("Char", "UnsignedChar", "Short", "UnsignedShort", "Int", "UnsignedInt", "Float", "Double", desc="Type for the new output volume.", argstr="--type %s")


class CastScalarVolumeOutputSpec(TraitedSpec):
    # File produced by the CLI run (must exist after execution).
    OutputVolume = File(position=-1, desc="Output volume, cast to the new type.", exists=True)


class CastScalarVolume(SEMLikeCommandLine):
    """title: Cast Scalar Volume
category: Filtering.Arithmetic
description: Cast a volume to a given data type.
Use at your own risk when casting an input volume into a lower precision type!
Allows casting to the same type as the input volume.
version: 0.1.0.$Revision: 2104 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Cast
contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""

    # Wire the specs to the external "CastScalarVolume" executable.
    input_spec = CastScalarVolumeInputSpec
    output_spec = CastScalarVolumeOutputSpec
    _cmd = "CastScalarVolume "
    _outputs_filenames = {'OutputVolume':'OutputVolume.nii'}
|
CERNDocumentServer/invenio | refs/heads/prod | modules/bibformat/lib/elements/bfe_sword_push.py | 1 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Print link to push an entry to a remote server through SWORD
"""
__revision__ = "$Id$"
from invenio.config import CFG_BASE_URL
from invenio.urlutils import create_html_link
from invenio.access_control_engine import acc_authorize_action
def format_element(bfo, remote_server_id, link_label="Push via Sword"):
    """
    Render an HTML link for pushing the current record to a remote server
    through SWORD; returns an empty string when the user is not authorized.

    @param remote_server_id: ID of the remove server to link to. When
                             not specified, link to BibSword page
                             allowing to select server.
    """
    # Only users holding the 'runbibswordclient' right get to see the link.
    auth_code, auth_message = acc_authorize_action(bfo.user_info,
                                                   'runbibswordclient')
    if auth_code != 0:
        return ""

    link_args = {'ln': bfo.lang, 'record_id': bfo.recID}
    if remote_server_id:
        link_args['server_id'] = remote_server_id

    return create_html_link(CFG_BASE_URL + '/sword_client/submit',
                            link_args,
                            link_label)
def escape_values(bfo):
    """
    Called by BibFormat in order to check if output of this element
    should be escaped.  A return of 0 means "do not escape".
    """
    return 0
|
smartforceplus/SmartForceplus | refs/heads/master | addons/website_forum/controllers/__init__.py | 4497 | # -*- coding: utf-8 -*-
import main
|
mattiaslinnap/django-partial-index | refs/heads/master | partial_index/mixins.py | 1 | from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.db.models import Q
from .index import PartialIndex
from . import query
class PartialUniqueValidationError(ValidationError):
    # Distinct subclass so callers can tell partial-unique failures apart
    # from ordinary ValidationErrors.
    pass
class ValidatePartialUniqueMixin(object):
    """PartialIndex with unique=True validation to ModelForms and Django Rest Framework Serializers.

    Mixin should be added before the parent model class, for example:

    class MyModel(ValidatePartialUniqueMixin, models.Model):
        ...
        indexes = [
            PartialIndex(...)
        ]

    The mixin is usable only for PartialIndexes with a Q-object where-condition. If applied to a model
    with a text-based where-condition, an error is raised.

    Important Note:
    Django's standard ModelForm validation for unique constraints is sub-optimal. If a field belonging to the
    unique index is not present on the form, then it the constraint is not validated. This requires adding
    hidden fields on the form, checking them against tampering, etc.
    ValidatePartialUniqueMixin does not follow that example:
    It always validates with all fields, even if they are not on the form.
    """

    def validate_unique(self, exclude=None):
        # Standard unique validation first.
        super(ValidatePartialUniqueMixin, self).validate_unique(exclude=exclude)
        self.validate_partial_unique()

    def validate_partial_unique(self):
        """Check partial unique constraints on the model and raise ValidationError if any failed.

        We want to check if another instance already exists with the fields mentioned in idx.fields, but only if idx.where matches.
        But can't just check for the fields in idx.fields - idx.where may refer to other fields on the current (or other) models.
        Also can't check for all fields on the current model - should not include irrelevant fields which may hide duplicates.

        To find potential conflicts, we need to build a queryset which:
        1. Filters by idx.fields with their current values on this instance,
        2. Filters on idx.where
        3. Filters by fields mentioned in idx.where, with their current values on this instance,
        4. Excludes current object if it does not match the where condition.

        Note that step 2 ensures the lookup only looks for conflicts among rows covered by the PartialIndes,
        and steps 2+3 ensures that the QuerySet is empty if the PartialIndex does not cover the current object.
        """
        # Find PartialIndexes with unique=True defined on model.
        unique_idxs = [idx for idx in self._meta.indexes if isinstance(idx, PartialIndex) and idx.unique]
        if unique_idxs:
            model_fields = set(f.name for f in self._meta.get_fields(include_parents=True, include_hidden=True))
            for idx in unique_idxs:
                where = idx.where
                if not isinstance(where, Q):
                    raise ImproperlyConfigured(
                        'ValidatePartialUniqueMixin is not supported for PartialIndexes with a text-based where condition. ' +
                        'Please upgrade to Q-object based where conditions.'
                    )
                mentioned_fields = set(idx.fields) | set(query.q_mentioned_fields(where, self.__class__))
                missing_fields = mentioned_fields - model_fields
                if missing_fields:
                    # BUGFIX: the %s placeholder was never interpolated before; the
                    # message now actually names the missing fields.
                    raise RuntimeError(('Unable to use ValidatePartialUniqueMixin: expecting to find fields %s on model. ' +
                                        'This is a bug in the PartialIndex definition or the django-partial-index library itself.')
                                       % sorted(missing_fields))
                values = {field_name: getattr(self, field_name) for field_name in mentioned_fields}
                conflict = self.__class__.objects.filter(**values)  # Step 1 and 3
                conflict = conflict.filter(where)  # Step 2
                if self.pk:
                    conflict = conflict.exclude(pk=self.pk)  # Step 4
                if conflict.exists():
                    raise PartialUniqueValidationError('%s with the same values for %s already exists.' % (
                        self.__class__.__name__,
                        ', '.join(sorted(idx.fields)),
                    ))
|
KangaCoders/titanium_mobile | refs/heads/master | support/common/markdown/preprocessors.py | 112 |
"""
PRE-PROCESSORS
=============================================================================
Preprocessors work on source text before we start doing anything too
complicated.
"""
import re
import markdown
HTML_PLACEHOLDER_PREFIX = markdown.STX+"wzxhzdk:"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%d" + markdown.ETX
class Processor:
    # Minimal base class: optionally stores a back-reference to the Markdown
    # instance so subclasses can reach shared state (e.g. the htmlStash).
    def __init__(self, markdown_instance=None):
        if markdown_instance:
            self.markdown = markdown_instance
class Preprocessor (Processor):
    """
    Preprocessors are run after the text is broken into lines.

    Each preprocessor implements a "run" method that takes a pointer to a
    list of lines of the document, modifies it as necessary and returns
    either the same pointer or a pointer to a new list.

    Preprocessors must extend markdown.Preprocessor.
    """
    def run(self, lines):
        """
        Each subclass of Preprocessor should override the `run` method, which
        takes the document as a list of strings split by newlines and returns
        the (possibly modified) list of lines.
        """
        # Intentionally a no-op in the base class.
        pass
class HtmlStash:
    """
    Holds raw HTML fragments pulled out of the source document.  Each
    stored fragment is replaced in the text by a unique placeholder string
    that is swapped back for the original HTML at the end of processing.
    """

    def __init__ (self):
        """ Create a HtmlStash. """
        self.html_counter = 0 # for counting inline html segments
        self.rawHtmlBlocks = []

    def store(self, html, safe=False):
        """
        Saves an HTML segment for later reinsertion.  Returns a
        placeholder string that needs to be inserted into the document.

        Keyword arguments:

        * html: an html segment
        * safe: label an html segment as safe for safemode

        Returns : a placeholder string
        """
        index = self.html_counter
        self.rawHtmlBlocks.append((html, safe))
        self.html_counter = index + 1
        return HTML_PLACEHOLDER % index

    def reset(self):
        # Forget everything stored so far and restart the counter.
        self.html_counter = 0
        self.rawHtmlBlocks = []
class HtmlBlockPreprocessor(Preprocessor):
    """Remove html blocks from the text and store them for later retrieval."""

    # Candidate closing-tag shapes tried, in order, by _get_right_tag().
    right_tag_patterns = ["</%s>", "%s>"]

    def _get_left_tag(self, block):
        # Tag name of the first opening tag in the block, lowercased.
        return block[1:].replace(">", " ", 1).split()[0].lower()

    def _get_right_tag(self, left_tag, block):
        # Return (closing tag name, index just past it) for left_tag in block;
        # falls back to the block's trailing characters when no pattern matches.
        for p in self.right_tag_patterns:
            tag = p % left_tag
            i = block.rfind(tag)
            if i > 2:
                return tag.lstrip("<").rstrip(">"), i + len(p)-2 + len(left_tag)
        return block.rstrip()[-len(left_tag)-2:-1].lower(), len(block)

    def _equal_tags(self, left_tag, right_tag):
        # True if right_tag closes left_tag (with special cases for div,
        # PHP-style tags, and HTML comments "--").
        if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
            return True
        if ("/" + left_tag) == right_tag:
            return True
        if (right_tag == "--" and left_tag == "--"):
            return True
        elif left_tag == right_tag[1:] \
            and right_tag[0] != "<":
            return True
        else:
            return False

    def _is_oneliner(self, tag):
        # Tags that are complete on their own line (no closing tag expected).
        return (tag in ['hr', 'hr/'])

    def run(self, lines):
        # Scan the document paragraph-by-paragraph ("\n\n"-separated chunks),
        # stashing raw HTML blocks and leaving placeholders in their place.
        text = "\n".join(lines)
        new_blocks = []
        text = text.split("\n\n")
        items = []          # accumulates chunks of an open multi-paragraph tag
        left_tag = ''
        right_tag = ''
        in_tag = False # flag: currently inside an unclosed block-level tag

        while text:
            block = text[0]
            if block.startswith("\n"):
                block = block[1:]
            text = text[1:]

            if block.startswith("\n"):
                block = block[1:]

            if not in_tag:
                if block.startswith("<"):
                    left_tag = self._get_left_tag(block)
                    right_tag, data_index = self._get_right_tag(left_tag, block)

                    if block[1] == "!":
                        # is a comment block
                        left_tag = "--"
                        right_tag, data_index = self._get_right_tag(left_tag, block)
                        # keep checking conditions below and maybe just append

                    # Trailing text after a closed block-level tag is pushed
                    # back for processing as a separate chunk.
                    if data_index < len(block) \
                        and markdown.isBlockLevel(left_tag):
                        text.insert(0, block[data_index:])
                        block = block[:data_index]

                    # Inline-level tags pass through untouched.
                    if not (markdown.isBlockLevel(left_tag) \
                        or block[1] in ["!", "?", "@", "%"]):
                        new_blocks.append(block)
                        continue

                    if self._is_oneliner(left_tag):
                        new_blocks.append(block.strip())
                        continue

                    # Complete open+close pair in one chunk: stash it whole.
                    if block.rstrip().endswith(">") \
                        and self._equal_tags(left_tag, right_tag):
                        new_blocks.append(
                            self.markdown.htmlStash.store(block.strip()))
                        continue
                    else: #if not block[1] == "!":
                        # if is block level tag and is not complete
                        if markdown.isBlockLevel(left_tag) or left_tag == "--" \
                            and not block.rstrip().endswith(">"):
                            items.append(block.strip())
                            in_tag = True
                        else:
                            new_blocks.append(
                                self.markdown.htmlStash.store(block.strip()))

                        continue

                new_blocks.append(block)

            else:
                # Inside an open tag: keep collecting until its close appears.
                items.append(block.strip())

                right_tag, data_index = self._get_right_tag(left_tag, block)

                if self._equal_tags(left_tag, right_tag):
                    # if find closing tag
                    in_tag = False
                    new_blocks.append(
                        self.markdown.htmlStash.store('\n\n'.join(items)))
                    items = []

        # Tag never closed: stash what was collected anyway.
        if items:
            new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items)))
            new_blocks.append('\n')

        new_text = "\n\n".join(new_blocks)
        return new_text.split("\n")
class ReferencePreprocessor(Preprocessor):
    """ Remove reference definitions from text and store for later use. """

    # Matches "[id]: url optional-title" with up to 3 leading spaces.
    RE = re.compile(r'^(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)$', re.DOTALL)

    def run (self, lines):
        new_text = [];
        for line in lines:
            m = self.RE.match(line)
            if m:
                # Reference ids are case-insensitive.
                id = m.group(2).strip().lower()
                t = m.group(4).strip() # potential title
                if not t:
                    self.markdown.references[id] = (m.group(3), t)
                elif (len(t) >= 2
                      and (t[0] == t[-1] == "\""
                           or t[0] == t[-1] == "\'"
                           or (t[0] == "(" and t[-1] == ")") ) ):
                    # Quoted/parenthesized title: store without the delimiters.
                    self.markdown.references[id] = (m.group(3), t[1:-1])
                else:
                    # Malformed title: keep the line in the document as-is.
                    new_text.append(line)
            else:
                new_text.append(line)
        return new_text #+ "\n"
|
kdwink/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/db/backends/mysql/creation.py | 311 | from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
    # This dictionary maps Field objects to their associated MySQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    data_types = {
        'AutoField': 'integer AUTO_INCREMENT',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'double precision',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer UNSIGNED',
        'PositiveSmallIntegerField': 'smallint UNSIGNED',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'longtext',
        'TimeField': 'time',
    }

    def sql_table_creation_suffix(self):
        # Optional "CHARACTER SET ... COLLATE ..." suffix for CREATE DATABASE,
        # built from the TEST_* connection settings.
        suffix = []
        if self.connection.settings_dict['TEST_CHARSET']:
            suffix.append('CHARACTER SET %s' % self.connection.settings_dict['TEST_CHARSET'])
        if self.connection.settings_dict['TEST_COLLATION']:
            suffix.append('COLLATE %s' % self.connection.settings_dict['TEST_COLLATION'])
        return ' '.join(suffix)

    def sql_for_inline_foreign_key_references(self, field, known_models, style):
        "All inline references are pending under MySQL"
        return [], True

    def sql_for_inline_many_to_many_references(self, model, field, style):
        # Emit the two FK columns of the M2M join table; the FK constraints
        # themselves are returned in `deferred` to be added later.
        from django.db import models
        opts = model._meta
        qn = self.connection.ops.quote_name

        table_output = [
            '    %s %s %s,' %
                (style.SQL_FIELD(qn(field.m2m_column_name())),
                style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL')),
            '    %s %s %s,' %
                (style.SQL_FIELD(qn(field.m2m_reverse_name())),
                style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
                style.SQL_KEYWORD('NOT NULL'))
        ]
        deferred = [
            (field.m2m_db_table(), field.m2m_column_name(), opts.db_table,
                opts.pk.column),
            (field.m2m_db_table(), field.m2m_reverse_name(),
                field.rel.to._meta.db_table, field.rel.to._meta.pk.column)
        ]
        return table_output, deferred
|
libracore/erpnext | refs/heads/v12 | erpnext/patches/v4_2/reset_bom_costs.py | 120 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: recalculate and save costs for all non-cancelled BOMs.

    Best-effort per BOM: a failure on one BOM rolls back its transaction
    and moves on to the next one.
    """
    frappe.reload_doc('manufacturing', 'doctype', 'bom_operation')
    for d in frappe.db.sql("""select name from `tabBOM` where docstatus < 2""", as_dict=1):
        try:
            bom = frappe.get_doc('BOM', d.name)
            # Submitted docs normally reject edits; this flag allows the update.
            bom.flags.ignore_validate_update_after_submit = True
            bom.calculate_cost()
            bom.save()
            frappe.db.commit()
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; keep the best-effort rollback but let
            # those propagate.
            frappe.db.rollback()
|
nicholedwight/nichole-theme | refs/heads/master | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/trac.py | 364 | # -*- coding: utf-8 -*-
"""
pygments.styles.trac
~~~~~~~~~~~~~~~~~~~~
Port of the default trac highlighter design.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class TracStyle(Style):
    """
    Port of the default trac highlighter design.
    """

    default_style = ''

    # Token -> style-string mapping (Pygments style-definition syntax:
    # optional 'bold'/'italic' modifiers, '#rrggbb' foreground, 'bg:#rrggbb').
    styles = {
        Whitespace:             '#bbbbbb',
        Comment:                'italic #999988',
        Comment.Preproc:        'bold noitalic #999999',
        Comment.Special:        'bold #999999',

        Operator:               'bold',

        String:                 '#bb8844',
        String.Regex:           '#808000',

        Number:                 '#009999',

        Keyword:                'bold',
        Keyword.Type:           '#445588',

        Name.Builtin:           '#999999',
        Name.Function:          'bold #990000',
        Name.Class:             'bold #445588',
        Name.Exception:         'bold #990000',
        Name.Namespace:         '#555555',
        Name.Variable:          '#008080',
        Name.Constant:          '#008080',
        Name.Tag:               '#000080',
        Name.Attribute:         '#008080',
        Name.Entity:            '#800080',

        Generic.Heading:        '#999999',
        Generic.Subheading:     '#aaaaaa',
        Generic.Deleted:        'bg:#ffdddd #000000',
        Generic.Inserted:       'bg:#ddffdd #000000',
        Generic.Error:          '#aa0000',
        Generic.Emph:           'italic',
        Generic.Strong:         'bold',
        Generic.Prompt:         '#555555',
        Generic.Output:         '#888888',
        Generic.Traceback:      '#aa0000',

        Error:                  'bg:#e3d2d2 #a61717'
    }
|
jonasmcarson/cmsplugin-iframe | refs/heads/master | tests/settings_18.py | 2 | # This settings module is for Django 1.8 or higher
from settings_17 import * # NOQA
# Django >= 1.8 template configuration (supersedes the TEMPLATE_* settings
# inherited from settings_17 via the star-import above).
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    # Look for templates inside each installed app's "templates" directory.
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.request',
        ],
    }
}]
|
geekaia/edx-platform | refs/heads/master | common/djangoapps/cache_toolbox/model.py | 239 | """
Caching model instances
-----------------------
``cache_model`` adds utility methods to a model to obtain ``ForeignKey``
instances via the cache.
Usage
~~~~~
::
from django.db import models
from django.contrib.auth.models import User
class Foo(models.Model):
name = models.CharField(length=20)
cache_model(Foo)
::
>>> a = Foo.objects.create(name='a')
>>> a
<Foo: >
>>> Foo.get_cached(a.pk) # Cache miss
<Foo: >
>>> a = Foo.get_cached(a.pk) # Cache hit
>>> a.name
u'a'
Instances returned from ``get_cached`` are real model instances::
>>> a = Foo.get_cached(a.pk) # Cache hit
>>> type(a)
<class '__main__.models.A'>
>>> a.pk
1L
Invalidation
~~~~~~~~~~~~
Invalidation is performed automatically upon saving or deleting a ``Foo``
instance::
>>> a = Foo.objects.create(name='a')
>>> a.name = 'b'
>>> a.save()
>>> a = Foo.get_cached(a.pk)
>>> a.name
u'b'
>>> a.delete()
>>> a = Foo.get_cached(a.pk)
... Foo.DoesNotExist
"""
from django.db.models.signals import post_save, post_delete
from .core import get_instance, delete_instance
def cache_model(model, timeout=None):
    # Attach a `get_cached(pk, using=None)` classmethod to `model` and wire
    # up automatic cache invalidation on every save/delete of its instances.
    # `timeout` (seconds, or None) is captured in the closure and passed to
    # get_instance on every lookup.
    if hasattr(model, 'get_cached'):
        # Already patched
        return

    def clear_cache(sender, instance, *args, **kwargs):
        # Invalidate the cached copy whenever an instance changes or is removed.
        delete_instance(sender, instance)

    # weak=False keeps the local closure alive for the life of the process.
    post_save.connect(clear_cache, sender=model, weak=False)
    post_delete.connect(clear_cache, sender=model, weak=False)

    @classmethod
    def get(cls, pk, using=None):
        # Cache-backed lookup; None pk short-circuits to None.
        if pk is None:
            return None

        return get_instance(cls, pk, timeout, using)

    model.get_cached = get
|
nschloe/seacas | refs/heads/master | cmake/tribits/ci_support/CheckinTest.py | 1 | # @HEADER
# ************************************************************************
#
# TriBITS: Tribal Build, Integrate, and Test System
# Copyright 2013 Sandia Corporation
#
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Corporation nor the names of the
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ************************************************************************
# @HEADER
#
# ToDo:
#
# (*) Create a TaskStatus class and use it to simplify the logic replacing
# the simple bools.
#
#
# General scripting support
#
# NOTE: Included first to check the version of python!
#
from __future__ import print_function
import sys
import os
import time
import pprint
import re
checkinTestBasePath = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
sys.path = [checkinTestBasePath+"/../python_utils"] + sys.path
from GeneralScriptSupport import *
from CheckinTestConstants import *
from TribitsDependencies import getProjectDependenciesFromXmlFile
from TribitsDependencies import getDefaultDepsXmlInFile
from TribitsPackageFilePathUtils import *
import gitdist
pp = pprint.PrettyPrinter(indent=4)
# Load some default dependencies for some unit tests
projectDependenciesCache = None
def getDefaultProjectDependenices():
    # Return the module-level cached project dependencies object (populated
    # elsewhere).  NOTE: the "Dependenices" misspelling is part of the public
    # name and is kept for the callers that use it.
    return projectDependenciesCache
def getGitRepoDir(srcDir, gitRepoName):
    """Path of a git repo: srcDir/<name>, or srcDir itself for the base repo ('')."""
    if not gitRepoName:
        return srcDir
    return srcDir+"/"+gitRepoName
def getGitRepoFileExt(gitRepoName):
    """Per-repo suffix for output file names: '.<name>', or '' for the base repo."""
    return "."+gitRepoName if gitRepoName else ""
# ---------------------------------------------------------------------------
# Names of the various config/output/success marker files used by the
# checkin-test driver.  The *.out files capture command output; the matching
# *.success files are empty markers written when a phase passes.  Helpers
# taking gitRepoName append a per-repo extension via getGitRepoFileExt().
# ---------------------------------------------------------------------------

def getCommonConfigFileName():
    return "COMMON.config"

def getProjectDependenciesXmlFileName(projectName):
    return projectName+"PackageDependencies.xml"

def getProjectDependenciesXmlGenerateOutputFileName(projectName):
    return projectName+"PackageDependencies.generate.out"

def getProjectExtraReposPythonOutFile(projectName):
    return projectName+"ExtraRepos.py"

def getTribitsGetExtraReposForCheckinTestOututFile(projectName):
    return projectName+"ExtraRepos.generate.out"

def getBuildSpecificConfigFileName(buildTestCaseName):
    return buildTestCaseName+".config"

def getInitialPullOutputFileName(gitRepoName):
    return "pullInitial"+getGitRepoFileExt(gitRepoName)+".out"

def getInitialExtraPullOutputFileName(gitRepoName):
    return "pullInitialExtra"+getGitRepoFileExt(gitRepoName)+".out"

def getInitialPullSuccessFileName():
    return "pullInitial.success"

def getModifiedFilesOutputFileName(gitRepoName):
    return "modifiedFiles"+getGitRepoFileExt(gitRepoName)+".out"

def getFinalPullOutputFileName(gitRepoName):
    return "pullFinal"+getGitRepoFileExt(gitRepoName)+".out"

def getConfigureOutputFileName():
    return "configure.out"

def getConfigureSuccessFileName():
    return "configure.success"

def getBuildOutputFileName():
    return "make.out"

def getBuildSuccessFileName():
    return "make.success"

def getTestOutputFileName():
    return "ctest.out"

def getTestSuccessFileName():
    return "ctest.success"

def getEmailBodyFileName():
    return "email.out"

def getEmailSuccessFileName():
    return "email.success"

def getFinalCommitBodyFileName(gitRepoName):
    return "commitFinalBody"+getGitRepoFileExt(gitRepoName)+".out"

def getFinalCommitOutputFileName(gitRepoName):
    return "commitFinal"+getGitRepoFileExt(gitRepoName)+".out"

def getCommitStatusEmailBodyFileName():
    return "commitStatusEmailBody.out"

def getPushOutputFileName(gitRepoName):
    return "push"+getGitRepoFileExt(gitRepoName)+".out"

def getExtraCommandOutputFileName():
    return "extraCommand.out"

def getHostname():
    # Shell out to `hostname`; second arg True strips trailing whitespace.
    return getCmndOutput("hostname", True)
def getEmailAddressesSpaceString(emailAddressesCommasStr):
    """Turn a comma-separated address list into a space-separated one."""
    return ' '.join(emailAddressesCommasStr.split(','))
def performAnyBuildTestActions(inOptions):
    """True if any configure/build/test action was requested on the options."""
    return bool(
        inOptions.doConfigure or inOptions.doBuild or inOptions.doTest
        or inOptions.doAll or inOptions.localDoAll)
def performAnyActions(inOptions):
    # True if any build/test action or a pull was requested.
    if performAnyBuildTestActions(inOptions) or inOptions.doPull:
        return True
    return False


def doGenerateOutputFiles(inOptions):
    # Output files are (re)generated whenever any action is performed.
    return performAnyActions(inOptions)


def doRemoveOutputFiles(inOptions):
    # Stale output files are removed whenever any action is performed.
    return performAnyActions(inOptions)
def assertAndSetupGit(inOptions):
    # Verify that 'git' is on the PATH; on success record the command name
    # as inOptions.git, otherwise raise.
    gitWhich = getCmndOutput("which git", True, False)
    if gitWhich == "" or re.match(".+no git.+", gitWhich):
        msg = "Error, the 'git' command is not in your path! (" + gitWhich + ")"
        print(msg)
        raise Exception(msg)
    else:
        setattr(inOptions, "git", "git")
def assertGitRepoExists(inOptions, gitRepo):
    """Raise if the repo's directory under inOptions.srcDir does not exist."""
    gitRepoDir = getGitRepoDir(inOptions.srcDir, gitRepo.repoDir)
    # Was `os.path.os.path.exists(...)` -- that only worked by accident
    # (posixpath re-exports the os module); use the intended call directly.
    if not os.path.exists(gitRepoDir):
        raise Exception("Error, the specified git repo '"+gitRepo.repoName+"' directory"
            " '"+gitRepoDir+"' does not exist!")
def assertPackageNames(optionName, packagesListStr):
    # Validate a comma-separated package-name option against the known
    # project dependencies; raise with the full list of valid names on error.
    if not packagesListStr:
        return
    for packageName in packagesListStr.split(','):
        if getDefaultProjectDependenices().packageNameToID(packageName) == -1:
            # Unknown package: build a helpful "valid names" message.
            validPackagesListStr = ""
            for i in range(getDefaultProjectDependenices().numPackages()):
                if validPackagesListStr != "":
                    validPackagesListStr += ", "
                validPackagesListStr += getDefaultProjectDependenices().getPackageByID(i).packageName
            raise Exception("Error, invalid package name "+packageName+" in " \
                +optionName+"="+packagesListStr \
                +".  The valid package names include: "+validPackagesListStr)
def assertExtraBuildConfigFiles(extraBuilds):
    """Check that every comma-separated extra build has a <name>.config file."""
    if not extraBuilds:
        return
    for buildName in extraBuilds.split(','):
        configFileName = buildName + ".config"
        if os.path.exists(configFileName):
            continue
        raise Exception("Error, the extra build configuration file " \
            +configFileName+" does not exit!")
class GitdistOptions:
    # Minimal options object carrying the fields gitdist functions expect.
    def __init__(self, useGit):
        self.useGit = useGit  # name/path of the git executable to run


# Create a matching version of gitdist.getCmndOutout
def getCmndOutputForGitDist(cmnd, rtnCode=False):
    # Adapter with the call signature gitdist expects; never throws on a
    # failing command (errors are reported via the return code instead).
    return getCmndOutput(cmnd, rtnCode=rtnCode, throwOnError=False)
def getRepoStats(inOptions, gitRepo_inout):
    # Fill in gitRepo_inout.gitRepoStats by running gitdist.getRepoStats()
    # with the CWD temporarily switched into the repo's directory.
    gitRepoDir = getGitRepoDir(inOptions.srcDir, gitRepo_inout.repoDir)
    gitdistOptions = GitdistOptions(inOptions.git)
    pwd = os.getcwd()
    try:
        os.chdir(gitRepoDir)
        gitRepo_inout.gitRepoStats = \
            gitdist.getRepoStats(gitdistOptions, getCmndOutputForGitDist)
    finally:
        # Always restore the original working directory, even on error.
        os.chdir(pwd)
def getReposStats(inOptions, tribitsGitRepos):
    # Gather per-repo git stats for all repos, print them as an ASCII status
    # table, and return True if any repo has local commits to push.
    hasChangesToPush = False
    repoStatTable = gitdist.RepoStatTable()
    repoIdx = 0
    for gitRepo in tribitsGitRepos.gitRepoList():
        getRepoStats(inOptions, gitRepo)
        if gitRepo.gitRepoStats.numCommitsInt() > 0:
            hasChangesToPush = True
        repoStatTableDirName = getRepoStatTableDirName(inOptions, gitRepo.repoDir)
        repoStatTable.insertRepoStat(repoStatTableDirName, gitRepo.gitRepoStats, repoIdx)
        repoIdx += 1
    print(gitdist.createAsciiTable(repoStatTable.getTableData()))
    return hasChangesToPush
# NOTE: Above, we could just call 'gitdist dist-repo-status' but by
# printing the table here with the actualy gitRepoStat data, we ensure
# that it gets collected correctly and that the selection of repos is
# exactly the same.
def assertRepoHasBranchAndTrackingBranch(inOptions, gitRepo):
    """Raise unless the repo is on a named branch that has a tracking branch."""
    if gitRepo.repoName == "":
        repoNameEntry = "base repo"
    else:
        repoNameEntry = "repo '"+gitRepo.repoName+"'"
    stats = gitRepo.gitRepoStats
    if stats.branch == "HEAD":
        # Detached HEAD: there is no branch to push from.
        raise Exception("Error, the "+repoNameEntry+" is in a detached head state which" \
            " is not allowed in this case!")
    if stats.trackingBranch == "":
        raise Exception("Error, the "+repoNameEntry+" is not on a tracking branch which" \
            " is not allowed in this case!")
def pushToTrackingBranchArgs(gitRepo):
    """Return '<remote> <localBranch>:<remoteBranch>' arguments for 'git push'.

    Splits the tracking branch on the FIRST '/' only, so remote branch names
    that themselves contain '/' (e.g. 'origin/feature/x') are handled instead
    of raising ValueError.
    """
    (repo, trackingbranch) = gitRepo.gitRepoStats.trackingBranch.split("/", 1)
    return repo+" "+gitRepo.gitRepoStats.branch+":"+trackingbranch
def didSinglePullBringChanges(pullOutFileFullPath):
    """Return True if the captured 'git pull' output shows changes were pulled.

    Git changed the no-op message from "Already up-to-date." to
    "Already up to date." in git 2.15, so both spellings must be checked;
    matching only the hyphenated form misreports "no changes" as "changes".
    """
    pullOutFileStr = readStrFromFile(pullOutFileFullPath)
    #print("\npullOutFileStr:\n" + pullOutFileStr)
    if pullOutFileStr.find("Already up-to-date") != -1:
        return False
    if pullOutFileStr.find("Already up to date") != -1:
        return False
    return True
def executePull(gitRepo, inOptions, baseTestDir, outFile, pullFromRepo=None,
    doRebase=False \
    ):
    # Run 'git pull' (optionally from an explicit remote/branch, optionally
    # followed by a rebase onto the tracking branch) in the repo's directory,
    # capturing output to baseTestDir/outFile.
    # Returns (pullRtn, pullTimings, pullGotChanges).
    cmnd = inOptions.git+" pull"
    if pullFromRepo:
        repoSpaceBranch = pullFromRepo.remoteRepo+" "+pullFromRepo.remoteBranch
        print("\nPulling in updates to local repo '" + gitRepo.repoName + "'" +
              " from '" + repoSpaceBranch + "' ...\n")
        cmnd += " " + repoSpaceBranch
    else:
        print("\nPulling in updates from '" + gitRepo.gitRepoStats.trackingBranch +
              "' ...")
        # NOTE: If you do 'git pull <remote> <branch>', then the list of locally
        # modified files will be wrong.  I don't know why this is but if instead
        # you do a raw 'git pull', then the right list of files shows up.
    if doRebase:
        cmnd += " && "+inOptions.git+" rebase "+gitRepo.gitRepoStats.trackingBranch
    outFileFullPath = os.path.join(baseTestDir, outFile)
    (pullRtn, pullTimings) = echoRunSysCmnd( cmnd,
        workingDir=getGitRepoDir(inOptions.srcDir, gitRepo.repoDir),
        outFile=outFileFullPath,
        timeCmnd=True, returnTimeCmnd=True, throwExcept=False
        )
    if pullRtn == 0:
        # Inspect the captured output to see if anything was actually pulled.
        pullGotChanges = didSinglePullBringChanges(outFileFullPath)
        if pullGotChanges:
            print("\n  ==> '" + gitRepo.repoName + "': Pulled changes from this repo!")
        else:
            print("\n  ==> '" + gitRepo.repoName +
                  "': Did not pull any changes from this repo!")
    else:
        print("\n  ==> '" + gitRepo.repoName + "': Pull failed!")
        pullGotChanges = False
    return (pullRtn, pullTimings, pullGotChanges)
class Timings:
    """Wall-clock timings (seconds) for each checkin-test phase.

    A value of -1.0 means the phase was not run; totalTime() only counts
    phases that actually ran.
    """

    def __init__(self):
        self.pull = -1.0
        self.configure = -1.0
        self.build = -1.0
        self.test = -1.0

    def deepCopy(self):
        """Return an independent copy of these timings."""
        other = Timings()
        other.pull = self.pull
        other.configure = self.configure
        other.build = self.build
        other.test = self.test
        return other

    def totalTime(self):
        """Sum of the timings for all phases that were run."""
        phases = (self.pull, self.configure, self.build, self.test)
        return sum((t for t in phases if t > 0), 0.0)
class GitRepo:
    """Description of one git repository (the base repo or an extra repo).

    repoName: name of the repo ('' for the base repo).
    repoDir: directory under the source dir (defaults to repoName).
    repoType: only 'GIT' is supported.
    repoHasPackages: True if the repo defines TriBITS packages (then
      repoDir must equal repoName).
    repoPrePost: 'PRE' or 'POST' ordering of the extra repo.
    """

    def __init__(self, repoName, repoDir='', repoType='GIT', repoHasPackages=True,
        repoPrePost='POST' \
        ):
        self.repoName = repoName
        if repoDir:
            self.repoDir = repoDir
        else:
            self.repoDir = repoName
        self.repoType = repoType
        self.repoHasPackages = repoHasPackages
        self.repoPrePost = repoPrePost
        self.hasChanges = False      # set True later if the repo has local changes
        self.gitRepoStats = None     # filled in by getRepoStats()
        if (self.repoName and self.repoHasPackages) and (self.repoName != self.repoDir):
            raise Exception("ERROR!  For extra repo '"+repoName+"', if repoHasPackages==True" \
                +" then repoDir must be same as repo name, not '"+repoDir+"'!")
        if self.repoType != 'GIT':
            raise Exception("ERROR!  For extra repo '"+repoName+"', the repo type" \
                +" '"+self.repoType+"' is not supported by the checkin-test.py script, only 'GIT'!")

    def __str__(self):
        return "GitRepo{repoName='"+self.repoName+"'" \
            +", repoDir='"+str(self.repoDir)+"'" \
            +", repoType='"+str(self.repoType)+"'" \
            +", repoHasPackages="+str(self.repoHasPackages) \
            +", repoPrePost="+str(self.repoPrePost) \
            +", hasChanges="+str(self.hasChanges) \
            +"}"

    def __repr__(self):
        # BUGFIX: this was misspelled '__rep__', which Python never calls,
        # so repr() silently fell back to the default object repr.
        return str(self)

    # Keep the old (misspelled) name in case any external code called it.
    __rep__ = __repr__
def getExtraReposFilePath(inOptions):
  """Return the path of the extra-repositories file, resolving the special
  value 'project' to the project's default cmake/ExtraRepositoriesList.cmake."""
  if inOptions.extraReposFile == "project":
    return inOptions.srcDir+"/cmake/ExtraRepositoriesList.cmake"
  return inOptions.extraReposFile
def getExtraReposPyFileFromCmakeFile(inOptions, extraReposPythonOutFile, \
  consoleOutputFile = None, verbose=False \
  ):
  """Run TribitsGetExtraReposForCheckinTest.cmake to translate the project's
  extra-repos CMake file into a python-readable file.

  Arguments:
    inOptions: Parsed command-line options object.
    extraReposPythonOutFile: Path of the python file cmake should generate.
    consoleOutputFile: File to capture the cmake console output in.  If None,
      a default file name is used and its contents are echoed afterward.
    verbose: Passed on to echoRunSysCmnd.
  """
  extraReposFile = getExtraReposFilePath(inOptions)
  printConsoleOutputFile = False
  if not consoleOutputFile:
    # Need to send output to a file so that you can read it back in again and
    # then print it out using the 'print' statement. This is needed so that
    # the output shows up in both the STDOUT and the checkin-test.out log
    # files!
    consoleOutputFile = "TribitsGetExtraReposForCheckinTest.out"
    printConsoleOutputFile = True
  cmnd = "\""+inOptions.withCmake+"\""+ \
    " -DSUPPRESS_PRINT_VAR_OUTPUT=TRUE" \
    " -DPROJECT_SOURCE_DIR="+inOptions.srcDir+ \
    " -DTRIBITS_BASE_DIR="+inOptions.tribitsDir+ \
    " -DEXTRA_REPOS_FILE="+extraReposFile+ \
    " -DENABLE_KNOWN_EXTERNAL_REPOS_TYPE="+inOptions.extraReposType+ \
    " -DEXTRA_REPOS="+inOptions.extraRepos+ \
    " -DEXTRA_REPOS_PYTHON_OUT_FILE="+extraReposPythonOutFile
  if inOptions.ignoreMissingExtraRepos:
    cmnd += " -DIGNORE_MISSING_EXTRA_REPOSITORIES=TRUE"
  cmnd += \
    " -P "+inOptions.tribitsDir+"/ci_support/TribitsGetExtraReposForCheckinTest.cmake"
  try:
    echoRunSysCmnd(cmnd, throwExcept=True, timeCmnd=True, outFile=consoleOutputFile, \
      verbose=verbose)
  finally:
    if printConsoleOutputFile:
      # Bug fix: use a context manager so the file handle is not leaked
      with open(consoleOutputFile, 'r') as consoleFile:
        print("\n" + consoleFile.read())
def translateExtraReposPyToDictGitRepo(extraReposPyDict):
  """Construct a GitRepo object from one extra-repo python dict entry
  (keys: NAME, DIR, REPOTYPE, HASPKGS, PREPOST)."""
  return GitRepo(
    extraReposPyDict['NAME'],
    extraReposPyDict['DIR'],
    extraReposPyDict['REPOTYPE'],
    extraReposPyDict['HASPKGS'] == 'HASPACKAGES',
    extraReposPyDict['PREPOST'],
    )
class TribitsGitRepos:
  """Collects the base git repo plus any extra repos taking part in the
  checkin test and derives the PRE/POST package-repo name lists that get
  passed on to CMake.
  """
  def __init__(self):
    self.reset()
    self.__insertMainRepo()
    self.__initFinalize()
  def initFromCommandlineArguments(self, inOptions, consoleOutputFile=None, verbose=True):
    """Rebuild the repo list from --extra-repos and/or the extra-repos file
    given in the command-line options object."""
    self.reset()
    self.__insertMainRepo()
    if inOptions.extraRepos!="" and \
      (inOptions.extraReposFile=="" or inOptions.extraReposType=="") \
      :
      # Just use the listed set of extra repos with no checking
      for extraRepoName in inOptions.extraRepos.split(","):
        extraRepo = GitRepo(extraRepoName)
        self.__gitRepoList.append(extraRepo)
    elif inOptions.extraReposFile!="" and inOptions.extraReposType!="":
      # Read in the extra repos from file and assert or ignore missing repos, etc.
      extraReposPythonOutFile = getProjectExtraReposPythonOutFile(inOptions.projectName)
      getExtraReposPyFileFromCmakeFile(inOptions, extraReposPythonOutFile, \
        consoleOutputFile=consoleOutputFile, verbose=verbose)
      extraReposPyTxt = readStrFromFile(extraReposPythonOutFile)
      # NOTE(review): eval() of generated-file contents; the file is produced
      # by our own cmake script, but never point this at untrusted input.
      extraReposPyList = eval(extraReposPyTxt)
      for extraRepoDict in extraReposPyList:
        extraRepo = translateExtraReposPyToDictGitRepo(extraRepoDict)
        self.__gitRepoList.append(extraRepo)
    self.__initFinalize()
  def gitRepoList(self):
    return self.__gitRepoList
  def tribitsPreRepoNamesList(self):
    return self.__tribitsPreRepoNamesList
  def numTribitsPreRepos(self):
    return len(self.__tribitsPreRepoNamesList)
  def tribitsExtraRepoNamesList(self):
    return self.__tribitsExtraRepoNamesList
  def numTribitsExtraRepos(self):
    return len(self.__tribitsExtraRepoNamesList)
  def tribitsAllExtraRepoNamesList(self):
    return self.__tribitsAllExtraRepoNamesList
  def numTribitsAllExtraRepos(self):
    return len(self.__tribitsAllExtraRepoNamesList)
  def __str__(self):
    strRep = "{\n"
    strRep += " gitRepoList = " + self.__printReposList(self.__gitRepoList)
    strRep += " tribitsPreRepoNamesList = "+str(self.__tribitsPreRepoNamesList)+"\n"
    strRep += " tribitsExtraRepoNamesList = "+str(self.__tribitsExtraRepoNamesList)+"\n"
    strRep += " tribitsAllExtraRepoNamesList = "+str(self.__tribitsAllExtraRepoNamesList)+"\n"
    strRep += " }\n"
    return strRep
  def reset(self):
    """Clear all repo and name lists; returns self for chaining."""
    self.__gitRepoList = []
    self.__tribitsPreRepoNamesList = []
    self.__tribitsExtraRepoNamesList = []
    # Bug fix: this attribute was misspelled '__tribitsAllRepoNamesList',
    # which left '__tribitsAllExtraRepoNamesList' undefined after a bare
    # reset() (AttributeError from tribitsAllExtraRepoNamesList()).
    self.__tribitsAllExtraRepoNamesList = []
    return self
  # Private
  def __insertMainRepo(self):
    # The base project repo is represented by the empty repo name
    mainRepo = GitRepo("")
    self.__gitRepoList.append(mainRepo)
  def __printReposList(self, reposList):
    strRep = "[\n"
    for gitRepo in reposList:
      strRep += (" " + str(gitRepo) + ",\n")
    strRep += " ]\n"
    return strRep
  def __initFinalize(self):
    # Derive the PRE/POST/ALL extra-repo name lists from the repos that
    # actually contribute packages (the base repo, repoName=='', is excluded)
    self.__tribitsPreRepoNamesList = []
    self.__tribitsExtraRepoNamesList = []
    self.__tribitsAllExtraRepoNamesList = []
    for gitRepo in self.__gitRepoList:
      if gitRepo.repoName and gitRepo.repoHasPackages:
        self.__tribitsAllExtraRepoNamesList.append(gitRepo.repoName)
        if gitRepo.repoPrePost == 'PRE':
          self.__tribitsPreRepoNamesList.append(gitRepo.repoName)
        else:
          self.__tribitsExtraRepoNamesList.append(gitRepo.repoName)
def createAndGetProjectDependencies(inOptions, baseTestDir, tribitsGitRepos):
  """Generate (or reuse) the project dependencies XML file and load it into
  the module-level projectDependenciesCache.

  Runs the TribitsDumpDepsXmlScript.cmake script (unless
  inOptions.skipDepsUpdate is set) including packages from any PRE/POST
  extra repos.  The env var CHECKIN_TEST_DEPS_XML_FILE_OVERRIDE, if set,
  overrides which XML file is read back in.
  """
  if tribitsGitRepos.numTribitsPreRepos() > 0:
    print("\nPulling in packages from PRE extra repos: " +
      ','.join(tribitsGitRepos.tribitsPreRepoNamesList()) + " ...")
  if tribitsGitRepos.numTribitsExtraRepos() > 0:
    print("\nPulling in packages from POST extra repos: " +
      ','.join(tribitsGitRepos.tribitsExtraRepoNamesList()) + " ...")
  # Fail fast if any of the listed repos is not actually checked out
  for gitRepo in tribitsGitRepos.gitRepoList():
    assertGitRepoExists(inOptions, gitRepo)
  projectDepsXmlFile = baseTestDir+"/"\
    +getProjectDependenciesXmlFileName(inOptions.projectName)
  if not inOptions.skipDepsUpdate:
    # There are extra repos so we need to build a new list of Project
    # packages to include the add-on packages.
    cmakeArgumentList = [
      "cmake",
      "-DPROJECT_NAME=%s" % inOptions.projectName,
      cmakeScopedDefine(inOptions.projectName, "TRIBITS_DIR", inOptions.tribitsDir),
      "-DPROJECT_SOURCE_DIR="+inOptions.srcDir,
      cmakeScopedDefine(inOptions.projectName, "PRE_REPOSITORIES", "\""+\
        ';'.join(tribitsGitRepos.tribitsPreRepoNamesList())+"\""),
      cmakeScopedDefine(inOptions.projectName, "EXTRA_REPOSITORIES", "\""+\
        ';'.join(tribitsGitRepos.tribitsExtraRepoNamesList())+"\""),
      cmakeScopedDefine(inOptions.projectName, "DEPS_XML_OUTPUT_FILE", projectDepsXmlFile),
      "-P %s/ci_support/TribitsDumpDepsXmlScript.cmake" % inOptions.tribitsDir,
      ]
    cmnd = ' '.join(cmakeArgumentList)
    echoRunSysCmnd(cmnd,
      workingDir=baseTestDir,
      outFile=baseTestDir+"/"\
        +getProjectDependenciesXmlGenerateOutputFileName(inOptions.projectName),
      timeCmnd=True)
  else:
    print("\nSkipping update of dependencies XML file on request!")
  # An externally supplied XML file can override the generated one
  projectDepsXmlFileOverride = os.environ.get("CHECKIN_TEST_DEPS_XML_FILE_OVERRIDE")
  if projectDepsXmlFileOverride:
    print("\nprojectDepsXmlFileOverride=" + projectDepsXmlFileOverride)
    projectDepsXmlFile = projectDepsXmlFileOverride
  # Cache the parsed dependencies for the rest of the script to use
  global projectDependenciesCache
  projectDependenciesCache = getProjectDependenciesFromXmlFile(projectDepsXmlFile)
class RemoteRepoAndBranch:
  """Pairs a remote repository name with the branch to pull from it."""
  def __init__(self, remoteRepo, remoteBranch):
    self.remoteRepo = remoteRepo
    self.remoteBranch = remoteBranch
  def __str__(self):
    # Bug fix: the label used to read "repoRepo", mislabeling the field
    return "RemoteRepoAndBranch{remoteRepo='"+str(self.remoteRepo)+"'" \
      +", remoteBranch='"+str(self.remoteBranch)+"'" \
      +"}"
class RepoExtraRemotePulls:
  """Associates one local GitRepo with the list of RemoteRepoAndBranch
  objects that should additionally be pulled into it."""
  def __init__(self, gitRepo, remoteRepoAndBranchList):
    self.gitRepo = gitRepo
    self.remoteRepoAndBranchList = remoteRepoAndBranchList
def getLocalRepoRemoteRepoAndBranchFromExtraPullArg(extraPullArg):
  """Parse one --extra-pull-from argument.

  Accepts either '<localreponame>:<remoterepo>:<remotebranch>' or
  '<remoterepo>:<remotebranch>' (which matches every local repo) and
  returns the tuple (localRepo, remoteRepo, remoteBranch, matchesAllRepos).

  Raises ValueError on a malformed argument or an empty required field.
  """
  fields = extraPullArg.split(':')
  if len(fields) == 3:
    (localRepo, remoteRepo, remoteBranch) = fields
    matchesAllRepos = False
  elif len(fields) == 2:
    localRepo = ""
    (remoteRepo, remoteBranch) = fields
    matchesAllRepos = True
  else:
    raise ValueError(
      "Error, the --extra-pull-from arg '"+extraPullArg+"' is not of the form" \
      + " <localreponame>:<remoterepo>:<remotebranch>!")
  if remoteRepo == "":
    raise ValueError(
      "Error, the --extra-pull-from arg '"+extraPullArg+"' has an empty <remoterepo>" \
      + " field in <localreponame>:<remoterepo>:<remotebranch>!")
  elif remoteBranch == "":
    raise ValueError(
      "Error, the --extra-pull-from arg '"+extraPullArg+"' has an empty <remotebranch>" \
      + " field in <localreponame>:<remoterepo>:<remotebranch>!")
  return (localRepo, remoteRepo, remoteBranch, matchesAllRepos)
def matchExtraRepoLocalRepoMatchLocalRepo(repoName, extraRepoLocalRepoName):
  """True if extraRepoLocalRepoName refers to the local repo repoName.

  The special name 'BASE_REPO' matches the base project repo, whose
  repoName is the empty string."""
  return (repoName == extraRepoLocalRepoName) \
    or (repoName == "" and extraRepoLocalRepoName == "BASE_REPO")
def parseExtraPullFromArgs(gitRepoList, extraPullFromArgs):
  """Build one RepoExtraRemotePulls entry per local repo and fill in the
  remote repo/branch pulls parsed from the comma-separated
  --extra-pull-from argument string (may be empty)."""
  # Start with one (initially empty) entry per local repo
  repoExtraRemotePullsList = \
    [RepoExtraRemotePulls(gitRepo, []) for gitRepo in gitRepoList]
  if not extraPullFromArgs:
    return repoExtraRemotePullsList
  for extraPullFromArg in extraPullFromArgs.split(","):
    (localRepo, remoteRepo, remoteBranch, matchesAllRepos) = \
      getLocalRepoRemoteRepoAndBranchFromExtraPullArg(extraPullFromArg)
    for repoExtraRemotePulls in repoExtraRemotePullsList:
      # The two-field form applies to every repo; the three-field form only
      # to the repo named in the <localreponame> field.
      if matchesAllRepos or repoExtraRemotePulls.gitRepo.repoName == localRepo:
        repoExtraRemotePulls.remoteRepoAndBranchList.append(
          RemoteRepoAndBranch(remoteRepo, remoteBranch))
  return repoExtraRemotePullsList
class BuildTestCase:
  """One build/test case (e.g. MPI_DEBUG) to be configured, built, and tested."""
  def __init__(self, name, runBuildTestCase, validPackageTypesList,
    isDefaultBuild, skipCaseIfNoChangeFromDefaultEnables,
    extraCMakeOptions, buildIdx \
    ):
    # Name of the case; also used as its build subdirectory name
    self.name = name
    # True if this case was selected to actually run
    self.runBuildTestCase = runBuildTestCase
    # Package types (e.g. PT/ST) allowed to be enabled in this case
    self.validPackageTypesList = validPackageTypesList
    self.isDefaultBuild = isDefaultBuild
    self.skipCaseIfNoChangeFromDefaultEnables = skipCaseIfNoChangeFromDefaultEnables
    # Extra -D options appended to the cmake configure line
    self.extraCMakeOptions = extraCMakeOptions
    # Set later if configure/build/test were skipped for lack of enables
    self.skippedConfigureDueToNoEnables = False
    # Position of this case in the overall build-case list
    self.buildIdx = buildIdx
    # Stage timings, filled in as the case runs
    self.timings = Timings()
def setBuildTestCaseInList(buildTestCaseList_inout,
  name, runBuildTestCase, validPackageTypesList, isDefaultBuild,
  skipCaseIfNoChangeFromDefaultEnables, extraCMakeOptions \
  ):
  """Append a new BuildTestCase to the list, giving it the next build index."""
  nextBuildIdx = len(buildTestCaseList_inout)
  newBuildTestCase = BuildTestCase(
    name, runBuildTestCase, validPackageTypesList, isDefaultBuild,
    skipCaseIfNoChangeFromDefaultEnables, extraCMakeOptions, nextBuildIdx)
  buildTestCaseList_inout.append(newBuildTestCase)
def writeDefaultCommonConfigFile():
  """Write a skeleton COMMON config file unless one already exists."""
  commonConfigFileName = getCommonConfigFileName()
  if os.path.exists(commonConfigFileName):
    print("\nThe file " + commonConfigFileName + " already exists!")
    return
  print("\nCreating a default skeleton file " + commonConfigFileName + " ...")
  commonConfigFileStr = \
    "# Fill in the minimum CMake options that are needed to build and link\n" \
    "# that are common to all builds such as the following:\n" \
    "#\n" \
    "#-DCMAKE_VERBOSE_MAKEFILE=ON\n" \
    "#-DBUILD_SHARED_LIBS=ON\n" \
    "#\n" \
    "# NOTE: Please do not add any options here that would select what packages\n" \
    "# get enabled or disabled.\n"
  writeStrToFile(commonConfigFileName, commonConfigFileStr)
def writeDefaultBuildSpecificConfigFile(buildTestCaseName):
  """Write a skeleton build-specific config file (e.g. for MPI_DEBUG)
  unless one already exists."""
  serialOrMpi = buildTestCaseName.split('_')[0]
  buildSpecificConfigFileName = getBuildSpecificConfigFileName(buildTestCaseName)
  if os.path.exists(buildSpecificConfigFileName):
    print("\nThe file " + buildSpecificConfigFileName + " already exists!")
    return
  # ToDo: Get rid fo these! These are too specific!
  print("\nCreating a default skeleton file " + buildSpecificConfigFileName +
    " ...")
  buildSpecificConfigFileStr = \
    "# Fill in the minimum CMake options that are needed to build and link\n" \
    "# that are specific to the "+serialOrMpi+" build such as:\n" \
    "#\n" \
    "#-DBUILD_SHARED_LIBS=ON\n" \
    "#\n" \
    "# NOTE: Please do not add any options here that would change what packages\n" \
    "# or TPLs get enabled or disabled.\n"
  writeStrToFile(buildSpecificConfigFileName, buildSpecificConfigFileStr)
def assertNoIllegalEnables(projectName, fileName, cmakeOption):
  """Check one cmake option read from fileName for illegal enables.

  TPL enables (-DTPL_ENABLE_*) and explicit package enables
  (-D<projectName>_ENABLE_<Pkg>...=ON) are not allowed in checkin-test
  config files.  Prints an error and returns False for those; returns
  True otherwise."""
  if re.match(r"-DTPL_ENABLE_.+", cmakeOption):
    print(" ERROR: Illegal TPL enable " + cmakeOption + " in " + fileName+"!")
    return False
  if re.match(r"-D%s_ENABLE_[a-zA-Z]+.+=ON" % projectName, cmakeOption):
    print(" ERROR: Illegal enable " + cmakeOption + " in " + fileName + "!")
    return False
  return True
def readAndAppendCMakeOptions(
  projectName,
  fileName,
  cmakeOptions_inout,
  assertNoIllegalEnablesBool):
  """Read cmake options from fileName (one per line; lines starting with
  '#' and blank lines are ignored) and append them to cmakeOptions_inout.

  If assertNoIllegalEnablesBool is True, each option is checked with
  assertNoIllegalEnables() and False is returned if any option is illegal.
  Returns True otherwise, including when fileName does not exist (the
  config files are optional).
  """
  success = True
  if not os.path.exists(fileName):
    # Bug fix: this used to 'return' bare None, which callers doing
    # 'if not result:' treated as a failure; a missing optional config
    # file is not an error.
    return success
  print("\nAppending options from " + fileName + ":")
  # 'with' ensures the file handle is closed (it was leaked before)
  with open(fileName, 'r') as cmakeOptionsFile:
    for line in cmakeOptionsFile:
      if line[0] != '#':
        cmakeOption = line.strip()
        if cmakeOption == "": continue
        print("  Appending: " + cmakeOption)
        if assertNoIllegalEnablesBool:
          if not assertNoIllegalEnables(projectName, fileName, cmakeOption):
            success = False
        cmakeOptions_inout.append(cmakeOption)
  return success
# Matches one line of 'git diff --name-status' output for a modified (M),
# added (A), or deleted (D) file and captures the file path.
reModifiedFiles = re.compile(r"^[MAD]\t(.+)$")
def getCurrentDiffOutput(gitRepo, inOptions, baseTestDir):
  """Write 'git diff --name-status <trackingBranch>' output for the repo to
  its modified-files output file under baseTestDir.

  Skipped entirely when the repo has no commits w.r.t. its tracking branch.
  """
  # Only diff when there are local commits w.r.t. the tracking branch
  if gitRepo.gitRepoStats.numCommitsInt() > 0:
    echoRunSysCmnd(
      inOptions.git+" diff --name-status "+gitRepo.gitRepoStats.trackingBranch,
      workingDir=getGitRepoDir(inOptions.srcDir, gitRepo.repoDir),
      outFile=os.path.join(baseTestDir, getModifiedFilesOutputFileName(gitRepo.repoName)),
      timeCmnd=True
      )
def repoHasModifiedFiles(gitRepo, baseTestDir):
  """Return True if the saved 'git diff --name-status' output file for this
  repo is non-empty.  Repos with no local commits w.r.t. the tracking
  branch are always reported as unmodified."""
  if gitRepo.gitRepoStats.numCommitsInt() > 0:
    diffOutputFileName = \
      baseTestDir+"/"+getModifiedFilesOutputFileName(gitRepo.repoName)
    if readStrFromFile(diffOutputFileName):
      return True
  return False
def getCurrentDiffOutputAndLogModified(inOptions, gitRepo, baseTestDir):
  """Generate the diff-output file for one repo, record whether it has
  local changes in gitRepo.hasChanges, and echo the result."""
  getCurrentDiffOutput(gitRepo, inOptions, baseTestDir)
  gitRepo.hasChanges = repoHasModifiedFiles(gitRepo, baseTestDir)
  if not gitRepo.hasChanges:
    print("\n ==> '" + gitRepo.repoName + "': Does *not* have any modified " +
      "files!")
    return
  print("\n ==> '" + gitRepo.repoName + "': Has modified files!")
def extractPackageEnablesFromChangeStatus(changedFileDiffOutputStr, inOptions_inout,
  gitRepo, enablePackagesList_inout, verbose=True,
  projectDependenciesLocal=None ) \
  :
  """Scan 'git diff --name-status' output and append the names of the
  packages that own the modified files to enablePackagesList_inout.

  Side effects:
  - May set inOptions_inout.enableAllPackages = 'on' when a global build
    file changed in the base repo and the option was 'auto'.
  - Appends new package names (no duplicates) to enablePackagesList_inout.
  """
  if not projectDependenciesLocal:
    projectDependenciesLocal = getDefaultProjectDependenices()
  modifiedFilesList = extractFilesListMatchingPattern(
    changedFileDiffOutputStr.splitlines(), reModifiedFiles )
  for modifiedFileFullPath in modifiedFilesList:
    # Only look for global rebuild files in the master repo (not in extra repos)
    if gitRepo.repoName == '' and \
      isGlobalBuildFileRequiringGlobalRebuild(modifiedFileFullPath) \
      :
      if inOptions_inout.enableAllPackages == 'auto':
        if verbose:
          # Bug fix: message used to read "Modifed"
          print("\nModified file: '" + modifiedFileFullPath + "'\n" +
            " => Enabling all " + inOptions_inout.projectName +
            " packages!")
        inOptions_inout.enableAllPackages = 'on'
    if gitRepo.repoDir:
      modifiedFileFullPath = gitRepo.repoDir+"/"+modifiedFileFullPath
    #print("\nmodifiedFileFullPath =", modifiedFileFullPath)
    packageName = getPackageNameFromPath(projectDependenciesLocal, modifiedFileFullPath)
    if packageName and findInSequence(enablePackagesList_inout, packageName) == -1:
      if verbose:
        print("\nModified file: '" + modifiedFileFullPath + "'\n" +
          " => Enabling '" + packageName + "'!")
      enablePackagesList_inout.append(packageName)
def createConfigureFile(cmakeOptions, baseCmnd, srcDir, configFileName):
  """Write an executable 'do-configure' script that invokes baseCmnd with
  the given cmake options (plus any extra \"$@\" arguments) against srcDir."""
  scriptParts = [baseCmnd] + list(cmakeOptions) + ["\"$@\""]
  doConfigStr = " \\\n".join(scriptParts)
  if srcDir:
    doConfigStr += " \\\n"+srcDir
  doConfigStr += "\n"
  writeStrToFile(configFileName, doConfigStr)
  echoRunSysCmnd('chmod a+x '+configFileName)
def formatMinutesStr(timeInMinutes):
  """Format a number of minutes as e.g. '12.34 min'."""
  return "%.2f min" % timeInMinutes
def getStageStatus(stageName, stageDoBool, stagePassed, stageTiming):
  """Return a one-line status summary for one stage (pull/configure/build/
  test): 'Passed'/'FAILED' with its timing, or 'Not Performed'."""
  if not stageDoBool:
    return stageName + ": Not Performed\n"
  passFailStr = "Passed" if stagePassed else "FAILED"
  return stageName + ": " + passFailStr \
    + " ("+formatMinutesStr(stageTiming)+")" + "\n"
def getTotalTimeBeginStr(buildTestCaseName):
  """Prefix of the total-time summary line for a build/test case."""
  return "Total time for {0}".format(buildTestCaseName)
def getTotalTimeLineStr(buildTestCaseName, timeInMin):
  """Full 'Total time for <case> = <mm.mm> min' summary line."""
  return "{0} = {1}".format(
    getTotalTimeBeginStr(buildTestCaseName), formatMinutesStr(timeInMin))
def getTimeInMinFromTotalTimeLine(buildTestCaseName, totalTimeLine):
  """Parse the float minutes back out of a total-time summary line
  produced by getTotalTimeLineStr(); return -1.0 if the line is empty
  or does not match."""
  if not totalTimeLine:
    return -1.0
  timeMatch = re.match(
    getTotalTimeBeginStr(buildTestCaseName)+r" = (.+) min", totalTimeLine)
  if timeMatch and timeMatch.groups():
    return float(timeMatch.group(1))
  return -1.0
# Matches the ctest summary line '..., <N> tests failed out of <M>' and
# captures the failed and total test counts.
reCtestFailTotal = re.compile(r".+, ([0-9]+) tests failed out of ([0-9]+)")
def analyzeResultsSendEmail(inOptions, buildTestCase,
  enabledPackagesList, cmakeOptions, startingTime, timings ) \
  :
  """Determine pass/fail of each stage of one build/test case from its
  output/success marker files, construct the summary email body, write it
  to the email-body file, and (depending on the email options) send it.

  Must be called with the case's build directory as the CWD (the pull
  output files are found via '../').  Returns True if the overall case
  passed, False otherwise.
  """
  buildTestCaseName = buildTestCase.name
  print("")
  print("E.1) Determine what passed and failed ...")
  print("")
  success = False
  # Determine if the pull passed
  pullPassed = None
  pullOutputExists = False
  if inOptions.doPull:
    if os.path.exists("../"+getInitialPullOutputFileName("")):
      pullOutputExists = True
    if os.path.exists("../"+getInitialPullSuccessFileName()):
      print("\nThe pull passed!\n")
      pullPassed = True
    elif pullOutputExists:
      print("\nThe pull FAILED!\n")
      pullPassed = False
    else:
      print("\nThe pull was never attempted!\n")
      pullPassed = False
  else:
    print("\nThe pull step was not performed!\n")
  # Determine if the configured passed
  configurePassed = None
  configureOutputExists = False
  if inOptions.doConfigure:
    if os.path.exists(getConfigureOutputFileName()):
      configureOutputExists = True
    if os.path.exists(getConfigureSuccessFileName()):
      print("\nThe configure passed!\n")
      configurePassed = True
    elif configureOutputExists:
      print("\nThe configure FAILED!\n")
      configurePassed = False
    else:
      print("\nThe configure was never attempted!\n")
      configurePassed = False
  else:
    print("\nThe configure step was not performed!\n")
  # Determine if the build passed
  buildPassed = None
  buildOutputExists = False
  if inOptions.doBuild:
    if os.path.exists(getBuildOutputFileName()):
      buildOutputExists = True
    if os.path.exists(getBuildSuccessFileName()):
      print("\nThe build passed!\n")
      buildPassed = True
    elif buildOutputExists:
      print("\nThe build FAILED!\n")
      buildPassed = False
    else:
      print("\nThe build was never attempted!\n")
      buildPassed = False
  else:
    print("\nThe build step was not performed!\n")
  # Determine if the tests passed
  testsPassed = None
  testOutputExists = False
  if inOptions.doTest:
    if os.path.exists(getTestOutputFileName()):
      testOutputExists = True
    if not testOutputExists:
      print("\nThe tests were never even run!\n")
      testsPassed = False
    else: # testOutputExists
      # Parse the failed/total test counts out of the ctest output
      testResultsLine = getCmndOutput("grep 'tests failed out of' "+getTestOutputFileName(),
        True, False)
      print("testResultsLine = '" + testResultsLine + "'")
      reCtestFailTotalMatch = reCtestFailTotal.match(testResultsLine)
      if reCtestFailTotalMatch:
        numFailedTests = int(reCtestFailTotalMatch.group(1))
        numTotalTests = int(reCtestFailTotalMatch.group(2))
        numPassedTests = numTotalTests - numFailedTests
      else:
        numTotalTests = None
        numPassedTests = None
        testsPassed = False
      if not os.path.exists(getTestSuccessFileName()):
        print("\nThe tests did not run and pass!\n")
        testsPassed = False
      elif numTotalTests == None:
        print("\nCTest was invoked but no tests were run!\n")
        testsPassed = False
      elif numTotalTests == numPassedTests:
        print("\nAll of the tests ran passed!\n")
        testsPassed = True
      else:
        print("\n" + str(numTotalTests-numPassedTests) + " tests failed!\n")
        testsPassed = False
  else:
    print("\nRunning the tests was not performed!\n")
  print("")
  print("E.2) Construct the email message ...")
  print("")
  # 2.a) Construct the subject line
  # The final status is selected from the most-downstream stage that was
  # performed: test, then build, then configure, then pull.
  overallPassed = None
  buildCaseStatus = ""
  selectedFinalStatus = False
  if inOptions.doTest and not selectedFinalStatus:
    if testOutputExists:
      if numTotalTests:
        buildCaseStatus += "passed="+str(numPassedTests)+",notpassed="+str(numFailedTests)
      else:
        buildCaseStatus += "no tests run"
      if testsPassed and numTotalTests > 0:
        overallPassed = True
      else:
        overallPassed = False
      selectedFinalStatus = True
    elif not inOptions.doBuild and not buildOutputExists:
      buildCaseStatus += "no active build exists"
      overallPassed = False
      selectedFinalStatus = True
  if inOptions.doBuild and not selectedFinalStatus:
    if buildPassed:
      buildCaseStatus += "build-only passed"
      overallPassed = True
      selectedFinalStatus = True
    elif buildOutputExists:
      buildCaseStatus += "build failed"
      overallPassed = False
      selectedFinalStatus = True
  if inOptions.doConfigure and not selectedFinalStatus:
    if configurePassed:
      buildCaseStatus += "configure-only passed"
      overallPassed = True
      selectedFinalStatus = True
    elif buildTestCase.skippedConfigureDueToNoEnables:
      buildCaseStatus += "skipped configure, build, test due to no enabled packages"
      overallPassed = True
      selectedFinalStatus = True
    elif configureOutputExists:
      buildCaseStatus += "configure failed"
      overallPassed = False
      selectedFinalStatus = True
    else:
      buildCaseStatus += "pre-configure failed"
      overallPassed = False
      selectedFinalStatus = True
  if inOptions.doPull and not selectedFinalStatus:
    if pullPassed:
      buildCaseStatus += "pull-only passed"
      overallPassed = True
      selectedFinalStatus = True
    elif pullOutputExists:
      buildCaseStatus += "pull FAILED"
      overallPassed = False
      selectedFinalStatus = True
  if not selectedFinalStatus:
    raise Exception("Error, final pass/fail status not found!")
  subjectLine = "%s/%s: %s" % (inOptions.projectName, buildTestCaseName, buildCaseStatus)
  if overallPassed:
    subjectLine = "passed: " + subjectLine
  else:
    subjectLine = "FAILED: " + subjectLine
  print("\nsubjectLine = '" + subjectLine + "'\n")
  success = overallPassed
  # 2.b) Construct the email body
  emailBody = subjectLine + "\n\n"
  emailBody += getCmndOutput("date", True) + "\n\n"
  emailBody += getEnableStatusList(inOptions, enabledPackagesList)
  emailBody += "Hostname: " + getHostname() + "\n"
  emailBody += "Source Dir: " + inOptions.srcDir + "\n"
  emailBody += "Build Dir: " + os.getcwd() + "\n"
  emailBody += "\nCMake Cache Varibles: " + ' '.join(cmakeOptions) + "\n"
  if inOptions.extraCmakeOptions:
    emailBody += "Extra CMake Options: " + inOptions.extraCmakeOptions + "\n"
  if inOptions.makeOptions:
    emailBody += "Make Options: " + inOptions.makeOptions + "\n"
  if inOptions.ctestOptions:
    emailBody += "CTest Options: " + inOptions.ctestOptions + "\n"
  emailBody += "\n"
  emailBody += getStageStatus("Pull", inOptions.doPull, pullPassed, timings.pull)
  emailBody += getStageStatus("Configure", inOptions.doConfigure, configurePassed, timings.configure)
  emailBody += getStageStatus("Build", inOptions.doBuild, buildPassed, timings.build)
  emailBody += getStageStatus("Test", inOptions.doTest, testsPassed, timings.test)
  emailBody += "\n"
  if inOptions.doTest and testOutputExists and numTotalTests:
    fullCTestOutput = readStrFromFile(getTestOutputFileName())
    if inOptions.showAllTests:
      emailBody += fullCTestOutput
    else:
      emailBody += extractLinesAfterRegex(fullCTestOutput, r".*\% tests passed.*")
  else:
    emailBody += "\n***\n*** WARNING: There are no test results!\n***\n\n"
  endingTime = time.time()
  totalTime = (endingTime - startingTime) / 60.0
  emailBody += "\n"+getTotalTimeLineStr(buildTestCaseName, totalTime)+"\n"
  #print("emailBody:\n\n\n\n", emailBody, "\n\n\n\n")
  writeStrToFile(getEmailBodyFileName(), emailBody)
  if overallPassed:
    echoRunSysCmnd("touch "+getEmailSuccessFileName())
  print("")
  print("E.3) Send the email message ...")
  print("")
  # Several options can suppress sending the per-build-case email; check
  # them in order before actually invoking mailx.
  if inOptions.sendEmailTo and buildTestCase.skippedConfigureDueToNoEnables \
    and inOptions.abortGracefullyIfNoEnables \
    :
    print(buildTestCaseName + ": Skipping sending build/test case email " +
      "because there were no enables and --abort-gracefully-if-no-" +
      "enables was set!")
  elif inOptions.sendEmailTo and inOptions.sendBuildCaseEmail=="only-on-failure" \
    and overallPassed \
    :
    print(buildTestCaseName + ": Skipping sending build/test case email " +
      "because everything passed and --send-build-case-email=only-on-"
      "failure was set!")
  elif inOptions.sendEmailTo and inOptions.sendBuildCaseEmail=="never" \
    :
    print(buildTestCaseName + ": Skipping sending build/test case email " +
      "because everything passed and --send-build-case-email=never was " +
      "set!")
  elif inOptions.sendEmailTo and inOptions.sendEmailOnlyOnFailure and success:
    print(buildTestCaseName + ": Skipping sending build/test case email " +
      "because it passed and --send-email-only-on-failure was set!")
  elif inOptions.sendEmailTo and buildTestCase.skippedConfigureDueToNoEnables \
    and not inOptions.skipCaseSendEmail \
    :
    print("\nSkipping sending final status email for " + buildTestCase.name +
      " because it had no packages enabled and --skip-case-no-email was " +
      "set!")
  elif inOptions.sendEmailTo:
    emailAddresses = getEmailAddressesSpaceString(inOptions.sendEmailTo)
    echoRunSysCmnd("mailx -s \""+subjectLine+"\" "+emailAddresses+" < "+getEmailBodyFileName())
  else:
    print("Not sending email because no email addresses were given!")
  # 3) Return final result
  return success
def getBuildTestCaseSummary(testCaseName, trimDown = True):
  """Return the one-line summary (the first line of the case's email-body
  file) for a build/test case, or an error string when the file does not
  exist or is empty.

  With trimDown=True, the middle field is dropped from the
  '<status>: <project>/<case>: <details>' summary line.
  """
  # Get the email file
  absEmailBodyFileName = testCaseName+"/"+getEmailBodyFileName()
  if os.path.exists(absEmailBodyFileName):
    # Bug fix: use 'with' so the file handle is closed (it was leaked before)
    with open(absEmailBodyFileName, 'r') as emailBodyFile:
      testCaseEmailStrArray = emailBodyFile.readlines()
  else:
    testCaseEmailStrArray = None
  # Get the first line (which is the summary)
  testSummaryLine = None
  if testCaseEmailStrArray:
    summaryLine = testCaseEmailStrArray[0].strip()
    if trimDown:
      summaryLineArray = summaryLine.split(":")
      testSummaryLine = summaryLineArray[0].strip() + ": " + summaryLineArray[2].strip()
    else:
      testSummaryLine = summaryLine
  else:
    testSummaryLine = \
      "Error, The build/test was never completed!" \
      " (the file '"+absEmailBodyFileName+"' does not exist.)"
  return testSummaryLine
def getTestCaseEmailSummary(testCaseName, testCaseNum):
  """Return the full email-body text of one build/test case, indented under
  a numbered, underlined heading, for inclusion in the summary email."""
  # Get the email file
  absEmailBodyFileName = testCaseName+"/"+getEmailBodyFileName()
  if os.path.exists(absEmailBodyFileName):
    # Bug fix: use 'with' so the file handle is closed (it was leaked before)
    with open(absEmailBodyFileName, 'r') as emailBodyFile:
      testCaseEmailStrArray = emailBodyFile.readlines()
  else:
    testCaseEmailStrArray = None
  # Write the entry
  testCaseHeader = str(testCaseNum)+") "+testCaseName+" Results:"
  summaryEmailSectionStr = \
    "\n"+testCaseHeader+ \
    "\n"+getStrUnderlineStr(len(testCaseHeader))+"\n" \
    "\n"
  if testCaseEmailStrArray:
    for line in testCaseEmailStrArray:
      summaryEmailSectionStr += " " + line
    summaryEmailSectionStr += "\n"
  else:
    summaryEmailSectionStr += \
      "Error, The build/test was never completed!" \
      " (the file '"+absEmailBodyFileName+"' does not exist.)\n"
  return summaryEmailSectionStr
def getSummaryEmailSectionStr(inOptions, buildTestCaseList):
  """Concatenate the email summaries of every build/test case that actually
  ran (cases skipped for having no enabled packages are omitted)."""
  return "".join(
    getTestCaseEmailSummary(buildTestCase.name, buildTestCase.buildIdx)
    for buildTestCase in buildTestCaseList
    if buildTestCase.runBuildTestCase
      and not buildTestCase.skippedConfigureDueToNoEnables
    )
def cmakeScopedDefine(projectName, name, value):
  """
  Formats a CMake -D<projectName>_<name>=<value> argument.
  """
  return "-D{0}_{1}={2}".format(projectName, name, value)
def getEnablesLists(inOptions, validPackageTypesList, isDefaultBuild,
  skipCaseIfNoChangeFromDefaultEnables, tribitsGitRepos,
  baseTestDir, verbose \
  ):
  """Work out which packages to enable for one build/test case and build the
  corresponding list of cmake -D options.

  The enables come from --enable-all-packages=on, from an explicit
  --enable-packages list, or from the saved per-repo diff-output files;
  --enable-extra-packages adds to and --disable-packages subtracts from
  that set, which is then filtered by validPackageTypesList.

  Returns the tuple (cmakePkgOptions, enablePackagesList).
  NOTE(review): isDefaultBuild and skipCaseIfNoChangeFromDefaultEnables are
  not referenced in this body — presumably kept for call-signature
  compatibility; confirm against callers.
  """
  projectName = inOptions.projectName
  cmakePkgOptions = []
  enablePackagesList = []
  gitRepoList = tribitsGitRepos.gitRepoList()
  enableAllPackages = False
  if inOptions.enableAllPackages == "on":
    if verbose:
      print("\nEnabling all packages on request since " +
        "--enable-all-packages=on! ...")
      print("\nSkipping detection of changed packages since " +
        "--enable-all-packages=on ...")
    enableAllPackages = True
  elif inOptions.enablePackages:
    if verbose:
      print("\nEnabling only the explicitly specified packages '" +
        inOptions.enablePackages + "' ...")
    enablePackagesList = inOptions.enablePackages.split(',')
  else:
    # Derive the enables from the per-repo saved diff-output files
    for gitRepo in gitRepoList:
      diffOutFileName = baseTestDir+"/"+getModifiedFilesOutputFileName(gitRepo.repoName)
      if verbose:
        print("\nDetermining the set of packages to enable by examining " +
          diffOutFileName + " ...")
      if os.path.exists(diffOutFileName):
        changedFileDiffOutputStr = open(diffOutFileName, 'r').read()
        #print("\nchangedFileDiffOutputStr:\n", changedFileDiffOutputStr)
        extractPackageEnablesFromChangeStatus(changedFileDiffOutputStr, inOptions, gitRepo,
          enablePackagesList, verbose)
      else:
        if verbose:
          print("\nThe file " + diffOutFileName + " does not exist!\n")
  if not enableAllPackages and inOptions.enableExtraPackages:
    if verbose:
      print("\nEnabling extra explicitly specified packages '" +
        inOptions.enableExtraPackages + "' ...")
    enablePackagesList += inOptions.enableExtraPackages.split(',')
  if verbose:
    print("\nFull package enable list: [" + ','.join(enablePackagesList) + "]")
  if inOptions.disablePackages:
    if verbose:
      print("\nRemoving package enables: [" + inOptions.disablePackages + "]")
    for disablePackage in inOptions.disablePackages.split(","):
      packageIdx = findInSequence(enablePackagesList, disablePackage)
      if packageIdx >= 0:
        del enablePackagesList[packageIdx]
  if verbose:
    print("\nFiltering the set of enabled packages according to allowed " +
      "package types ...")
  # NOTE(review): origEnablePackagesList appears unused below — candidate
  # for removal.
  origEnablePackagesList = enablePackagesList[:]
  enablePackagesList = getDefaultProjectDependenices().filterPackageNameList(
    enablePackagesList, validPackageTypesList, verbose)
  if verbose:
    print("\nFinal package enable list: [" + ','.join(enablePackagesList) + "]")
  if tribitsGitRepos.numTribitsAllExtraRepos() > 0:
    cmakePkgOptions.extend(
      [
        cmakeScopedDefine(
          projectName, "PRE_REPOSITORIES:STRING",
          ','.join(tribitsGitRepos.tribitsPreRepoNamesList())),
        cmakeScopedDefine(
          projectName, "EXTRA_REPOSITORIES:STRING",
          ','.join(tribitsGitRepos.tribitsExtraRepoNamesList())),
        cmakeScopedDefine(
          projectName, "ENABLE_KNOWN_EXTERNAL_REPOS_TYPE", inOptions.extraReposType),
        cmakeScopedDefine(
          projectName, "EXTRAREPOS_FILE", getExtraReposFilePath(inOptions)),
        ]
      )
  for pkg in enablePackagesList:
    cmakePkgOptions.append(cmakeScopedDefine(projectName, "ENABLE_"+pkg+":BOOL", "ON"))
  cmakePkgOptions.append(cmakeScopedDefine(projectName, "ENABLE_ALL_OPTIONAL_PACKAGES:BOOL", "ON"))
  if inOptions.enableAllPackages == 'on':
    cmakePkgOptions.append(cmakeScopedDefine(projectName, "ENABLE_ALL_PACKAGES:BOOL", "ON"))
  if inOptions.enableFwdPackages:
    if verbose:
      print("\nEnabling forward packages on request!")
    cmakePkgOptions.append(cmakeScopedDefine(projectName, "ENABLE_ALL_FORWARD_DEP_PACKAGES:BOOL", "ON"))
  else:
    cmakePkgOptions.append(cmakeScopedDefine(projectName, "ENABLE_ALL_FORWARD_DEP_PACKAGES:BOOL", "OFF"))
  if inOptions.disablePackages:
    if verbose:
      print("\nAdding hard disables for specified packages '" +
        inOptions.disablePackages + "' ...\n")
    disablePackagesList = inOptions.disablePackages.split(',')
    for pkg in disablePackagesList:
      cmakePkgOptions.append(cmakeScopedDefine(projectName, "ENABLE_"+pkg+":BOOL", "OFF"))
  if verbose:
    print("\ncmakePkgOptions: " + str(cmakePkgOptions))
  return (cmakePkgOptions, enablePackagesList)
def runBuildTestCase(inOptions, tribitsGitRepos, buildTestCase, timings):
  """
  Run a single build/test case: configure with CMake, build with make, and
  run the tests with ctest, all inside the build directory
  <cwd>/<buildTestCase.name> (created if needed).

  Each phase writes its output to a file and, on success, touches a success
  marker file (e.g. getConfigureSuccessFileName()) so a later invocation of
  the script can detect prior success.  The timings object is updated in
  place with the configure/build/test wall-clock times.

  Returns True if all requested actions passed, False otherwise.  A failed
  configure/build/test raises internally and is caught so that the final
  analyze-and-email step always runs.
  """
  success = True
  startingTime = time.time()
  baseTestDir = os.getcwd()
  buildTestCaseName = buildTestCase.name
  if not performAnyActions(inOptions):
    print("\nNo other actions to perform!\n")
    return success

  print("\nCreating a new build directory if it does not already exist ...")
  createDir(buildTestCaseName)
  absBuildDir = os.path.join(baseTestDir, buildTestCaseName)
  echoChDir(absBuildDir)

  # Initialize these before the try block so the final analysis step below
  # cannot hit a NameError if an exception is raised before they get
  # assigned inside the try block.
  cmakeOptions = []
  enablePackagesList = []

  try:

    print("")
    print("A) Get the CMake configure options (" + buildTestCaseName + ") ...")
    print("")

    preConfigurePassed = True
    projectName = inOptions.projectName

    # A.1) Set the base options
    cmakeBaseOptions = []
    if inOptions.extraCmakeOptions:
      cmakeBaseOptions.extend(commandLineOptionsToList(inOptions.extraCmakeOptions))
    cmakeBaseOptions.append(cmakeScopedDefine(projectName,
      "TRIBITS_DIR:PATH", inOptions.tribitsDir))
    cmakeBaseOptions.append(cmakeScopedDefine(projectName,
      "ENABLE_TESTS:BOOL", "ON"))
    cmakeBaseOptions.append(cmakeScopedDefine(projectName,
      "TEST_CATEGORIES:STRING", inOptions.testCategories))
    cmakeBaseOptions.append(cmakeScopedDefine(projectName,
      "ALLOW_NO_PACKAGES:BOOL", "OFF"))
    if inOptions.ctestTimeOut:
      cmakeBaseOptions.append(("-DDART_TESTING_TIMEOUT:STRING="+str(inOptions.ctestTimeOut)))
    cmakeBaseOptions.extend(buildTestCase.extraCMakeOptions)
    result = readAndAppendCMakeOptions(
      inOptions.projectName,
      os.path.join("..", getCommonConfigFileName()),
      cmakeBaseOptions,
      True)
    if not result: preConfigurePassed = False
    # BUG FIX: this assignment used to go to a misspelled variable 'reuslt',
    # so a failure reading the build-specific config file was silently
    # ignored (the stale result from the common config file was re-checked).
    result = readAndAppendCMakeOptions(
      inOptions.projectName,
      os.path.join("..", getBuildSpecificConfigFileName(buildTestCaseName)),
      cmakeBaseOptions,
      buildTestCase.isDefaultBuild)
    if not result: preConfigurePassed = False
    print("\ncmakeBaseOptions: " + str(cmakeBaseOptions))

    # A.2) Set the package enable options
    cmakePkgOptions = []
    if preConfigurePassed:
      (cmakePkgOptions, enablePackagesList) = \
        getEnablesLists(inOptions, buildTestCase.validPackageTypesList,
          buildTestCase.isDefaultBuild,
          buildTestCase.skipCaseIfNoChangeFromDefaultEnables, tribitsGitRepos,
          baseTestDir, True)

    # A.3) Set the combined options
    if preConfigurePassed:
      cmakeOptions = cmakeBaseOptions + cmakePkgOptions
      print("\ncmakeOptions = " + str(cmakeOptions))
      print("\nCreating base configure file do-configure.base ...")
      createConfigureFile(cmakeBaseOptions, "cmake", inOptions.srcDir,
        "do-configure.base")
      print("\nCreating package-enabled configure file do-configure ...")
      createConfigureFile(cmakePkgOptions, "./do-configure.base", None,
        "do-configure")

    print("")
    print("B) Do the configuration with CMake (" + buildTestCaseName + ") ...")
    print("")

    configurePassed = False

    if inOptions.doConfigure and not preConfigurePassed:
      print("\nSKIPPED: " + buildTestCaseName + " configure skipped because " +
        "pre-configure failed (see above)!\n")
    elif not (enablePackagesList or inOptions.enableAllPackages == 'on'):
      print("\nSKIPPED: " + buildTestCaseName + " configure skipped because " +
        "no packages are enabled!\n")
      buildTestCase.skippedConfigureDueToNoEnables = True
    elif inOptions.doConfigure:
      # Remove any prior CMake state so this is a from-scratch configure.
      removeIfExists("CMakeCache.txt")
      removeDirIfExists("CMakeFiles")
      cmnd = "./do-configure"
      (configureRtn, timings.configure) = echoRunSysCmnd(cmnd,
        outFile=getConfigureOutputFileName(),
        timeCmnd=True, returnTimeCmnd=True, throwExcept=False
        )
      if configureRtn == 0:
        print("\nConfigure passed!\n")
        echoRunSysCmnd("touch "+getConfigureSuccessFileName())
        configurePassed = True
      else:
        print("\nConfigure failed returning " + str(configureRtn) + "!\n")
        raise Exception("Configure failed!")
    else:
      print("\nSkipping configure on request!\n")
      if os.path.exists(getConfigureSuccessFileName()):
        print("\nA current successful configure exists!\n")
        configurePassed = True
      else:
        print("\nFAILED: A current successful configure does *not* exist!\n")

    print("")
    print("C) Do the build ("+buildTestCaseName+") ...")
    print("")

    buildPassed = False

    if inOptions.doBuild and configurePassed:
      cmnd = "make"
      if inOptions.makeOptions:
        cmnd += " " + inOptions.makeOptions
      (buildRtn, timings.build) = echoRunSysCmnd(cmnd,
        outFile=getBuildOutputFileName(),
        timeCmnd=True, returnTimeCmnd=True, throwExcept=False
        )
      if buildRtn == 0:
        print("\nBuild passed!\n")
        echoRunSysCmnd("touch "+getBuildSuccessFileName())
        buildPassed = True
      else:
        print("\nBuild failed returning " + str(buildRtn) + "!\n")
        raise Exception("Build failed!")
    elif inOptions.doBuild and not configurePassed:
      print("\nSKIPPED: " + buildTestCaseName + " build skipped because " +
        "configure did not pass!\n")
    else:
      print("\nSkipping the build on request!\n")
      if os.path.exists(getBuildSuccessFileName()):
        print("\nA current successful build exists!\n")
        buildPassed = True
      else:
        print("\nFAILED: A current successful build does *not* exist!\n")

    print("")
    print("D) Run the tests (" + buildTestCaseName + ") ...")
    print("")

    if inOptions.doTest and buildPassed:
      cmnd = "ctest"
      if inOptions.ctestOptions:
        cmnd += " " + inOptions.ctestOptions
      (testRtn, timings.test) = echoRunSysCmnd(cmnd,
        outFile=getTestOutputFileName(),
        timeCmnd=True, returnTimeCmnd=True, throwExcept=False
        )
      if testRtn == 0:
        print("\nNo tests failed!\n")
        echoRunSysCmnd("touch "+getTestSuccessFileName())
      else:
        errStr = "FAILED: ctest failed returning "+str(testRtn)+"!"
        print("\n" + errStr + "\n")
        raise Exception(errStr)
    elif inOptions.doTest and buildTestCase.skippedConfigureDueToNoEnables:
      print("\nSKIPPED: " + buildTestCaseName + " tests skipped because no " +
        "packages are enabled!")
      echoRunSysCmnd("touch "+getTestSuccessFileName())
      # NOTE: We have to create this test success file because the presence
      # of this file is used to determine if the build/test case is
      # successful and therefore is okay to push.  This is needed when the
      # script is run a second time to determine if a build/test is
      # successful and therefore allow a push.
    else:
      print("\nSkipping the tests on request!\n")

  except Exception:
    success = False
    printStackTrace()

  print("")
  print("E) Analyze the overall results and send email notification (" +
    buildTestCaseName + ") ...")
  print("")

  if performAnyActions(inOptions):
    result = analyzeResultsSendEmail(inOptions, buildTestCase,
      enablePackagesList, cmakeOptions, startingTime, timings)
    if not result: success = False
  else:
    print("No actions performed, nothing to analyze!")

  return success
def cleanBuildTestCaseOutputFiles(runBuildTestCaseBool, inOptions, baseTestDir, buildTestCaseName):
  """
  Remove stale output and success-marker files (or the whole build
  directory under --wipe-clean) for one build/test case before it is rerun.
  Does nothing when the case will not be run, and only prints a note when
  its build directory does not exist yet.
  """
  if not runBuildTestCaseBool:
    return
  if not os.path.exists(buildTestCaseName):
    print("\nSkipping cleaning build/test files for " + buildTestCaseName +
      " because dir does not exist!\n")
    return
  if inOptions.wipeClean:
    # --wipe-clean removes the entire build directory.
    print("\nRemoving the existing build directory " + buildTestCaseName +
      " (--wipe-clean) ...")
    removeDirIfExists(buildTestCaseName)
  elif doRemoveOutputFiles(inOptions):
    # Remove only the output files for the phases being redone; redoing an
    # earlier phase invalidates the results of every later phase.
    echoChDir(buildTestCaseName)
    if inOptions.doConfigure or inOptions.doPull:
      removeIfExists(getConfigureOutputFileName())
      removeIfExists(getConfigureSuccessFileName())
    if inOptions.doBuild or inOptions.doConfigure or inOptions.doPull:
      removeIfExists(getBuildOutputFileName())
      removeIfExists(getBuildSuccessFileName())
    if inOptions.doTest or inOptions.doBuild or inOptions.doConfigure or inOptions.doPull:
      removeIfExists(getTestOutputFileName())
      removeIfExists(getTestSuccessFileName())
    removeIfExists(getEmailBodyFileName())
    removeIfExists(getEmailSuccessFileName())
    echoChDir("..")
def runBuildTestCaseDriver(inOptions, tribitsGitRepos, baseTestDir, buildTestCase, timings):
  """
  Driver for one build/test case: prints a banner, honors the case's
  run/skip flag, writes the default build-specific config file, and invokes
  runBuildTestCase(), converting any raised exception into a False return.
  Returns True on success, False otherwise.
  """
  caseName = buildTestCase.name
  print("\n***")
  print("*** Doing build and test of "+caseName+" ...")
  print("***\n")
  if not buildTestCase.runBuildTestCase:
    print("\nSkipping " + caseName + " build/test on request!\n")
    return True
  try:
    echoChDir(baseTestDir)
    writeDefaultBuildSpecificConfigFile(caseName)
    return bool(runBuildTestCase(inOptions, tribitsGitRepos, buildTestCase, timings))
  except Exception:
    printStackTrace()
    return False
def checkBuildTestCaseStatus(buildTestCase, inOptions):
  """
  Determine the overall status of one build/test case from the marker files
  left in its build directory.

  Returns a tuple (actionsPass, okayToCommit, statusMsg, timeInMin):
  actionsPass is True when the requested actions completed, okayToCommit is
  True when this case does not block a push, statusMsg is a one-line
  summary, and timeInMin is the total case time (-1.0 when unknown).
  """
  caseName = buildTestCase.name
  statusMsg = None
  timeInMin = -1.0

  # A case that was not run at all cannot affect push readiness.
  if not buildTestCase.runBuildTestCase:
    statusMsg = \
      "Test case "+caseName+" was not run! => Does not affect push readiness!"
    return (True, True, statusMsg, timeInMin)

  # A case skipped for having no enabled packages cannot affect it either.
  if buildTestCase.skippedConfigureDueToNoEnables:
    statusMsg = \
      "Skipped configure, build, test due to no enabled packages! => Does not affect push readiness!"
    return (True, True, statusMsg, timeInMin)

  buildDirExists = os.path.exists(caseName)

  if not buildDirExists and not performAnyBuildTestActions(inOptions):
    statusMsg = "No configure, build, or test for "+caseName+" was requested!"
    return (True, False, statusMsg, timeInMin)

  if not buildDirExists:
    statusMsg = "The directory "+caseName+" does not exist!"

  # The marker files touched after each successful phase record the status;
  # when the build dir is missing both checks are simply False.
  actionsPass = os.path.exists(caseName+"/"+getEmailSuccessFileName())
  okayToCommit = os.path.exists(caseName+"/"+getTestSuccessFileName())

  if not statusMsg:
    statusMsg = getBuildTestCaseSummary(caseName)

  # Pull the total wall-clock time out of the generated email body, if any.
  emailBodyFileName = caseName+"/"+getEmailBodyFileName()
  if os.path.exists(emailBodyFileName):
    timeInMinLine = getCmndOutput("grep '"+getTotalTimeBeginStr(caseName)+"' " + \
      emailBodyFileName, True, False)
    timeInMin = getTimeInMinFromTotalTimeLine(caseName, timeInMinLine)

  return (actionsPass, okayToCommit, statusMsg, timeInMin)
def getUserCommitMessageStr(inOptions):
  """
  Read and return the user's commit message subject/header text from the
  file named by inOptions.commitMsgHeaderFile (resolved relative to
  inOptions.srcDir when not an absolute path).
  """
  absCommitMsgHeaderFile = inOptions.commitMsgHeaderFile
  if not os.path.isabs(absCommitMsgHeaderFile):
    absCommitMsgHeaderFile = os.path.join(inOptions.srcDir, absCommitMsgHeaderFile)
  print("\nExtracting commit message subject and header from the file '" +
    absCommitMsgHeaderFile + "' ...\n")
  # Use a context manager so the file handle is closed deterministically
  # (the original leaked the handle from open(...).read()).
  with open(absCommitMsgHeaderFile, 'r') as commitMsgHeaderFile:
    commitEmailBodyStr = commitMsgHeaderFile.read()
  return commitEmailBodyStr
def getAutomatedStatusSummaryHeaderKeyStr():
  """Return the sentinel line that marks the start of the automatically
  appended build/test summary block in a commit message."""
  return "Build/Test Cases Summary"
def getAutomatedStatusSummaryHeaderStr():
  """Return the summary header key wrapped in newlines, ready to be
  appended to a commit message body."""
  return "\n" + getAutomatedStatusSummaryHeaderKeyStr() + "\n"
def getEnableStatusList(inOptions, enabledPackagesList):
  """
  Return a newline-terminated, human-readable multi-line string describing
  which packages were enabled/disabled for this run.
  """
  statusLines = ["Enabled Packages: " + ', '.join(enabledPackagesList)]
  if inOptions.disablePackages:
    statusLines.append("Disabled Packages: " + inOptions.disablePackages)
  if inOptions.enableAllPackages == "on":
    statusLines.append("Enabled all Packages")
  elif inOptions.enableFwdPackages:
    statusLines.append("Enabled all Forward Packages")
  return "\n".join(statusLines) + "\n"
# Extract the original log message from the output from:
#
# git cat-file -p HEAD
#
# This function strips off the git-generated header info and strips off the
# trailing build/test summary data.
#
# NOTE: This function assumes that there will be at least one blank line
# between the build/test summary data block and the original text message. If
# there is not, this function will throw!
#
def getLastCommitMessageStrFromRawCommitLogStr(rawLogOutput):
  """
  Extract the original commit message from raw 'git cat-file -p HEAD' output.

  Skips the git object header (everything up to the first blank line) and,
  if an automated 'Build/Test Cases Summary' block is present, strips that
  block along with the blank line(s) separating it from the message.

  Returns (lastCommitMessageStr, lastNumBlankLines) where lastNumBlankLines
  is the size of the blank-line run just before the summary header, or -1
  when no summary header was found.

  Raises Exception when a summary header is found that is not preceded by
  at least one blank line (i.e. a corrupted commit message).
  """
  origLogStrList = []
  pastHeader = False       # True once the git object header has been skipped
  numBlankLines = 0        # length of the blank-line run currently being scanned
  lastNumBlankLines = 0    # length of the most recently *completed* blank run
  foundStatusHeader = False
  for line in rawLogOutput.splitlines():
    #print("\nline = '" + line + "'\n")
    if pastHeader:
      origLogStrList.append(line)
      if line == "":
        numBlankLines += 1
      elif numBlankLines > 0:
        # A non-blank line terminates the current run of blank lines.
        lastNumBlankLines = numBlankLines
        numBlankLines = 0
      if line == getAutomatedStatusSummaryHeaderKeyStr():
        foundStatusHeader = True
        break
    if line == "":
      # The first blank line ends the git object header; note that this
      # blank line itself is not appended to origLogStrList.
      pastHeader = True
  if foundStatusHeader:
    #print("\nlastNumBlankLines =", lastNumBlankLines)
    #print("origLogStrList[-1] = '" + origLogStrList[-1] + "'")
    #print("origLogStrList[-2] = '" + origLogStrList[-2] + "'")
    if origLogStrList[-2] != "":
      raise Exception("Error, there must be at least one blank line before the" \
        " build/test summary block! This is a corrupted commit message. Please" \
        " use 'git commit --amend' and manually remove the 'Build/test Cases Summary' block.")
    # Drop the trailing lastNumBlankLines lines: the summary header line and
    # all but one of the blank lines before it, so the joined message ends
    # with exactly one newline.
    origLogStrList = origLogStrList[0:-lastNumBlankLines]
    lastCommitMessageStr = '\n'.join(origLogStrList)
  else:
    lastCommitMessageStr = ('\n'.join(origLogStrList))+'\n'
    lastNumBlankLines = -1 # Flag we did not find status header
  return (lastCommitMessageStr, lastNumBlankLines)
def getLastCommitMessageStr(inOptions, gitRepo):
  """Return the original (summary-stripped) commit message of HEAD in the
  given git repo."""
  # Get the raw output from the last current commit log
  rawLogOutput = getCmndOutput(
    inOptions.git+" cat-file -p HEAD",
    workingDir=getGitRepoDir(inOptions.srcDir, gitRepo.repoDir)
    )
  (commitMsg, _numBlankLines) = \
    getLastCommitMessageStrFromRawCommitLogStr(rawLogOutput)
  return commitMsg
def trimLineToLen(lineIn, numChars):
  """Return lineIn unchanged when it fits in numChars characters; otherwise
  truncate it to numChars characters and append '..'."""
  return lineIn if len(lineIn) <= numChars else lineIn[:numChars] + ".."
def getLocalCommitsSummariesStr(inOptions, gitRepo):
  """
  Return (and print) a one-line-per-commit summary string for the local
  commits in gitRepo that are not yet on the tracking branch.  The returned
  string starts with a '*** Commits for repo <name>:' header; each commit
  line is indented and trimmed to 90 characters.
  """
  # Get the list of local commits other than this one
  if gitRepo.gitRepoStats.numCommitsInt() > 0:
    rawLocalCommitsStr = getCmndOutput(
      inOptions.git+" log --oneline "+gitRepo.gitRepoStats.branch \
      +" ^"+gitRepo.gitRepoStats.trackingBranch,
      True,
      workingDir=getGitRepoDir(inOptions.srcDir, gitRepo.repoDir)
      )
  else:
    rawLocalCommitsStr = ""
  if gitRepo.repoName:
    repoName = gitRepo.repoName
    repoNameModifier = " ("+gitRepo.repoName+")"
  else:
    repoName = ""
    repoNameModifier = ""
  print("\nLocal commits for this build/test group" + repoNameModifier + ":" +
    "\n----------------------------------------" )
  localCommitsExist = (rawLocalCommitsStr != "\n" and rawLocalCommitsStr != "")
  if localCommitsExist:
    print(rawLocalCommitsStr)
  else:
    # BUG FIX: message used to read "No local commits exit!".
    print("No local commits exist!")
  localCommitsStr = \
    "*** Commits for repo "+repoName+":"
  if localCommitsExist:
    for localCommitLine in rawLocalCommitsStr.splitlines():
      localCommitsStr += ("\n  "+trimLineToLen(localCommitLine, 90))
  return localCommitsStr
def getLocalCommitsSHA1ListStr(inOptions, gitRepo):
  """
  Return a newline-terminated summary listing the short SHA1s of all local
  commits other than the top one, or "" when there is at most one local
  commit.
  """
  # Get the raw output from the last current commit log
  shaListOutput = getCmndOutput(
    inOptions.git+" log --pretty=format:'%h' "\
    +gitRepo.gitRepoStats.branch+" ^"+gitRepo.gitRepoStats.trackingBranch,
    True,
    workingDir=getGitRepoDir(inOptions.srcDir, gitRepo.repoDir)
    )
  shaList = shaListOutput.splitlines()
  if len(shaList) <= 1:
    return ""
  return "Other local commits for this build/test group: " \
    + ", ".join(shaList[1:]) + "\n"
# NOTE: Above, you have to use:
#
# git log --pretty='%h' <currentbranch> ^<trackingbranch>
#
# and pop off the top commit as shown above instead of:
#
# git log --pretty='%h' <currentbranch>^ ^<trackingbranch>
#
# The latter returns nothing when the top commit is a merge commit.
def getLocalCommitsExist(inOptions, gitRepo):
  """Return True when the repo has local commits not on the tracking branch."""
  return gitRepo.gitRepoStats.numCommitsInt() > 0
def matchProjectName(line):
  """
  Attempts to match and return the value of PROJECT_NAME in a line like
  SET(PROJECT_NAME <name>)
  If no match can be made, None is returned.
  """
  projectNamePattern = r'\s*[Ss][Ee][Tt]\s*\(\s*PROJECT_NAME\s+([^\)\s]*)\s*\).*'
  hit = re.search(projectNamePattern, line)
  return hit.group(1) if hit else None
def getProjectName(sourceDirectory):
  """
  Reads the project name from <root>/ProjectName.cmake

  Raises Exception when the file does not exist or when it never sets the
  PROJECT_NAME variable.
  """
  projectNameFile = os.path.join(sourceDirectory, 'ProjectName.cmake')
  if not os.path.exists(projectNameFile):
    raise Exception(
      "%s is required to exist for a valid Tribits project." % projectNameFile)
  # Scan the file line by line; 'with' guarantees the handle is closed
  # (the original never closed it).
  with open(projectNameFile, "r") as content:
    for line in content:
      name = matchProjectName(line)
      if name:
        return name
  # BUG FIX: the original raised the raw format string without ever applying
  # '% projectNameFile', so the message contained a literal '%s'.
  raise Exception(
    ('The file %s does not set the PROJECT_NAME variable. ' +
     'This is required of any Tribits project.') % projectNameFile)
def getRepoStatTableDirName(inOptions, repoDir):
  """
  Return the name to display for a repo in the status table: the repo's own
  directory name, or for the base repo (repoDir == "") the base-repo table
  name derived from the source directory.
  """
  if repoDir != "":
    return repoDir
  return gitdist.getBaseRepoTblName(
    gitdist.getBaseDirNameFromPath(os.path.abspath(inOptions.srcDir)))
def checkinTest(tribitsDir, inOptions, configuration={}):
"""
Main function for checkin testing.
"""
if not inOptions.projectName:
inOptions.projectName = getProjectName(inOptions.srcDir)
print("\n**********************************************")
print("*** Performing checkin testing of %s ***" % inOptions.projectName)
print("**********************************************")
setattr(inOptions, "tribitsDir", tribitsDir)
ciSupportDir = os.path.join(tribitsDir, 'ci_support')
setattr(inOptions, "ciSupportDir", ciSupportDir)
print("\nciSupportDir = " + ciSupportDir)
print("\nsrcDir = " + inOptions.srcDir)
baseTestDir = os.getcwd()
print("\nbaseTestDir = " + baseTestDir)
if inOptions.withoutDefaultBuilds:
inOptions.defaultBuilds = ''
if inOptions.doAll:
inOptions.doPull = True
inOptions.doConfigure = True
inOptions.doBuild = True
inOptions.doTest = True
if inOptions.localDoAll:
inOptions.allowNoPull = True
inOptions.doConfigure = True
inOptions.doBuild = True
inOptions.doTest = True
assertAndSetupGit(inOptions)
if inOptions.overallNumProcs:
inOptions.makeOptions = "-j"+inOptions.overallNumProcs+" "+inOptions.makeOptions
inOptions.ctestOptions = "-j"+inOptions.overallNumProcs+" "+inOptions.ctestOptions
assertExtraBuildConfigFiles(inOptions.extraBuilds)
assertExtraBuildConfigFiles(inOptions.stExtraBuilds)
if not inOptions.skipDepsUpdate:
removeIfExists(getProjectDependenciesXmlFileName(inOptions.projectName))
removeIfExists(getProjectDependenciesXmlGenerateOutputFileName(inOptions.projectName))
removeIfExists(getProjectExtraReposPythonOutFile(inOptions.projectName))
print("\n***")
print("*** 0) Read project dependencies files and build dependencies graph ...")
print("***")
tribitsGitRepos = TribitsGitRepos()
tribitsGitRepos.initFromCommandlineArguments(inOptions)
#print("\ntribitsGitRepos =", tribitsGitRepos)
createAndGetProjectDependencies(inOptions, baseTestDir, tribitsGitRepos)
# Assert the names of packages passed in
assertPackageNames("--enable-packages", inOptions.enablePackages)
assertPackageNames("--enable-extra-packages", inOptions.enableExtraPackages)
assertPackageNames("--disable-packages", inOptions.disablePackages)
success = True
timings = Timings()
subjectLine = None
# Set up build/test cases array
buildTestCaseList = []
cmakeConfig = configuration.get('cmake', {})
commonConfigOptions = cmakeConfig.get('common', [])
defaultBuilds = cmakeConfig.get('default-builds', [])
requestedDefaultBuilds = inOptions.defaultBuilds
for buildname, buildopts in defaultBuilds:
setBuildTestCaseInList(
buildTestCaseList,
buildname,
buildname in requestedDefaultBuilds,
["PT"],
True,
False,
commonConfigOptions \
+ [ cmakeScopedDefine(inOptions.projectName,
"ENABLE_SECONDARY_TESTED_CODE:BOOL", "OFF") ] \
+ buildopts \
)
if inOptions.stExtraBuilds:
for ssExtraBuild in inOptions.stExtraBuilds.split(','):
setBuildTestCaseInList(buildTestCaseList, ssExtraBuild, True,
["PT", "ST"], False, True, [])
allValidPackageTypesList = ["PT", "ST", "EX"]
if inOptions.extraBuilds:
for extraBuild in inOptions.extraBuilds.split(','):
setBuildTestCaseInList(buildTestCaseList, extraBuild, True,
allValidPackageTypesList, False, False, [])
try:
print("\n***")
print("*** 1) Clean old output files ...")
print("***")
if inOptions.doPull:
for gitRepo in tribitsGitRepos.gitRepoList():
removeIfExists(getInitialPullOutputFileName(gitRepo.repoName))
removeIfExists(getInitialExtraPullOutputFileName(gitRepo.repoName))
removeIfExists(getInitialPullSuccessFileName())
for gitRepo in tribitsGitRepos.gitRepoList():
removeIfExists(getFinalCommitBodyFileName(gitRepo.repoName))
removeIfExists(getFinalCommitOutputFileName(gitRepo.repoName))
removeIfExists(getCommitStatusEmailBodyFileName())
for gitRepo in tribitsGitRepos.gitRepoList():
removeIfExists(getModifiedFilesOutputFileName(gitRepo.repoName))
removeIfExists(getFinalPullOutputFileName(gitRepo.repoName))
removeIfExists(getPushOutputFileName(gitRepo.repoName))
if inOptions.executeOnReadyToPush:
removeIfExists(getExtraCommandOutputFileName())
for buildTestCase in buildTestCaseList:
cleanBuildTestCaseOutputFiles(
buildTestCase.runBuildTestCase, inOptions, baseTestDir, buildTestCase.name)
print("\n***")
print("*** 2) Get repo status")
print("***\n")
hasChangesToPush = getReposStats(inOptions, tribitsGitRepos)
# Determine if we will need to perform git diffs of
if inOptions.enableAllPackages == "on":
print("\n--enable-all-packages=on => git diffs w.r.t. tracking branch " +
"*will not* be needed to look for changed files!")
gitDiffsWrtTrackingBranchAreNeeded = False
elif (inOptions.enablePackages != "" and inOptions.enableAllPackages == "off"):
print("\n--enable-packages!='' and --enable-all-packages='off'" +
" => git diffs w.r.t. tracking branch *will not* be needed to " +
"look for changed files!")
gitDiffsWrtTrackingBranchAreNeeded = False
elif (inOptions.enablePackages == "" or inOptions.enableAllPackages == "auto"):
# If the user has not specified a set of packages to enable, or allows
# for logic that determines if all packages should be enabled (because
# base-level CMake files have changed), then we need to do git diffs to
# look for changed files. This is the default set of arguments.
print("\n--enable-packages='' or --enable-all-packages='auto'" +
" => git diffs w.r.t. tracking branch *will* be needed to look " +
"for changed files!")
gitDiffsWrtTrackingBranchAreNeeded = True
else:
# We should never get here, but just in case, let's do the diffs.
print("git diffs w.r.t. tracking branch may be needed to look for " +
"changed files?")
gitDiffsWrtTrackingBranchAreNeeded = True
# Determine if all repos must be on a branch and have a tracking branch
if gitDiffsWrtTrackingBranchAreNeeded:
print("\nNeed git diffs w.r.t. tracking branch so all repos must be on a" +
" branch and have a tracking branch!")
reposMustHaveTrackingBranch = True
elif inOptions.doPull:
print("\nDoing a pull so all repos must be on a branch and have a "
"tracking branch!")
reposMustHaveTrackingBranch = True
elif inOptions.doPush:
print("\nDoing a push so all repos must be on a branch and have a "
"tracking branch!")
reposMustHaveTrackingBranch = True
else:
print("\nNo need for repos to be on a branch with a tracking branch!")
reposMustHaveTrackingBranch = False
# Assert that all of the repos are on a branch with a tracking branch
if reposMustHaveTrackingBranch:
repoIdx = 0
for gitRepo in tribitsGitRepos.gitRepoList():
assertRepoHasBranchAndTrackingBranch(inOptions, gitRepo)
print("\n***")
print("*** 3) Pull updated commits for %s ..." % inOptions.projectName)
print("***")
reposAreClean = True
pullPassed = True
doingAtLeastOnePull = inOptions.doPull
pulledSomeChanges = False
pulledSomeExtraChanges = False
if not doingAtLeastOnePull:
print("\nSkipping all pulls on request!\n")
if doingAtLeastOnePull and pullPassed:
#
print("\n3.a) Check that there are no uncommited and no new unknown "
"files before doing the pull(s) ...\n")
#
repoIdx = 0
print(tribitsGitRepos.gitRepoList())
for gitRepo in tribitsGitRepos.gitRepoList():
print("\n3.a." + str(repoIdx) + ") Git Repo: '" + gitRepo.repoName +
"'")
# See if the repo is clean
if gitRepo.gitRepoStats.numModifiedInt() > 0:
repoNotCleanMsg = "\nERROR: There are changed uncommitted files => cannot continue!"
reposAreClean = False
if gitRepo.gitRepoStats.numUntrackedInt() > 0:
repoNotCleanMsg = "\nERROR: There are newly created uncommitted files => Cannot continue!"
reposAreClean = False
if not reposAreClean:
print(repoNotCleanMsg)
gitStatusOutput = getCmndOutput(inOptions.git+" status", True, throwOnError=False,
workingDir=getGitRepoDir(inOptions.srcDir, gitRepo.repoDir))
print(
"\nOutput from 'git status':\n" +
"\n--------------------------------------------------------------\n" +
gitStatusOutput +
"\n--------------------------------------------------------------\n")
print(
"\nExplanation: In order to do a meaningful test to allow a push, all files\n"
"in the local repo must be committed. Otherwise, if there are changed but not\n"
"committed files or new unknown files that are used in the build or the test, then\n"
"what you are testing is *not* what you will be pushing. If you have changes that\n"
"you don't want to push, then try using 'git stash' before you run this script to\n"
"stash away all of the changes you don't want to push. That way, what you are testing\n"
"will be consistent with what you will be pushing.\n")
pullPassed = False
#print("gitRepo =", gitRepo)
repoIdx += 1
if doingAtLeastOnePull and pullPassed:
# NOTE: We want to pull first from the global repo and then from the
# extra repo so the extra repo's revisions will get rebased on top of
# the others. This is what you would want and expect for the remote
# test/push process where multiple pulls may be needed before it works.
#
print("\n3.b) Pull updates from remote tracking branch ...")
#
if inOptions.doPull and pullPassed:
repoIdx = 0
for gitRepo in tribitsGitRepos.gitRepoList():
print("\n3.b." + str(repoIdx) + ") Git Repo: " + gitRepo.repoName)
echoChDir(baseTestDir)
(pullRtn, pullTimings, pullGotChanges) = executePull(
gitRepo,
inOptions, baseTestDir,
getInitialPullOutputFileName(gitRepo.repoName))
if pullGotChanges:
pulledSomeChanges = True
timings.pull += pullTimings
if pullRtn != 0:
print("\nPull failed!\n")
pullPassed = False
break
repoIdx += 1
else:
print("\nSkipping initial pull from remote tracking branch!\n")
#
print("\n3.c) Pull extra updates for --extra-pull-from='" +
inOptions.extraPullFrom + "' ...")
#
timings.pull = 0
if inOptions.extraPullFrom and pullPassed:
repoExtraRemotePullsList = \
parseExtraPullFromArgs(tribitsGitRepos.gitRepoList(), inOptions.extraPullFrom)
repoIdx = 0
for repoExtraRemotePulls in repoExtraRemotePullsList:
gitRepo = repoExtraRemotePulls.gitRepo
remoteRepoAndBranchList = repoExtraRemotePulls.remoteRepoAndBranchList
if not remoteRepoAndBranchList:
continue
print("\n3.c." + str(repoIdx) + ") Git Repo: " + gitRepo.repoName)
echoChDir(baseTestDir)
for remoteRepoAndBranch in remoteRepoAndBranchList:
(pullRtn, pullTimings, pullGotChanges) = executePull(
gitRepo,
inOptions, baseTestDir,
getInitialExtraPullOutputFileName(gitRepo.repoName),
remoteRepoAndBranch )
if pullGotChanges:
pulledSomeChanges = True
pulledSomeExtraChanges = True
timings.pull += pullTimings
if pullRtn != 0:
print("\nPull failed!\n")
pullPassed = False
break
if pullRtn != 0:
break
repoIdx += 1
else:
print("\nSkipping extra pull from '" + inOptions.extraPullFrom + "'!\n")
# Given overall status of the pulls and determine if to abort gracefully
if pulledSomeChanges:
print("\nThere where at least some changes pulled!")
else:
print("\nNo changes were pulled!")
# Determine if extra changes were pulled and if to get repo status again
if pulledSomeExtraChanges:
print("\nExtra pull pulled new commits so need to get repo status "
"again ...\n")
if getReposStats(inOptions, tribitsGitRepos):
hasChangesToPush = True
#
print("\nDetermine overall pull pass/fail ...\n")
#
echoChDir(baseTestDir)
# Check for prior successful initial pull
currentSuccessfullPullExists = os.path.exists(getInitialPullSuccessFileName())
if inOptions.doPull:
if pullPassed:
print("\nPull passed!\n")
echoRunSysCmnd("touch "+getInitialPullSuccessFileName())
else:
print("\nPull failed!\n")
elif currentSuccessfullPullExists:
print("\nA previous pull was performed and was successful!")
pullPassed = True
elif inOptions.allowNoPull:
print("\nNot performing pull since --allow-no-pull was passed in\n")
pullPassed = True
else:
print("\nNo previous successful pull is still current!")
pullPassed = False
# Update for current successful pull
currentSuccessfullPullExists = os.path.exists(getInitialPullSuccessFileName())
print("\n***")
print("*** 4) Get the list of all the modified files ...")
print("***")
if pullPassed:
if gitDiffsWrtTrackingBranchAreNeeded:
for gitRepo in tribitsGitRepos.gitRepoList():
getCurrentDiffOutputAndLogModified(inOptions, gitRepo, baseTestDir)
else:
print("\nSkipping getting list of modified files because not "
"needed!\n")
else:
print("\nSkipping getting list of modified files because pull failed!\n")
print("\n***")
print("*** 5) Running the different build/test cases ...")
print("***")
# Determine if we will run the build/test cases or not
# Set runBuildCases flag and other logic
abortGracefullyDueToNoUpdates = False
abortGracefullyDueToNoChangesToPush = False
if not performAnyBuildTestActions(inOptions):
print("\nNot performing any build cases because no --configure, " +
"--build or --test was specified!\n")
runBuildCases = False
elif doingAtLeastOnePull:
if reposAreClean and not pulledSomeChanges and \
inOptions.abortGracefullyIfNoChangesPulled \
:
print("\nNot performing any build cases because pull did not bring "
"any *new* commits and --abort-gracefully-if-no-changes-pulled "
"was set!\n")
abortGracefullyDueToNoUpdates = True
runBuildCases = False
elif reposAreClean and not hasChangesToPush and \
inOptions.abortGracefullyIfNoChangesToPush \
:
print("\nNot perfoming any build cases because there are no local "
"changes to push and --abort-gracefully-if-no-changes-to-push!\n")
abortGracefullyDueToNoChangesToPush = True
runBuildCases = False
elif pullPassed:
print("\nThe pull passsed, running the build/test cases ...\n")
runBuildCases = True
else:
print("\nNot running any build/test cases because the pull failed!\n")
runBuildCases = False
else:
if inOptions.allowNoPull:
print("\nNo pull was attempted but we are running the build/test cases "
"anyway because --allow-no-pull was specified ...\n")
runBuildCases = True
elif os.path.exists(getInitialPullSuccessFileName()):
print("\nA previous pull was successful, running build/test cases "
"...!\n")
runBuildCases = True
else:
print("\nNot running any build/test cases because no pull was "
"attempted!\n\nHint: Use --allow-no-pull to allow build/test "
"cases to run without having to do a pull first!")
runBuildCases = False
# Run the build/test cases
buildTestCasesPassed = True
if runBuildCases:
echoChDir(baseTestDir)
writeDefaultCommonConfigFile()
print("\nSetting up to run the build/test cases:")
for i in range(len(buildTestCaseList)):
buildTestCase = buildTestCaseList[i]
print(str(i) + ") " + buildTestCase.name + ": ", end="")
if buildTestCase.runBuildTestCase:
print("Will attempt to run!")
else:
print("Will *not* attempt to run on request!")
for buildTestCase in buildTestCaseList:
buildTestCase.timings = timings.deepCopy()
result = runBuildTestCaseDriver(
inOptions,
tribitsGitRepos,
baseTestDir,
buildTestCase,
buildTestCase.timings
)
if not result:
buildTestCasesPassed = False
success = False
print("\n***")
print("*** 6) Determine overall success and push readiness ...")
print("***")
okayToCommit = False
okayToPush = False
forcedCommitPush = False
abortedCommitPush = False
atLeastOneConfigureBuildAttemptPassed = False
if inOptions.doPushReadinessCheck:
echoChDir(baseTestDir)
okayToCommit = success
subjectLine = None
commitEmailBodyExtra = ""
shortCommitEmailBodyExtra = ""
(cmakePkgOptions, enabledPackagesList) = \
getEnablesLists(inOptions, allValidPackageTypesList, False, False,
tribitsGitRepos, baseTestDir, False)
enableStatsListStr = getEnableStatusList(inOptions, enabledPackagesList)
commitEmailBodyExtra += enableStatsListStr
shortCommitEmailBodyExtra += enableStatsListStr
commitEmailBodyExtra += \
"\nBuild test results:" \
"\n-------------------\n"
for i in range(len(buildTestCaseList)):
buildTestCase = buildTestCaseList[i]
buildTestCaseName = buildTestCase.name
(buildTestCaseActionsPass, buildTestCaseOkayToCommit, statusMsg, timeInMin) = \
checkBuildTestCaseStatus(buildTestCase, inOptions)
buildTestCaseStatusStr = str(i)+") "+buildTestCaseName+" => "+statusMsg
if not buildTestCaseOkayToCommit:
buildTestCaseStatusStr += " => Not ready to push!"
buildTestCaseStatusStr += " ("+formatMinutesStr(timeInMin)+")\n"
print(buildTestCaseStatusStr)
commitEmailBodyExtra += buildTestCaseStatusStr
shortCommitEmailBodyExtra += buildTestCaseStatusStr
#print("buildTestCaseActionsPass =", buildTestCaseActionsPass)
if not buildTestCaseActionsPass:
success = False
if not buildTestCaseOkayToCommit:
okayToCommit = False
#print("buildTestCaseOkayToCommit =", buildTestCaseOkayToCommit)
if buildTestCase.runBuildTestCase and buildTestCaseOkayToCommit \
and not buildTestCase.skippedConfigureDueToNoEnables \
:
#print("Setting atLeastOneConfigureBuildAttemptPassed=True")
atLeastOneConfigureBuildAttemptPassed = True
if not atLeastOneConfigureBuildAttemptPassed:
print("\nThere were no successfuly attempts to configure/build/test!")
okayToCommit = False
if not okayToCommit:
print("\nAt least one of the actions (pull, configure, built, test)"
" failed or was not performed correctly!\n")
# Determine if we should do a forced push
if inOptions.doPushReadinessCheck and not okayToCommit and inOptions.forcePush \
:
forcedPushMsg = \
"\n***" \
"\n*** WARNING: The acceptance criteria for doing a push has *not*" \
"\n*** been met, but a push is being forced anyway by --force-push!" \
"\n***\n"
print(forcedPushMsg)
okayToCommit = True
forcedCommitPush = True
# Determine if a push is ready to try or not
if okayToCommit:
if currentSuccessfullPullExists:
print("\nA current successful pull also exists => Ready for final "
"push!\n")
okayToPush = True
else:
commitEmailBodyExtra += \
"\nA current successful pull does *not* exist => Not ready for final push!\n" \
"\nExplanation: In order to safely push, the local working directory needs\n" \
"to be up-to-date with the global repo or a full integration has not been\n" \
"performed!\n"
print(commitEmailBodyExtra)
okayToPush = False
abortedCommitPush = True
else:
okayToPush = False
if okayToPush:
print("\n => A PUSH IS READY TO BE PERFORMED!")
else:
print("\n => A PUSH IS *NOT* READY TO BE PERFORMED!")
else:
print("\nSkipping push readiness check on request!")
okayToCommit = False
okayToPush = False
print("\n***")
print("*** 7) Do final push ...")
print("***")
# Attempt the final pull, commit amend, and push
pullFinalPassed = True
amendFinalCommitPassed = True
pushPassed = True
didPush = False
if not inOptions.doPush:
print("\nNot doing the push but sending an email"
" about the commit/push readiness status ...")
if okayToPush:
subjectLine = "READY TO PUSH"
else:
subjectLine = "NOT READY TO PUSH"
elif not okayToPush:
print("\nNot performing push due to prior errors\n")
pushPassed = False
else: # inOptions.doPush and okayToPush:
#
print("\n7.a) Performing a final pull to make sure there are no "
"conflicts for push ...\n")
#
if not okayToPush:
print("\nSkippng final pull due to prior errors!\n")
pullFinalPassed = False
else:
print("\nExplanation: In order to push, the local repo needs to be "
"up-to-date\nwith the global repo or the push will not be "
"allowed. Therefore, a pull\nbefore the push must be performed "
"if there are updates in the global reop\nregardless if --pull "
"was specified or not. Also, a rebase might be done in\norder "
"to get a linear history required by the hooks in the main "
"repository.\n")
doFinalRebase = inOptions.rebase
if not doFinalRebase:
print("Skipping the final rebase on request! (see --no-rebase "
"option)")
pullFinalPassed = True
repoIdx = 0
for gitRepo in tribitsGitRepos.gitRepoList():
print("\n7.a." + str(repoIdx) + ") Git Repo: '" + gitRepo.repoName +
"'")
(pull2Rtn, pull2Time, pullGotChanges) = \
executePull(gitRepo, inOptions, baseTestDir,
getFinalPullOutputFileName(gitRepo.repoName), None,
doFinalRebase )
if pull2Rtn != 0:
pullFinalPassed = False
break
repoIdx += 1
if pullFinalPassed:
print("\nFinal pull passed!\n")
else:
print("\nFinal pull failed!\n")
if not pullFinalPassed: okayToPush = False
#
print("\n7.b) Amending the final commit message by appending test "
"results ...\n")
#
if not inOptions.appendTestResults:
print("\nSkipping appending test results on request (--no-append-test-"
"results)!\n")
elif not okayToPush:
print("\nSkippng appending test results due to prior errors!\n")
amendFinalCommitPassed = False
else: # inOptions.appendTestResults and okayToPush
print("\nAttempting to amend the final commmit message ...\n")
repoIdx = 0
for gitRepo in tribitsGitRepos.gitRepoList():
print("\n7.b." + str(repoIdx) + ") Git Repo: '" + gitRepo.repoName +
"'")
try:
if gitRepo.gitRepoStats.numCommitsInt() > 0:
# Get info about current commit and local commits
lastCommitMessageStr = getLastCommitMessageStr(inOptions, gitRepo)
localCommitSHA1ListStr = getLocalCommitsSHA1ListStr(inOptions, gitRepo)
# Get then final commit message
finalCommitEmailBodyStr = lastCommitMessageStr
finalCommitEmailBodyStr += getAutomatedStatusSummaryHeaderStr()
finalCommitEmailBodyStr += shortCommitEmailBodyExtra.encode("utf8")
finalCommitEmailBodyStr += localCommitSHA1ListStr
if forcedCommitPush:
finalCommitEmailBodyStr += "WARNING: Forced the push!\n"
finalCommitEmailBodyFileName = getFinalCommitBodyFileName(gitRepo.repoName)
writeStrToFile(finalCommitEmailBodyFileName, finalCommitEmailBodyStr)
# Amend the final commit message
commitAmendRtn = echoRunSysCmnd(
inOptions.git+" commit --amend" \
" -F "+os.path.join(baseTestDir, finalCommitEmailBodyFileName),
workingDir=getGitRepoDir(inOptions.srcDir, gitRepo.repoDir),
outFile=os.path.join(baseTestDir, getFinalCommitOutputFileName(gitRepo.repoName)),
timeCmnd=True, throwExcept=False
)
if commitAmendRtn != 0:
amendFinalCommitPassed = False
break
else:
print("\nSkipping amending last commit because there are no "
"local commits!\n")
except Exception as e:
success = False
amendFinalCommitPassed = False
printStackTrace()
repoIdx += 1
# end for
if amendFinalCommitPassed:
print("\nAppending test results to last commit passed!\n")
else:
print("\nAppending test results to last commit failed!\n")
if not amendFinalCommitPassed: okayToPush = False
# End final pull and amend commit message block
# Jump out if the above if block and get the list of local commits. You
# have to get this list after a final rebase and after the top commit is
# amended so that you get the right SHA1s. But you have to do this
# *before* the push or there will not be any local commits!
allLocalCommitSummariesStr = ""
if inOptions.doPushReadinessCheck:
repoIdx = 0
for gitRepo in tribitsGitRepos.gitRepoList():
localCommitSummariesStr = \
getLocalCommitsSummariesStr(inOptions, gitRepo)
if allLocalCommitSummariesStr:
allLocalCommitSummariesStr += ("\n" + localCommitSummariesStr)
else:
allLocalCommitSummariesStr = localCommitSummariesStr
repoIdx += 1
# Jump back into the push block and do the actual push
if inOptions.doPush:
#
print("\n7.c) Pushing the the local commits to the global repo ...\n")
#
if not okayToPush:
print("\nNot performing push due to prior errors!\n")
pushPassed = False
else:
print("\nAttempting to do the push ...")
debugSkipPush = os.environ.get("CHECKIN_TEST_SKIP_PUSH","")
#print("debugSkipPush =", debugSkipPush)
#debugSkipPush = True
didAtLeastOnePush = False
repoIdx = 0
for gitRepo in tribitsGitRepos.gitRepoList():
print("\n7.c." + str(repoIdx) + ") Git Repo: '" + gitRepo.repoName +
"'")
if gitRepo.gitRepoStats.numCommitsInt() > 0:
if not debugSkipPush:
pushRtn = echoRunSysCmnd(
inOptions.git+" push "+pushToTrackingBranchArgs(gitRepo),
workingDir=getGitRepoDir(inOptions.srcDir, gitRepo.repoDir),
outFile=os.path.join(baseTestDir, getPushOutputFileName(gitRepo.repoName)),
throwExcept=False, timeCmnd=True )
didAtLeastOnePush = True
else:
print("\nSkipping push due to debug override ...")
pushRtn = 0
if pushRtn != 0:
pushPassed = False
break
else:
print("\nSkipping push to '" + gitRepo.repoName + "' because " +
"there are no commits!")
repoIdx += 1
# end for
if pushPassed:
if didAtLeastOnePush:
print("\nPush passed!\n")
didPush = True
else:
print("\nPush failed because the push was never attempted!")
else:
print("\nPush failed!\n")
if not pushPassed: okayToPush = False
# End push block
print("\n***")
print("*** 8) Set up to run execute extra command on ready to push ...")
print("***")
if inOptions.executeOnReadyToPush and not okayToPush:
print("\nNot executing final command (" + inOptions.executeOnReadyToPush +
") since a push is not okay to be performed!\n")
elif inOptions.executeOnReadyToPush and okayToPush:
executeCmndStr = "\nExecuting final command ("+inOptions.executeOnReadyToPush+") since" \
+" a push is okay to be performed!\n"
commitEmailBodyExtra += executeCmndStr
print(executeCmndStr)
else:
print("\nNot executing final command since none was given ...\n")
print("\n***")
print("*** 9) Create and send push (or readiness status) notification email ...")
print("***\n")
allConfiguresAbortedDueToNoEnablesGracefullAbort = False
if inOptions.doPushReadinessCheck:
#
print("\n9.a) Getting final status to send out in the summary email ...\n")
#
grepCheckinTestOutForFailed_msg = \
"\n\nTo find out more about this failure, grep the 'checkin-test.out' log" \
" file for 'failed'. In some cases, the failure will be obvious. In other" \
" cases, a system command failed and the details about the failure will be in" \
" the output file for the command that failed.\n\n"
# Determine if all configures were aborted because no package enables
allConfiguresAbortedDueToNoEnablesGracefullAbort = True
for buildTestCase in buildTestCaseList:
if not buildTestCase.skippedConfigureDueToNoEnables:
allConfiguresAbortedDueToNoEnablesGracefullAbort = False
if not pullPassed:
subjectLine = "INITIAL PULL FAILED"
commitEmailBodyExtra += "\n\nFailed because initial pull failed!" \
+grepCheckinTestOutForFailed_msg
success = False
elif abortGracefullyDueToNoUpdates:
subjectLine = "ABORTED DUE TO NO UPDATES"
commitEmailBodyExtra += "\n\nAborted because no updates and --abort-gracefully-if-no-changes-pulled was set!\n\n"
success = True
elif abortGracefullyDueToNoChangesToPush:
subjectLine = "ABORTED DUE TO NO CHANGES TO PUSH"
commitEmailBodyExtra += "\n\nAborted because no changes to push and --abort-gracefully-if-no-changes-to-push was set!\n\n"
success = True
elif allConfiguresAbortedDueToNoEnablesGracefullAbort:
subjectLine = "ABORTED DUE TO NO ENABLES"
commitEmailBodyExtra += "\n\nAborted because no enables and --abort-gracefully-if-no-enables was set!\n\n"
success = True
elif not pullFinalPassed:
subjectLine = "FINAL PULL FAILED"
commitEmailBodyExtra += "\n\nFailed because the final pull failed!" \
+grepCheckinTestOutForFailed_msg
success = False
elif not amendFinalCommitPassed:
subjectLine = "AMEND COMMIT FAILED"
commitEmailBodyExtra += "\n\nFailed because the final test commit amend failed!" \
+grepCheckinTestOutForFailed_msg
success = False
elif inOptions.doPush and pushPassed and forcedCommitPush:
subjectLine = "DID FORCED PUSH"
commitEmailBodyExtra += forcedPushMsg
success = True
commitEmailBodyExtra += forcedPushMsg
elif not buildTestCasesPassed:
subjectLine = "FAILED CONFIGURE/BUILD/TEST"
commitEmailBodyExtra += "\n\nFailed because one of the build/test cases failed!\n"
success = False
elif inOptions.doPush:
if didPush and not forcedCommitPush:
subjectLine = "DID PUSH"
elif abortedCommitPush:
subjectLine = "ABORTED COMMIT/PUSH"
commitEmailBodyExtra += "\n\nCommit/push was never attempted since commit/push" \
" criteria failed!\n\n"
success = False
else:
subjectLine = "PUSH FAILED"
commitEmailBodyExtra += "\n\nFailed because push failed!" \
+grepCheckinTestOutForFailed_msg
success = False
else:
if okayToPush:
subjectLine = "READY TO PUSH"
else:
subjectLine = "NOT READY TO PUSH"
#
print("\n9.b) Create and send out push (or readiness status) notification email ...")
#
subjectLine += ": %s: %s" % (inOptions.projectName, getHostname())
emailBodyStr = subjectLine + "\n\n"
emailBodyStr += getCmndOutput("date", True) + "\n\n"
emailBodyStr += commitEmailBodyExtra + "\n"
emailBodyStr += allLocalCommitSummariesStr + "\n"
emailBodyStr += getSummaryEmailSectionStr(inOptions, buildTestCaseList)
print("\nCommit status email being sent:\n"
"--------------------------------\n\n\n\n" + emailBodyStr +
"\n\n\n\n")
summaryCommitEmailBodyFileName = getCommitStatusEmailBodyFileName()
writeStrToFile(summaryCommitEmailBodyFileName, emailBodyStr)
if inOptions.sendEmailTo and abortGracefullyDueToNoUpdates:
print("\nSkipping sending final email because there were no updates"
" and --abort-gracefully-if-no-changes-pulled was set!")
elif inOptions.sendEmailTo and abortGracefullyDueToNoChangesToPush:
print("\nSkipping sending final email because there are no local "
"changes to push and --abort-gracefully-if-no-changes-to-push "
"was set!")
elif inOptions.sendEmailTo and allConfiguresAbortedDueToNoEnablesGracefullAbort:
print("\nSkipping sending final email because there were no enables"
" and --abort-gracefully-if-no-enables was set!")
elif inOptions.sendEmailTo and inOptions.sendEmailOnlyOnFailure and success:
print("\nSkipping sending final email because it passed"
" and --send-email-only-on-failure was set!")
elif inOptions.sendEmailTo:
emailAddresses = getEmailAddressesSpaceString(inOptions.sendEmailTo)
if inOptions.sendEmailToOnPush and didPush:
emailAddresses += " " + getEmailAddressesSpaceString(inOptions.sendEmailToOnPush)
echoRunSysCmnd("mailx -s \""+subjectLine+"\" " \
+emailAddresses+" < "+summaryCommitEmailBodyFileName)
else:
print("\nNot sending push readiness status email because --send-email-"
"to is empty!")
else:
print("\nNot performing push or sending out push readiness status on "
"request!")
print("\n***")
print("*** 10) Run execute extra command on ready to push ...")
print("***")
if inOptions.executeOnReadyToPush and okayToPush:
print(executeCmndStr)
extraCommandRtn = echoRunSysCmnd(
inOptions.executeOnReadyToPush,
workingDir=baseTestDir,
outFile=os.path.join(baseTestDir, getExtraCommandOutputFileName()),
throwExcept=False, timeCmnd=True )
if extraCommandRtn == 0:
print("\nExtra command passed!\n")
else:
print("\nExtra command failed!\n")
success = False
else:
print("\nNot executing final command ...\n")
if not performAnyActions(inOptions) and not inOptions.doPush:
print("\n***\n"
"*** WARNING: No actions were performed!\n"
"***\n"
"*** Hint: Specify --do-all to perform full integration pull/build/test\n"
"*** or --push to push the commits for a previously run test!\n"
"***\n\n")
except Exception as e:
success = False
printStackTrace()
g_sysCmndInterceptor.assertAllCommandsRun()
# Print the final status at the very end
if subjectLine:
print("\n\n" + subjectLine + "\n\n")
return success
|
axonchisel/ax_metrics | refs/heads/master | py/axonchisel/metrics/io/emfetch/plugins/__init__.py | 1 | """
Ax_Metrics - Default EMFetcher plugin class aggregation
------------------------------------------------------------------------------
Author: Dan Kamins <dos at axonchisel dot net>
Copyright (c) 2014 Dan Kamins, AxonChisel.net
"""
# ----------------------------------------------------------------------------
#
# Default Plugins
#
from .emf_random import EMFetcher_random
from .emf_http import EMFetcher_http
|
smart-developerr/my-first-blog | refs/heads/master | Lib/site-packages/django/contrib/sessions/apps.py | 591 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SessionsConfig(AppConfig):
    """App configuration for ``django.contrib.sessions``.

    Registers the sessions contrib app under its dotted path and gives it
    a translatable human-readable name for the admin.
    """
    name = 'django.contrib.sessions'
    verbose_name = _("Sessions")
|
evanma92/routeh | refs/heads/master | flask/lib/python2.7/site-packages/sqlalchemy/testing/exclusions.py | 21 | # testing/exclusions.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import operator
from .plugin.plugin_base import SkipTest
from ..util import decorator
from . import config
from .. import util
import inspect
import contextlib
def skip_if(predicate, reason=None):
    """Return a :class:`.compound` rule that skips the test when
    *predicate* evaluates true for the active configuration."""
    result = compound()
    result.skips.add(_as_predicate(predicate, reason))
    return result
def fails_if(predicate, reason=None):
    """Return a :class:`.compound` rule marking the test as an expected
    failure when *predicate* evaluates true."""
    result = compound()
    result.fails.add(_as_predicate(predicate, reason))
    return result
class compound(object):
    """A composable collection of skip and expected-failure predicates.

    Instances act as decorators (or as context managers via
    :meth:`.fail_if`) around test callables: ``skips`` predicates cause
    the test to be skipped, ``fails`` predicates mark it as an expected
    failure for matching configurations.
    """

    def __init__(self):
        self.fails = set()
        self.skips = set()

    def __add__(self, other):
        # ``rule1 + rule2`` merges both rule sets into a new compound.
        return self.add(other)

    def add(self, *others):
        """Return a new compound merging this rule with all *others*."""
        copy = compound()
        copy.fails.update(self.fails)
        copy.skips.update(self.skips)
        for other in others:
            copy.fails.update(other.fails)
            copy.skips.update(other.skips)
        return copy

    def not_(self):
        """Return a new compound with every predicate logically negated."""
        copy = compound()
        copy.fails.update(NotPredicate(fail) for fail in self.fails)
        copy.skips.update(NotPredicate(skip) for skip in self.skips)
        return copy

    @property
    def enabled(self):
        # Convenience accessor evaluated against the currently active
        # test configuration.
        return self.enabled_for_config(config._current)

    def enabled_for_config(self, config):
        """True when no skip or fail predicate matches *config*."""
        for predicate in self.skips.union(self.fails):
            if predicate(config):
                return False
        else:
            return True

    def matching_config_reasons(self, config):
        """List the string form of every predicate matching *config*."""
        return [
            predicate._as_string(config) for predicate
            in self.skips.union(self.fails)
            if predicate(config)
        ]

    def __call__(self, fn):
        """Decorate *fn* so the rules are applied each time it runs."""
        if hasattr(fn, '_sa_exclusion_extend'):
            # fn is already wrapped by another compound; merge our rules
            # into the existing wrapper instead of wrapping again.
            fn._sa_exclusion_extend(self)
            return fn

        def extend(other):
            # Hook stored on the wrapper so later compounds can merge in.
            self.skips.update(other.skips)
            self.fails.update(other.fails)

        @decorator
        def decorate(fn, *args, **kw):
            return self._do(config._current, fn, *args, **kw)
        decorated = decorate(fn)
        decorated._sa_exclusion_extend = extend
        return decorated

    @contextlib.contextmanager
    def fail_if(self):
        """Context-manager form: treat both skips and fails as expected
        failures for the enclosed block."""
        all_fails = compound()
        all_fails.fails.update(self.skips.union(self.fails))

        try:
            yield
        except Exception as ex:
            all_fails._expect_failure(config._current, ex)
        else:
            all_fails._expect_success(config._current)

    def _do(self, config, fn, *args, **kw):
        # Core dispatch: raise SkipTest on the first matching skip
        # predicate, otherwise run fn and reconcile its outcome with the
        # expected-failure rules.
        for skip in self.skips:
            if skip(config):
                msg = "'%s' : %s" % (
                    fn.__name__,
                    skip._as_string(config)
                )
                raise SkipTest(msg)

        try:
            return_value = fn(*args, **kw)
        except Exception as ex:
            self._expect_failure(config, ex, name=fn.__name__)
        else:
            self._expect_success(config, name=fn.__name__)
            return return_value

    def _expect_failure(self, config, ex, name='block'):
        # Swallow the exception only when some fail predicate matches;
        # otherwise re-raise preserving the original cause.
        for fail in self.fails:
            if fail(config):
                print(("%s failed as expected (%s): %s " % (
                    name, fail._as_string(config), str(ex))))
                break
        else:
            util.raise_from_cause(ex)

    def _expect_success(self, config, name='block'):
        # Success is an error when every fail predicate matched (the
        # for/else fires only if no non-matching predicate broke out).
        if not self.fails:
            return
        for fail in self.fails:
            if not fail(config):
                break
        else:
            raise AssertionError(
                "Unexpected success for '%s' (%s)" %
                (
                    name,
                    " and ".join(
                        fail._as_string(config)
                        for fail in self.fails
                    )
                )
            )
def only_if(predicate, reason=None):
    """Skip the test unless *predicate* holds, i.e. run only where true."""
    inverted = NotPredicate(_as_predicate(predicate))
    return skip_if(inverted, reason)
def succeeds_if(predicate, reason=None):
    """Mark the test as an expected failure unless *predicate* holds."""
    inverted = NotPredicate(_as_predicate(predicate))
    return fails_if(inverted, reason)
class Predicate(object):
    """Base class for test-exclusion predicates.

    Subclasses implement ``__call__(config) -> bool`` and
    ``_as_string(config, negate)`` for diagnostics.
    """

    @classmethod
    def as_predicate(cls, predicate, description=None):
        """Coerce *predicate* into a :class:`.Predicate` instance.

        Accepts an existing compound or Predicate, a list/set (OR'ed
        together), a ``(db, op, spec)`` tuple, a spec string such as
        ``"mysql >= 5.5"``, or an arbitrary callable.
        """
        if isinstance(predicate, compound):
            return cls.as_predicate(predicate.enabled_for_config, description)
        elif isinstance(predicate, Predicate):
            if description and predicate.description is None:
                predicate.description = description
            return predicate
        elif isinstance(predicate, (list, set)):
            return OrPredicate(
                [cls.as_predicate(pred) for pred in predicate],
                description)
        elif isinstance(predicate, tuple):
            return SpecPredicate(*predicate)
        elif isinstance(predicate, util.string_types):
            # Spec strings have up to three tokens: "db", "db op", or
            # "db op version" where version is dotted integers.
            tokens = predicate.split(" ", 2)
            op = spec = None
            db = tokens.pop(0)
            if tokens:
                op = tokens.pop(0)
            if tokens:
                spec = tuple(int(d) for d in tokens.pop(0).split("."))
            return SpecPredicate(db, op, spec, description=description)
        elif util.callable(predicate):
            return LambdaPredicate(predicate, description)
        else:
            assert False, "unknown predicate type: %s" % predicate

    def _format_description(self, config, negate=False):
        # Interpolate the description template with backend details.
        bool_ = self(config)
        if negate:
            # NOTE(review): ``not negate`` is always False in this branch;
            # possibly intended as ``not bool_`` — confirm against usage.
            bool_ = not negate
        return self.description % {
            "driver": config.db.url.get_driver_name(),
            "database": config.db.url.get_backend_name(),
            "doesnt_support": "doesn't support" if bool_ else "does support",
            "does_support": "does support" if bool_ else "doesn't support"
        }

    def _as_string(self, config=None, negate=False):
        raise NotImplementedError()
class BooleanPredicate(Predicate):
    """A predicate with a fixed truth value, independent of configuration."""

    def __init__(self, value, description=None):
        self.value = value
        if description:
            self.description = description
        else:
            self.description = "boolean %s" % value

    def __call__(self, config):
        return self.value

    def _as_string(self, config, negate=False):
        return self._format_description(config, negate=negate)
class SpecPredicate(Predicate):
    """Predicate matching a database name/driver, optionally with a
    server version comparison, e.g. ``SpecPredicate("mysql", ">=", (5, 5))``.
    """

    def __init__(self, db, op=None, spec=None, description=None):
        self.db = db            # "dialect" or "dialect+driver" string
        self.op = op            # operator key in _ops, a callable, or None
        self.spec = spec        # version tuple compared to server version
        self.description = description

    # Mapping of operator spellings used in spec strings to callables.
    _ops = {
        '<': operator.lt,
        '>': operator.gt,
        '==': operator.eq,
        '!=': operator.ne,
        '<=': operator.le,
        '>=': operator.ge,
        'in': operator.contains,
        'between': lambda val, pair: val >= pair[0] and val <= pair[1],
    }

    def __call__(self, config):
        engine = config.db
        if "+" in self.db:
            dialect, driver = self.db.split('+')
        else:
            dialect, driver = self.db, None
        if dialect and engine.name != dialect:
            return False
        if driver is not None and engine.driver != driver:
            return False
        if self.op is not None:
            assert driver is None, "DBAPI version specs not supported yet"
            version = _server_version(engine)
            # py2-style dispatch: use self.op directly if callable,
            # otherwise look up the operator symbol in _ops.
            oper = hasattr(self.op, '__call__') and self.op \
                or self._ops[self.op]
            return oper(version, self.spec)
        else:
            return True

    def _as_string(self, config, negate=False):
        if self.description is not None:
            return self._format_description(config)
        elif self.op is None:
            if negate:
                return "not %s" % self.db
            else:
                return "%s" % self.db
        else:
            if negate:
                return "not %s %s %s" % (
                    self.db,
                    self.op,
                    self.spec
                )
            else:
                return "%s %s %s" % (
                    self.db,
                    self.op,
                    self.spec
                )
class LambdaPredicate(Predicate):
    """Predicate wrapping an arbitrary callable.

    Zero-argument callables are adapted to accept (and ignore) the
    config argument.
    """

    def __init__(self, lambda_, description=None, args=None, kw=None):
        # getargspec retained for py2 compatibility (file also uses
        # util.string_types).
        spec = inspect.getargspec(lambda_)
        if not spec[0]:
            # No positional parameters: adapt to the one-arg protocol.
            self.lambda_ = lambda db: lambda_()
        else:
            self.lambda_ = lambda_
        # NOTE(review): args/kw are stored but never applied by
        # __call__ — confirm whether callers rely on them.
        self.args = args or ()
        self.kw = kw or {}
        if description:
            self.description = description
        elif lambda_.__doc__:
            self.description = lambda_.__doc__
        else:
            self.description = "custom function"

    def __call__(self, config):
        return self.lambda_(config)

    def _as_string(self, config, negate=False):
        return self._format_description(config)
class NotPredicate(Predicate):
    """Logical negation of a wrapped predicate."""

    def __init__(self, predicate, description=None):
        self.predicate = predicate
        self.description = description

    def __call__(self, config):
        return not self.predicate(config)

    def _as_string(self, config, negate=False):
        # With a description of our own, format it; otherwise delegate
        # to the wrapped predicate with the negation flag flipped.
        if not self.description:
            return self.predicate._as_string(config, not negate)
        return self._format_description(config, not negate)
class OrPredicate(Predicate):
    """True when any member predicate is true for the given config."""

    def __init__(self, predicates, description=None):
        self.predicates = predicates
        self.description = description

    def __call__(self, config):
        return any(pred(config) for pred in self.predicates)

    def _eval_str(self, config, negate=False):
        # De Morgan: a negated OR reads as an AND of negated members.
        glue = " and " if negate else " or "
        return glue.join(
            p._as_string(config, negate=negate)
            for p in self.predicates
        )

    def _negation_str(self, config):
        if self.description is None:
            return self._eval_str(config, negate=True)
        return "Not " + self._format_description(config)

    def _as_string(self, config, negate=False):
        if negate:
            return self._negation_str(config)
        if self.description is not None:
            return self._format_description(config)
        return self._eval_str(config)
# Module-level shorthand for Predicate.as_predicate, used by the rule
# constructors above.
_as_predicate = Predicate.as_predicate
def _is_excluded(db, op, spec):
    """Evaluate a database/version spec against the active configuration."""
    pred = SpecPredicate(db, op, spec)
    return pred(config._current)
def _server_version(engine):
"""Return a server_version_info tuple."""
# force metadata to be retrieved
conn = engine.connect()
version = getattr(engine.dialect, 'server_version_info', ())
conn.close()
return version
def db_spec(*dbs):
    """Build a predicate matching any of the given database specs."""
    members = [Predicate.as_predicate(db) for db in dbs]
    return OrPredicate(members)
def open():
    """Rule under which the test always runs."""
    never = BooleanPredicate(False, "mark as execute")
    return skip_if(never)
def closed():
    """Rule under which the test is always skipped."""
    always = BooleanPredicate(True, "marked as skip")
    return skip_if(always)
def fails():
    """Rule marking the test as an expected failure everywhere."""
    always = BooleanPredicate(True, "expected to fail")
    return fails_if(always)
@decorator
def future(fn, *arg):
    # Decorator form: wraps *fn* so it is treated as an expected failure,
    # described as a not-yet-implemented "future" feature.
    return fails_if(LambdaPredicate(fn), "Future feature")
def fails_on(db, reason=None):
    """Mark the test as an expected failure on the given database spec."""
    pred = SpecPredicate(db)
    return fails_if(pred, reason)
def fails_on_everything_except(*dbs):
    """Expect failure on every backend except those listed."""
    matching = [SpecPredicate(db) for db in dbs]
    return succeeds_if(OrPredicate(matching))
def skip(db, reason=None):
    """Skip the test on the given database spec."""
    pred = SpecPredicate(db)
    return skip_if(pred, reason)
def only_on(dbs, reason=None):
    """Run the test only on the listed database specs.

    ``reason`` is accepted for signature symmetry with the other rule
    constructors but is not attached to the generated rule.
    """
    specs = [SpecPredicate(db) for db in util.to_list(dbs)]
    return only_if(OrPredicate(specs))
def exclude(db, op, spec, reason=None):
    """Skip the test when the database/version spec matches."""
    pred = SpecPredicate(db, op, spec)
    return skip_if(pred, reason)
def against(config, *queries):
    """Immediately evaluate one or more predicate specs against *config*."""
    assert queries, "no queries sent!"
    combined = OrPredicate(
        [Predicate.as_predicate(query) for query in queries]
    )
    return combined(config)
|
harukaeru/Brython-Django | refs/heads/master | static/brython/www/tests/from_import_test/a.py | 31 | x = 1 |
winndows/cinder | refs/heads/master | cinder/volume/drivers/emc/emc_cli_fc.py | 5 | # Copyright (c) 2012 - 2015 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fibre Channel Driver for EMC VNX array based on CLI."""
from oslo_log import log as logging
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vnx_cli
from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
class EMCCLIFCDriver(driver.FibreChannelDriver):
"""EMC FC Driver for VNX using CLI.
Version history:
1.0.0 - Initial driver
2.0.0 - Thick/thin provisioning, robust enhancement
3.0.0 - Array-based Backend Support, FC Basic Support,
Target Port Selection for MPIO,
Initiator Auto Registration,
Storage Group Auto Deletion,
Multiple Authentication Type Support,
Storage-Assisted Volume Migration,
SP Toggle for HA
3.0.1 - Security File Support
4.0.0 - Advance LUN Features (Compression Support,
Deduplication Support, FAST VP Support,
FAST Cache Support), Storage-assisted Retype,
External Volume Management, Read-only Volume,
FC Auto Zoning
4.1.0 - Consistency group support
5.0.0 - Performance enhancement, LUN Number Threshold Support,
Initiator Auto Deregistration,
Force Deleting LUN in Storage Groups,
robust enhancement
5.1.0 - iSCSI multipath enhancement
5.2.0 - Pool-aware scheduler support
5.3.0 - Consistency group modification support
6.0.0 - Over subscription support
Create consistency group from cgsnapshot support
Multiple pools support enhancement
Manage/unmanage volume revise
White list target ports support
Snap copy support
Support efficient non-disruptive backup
"""
def __init__(self, *args, **kwargs):
super(EMCCLIFCDriver, self).__init__(*args, **kwargs)
self.cli = emc_vnx_cli.getEMCVnxCli(
'FC',
configuration=self.configuration)
self.VERSION = self.cli.VERSION
def check_for_setup_error(self):
pass
def create_volume(self, volume):
"""Creates a volume."""
return self.cli.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
return self.cli.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
return self.cli.create_cloned_volume(volume, src_vref)
def extend_volume(self, volume, new_size):
"""Extend a volume."""
self.cli.extend_volume(volume, new_size)
def delete_volume(self, volume):
"""Deletes a volume."""
self.cli.delete_volume(volume)
def migrate_volume(self, ctxt, volume, host):
"""Migrate volume via EMC migration functionality."""
return self.cli.migrate_volume(ctxt, volume, host)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
return self.cli.retype(ctxt, volume, new_type, diff, host)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.cli.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self.cli.delete_snapshot(snapshot)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
pass
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
pass
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
pass
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
@zm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
The initiator_target_map is a map that represents the remote wwn(s)
and a list of wwns which are visible to the remote wwn(s).
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
'access_mode': 'rw'
'initiator_target_map': {
'1122334455667788': ['1234567890123']
}
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
'access_mode': 'rw'
'initiator_target_map': {
'1122334455667788': ['1234567890123',
'0987654321321']
}
}
}
"""
conn_info = self.cli.initialize_connection(volume,
connector)
LOG.debug("Exit initialize_connection"
" - Returning FC connection info: %(conn_info)s.",
{'conn_info': conn_info})
return conn_info
@zm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
conn_info = self.cli.terminate_connection(volume, connector)
LOG.debug("Exit terminate_connection"
" - Returning FC connection info: %(conn_info)s.",
{'conn_info': conn_info})
return conn_info
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self.update_volume_stats()
return self._stats
def update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats.")
data = self.cli.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'EMCCLIFCDriver'
data['storage_protocol'] = 'FC'
self._stats = data
def manage_existing(self, volume, existing_ref):
"""Manage an existing lun in the array.
The lun should be in a manageable pool backend, otherwise
error would return.
Rename the backend storage object so that it matches the,
volume['name'] which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
manage_existing_ref:{
'source-id':<lun id in VNX>
}
or
manage_existing_ref:{
'source-name':<lun name in VNX>
}
"""
self.cli.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing."""
return self.cli.manage_existing_get_size(volume, existing_ref)
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
return self.cli.create_consistencygroup(context, group)
def delete_consistencygroup(self, context, group):
"""Deletes a consistency group."""
return self.cli.delete_consistencygroup(
self, context, group)
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates a cgsnapshot."""
return self.cli.create_cgsnapshot(
self, context, cgsnapshot)
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes a cgsnapshot."""
return self.cli.delete_cgsnapshot(self, context, cgsnapshot)
def get_pool(self, volume):
"""Returns the pool name of a volume."""
return self.cli.get_pool(volume)
def update_consistencygroup(self, context, group,
add_volumes,
remove_volumes):
"""Updates LUNs in consistency group."""
return self.cli.update_consistencygroup(context, group,
add_volumes,
remove_volumes)
    def unmanage(self, volume):
        """Unmanages a volume."""
        return self.cli.unmanage(volume)
    def create_consistencygroup_from_src(self, context, group, volumes,
                                         cgsnapshot=None, snapshots=None,
                                         source_cg=None, source_vols=None):
        """Creates a consistency group from source."""
        # NOTE(review): source_cg/source_vols are accepted but never forwarded
        # to the CLI layer, so cloning from an existing CG appears unsupported
        # here -- confirm whether the cli method accepts those arguments.
        return self.cli.create_consistencygroup_from_src(context,
                                                         group,
                                                         volumes,
                                                         cgsnapshot,
                                                         snapshots)
    def update_migrated_volume(self, context, volume, new_volume,
                               original_volume_status=None):
        """Returns model update for migrated volume."""
        return self.cli.update_migrated_volume(context, volume, new_volume,
                                               original_volume_status)
    def create_export_snapshot(self, context, snapshot, connector):
        """Creates a snapshot mount point for snapshot."""
        return self.cli.create_export_snapshot(context, snapshot, connector)
    def remove_export_snapshot(self, context, snapshot):
        """Removes snapshot mount point for snapshot."""
        return self.cli.remove_export_snapshot(context, snapshot)
def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
"""Allows connection to snapshot."""
return self.cli.initialize_connection_snapshot(snapshot,
connector,
**kwargs)
def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
"""Disallows connection to snapshot."""
return self.cli.terminate_connection_snapshot(snapshot,
connector,
**kwargs)
    def backup_use_temp_snapshot(self):
        """Signal that backups should be taken from a temporary snapshot."""
        return True
|
vileopratama/vitech | refs/heads/master | src/addons/website_crm/__openerp__.py | 35 | {
'name': 'Contact Form',
'category': 'Website',
'website': 'https://www.odoo.com/page/website-builder',
'summary': 'Create Leads From Contact Form',
'version': '2.0',
'description': """
OpenERP Contact Form
====================
""",
# Needs the website form builder, website/partner glue and CRM so that
# submitted forms can be turned into leads.
'depends': ['website_form','website_partner', 'crm'],
'data': [
'data/website_crm_data.xml',
'views/website_crm.xml',
],
'installable': True,
# Installed automatically as soon as all dependencies are present.
'auto_install': True,
}
|
ivbeg/docx2csv | refs/heads/master | docs/conf.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# docx2csv documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Get the project root dir, which is the parent dir of the docs directory.
# NOTE(review): uses os.getcwd() rather than __file__, so this assumes the
# build is invoked with the docs directory as the working directory.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

# Imported for its __version__ (used below); requires the sys.path insert.
import docx2csv
# -- General configuration ------------------------------------------------

# Sphinx extension modules: API docs from docstrings, highlighted source
# links, and cross-references to external projects.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinx.ext.intersphinx',
]

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# Source file suffix and the master toctree document.
source_suffix = '.rst'
master_doc = 'index'

# General information about the project.
project = u'docx2csv'
copyright = u'2018, Ivan Begtin'

# Short X.Y version and full release string, both read from the package so
# the docs always match the installed code.
version = docx2csv.__version__
release = docx2csv.__version__

# Patterns (relative to the source dir) ignored when looking for sources.
exclude_patterns = ['_build']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------

html_theme = 'default'
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'docx2csvdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'docx2csv.tex',
     u'docx2csv documentation',
     u'Ivan Begtin', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'docx2csv',
     u'docx2csv documentation',
     [u'Ivan Begtin'], 1)
]

# -- Options for Texinfo output -------------------------------------------

# (source start file, target name, title, author, dir menu entry,
# description, category).
texinfo_documents = [
    ('index', 'docx2csv',
     u'docx2csv documentation',
     u'Ivan Begtin',
     'docx2csv',
     'One line description of project.',
     'Miscellaneous'),
]

# sphinx.ext.intersphinx: allow :py: cross-references into the Python docs.
intersphinx_mapping = {'python': ('https://docs.python.org/2', None)}
|
linjoahow/2015cd_midterm | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/unittest/suite.py | 748 | """TestSuite"""
import sys
from . import case
from . import util
__unittest = True
def _call_if_exists(parent, attr):
func = getattr(parent, attr, lambda: None)
func()
class BaseTestSuite(object):
    """A simple test suite that doesn't provide class or module shared fixtures.
    """

    def __init__(self, tests=()):
        self._tests = []
        self.addTests(tests)

    def __repr__(self):
        return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))

    def __eq__(self, other):
        # Suites are equal when they hold equal tests in the same order.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return list(self) == list(other)

    def __ne__(self, other):
        return not self == other

    def __iter__(self):
        return iter(self._tests)

    def countTestCases(self):
        # Total over contained tests; each entry may itself be a suite.
        return sum(test.countTestCases() for test in self)

    def addTest(self, test):
        # sanity checks: reject non-callables and un-instantiated classes
        if not callable(test):
            raise TypeError("{} is not callable".format(repr(test)))
        if isinstance(test, type) and issubclass(test,
                                                 (case.TestCase, TestSuite)):
            raise TypeError("TestCases and TestSuites must be instantiated "
                            "before passing them to addTest()")
        self._tests.append(test)

    def addTests(self, tests):
        if isinstance(tests, str):
            raise TypeError("tests must be an iterable of tests, not a string")
        for test in tests:
            self.addTest(test)

    def run(self, result):
        for test in self:
            if result.shouldStop:
                break
            test(result)
        return result

    def __call__(self, *args, **kwds):
        return self.run(*args, **kwds)

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        for test in self:
            test.debug()
class TestSuite(BaseTestSuite):
    """A test suite is a composite test consisting of a number of TestCases.
    For use, create an instance of TestSuite, then add test case instances.
    When all tests have been added, the suite can be passed to a test
    runner, such as TextTestRunner. It will run the individual test cases
    in the order in which they were added, aggregating the results. When
    subclassing, do not forget to call the base class constructor.
    """

    def run(self, result, debug=False):
        # Track whether this is the outermost suite so that the final
        # class/module teardown runs exactly once, after the whole run.
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            result._testRunEntered = topLevel = True
        for test in self:
            if result.shouldStop:
                break
            if _isnotsuite(test):
                # A real test case: switch class/module fixtures as needed
                # before running it.
                self._tearDownPreviousClass(test, result)
                self._handleModuleFixture(test, result)
                self._handleClassSetUp(test, result)
                result._previousTestClass = test.__class__
                # Skip tests whose class- or module-level setup failed.
                if (getattr(test.__class__, '_classSetupFailed', False) or
                    getattr(result, '_moduleSetUpFailed', False)):
                    continue
            if not debug:
                test(result)
            else:
                test.debug()
        if topLevel:
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        return result

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        debug = _DebugResult()
        self.run(debug, True)

    ################################

    def _handleClassSetUp(self, test, result):
        # Run setUpClass for the test's class when entering a new class.
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if result._moduleSetUpFailed:
            return
        if getattr(currentClass, "__unittest_skip__", False):
            return
        try:
            currentClass._classSetupFailed = False
        except TypeError:
            # test may actually be a function
            # so its class will be a builtin-type
            pass
        setUpClass = getattr(currentClass, 'setUpClass', None)
        if setUpClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                setUpClass()
            except Exception as e:
                # In debug mode the failure propagates; otherwise it is
                # recorded against a synthetic error holder.
                if isinstance(result, _DebugResult):
                    raise
                currentClass._classSetupFailed = True
                className = util.strclass(currentClass)
                errorName = 'setUpClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _get_previous_module(self, result):
        # Name of the module the previous test class belonged to, if any.
        previousModule = None
        previousClass = getattr(result, '_previousTestClass', None)
        if previousClass is not None:
            previousModule = previousClass.__module__
        return previousModule

    def _handleModuleFixture(self, test, result):
        # Tear down the previous module and run setUpModule when the test
        # belongs to a different module than the previous one.
        previousModule = self._get_previous_module(result)
        currentModule = test.__class__.__module__
        if currentModule == previousModule:
            return
        self._handleModuleTearDown(result)
        result._moduleSetUpFailed = False
        try:
            module = sys.modules[currentModule]
        except KeyError:
            return
        setUpModule = getattr(module, 'setUpModule', None)
        if setUpModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                setUpModule()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                result._moduleSetUpFailed = True
                errorName = 'setUpModule (%s)' % currentModule
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _addClassOrModuleLevelException(self, result, exception, errorName):
        # Record a fixture failure against a synthetic "test" so it shows
        # up in the result; skips raised from fixtures are honoured.
        error = _ErrorHolder(errorName)
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None and isinstance(exception, case.SkipTest):
            addSkip(error, str(exception))
        else:
            result.addError(error, sys.exc_info())

    def _handleModuleTearDown(self, result):
        # Run tearDownModule for the previous module, unless its setup
        # already failed or the module is gone from sys.modules.
        previousModule = self._get_previous_module(result)
        if previousModule is None:
            return
        if result._moduleSetUpFailed:
            return
        try:
            module = sys.modules[previousModule]
        except KeyError:
            return
        tearDownModule = getattr(module, 'tearDownModule', None)
        if tearDownModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                tearDownModule()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                errorName = 'tearDownModule (%s)' % previousModule
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _tearDownPreviousClass(self, test, result):
        # Run tearDownClass for the previous class when moving to a test of
        # a different class (test is None at the very end of the run).
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if getattr(previousClass, '_classSetupFailed', False):
            return
        if getattr(result, '_moduleSetUpFailed', False):
            return
        if getattr(previousClass, "__unittest_skip__", False):
            return
        tearDownClass = getattr(previousClass, 'tearDownClass', None)
        if tearDownClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                tearDownClass()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                className = util.strclass(previousClass)
                errorName = 'tearDownClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
# Inspired by the ErrorHolder from Twisted:
# http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
# attribute used by TestResult._exc_info_to_string
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return "<ErrorHolder description=%r>" % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
# could call result.addError(...) - but this test-like object
# shouldn't be run anyway
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"A crude way to tell apart testcases and suites with duck-typing"
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
"Used by the TestSuite to hold previous class when running in debug."
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
|
LWJensen/coala-bears | refs/heads/master | tests/python/PyCommentedCodeBearTest.py | 2 | from queue import Queue
from bears.python.PyCommentedCodeBear import PyCommentedCodeBear
from tests.LocalBearTestHelper import LocalBearTestHelper
from coalib.settings.Section import Section
class PyCommentedCodeBearTest(LocalBearTestHelper):
    """Checks that PyCommentedCodeBear flags commented-out code only."""

    def setUp(self):
        self.uut = PyCommentedCodeBear(Section('name'), Queue())

    def test_valid(self):
        # Plain code, arithmetic and ordinary comments must all pass.
        for sample in (['import sys'],
                       ['a = 1 + 1'],
                       ['# hey man!']):
            self.check_validity(self.uut, sample)
        # Doctest-style code inside a docstring is not commented-out code.
        self.check_validity(self.uut, ['"""',
                                       'Hey, this is a code sample:',
                                       '>>> import os',
                                       '',
                                       'And when you use it you can simply '
                                       'do: `import os`.',
                                       '"""'])

    def test_invalid(self):
        # Statements hidden behind comment markers must be flagged.
        for sample in (['# import os'], ["# print('comment')"]):
            self.check_validity(self.uut, sample, valid=False)
|
crmccreary/openerp_server | refs/heads/master | openerp/addons/l10n_tr/__openerp__.py | 2 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{ 'name': 'Turkey - Accounting',
'version': '1.beta',
'category': 'Localization/Account Charts',
'description': """
Türkiye için Tek düzen hesap planı şablonu OpenERP Modülü.
==============================================================================
Bu modül kurulduktan sonra, Muhasebe yapılandırma sihirbazı çalışır
* Sihirbaz sizden hesap planı şablonu, planın kurulacağı şirket,banka hesap bilgileriniz,ilgili para birimi gibi bilgiler isteyecek.
""",
'author': 'Ahmet Altınışık',
'maintainer':'https://launchpad.net/~openerp-turkey',
'website':'https://launchpad.net/openerp-turkey',
'depends': [
'account',
'base_vat',
'account_chart',
],
'init_xml': [],
'update_xml': [
'account_code_template.xml',
'account_tdhp_turkey.xml',
'account_tax_code_template.xml',
'account_chart_template.xml',
'account_tax_template.xml',
'l10n_tr_wizard.xml',
],
'demo_xml': [],
'installable': True,
'images': ['images/chart_l10n_tr_1.jpg','images/chart_l10n_tr_2.jpg','images/chart_l10n_tr_3.jpg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sdcooke/django | refs/heads/master | tests/sites_framework/models.py | 281 | from django.contrib.sites.managers import CurrentSiteManager
from django.contrib.sites.models import Site
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class AbstractArticle(models.Model):
    """Abstract base article exposing a default and a site-filtered manager."""
    title = models.CharField(max_length=50)

    # Default manager (all rows) plus a manager restricted to the current
    # site via the sites framework.
    objects = models.Manager()
    on_site = CurrentSiteManager()

    class Meta:
        abstract = True

    def __str__(self):
        return self.title
class SyndicatedArticle(AbstractArticle):
    # An article can appear on many sites (many-to-many relation).
    sites = models.ManyToManyField(Site)
class ExclusiveArticle(AbstractArticle):
    # Tied to exactly one site via the conventional ``site`` FK name, which
    # CurrentSiteManager picks up by default.
    site = models.ForeignKey(Site, models.CASCADE)
class CustomArticle(AbstractArticle):
    # Site FK under a non-default name: CurrentSiteManager must be told
    # explicitly which field to filter on.
    places_this_article_should_appear = models.ForeignKey(Site, models.CASCADE)
    objects = models.Manager()
    on_site = CurrentSiteManager("places_this_article_should_appear")
|
praveen049/pandas | refs/heads/master | example1/fpanda.py | 1 | #!/usr/bin/python
import pandas as pd

# A labelled Series: heterogeneous values are stored with object dtype.
s = pd.Series([7, 'Heisenberg', 3.14, -1789710578, 'Happy Eating!'],
              index=['A', 'C', 'F', 'G', 'Z'])
print(s)

# City populations; Boston has no data, so it becomes NaN and the Series
# is upcast to float.
d = {'Chicago': 1000, 'New York': 1300, 'Portland': 900,
     'San Francisco': 1100, 'Austin': 450, 'Boston': None}
cities = pd.Series(d)

# Use .iloc for positional access: an integer key on a string-indexed
# Series (cities[2]) is ambiguous and deprecated in modern pandas.
print(cities.iloc[2])
print(cities.index.values)
zjuwangg/scrapy | refs/heads/master | tests/test_utils_serialize.py | 135 | import json
import unittest
import datetime
from decimal import Decimal
from twisted.internet import defer
from scrapy.utils.serialize import ScrapyJSONEncoder
from scrapy.http import Request, Response
class JsonEncoderTestCase(unittest.TestCase):
    """Tests for ScrapyJSONEncoder's handling of non-JSON-native types."""

    def setUp(self):
        self.encoder = ScrapyJSONEncoder()

    def test_encode_decode(self):
        # Each (value, expected) pair must serialize exactly like the plain
        # JSON encoding of ``expected``.  Loop variables renamed from
        # ``input``/``output`` so they no longer shadow the built-in input().
        dt = datetime.datetime(2010, 1, 2, 10, 11, 12)
        dts = "2010-01-02 10:11:12"
        d = datetime.date(2010, 1, 2)
        ds = "2010-01-02"
        t = datetime.time(10, 11, 12)
        ts = "10:11:12"
        dec = Decimal("1000.12")
        decs = "1000.12"
        for value, expected in [('foo', 'foo'), (d, ds), (t, ts), (dt, dts),
                                (dec, decs), (['foo', d], ['foo', ds])]:
            self.assertEqual(self.encoder.encode(value), json.dumps(expected))

    def test_encode_deferred(self):
        # Deferreds are not serializable; encoder falls back to repr-ish text.
        self.assertIn('Deferred', self.encoder.encode(defer.Deferred()))

    def test_encode_request(self):
        r = Request("http://www.example.com/lala")
        rs = self.encoder.encode(r)
        self.assertIn(r.method, rs)
        self.assertIn(r.url, rs)

    def test_encode_response(self):
        r = Response("http://www.example.com/lala")
        rs = self.encoder.encode(r)
        self.assertIn(r.url, rs)
        self.assertIn(str(r.status), rs)
|
dmnfarrell/smallrnaseq | refs/heads/master | smallrnaseq/mirdeep2.py | 2 | #!/usr/bin/env python
"""
Module for miRDeep2 wrappers and utilities
Created July 2014
Copyright (C) Damien Farrell
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os, string, types, re
import shutil, glob, collections
import itertools
import subprocess
#import pylab as plt
import numpy as np
import pandas as pd
#from . import base, utils
# Default option set for a mirdeep2 run; 'base' maps to (key, default) pairs
# consumed by the run wrappers below.
mirdeep2options = {'base': [('input',''),('adapter','TGGAATTCTCGGGTGCCAAGG'),('filetype','fastq'),
                    ('bowtieindex',''),('refgenome',''),('species','hsa'),
                    ('mature',''), ('hairpin',''), ('other',''),('mirbase',os.getcwd()),
                    ('randfold',1), ('overwrite',1)]}
# Column subset used when exporting mirdeep result tables.
mirdeepcols = ['#miRNA','read_count','mean_norm','miRDeep2 score','chr','seed','precursor',
                'freq','precursor coordinate','mirbase seed match','star read count','rfam alert',
                'consensus mature sequence','consensus star sequence',
                'consensus precursor sequence']
def create_mirbase_files(species,path):
    """Generate species specific mature/hairpin files for input to mirdeep"""
    # NOTE(review): ``utils`` is referenced below but its import is commented
    # out at the top of this module ("#from . import base, utils"), so
    # calling this raises NameError as-is -- confirm the intended import.
    mature = os.path.join(path, 'mature.fa')
    hairpin = os.path.join(path, 'hairpin.fa')
    names=[]
    for f in [mature,hairpin]:
        # e.g. mature.fa -> mature_hsa.fa, restricted to the species' records
        fname = os.path.splitext(f)[0]+'_'+species+'.fa'
        utils.get_subset_fasta(f, labels=[species], outfile=fname)
        names.append(fname)
    print ('wrote mirbase files for species %s' %species)
    return names
def create_sample_map(path, ext='fastq'):
    """Create filename mapping to run all samples at once.
    This is required.  Writes 'combined.txt' into *path* and returns its
    name.  Note: changes the process working directory to *path*."""
    os.chdir(path)
    files = sorted(glob.glob('*.' + ext))
    print(files)
    fname = 'combined.txt'
    # Assign sequential two-digit sample ids (s01, s02, ...) to each file.
    rows = [(os.path.basename(f), 's%02d' % (num + 1))
            for num, f in enumerate(files)]
    pd.DataFrame(rows).to_csv(fname, index=False, sep=' ', header=False)
    return fname
def combine_labels(labels, filename):
    """Combine sample labels with mirdeep combined.txt file so we can match sample ids"""
    mapping = pd.read_csv(filename, sep=' ', names=['filename', 'id'])
    # Derive the sample name from the filename stem for the merge key.
    mapping['name'] = mapping.filename.apply(lambda fn: fn.split('.')[0])
    merged = labels.merge(mapping, on='name')
    # Tag ids with the '(norm)' suffix used by the expression tables.
    merged['id'] = merged.id + '(norm)'
    return merged
def run_multiple(**kwargs):
    """Prepare and run mirdeep2.

    Expects the keys listed in ``mirdeep2options['base']`` (input, species,
    mirbase, other, filetype, ...) to be present in kwargs.
    """
    # Map the 'filetype' option to the on-disk extension used for globbing.
    if kwargs['filetype'] == 'fasta':
        ext='fa'
    else:
        ext='fastq'
    path = kwargs['input']
    #create filename/id mapping
    samplemap = create_sample_map(path, ext)
    #get mirbase subset for species if provided
    if kwargs['species'] != '':
        mature, hairpin = create_mirbase_files(kwargs['species'], kwargs['mirbase'])
        kwargs['mature'] = mature
        kwargs['hairpin'] = hairpin
    if kwargs['other'] != '':
        # Only the mature file of the 'other' species is used; hairpin ignored.
        kwargs['other'], h = create_mirbase_files(kwargs['other'], kwargs['mirbase'])
    run(samplemap, **kwargs)
    return
def run(infile, refgenome, bowtieindex, mature='', hairpin='', other='',
        randfold=False, overwrite=True, filetype='fastq', adapter=None,
        clean=True, outpath=None, **kwargs):
    """Run all mirdeep2 steps including adapter trimming.
    Uses a config file even if we only have one sample.

    Shells out to mapper.pl and miRDeep2.pl; outputs are written into the
    current working directory.
    """
    label = os.path.splitext(os.path.basename(infile))[0]
    print ('running %s' %label)
    # mapper.pl/bowtie locate the index through this environment variable.
    os.environ["BOWTIE_INDEXES"] = os.path.dirname(bowtieindex)
    collapsed = 'collapsedreads.fa'
    if filetype=='fasta': mapparams='-c'
    else: mapparams='-e -h'
    if adapter =='': adapter = 'none'
    # '-c' disables the (slow) randfold secondary-structure check.
    if randfold == False: params='-c'
    else: params = ''
    if other=='': other = 'none'
    #if mapping has been done already we can skip it
    if not os.path.exists('mapped.arf') or overwrite == True:
        # Best-effort cleanup of stale outputs; missing files are fine.
        # NOTE(review): bare except also hides unexpected errors (e.g.
        # permission problems) -- consider narrowing to OSError.
        try:
            os.remove('mapped.arf')
            os.remove(collapsed)
        except:
            pass
        # NOTE(review): commands are built by string interpolation and run
        # with shell=True; paths containing spaces/metacharacters will break
        # or be interpreted by the shell.
        cmd1 = ('mapper.pl %s -d %s -j -l 18 -m -k %s -s %s'
                ' -p %s -t mapped.arf -v' %(infile,mapparams,adapter,collapsed,bowtieindex))
        print (cmd1)
        result = subprocess.check_output(cmd1, shell=True, executable='/bin/bash')
    else:
        print ('arf file found, skipping mapper step')
    #mirdeep core
    cmd2 = ('miRDeep2.pl %s %s mapped.arf'
            ' %s %s %s -z _%s %s'
            ' -d > report.log' %(collapsed,refgenome,mature,other,hairpin,label,params))
    print (cmd2)
    result = subprocess.check_output(cmd2, shell=True, executable='/bin/bash')
    #remove junk
    if clean == True:
        tempfiles = glob.glob('dir_*')
        for f in tempfiles:
            shutil.rmtree(f)
    #move results to dest folder
    if outpath != None:
        pass #move files..
    return
def quantifier(path, mature, precursor, star=None, collapsed='collapsedreads.fa', time='novel'):
    """Run quantifier module using custom known mature/precursors.

    Runs quantifier.pl inside *path* and restores the working directory
    afterwards.
    """
    # NOTE(review): the ``time`` parameter shadows the ``time`` module
    # imported at the top of this file (within this function's scope), and
    # ``star`` is accepted but never used.
    current = os.getcwd()
    os.chdir(path)
    cmd = 'quantifier.pl -p %s -m %s -r %s -y %s -k -d -g 1 -U' %(precursor,mature,collapsed,time)
    print (cmd)
    result = subprocess.check_output(cmd, shell=True, executable='/bin/bash')
    os.chdir(current)
    return
def get_pdf_path(path):
    """Return the first 'pdf*' entry inside *path* (mirdeep's pdf folder)."""
    matches = glob.glob(os.path.join(path, 'pdf*'))
    return matches[0]
def get_chromosome(x):
    """Return the chromosome token (text before the first '_'),
    zero-padded to two digits when it is numeric.

    Non-numeric names (e.g. 'X', 'MT') are returned unchanged.
    """
    val = x.split('_')[0]
    try:
        return '%02d' % int(val)
    except ValueError:
        # Narrowed from a bare except: int() on a non-numeric string is the
        # only expected failure here; anything else should propagate.
        return val
def get_coords(x):
    """Get start/end from precursor coords string"""
    # Split on runs of ':' and '.' (e.g. 'chr1:100..200:+').
    parts = re.split(r"[\:\..]+", x)
    return pd.Series(parts[:4], index=['chr', 'start', 'end', 'strand'])
def get_score_stats(path):
    """Get mirdeep results from the summary file.

    Reads the first 'result*.csv' in *path*; returns None when that file
    does not have a .csv extension.
    """
    resfile = glob.glob(os.path.join(path,'result*.csv'))[0]
    if os.path.splitext(resfile)[1] != '.csv':
        return
    # The score-statistics header block occupies the first ~22 rows.
    df = pd.read_csv(resfile, sep='\t',header=0,nrows=22)
    df = df.dropna()
    # Columns hold strings like "123 (...)"; keep the leading integer only.
    df['known'] = df['known miRNAs detected by miRDeep2'].apply(lambda r: int(r.split()[0]))
    df['FDR'] = df['novel miRNAs, estimated false positives'].apply(lambda r: int(r.split()[0]))
    return df
def plot_score_stats(df):
    """Plot known/novel counts, signal-to-noise and FDR versus mirdeep score
    and save the figure as mirdeep_score_stats.png."""
    # NOTE(review): ``plt`` (pylab) is referenced but its import is commented
    # out at the top of this module -- calling this raises NameError as-is.
    f,axs=plt.subplots(2,2,figsize=(10,6))
    grid=axs.flat
    df.plot('miRDeep2 score','known',marker='o',ax=grid[0],legend=False)
    grid[0].set_title('known miRNAs')
    df.plot('miRDeep2 score','novel miRNAs reported by miRDeep2',marker='o',ax=grid[1],legend=False)
    grid[1].set_title('novel miRNAs')
    df.plot('miRDeep2 score','estimated signal-to-noise',marker='o',ax=grid[2],legend=False)
    grid[2].set_title('signal-to-noise')
    df.plot('miRDeep2 score','FDR',marker='o',ax=grid[3],legend=False)
    grid[3].set_title('FDR')
    plt.tight_layout()
    f.savefig('mirdeep_score_stats.png')
    return
def read_results_file(infile):
    """Get mirdeep results from the summary file.

    Parses the tab-separated prediction table (data starts at row 23) and
    adds derived columns: 'novel', 'seed', and split precursor coordinates.
    Returns None when *infile* is not a .csv file.
    """
    if os.path.splitext(infile)[1] != '.csv':
        return
    # NOTE(review): ``scol`` is assigned but never used below.
    scol = 'miRDeep2 score'
    df = pd.read_csv(infile, sep='\t',header=23)
    df = df.dropna()
    #remove junk: the repeated column-header row inside the data
    idx = df[df['provisional id']=='tag id'].index[0]
    df = df.drop(idx)
    # Entries without a miRBase match are novel predictions.
    df['novel'] = np.where(df['miRBase miRNA']=='-',True,False)
    colstorename = {'example miRBase miRNA with the same seed':'mirbase seed match',
                    'significant randfold p-value': 'randfold'}
    df = df.rename(columns=colstorename)
    #df = df.convert_objects(convert_numeric=True)
    df = df.infer_objects()
    #df['chr'] = df['provisional id'].apply(get_chromosome)
    # Seed region: nucleotides 2-8 of the mature sequence.
    df['seed'] = df['consensus mature sequence'].apply(lambda x: x[1:8])
    coords = df['precursor coordinate'].apply(get_coords)
    df = df.join(coords) #pd.concat([df,coords])
    return df
def get_results(path):
    """Process known and novel results from mirdeep run.
    Combines with expression data and removes redundant entries.

    Runs the quantifier module for novel precursors on first use (the
    'expression_novel.html' file acts as the done-marker).
    """
    # NOTE(review): ``utils`` is used below but its import is commented out
    # at the top of this module -- the novel-quantification branch raises
    # NameError as-is.
    resfile = glob.glob(os.path.join(path,'result*.csv'))[0]
    df = read_results_file(resfile)
    #use quantifier module to get novel expression results from predicted precursors
    #if not done already
    novelmature = os.path.join(path,'novel_mature.fa')
    novelprecursor = os.path.join(path,'novel_precursor.fa')
    res = os.path.join(path, 'expression_novel.html')
    # NOTE(review): ``reads`` is assigned but never used.
    reads = 'collapsedreads.fa'
    if not os.path.exists(res):
        novel = df[df.novel==True]
        mkey = 'consensus mature sequence'
        pkey = 'consensus precursor sequence'
        utils.dataframe_to_fasta(novel, mkey, 'provisional id', outfile=novelmature)
        utils.dataframe_to_fasta(novel, pkey, 'provisional id', outfile=novelprecursor)
        quantifier(path, os.path.abspath(novelmature), os.path.abspath(novelprecursor))
    #get expression results and merge with prediction results to get other info
    files = glob.glob(os.path.join(path,'miRNAs_expressed_all_samples*.csv'))
    res=[]
    for f in files:
        # Novel tables key on the provisional id, known ones on miRBase ids.
        if 'novel' in f:
            key='provisional id'
        else:
            key='miRBase miRNA'
        q = pd.read_csv(f,sep='\t')
        samples = float(len(q.filter(regex="norm").columns))
        #print 'samples: %s' %samples
        # freq = fraction of samples with a nonzero normalised count.
        q['freq'] = q.filter(regex="norm").apply(lambda r: len(r.nonzero()[0])/samples,1)
        #apply 5p id so we can merge with results file and keep star seqs
        q['id'] = q['#miRNA'].apply(lambda x: x[:-2]+'5p' if str(x).endswith('3p') else x)
        #loses information on multiple precursors for a mature seq
        q = q.merge(df,left_on=['id'],right_on=key).drop_duplicates('#miRNA')
        res.append(q)
    res = pd.concat(res)
    res = res.apply( lambda x: pd.to_numeric(x, errors='ignore' ))
    #get mean normalised count (mean over nonzero samples only)
    res['mean_norm'] = res.filter(regex="norm").apply(lambda r: r[r.nonzero()[0]].mean(),1)
    res = res.sort_values(by=['read_count'], ascending=False)
    res = res.drop_duplicates('#miRNA')
    res = res.reset_index(drop=True)
    #res['std'] = res.filter(regex="norm").std(1)
    #res['cv'] = res['std']/res['mean_norm']
    return res
def get_column_names(df):
    """Extract column names for multiple samples.

    Returns a pair (sample columns, matching '(norm)' columns); sample
    columns are the short names starting with 's' (at most 3 chars).
    """
    sample_cols = []
    for name in df.columns:
        if name.startswith('s') and len(name) <= 3:
            sample_cols.append(name)
    norm_cols = ['%s(norm)' % name for name in sample_cols]
    return sample_cols, norm_cols
def filter_expr_results(df, cols=None, score=0, freq=0.5, mean_norm=0, total_reads=0):
    """Additional filters for abundances/no. samples.

    Keeps rows passing ALL thresholds:
      score       minimum 'miRDeep2 score'
      freq        minimum fraction of samples with a non-zero count
      total_reads minimum 'read_count'
      mean_norm   minimum mean normalised count
    and requires 'randfold' to be significant ('yes') or absent ('-').

    `cols` is unused (the original computed sample columns here but
    never used them); it is kept for backward compatibility.
    """
    df = df[df['miRDeep2 score'] >= score]
    df = df[df.freq >= freq]
    df = df[df['read_count'] >= total_reads]
    df = df[df['mean_norm'] >= mean_norm]
    df = df[df['randfold'].isin(['yes', '-'])]
    #n = n[n['rfam alert']=='-']
    #df = df.reset_index(drop=True)
    return df
def analyse_results(path, outpath=None, **kwargs):
    """General analysis of mirdeep results.

    Loads the combined prediction/expression table, filters known and
    novel hits, writes summary csv/html/fasta files plus several plots
    into `outpath` (created if needed), and prints a short summary.

    Returns:
        (df, k, n): full results plus filtered known/novel DataFrames.
    """
    if outpath is not None:
        if not os.path.exists(outpath):
            os.mkdir(outpath)
        os.chdir(outpath)
    df = get_results(path)
    idcols, normcols = get_column_names(df)
    known = df[df.novel == False]
    novel = df[df.novel == True]
    # NOTE(review): idmap is not used below -- kept because get_file_ids
    # also validates that combined.txt exists; confirm before removing
    idmap = get_file_ids(path)
    # cutoffs and freqs need to be configurable..
    k = filter_expr_results(known, score=0, freq=.5, total_reads=50)
    n = filter_expr_results(novel, score=4, freq=.8, total_reads=50)
    cols = mirdeepcols
    core = pd.concat([k, n])
    utils.dataframe_to_fasta(core, 'consensus mature sequence', '#miRNA', 'mirdeep_core.fa')
    k[cols].to_csv('known_mirdeep.csv')
    n[cols].to_csv('novel_mirdeep.csv')
    utils.create_html(n[cols], 'novel_mirdeep')
    k['perc'] = k['read_count']/k['read_count'].sum()
    print(k[cols[:8]])
    # bare `print` was a Python 2 leftover (a no-op expression in py3);
    # the intent was a blank separator line
    print()
    print(n[cols[:9]])
    print('mirdeep summary')
    print('-------------------------------')
    print('%s (%s novel) identified' % (len(df), len(novel)))
    print('quantifier results after filtering:')
    print('%s/%s known' % (len(k), len(known)))
    print('%s/%s novel' % (len(n), len(novel)))
    print('top 10 known account for %2.2f' % k['perc'][:10].sum())
    print('%s are 3p strand' % len(k[k['#miRNA'].str.contains('3p')]))
    #print df[(df.mean_norm>300) & (df.freq<0.5)][cols[:7]]
    print()
    k = k.set_index('#miRNA')
    n = n.set_index('#miRNA')
    fig, ax = plt.subplots(figsize=(8, 6))
    k['read_count'][:10].plot(kind='barh', colormap='Spectral', ax=ax, log=True)
    plt.title('miRDeep2 top 10')
    plt.tight_layout()
    fig.savefig('mirdeep_top_known.png', dpi=150)
    fig, ax = plt.subplots(figsize=(8, 8))
    df.plot('freq', 'mean_norm', kind='scatter', ax=ax, logy=True, alpha=0.8)
    fig.savefig('mirdeep_freqsvcounts.png')
    fig = plt.figure()
    fig = plot_read_count_dists(n, h=5)
    fig.savefig('mirdeep_novel_counts.png')
    fig = plot_read_count_dists(k)
    fig.savefig('mirdeep_known_counts.png')
    #perSampleDists(k)
    fig, ax = plt.subplots(figsize=(10, 6))
    core[idcols].sum().plot(kind='bar', ax=ax)
    plt.title('total miRNA counts per sample (unnormalised)')
    plt.tight_layout()
    fig.savefig('mirdeep_total_persample.png')
    # found per chromosome; DataFrame.sort() was removed from pandas --
    # use sort_values() as this module already does elsewhere
    fig = plt.figure(figsize=(8, 6))
    x = core.sort_values('chr').groupby('chr').size()
    x.plot(kind='bar')
    plt.title('miRNA per chromosome')
    fig.savefig('mirdeep_chromosome_dist.png')
    ss = get_score_stats(path)
    plot_score_stats(ss)
    #plt.show()
    #plt.close()
    return df, k, n
def plot_read_count_dists(df, h=8):
    """Boxplots of read count distributions per miRNA, using seaborn
    when it is installed and plain pandas/matplotlib otherwise.

    Args:
        df: results frame with per-sample '(norm)' columns.
        h: figure height; width scales with the number of rows.
    Returns:
        the matplotlib Figure.
    """
    w = int(h*(len(df)/60.0))+4
    fig, ax = plt.subplots(figsize=(w, h))
    cols, normcols = get_column_names(df)
    df = df[normcols]
    t = df.T
    t.index = cols
    try:
        # was `import sns`, a module that never resolves -- the package
        # is seaborn, conventionally imported as sns, so the seaborn
        # branch below was silently dead code
        import seaborn as sns
        sns.boxplot(t, linewidth=1.0, saturation=0.2, palette='coolwarm_r')
        sns.despine(trim=True)
    except Exception:
        # fall back to the plain pandas boxplot
        t.plot(kind='box', color='black', grid=False, whis=1.0, ax=ax)
    ax.set_yscale('log')
    plt.setp(ax.xaxis.get_majorticklabels(), rotation=90)
    plt.ylabel('read count')
    plt.tight_layout()
    return fig
def get_file_ids(path):
    """Return the file <-> mirdeep2 sample-id mapping.

    Reads the space-separated 'combined.txt' written during the mirdeep
    run and returns a DataFrame with 'filename' and 'id' columns.
    """
    mapping_file = os.path.join(path, 'combined.txt')
    return pd.read_csv(mapping_file, sep=' ', header=None,
                       names=['filename', 'id'])
def get_label_map(path, labels):
    """Get results labels mapped to labels with the filenames.

    `labels` is a csv with a 'filename' column; each filename is matched
    against the mirdeep2 file-id table and unmatched rows are dropped.
    """
    condition_map = pd.read_csv(labels)
    file_ids = get_file_ids(path)

    def lookup_id(fname):
        # the sample id whose recorded filename contains fname
        hits = file_ids[file_ids['filename'].str.contains(fname)]
        if len(hits) > 0:
            return hits.id.squeeze()
        return np.nan

    condition_map['id'] = condition_map.filename.apply(lookup_id)
    return condition_map.dropna()
def test_quantifier(path):
    """Ad-hoc driver for the quantifier step on predicted novel miRNAs.

    NOTE(review): assumes novel_mature.fa / novel_star.fa already exist
    in `path` -- the dataframe2Fasta call that would create the star
    file is commented out; confirm before use.  The reads path is
    hard-coded relative to the current working directory.
    """
    resfile = glob.glob(os.path.join(path,'result*.csv'))[0]
    df = read_results_file(resfile)
    novelmature = os.path.join(path, 'novel_mature.fa')
    novelstar = os.path.join(path, 'novel_star.fa')
    novelprecursor = os.path.join(path, 'novel_precursor.fa')
    reads = '../results_mirdeep_combined/collapsedreads.fa'
    #base.dataframe2Fasta(df[df.novel==True], 'consensus star sequence', 'provisional id',
    #                     outfile=novelstar)
    quantifier(path, os.path.abspath(novelmature), os.path.abspath(novelprecursor),
               os.path.abspath(novelstar), reads)
    return
def check_quantifier_results(path):
    """Check quantifier vs results file in case of miscounts.

    Merges the quantifier expression table with the mirdeep results on
    the provisional id, prints entries whose counts disagree by more
    than 400 reads, and shows a log-log scatter of the two counts.
    """
    resfile = glob.glob(os.path.join(path, 'result*.csv'))[0]
    df = read_results_file(resfile)
    files = glob.glob(os.path.join(path, 'miRNAs_expressed_all_samples*.csv'))
    q = pd.read_csv(files[0], sep='\t')
    key = 'provisional id'
    m = q.merge(df, left_on='#miRNA', right_on=key).drop_duplicates('#miRNA')
    # was `m.sc = ...`, which only set an (ignored) DataFrame attribute;
    # use a local Series instead
    sc = m['miRDeep2 score']
    m['err'] = abs(m['read_count']-m['total read count'])
    cols = ['#miRNA', 'total read count', 'read_count', 'miRDeep2 score']
    # DataFrame.sort() was removed from pandas; sort_values is the
    # supported spelling used elsewhere in this module
    print(m[m.err > 400].sort_values('total read count', ascending=False)[cols])
    # marker size buckets by prediction score
    m['size'] = np.select([sc < 2, sc < 3, sc < 4], [20, 40, 50], 80)
    f, ax = plt.subplots(1, 1)
    plt.xscale('log')
    plt.yscale('log')
    m.plot(x='total read count', y='read_count', kind='scatter', s=60, alpha=0.6, ax=ax)
    #ax.plot([0, 1], [0, 1], transform=ax.transAxes,color='red',alpha=0.7)
    plt.show()
    return
def main():
    """Command line interface for running and analysing mirdeep2."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-r", "--run", dest="run", action='store_true',
                      help="run predictions")
    parser.add_option("-i", "--input", dest="input",
                      help="input path or file")
    parser.add_option("-c", "--config", dest="config",
                      help="config file")
    parser.add_option("-a", "--analyse", dest="analyse",
                      help="analyse results of mirdeep2")
    # previously missing: opts.test was read below but the option was
    # never declared, raising AttributeError when no other mode matched
    parser.add_option("-t", "--test", dest="test", action='store_true',
                      help="run tests")
    opts, remainder = parser.parse_args()
    pd.set_option('display.width', 800)
    if opts.run == True:
        # all other options are stored in config file
        if opts.config == None:
            print('No config file provided.')
            base.write_default_config('mirdeep2.conf', defaults=mirdeep2options)
            return
        cp = base.parse_config(opts.config)
        if opts.input != None:
            # was `conf.input = ...` which raised NameError: the parsed
            # config object here is `cp` -- TODO confirm attribute name
            cp.input = os.path.abspath(opts.input)
        options = base.get_options(cp)
        run_multiple(**options)
    elif opts.analyse != None:
        analyse_results(opts.analyse)
    elif opts.test == True:
        test(opts.input)

if __name__ == '__main__':
    main()
|
onecloud/ovs-igmp-v3 | refs/heads/master | python/ovs/json.py | 52 | # Copyright (c) 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import StringIO
import sys
__pychecker__ = 'no-stringiter'

# Characters that must be escaped inside JSON strings: the mandatory
# quote and backslash escapes, the short-form control escapes, and
# \uXXXX forms for every remaining control character below 0x20.
escapes = {ord('"'): u"\\\"",
           ord("\\"): u"\\\\",
           ord("\b"): u"\\b",
           ord("\f"): u"\\f",
           ord("\n"): u"\\n",
           ord("\r"): u"\\r",
           ord("\t"): u"\\t"}
for esc in range(32):
    if esc not in escapes:
        escapes[esc] = u"\\u%04x" % esc

# Number of spaces added per nesting level in pretty-printed output.
SPACES_PER_LEVEL = 2
class _Serializer(object):
    """Writes a Python object to a stream as JSON text.

    Python 2 implementation: distinguishes str/unicode and int/long.
    """

    def __init__(self, stream, pretty, sort_keys):
        self.stream = stream
        self.pretty = pretty          # emit newlines and indentation
        self.sort_keys = sort_keys    # emit object members in sorted key order
        self.depth = 0                # current nesting depth (for indentation)

    def __serialize_string(self, s):
        # Escape via the module-level `escapes` table; characters not in
        # the table are written literally.
        self.stream.write(u'"%s"' % ''.join(escapes.get(ord(c), c) for c in s))

    def __indent_line(self):
        # Start a fresh line indented to the current depth (pretty mode only).
        if self.pretty:
            self.stream.write('\n')
            self.stream.write(' ' * (SPACES_PER_LEVEL * self.depth))

    def serialize(self, obj):
        """Recursively write `obj` as JSON; raises on unsupported types."""
        if obj is None:
            self.stream.write(u"null")
        elif obj is False:
            self.stream.write(u"false")
        elif obj is True:
            self.stream.write(u"true")
        elif type(obj) in (int, long):
            self.stream.write(u"%d" % obj)
        elif type(obj) == float:
            # %.15g prints doubles without excess digits
            self.stream.write("%.15g" % obj)
        elif type(obj) == unicode:
            self.__serialize_string(obj)
        elif type(obj) == str:
            self.__serialize_string(unicode(obj))
        elif type(obj) == dict:
            self.stream.write(u"{")
            self.depth += 1
            self.__indent_line()
            if self.sort_keys:
                items = sorted(obj.items())
            else:
                items = obj.iteritems()
            for i, (key, value) in enumerate(items):
                if i > 0:
                    self.stream.write(u",")
                    self.__indent_line()
                self.__serialize_string(unicode(key))
                self.stream.write(u":")
                if self.pretty:
                    self.stream.write(u' ')
                self.serialize(value)
            self.stream.write(u"}")
            self.depth -= 1
        elif type(obj) in (list, tuple):
            self.stream.write(u"[")
            self.depth += 1
            if obj:
                self.__indent_line()
                for i, value in enumerate(obj):
                    if i > 0:
                        self.stream.write(u",")
                        self.__indent_line()
                    self.serialize(value)
            self.depth -= 1
            self.stream.write(u"]")
        else:
            raise Exception("can't serialize %s as JSON" % obj)
def to_stream(obj, stream, pretty=False, sort_keys=True):
    """Serialize `obj` as JSON onto `stream`."""
    serializer = _Serializer(stream, pretty, sort_keys)
    serializer.serialize(obj)
def to_file(obj, name, pretty=False, sort_keys=True):
    """Serialize `obj` as JSON into the file `name` (overwritten)."""
    # `with` guarantees the stream is closed even if serialization
    # fails, replacing the original try/finally
    with open(name, "w") as stream:
        to_stream(obj, stream, pretty, sort_keys)
def to_string(obj, pretty=False, sort_keys=True):
    """Serialize `obj` and return the JSON text as a string."""
    buf = StringIO.StringIO()
    to_stream(obj, buf, pretty, sort_keys)
    try:
        return buf.getvalue()
    finally:
        buf.close()
def from_stream(stream):
    """Parse one JSON value from `stream`; returns the value or an
    error-message string (this module reports errors by value)."""
    parser = Parser(check_trailer=True)
    chunk = stream.read(4096)
    # stop at EOF or as soon as the parser declines part of a chunk
    # (parse complete or parse error)
    while chunk != "" and parser.feed(chunk) == len(chunk):
        chunk = stream.read(4096)
    return parser.finish()
def from_file(name):
    """Parse the JSON file `name`; returns the value or an error string."""
    # `with` replaces the original try/finally close
    with open(name, "r") as stream:
        return from_stream(stream)
def from_string(s):
    """Parse JSON from the string `s`.

    Returns the parsed value on success.  On a UTF-8 decoding error or
    a parse error, returns a description of the error as a string
    rather than raising (callers must check the return type).
    """
    try:
        s = unicode(s, 'utf-8')
    except UnicodeDecodeError, e:
        # report the offending high bytes in hex
        seq = ' '.join(["0x%2x" % ord(c)
                        for c in e.object[e.start:e.end] if ord(c) >= 0x80])
        return ("not a valid UTF-8 string: invalid UTF-8 sequence %s" % seq)
    p = Parser(check_trailer=True)
    p.feed(s)
    return p.finish()
class Parser(object):
    """Incremental JSON parser (Python 2 implementation).

    Push input in with feed(); obtain the parsed value, or an error
    message string, from finish().  Both the lexer and the parser are
    state machines: the current state is a method stored in
    self.lex_state / self.parse_state.  Each lexer state method returns
    True if it consumed the character, False to re-examine it in the
    new state.
    """

    ## Maximum height of parsing stack. ##
    MAX_HEIGHT = 1000

    def __init__(self, check_trailer=False):
        # When check_trailer is set, non-whitespace input after the
        # first complete value is reported as an error.
        self.check_trailer = check_trailer

        # Lexical analysis.
        self.lex_state = Parser.__lex_start
        self.buffer = ""               # text of the token in progress
        self.line_number = 0
        self.column_number = 0
        self.byte_number = 0

        # Parsing.
        self.parse_state = Parser.__parse_start
        self.stack = []                # containers currently being built
        self.member_name = None        # object key awaiting its value

        # Parse status.
        self.done = False
        self.error = None              # first error message, if any

    def __lex_start_space(self, c):
        # Whitespace between tokens is ignored.
        pass

    def __lex_start_alpha(self, c):
        # Start of a bare keyword (true/false/null).
        self.buffer = c
        self.lex_state = Parser.__lex_keyword

    def __lex_start_token(self, c):
        # Single-character punctuation tokens go straight to the parser.
        self.__parser_input(c)

    def __lex_start_number(self, c):
        self.buffer = c
        self.lex_state = Parser.__lex_number

    def __lex_start_string(self, _):
        # The opening quote itself is not kept in the buffer.
        self.lex_state = Parser.__lex_string

    def __lex_start_error(self, c):
        if ord(c) >= 32 and ord(c) < 128:
            self.__error("invalid character '%s'" % c)
        else:
            self.__error("invalid character U+%04x" % ord(c))

    # Dispatch table: first character of a token -> handler above.
    __lex_start_actions = {}
    for c in " \t\n\r":
        __lex_start_actions[c] = __lex_start_space
    for c in "abcdefghijklmnopqrstuvwxyz":
        __lex_start_actions[c] = __lex_start_alpha
    for c in "[{]}:,":
        __lex_start_actions[c] = __lex_start_token
    for c in "-0123456789":
        __lex_start_actions[c] = __lex_start_number
    __lex_start_actions['"'] = __lex_start_string

    def __lex_start(self, c):
        Parser.__lex_start_actions.get(
            c, Parser.__lex_start_error)(self, c)
        return True

    # Letters that may continue a keyword token.
    __lex_alpha = {}
    for c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
        __lex_alpha[c] = True

    def __lex_finish_keyword(self):
        # Only the three JSON keywords are valid bare words.
        if self.buffer == "false":
            self.__parser_input(False)
        elif self.buffer == "true":
            self.__parser_input(True)
        elif self.buffer == "null":
            self.__parser_input(None)
        else:
            self.__error("invalid keyword '%s'" % self.buffer)

    def __lex_keyword(self, c):
        if c in Parser.__lex_alpha:
            self.buffer += c
            return True
        else:
            self.__lex_finish_keyword()
            return False

    __number_re = re.compile("(-)?(0|[1-9][0-9]*)"
                             "(?:\.([0-9]+))?(?:[eE]([-+]?[0-9]+))?$")

    def __lex_finish_number(self):
        # Emit an int when the value fits exactly in a signed 64-bit
        # integer; otherwise fall back to float.
        s = self.buffer
        m = Parser.__number_re.match(s)
        if m:
            sign, integer, fraction, exp = m.groups()
            if (exp is not None and
                    (long(exp) > sys.maxint or long(exp) < -sys.maxint - 1)):
                self.__error("exponent outside valid range")
                return

            if fraction is not None and len(fraction.lstrip('0')) == 0:
                fraction = None

            # significand * 10**pow10 reconstructs the literal exactly
            sig_string = integer
            if fraction is not None:
                sig_string += fraction
            significand = int(sig_string)

            pow10 = 0
            if fraction is not None:
                pow10 -= len(fraction)
            if exp is not None:
                pow10 += long(exp)

            if significand == 0:
                self.__parser_input(0)
                return
            elif significand <= 2 ** 63:
                # Try to normalize to pow10 == 0 so the value is an
                # exact 64-bit integer.
                while pow10 > 0 and significand <= 2 ** 63:
                    significand *= 10
                    pow10 -= 1
                while pow10 < 0 and significand % 10 == 0:
                    significand /= 10
                    pow10 += 1
                if (pow10 == 0 and
                        ((not sign and significand < 2 ** 63) or
                         (sign and significand <= 2 ** 63))):
                    if sign:
                        self.__parser_input(-significand)
                    else:
                        self.__parser_input(significand)
                    return

            value = float(s)
            if value == float("inf") or value == float("-inf"):
                self.__error("number outside valid range")
                return
            if value == 0:
                # Suppress negative zero.
                value = 0
            self.__parser_input(value)
        elif re.match("-?0[0-9]", s):
            self.__error("leading zeros not allowed")
        elif re.match("-([^0-9]|$)", s):
            self.__error("'-' must be followed by digit")
        elif re.match("-?(0|[1-9][0-9]*)\.([^0-9]|$)", s):
            self.__error("decimal point must be followed by digit")
        elif re.search("e[-+]?([^0-9]|$)", s):
            self.__error("exponent must contain at least one digit")
        else:
            self.__error("syntax error in number")

    def __lex_number(self, c):
        if c in ".0123456789eE-+":
            self.buffer += c
            return True
        else:
            self.__lex_finish_number()
            return False

    __4hex_re = re.compile("[0-9a-fA-F]{4}")

    def __lex_4hex(self, s):
        # Parse the 4 hex digits of a \uXXXX escape; returns the code
        # point, or None (implicitly) after recording an error.
        if len(s) < 4:
            self.__error("quoted string ends within \\u escape")
        elif not Parser.__4hex_re.match(s):
            self.__error("malformed \\u escape")
        elif s == "0000":
            self.__error("null bytes not supported in quoted strings")
        else:
            return int(s, 16)

    @staticmethod
    def __is_leading_surrogate(c):
        """Returns true if 'c' is a Unicode code point for a leading
        surrogate."""
        return c >= 0xd800 and c <= 0xdbff

    @staticmethod
    def __is_trailing_surrogate(c):
        """Returns true if 'c' is a Unicode code point for a trailing
        surrogate."""
        return c >= 0xdc00 and c <= 0xdfff

    @staticmethod
    def __utf16_decode_surrogate_pair(leading, trailing):
        """Returns the unicode code point corresponding to leading surrogate
        'leading' and trailing surrogate 'trailing'.  The return value will not
        make any sense if 'leading' or 'trailing' are not in the correct ranges
        for leading or trailing surrogates."""
        #  Leading surrogate:  110110wwwwxxxxxx
        # Trailing surrogate:  110111xxxxxxxxxx
        #         Code point:  000uuuuuxxxxxxxxxxxxxxxx
        w = (leading >> 6) & 0xf
        u = w + 1
        x0 = leading & 0x3f
        x1 = trailing & 0x3ff
        return (u << 16) | (x0 << 10) | x1

    # Single-character backslash escapes.
    __unescape = {'"': u'"',
                  "\\": u"\\",
                  "/": u"/",
                  "b": u"\b",
                  "f": u"\f",
                  "n": u"\n",
                  "r": u"\r",
                  "t": u"\t"}

    def __lex_finish_string(self):
        # Resolve all backslash escapes (including surrogate pairs) in
        # the buffered string and hand the result to the parser.
        inp = self.buffer
        out = u""
        while len(inp):
            backslash = inp.find('\\')
            if backslash == -1:
                out += inp
                break
            out += inp[:backslash]
            inp = inp[backslash + 1:]
            if inp == "":
                self.__error("quoted string may not end with backslash")
                return

            replacement = Parser.__unescape.get(inp[0])
            if replacement is not None:
                out += replacement
                inp = inp[1:]
                continue
            elif inp[0] != u'u':
                self.__error("bad escape \\%s" % inp[0])
                return

            c0 = self.__lex_4hex(inp[1:5])
            if c0 is None:
                return
            inp = inp[5:]

            if Parser.__is_leading_surrogate(c0):
                # A leading surrogate must be followed by an escaped
                # trailing surrogate.
                if inp[:2] != u'\\u':
                    self.__error("malformed escaped surrogate pair")
                    return
                c1 = self.__lex_4hex(inp[2:6])
                if c1 is None:
                    return
                if not Parser.__is_trailing_surrogate(c1):
                    self.__error("second half of escaped surrogate pair is "
                                 "not trailing surrogate")
                    return
                code_point = Parser.__utf16_decode_surrogate_pair(c0, c1)
                inp = inp[6:]
            else:
                code_point = c0
            out += unichr(code_point)
        self.__parser_input('string', out)

    def __lex_string_escape(self, c):
        # Consume the character after a backslash unconditionally, so a
        # chunk boundary cannot split an escape.
        self.buffer += c
        self.lex_state = Parser.__lex_string
        return True

    def __lex_string(self, c):
        if c == '\\':
            self.buffer += c
            self.lex_state = Parser.__lex_string_escape
        elif c == '"':
            self.__lex_finish_string()
        elif ord(c) >= 0x20:
            self.buffer += c
        else:
            self.__error("U+%04X must be escaped in quoted string" % ord(c))
        return True

    def __lex_input(self, c):
        eat = self.lex_state(self, c)
        assert eat is True or eat is False
        return eat

    # Parser states.  Each receives the token (a punctuation character,
    # a literal value, or 'string') plus the string payload if any.

    def __parse_start(self, token, unused_string):
        # Top level must be an object or array.
        if token == '{':
            self.__push_object()
        elif token == '[':
            self.__push_array()
        else:
            self.__error("syntax error at beginning of input")

    def __parse_end(self, unused_token, unused_string):
        self.__error("trailing garbage at end of input")

    def __parse_object_init(self, token, string):
        # Directly after '{': either '}' (empty) or the first member name.
        if token == '}':
            self.__parser_pop()
        else:
            self.__parse_object_name(token, string)

    def __parse_object_name(self, token, string):
        if token == 'string':
            self.member_name = string
            self.parse_state = Parser.__parse_object_colon
        else:
            self.__error("syntax error parsing object expecting string")

    def __parse_object_colon(self, token, unused_string):
        if token == ":":
            self.parse_state = Parser.__parse_object_value
        else:
            self.__error("syntax error parsing object expecting ':'")

    def __parse_object_value(self, token, string):
        self.__parse_value(token, string, Parser.__parse_object_next)

    def __parse_object_next(self, token, unused_string):
        if token == ",":
            self.parse_state = Parser.__parse_object_name
        elif token == "}":
            self.__parser_pop()
        else:
            self.__error("syntax error expecting '}' or ','")

    def __parse_array_init(self, token, string):
        # Directly after '[': either ']' (empty) or the first element.
        if token == ']':
            self.__parser_pop()
        else:
            self.__parse_array_value(token, string)

    def __parse_array_value(self, token, string):
        self.__parse_value(token, string, Parser.__parse_array_next)

    def __parse_array_next(self, token, unused_string):
        if token == ",":
            self.parse_state = Parser.__parse_array_value
        elif token == "]":
            self.__parser_pop()
        else:
            self.__error("syntax error expecting ']' or ','")

    def __parser_input(self, token, string=None):
        # A complete token: reset the lexer and run the parser state.
        self.lex_state = Parser.__lex_start
        self.buffer = ""
        self.parse_state(self, token, string)

    def __put_value(self, value):
        # Attach a finished value to the innermost open container.
        top = self.stack[-1]
        if type(top) == dict:
            top[self.member_name] = value
        else:
            top.append(value)

    def __parser_push(self, new_json, next_state):
        if len(self.stack) < Parser.MAX_HEIGHT:
            if len(self.stack) > 0:
                self.__put_value(new_json)
            self.stack.append(new_json)
            self.parse_state = next_state
        else:
            self.__error("input exceeds maximum nesting depth %d" %
                         Parser.MAX_HEIGHT)

    def __push_object(self):
        self.__parser_push({}, Parser.__parse_object_init)

    def __push_array(self):
        self.__parser_push([], Parser.__parse_array_init)

    def __parser_pop(self):
        # Close the innermost container.  The root value stays on the
        # stack so finish() can return it.
        if len(self.stack) == 1:
            self.parse_state = Parser.__parse_end
            if not self.check_trailer:
                self.done = True
        else:
            self.stack.pop()
            top = self.stack[-1]
            if type(top) == list:
                self.parse_state = Parser.__parse_array_next
            else:
                self.parse_state = Parser.__parse_object_next

    def __parse_value(self, token, string, next_state):
        if token in [False, None, True] or type(token) in [int, long, float]:
            self.__put_value(token)
        elif token == 'string':
            self.__put_value(string)
        else:
            if token == '{':
                self.__push_object()
            elif token == '[':
                self.__push_array()
            else:
                self.__error("syntax error expecting value")
                return
            # container pushes set their own state; skip next_state
            return
        self.parse_state = next_state

    def __error(self, message):
        # Only the first error is kept; it also stops all further input.
        if self.error is None:
            self.error = ("line %d, column %d, byte %d: %s"
                          % (self.line_number, self.column_number,
                             self.byte_number, message))
            self.done = True

    def feed(self, s):
        """Feed the characters of `s`; returns how many were consumed
        (less than len(s) only once the parse is done or has failed)."""
        i = 0
        while True:
            if self.done or i >= len(s):
                return i
            c = s[i]
            if self.__lex_input(c):
                self.byte_number += 1
                if c == '\n':
                    self.column_number = 0
                    self.line_number += 1
                else:
                    self.column_number += 1
                i += 1

    def is_done(self):
        return self.done

    def finish(self):
        """Flush the lexer and return the parsed value, or the error
        message string if parsing failed."""
        if self.lex_state == Parser.__lex_start:
            pass
        elif self.lex_state in (Parser.__lex_string,
                                Parser.__lex_string_escape):
            self.__error("unexpected end of input in quoted string")
        else:
            # Terminate any in-progress number or keyword token.
            self.__lex_input(" ")

        if self.parse_state == Parser.__parse_start:
            self.__error("empty input stream")
        elif self.parse_state != Parser.__parse_end:
            self.__error("unexpected end of input")

        if self.error == None:
            assert len(self.stack) == 1
            return self.stack.pop()
        else:
            return self.error
|
elmar-peise/ELAPS | refs/heads/master | elaps/bin/playmat.py | 2 | #!/usr/bin/env python
"""Wrapper for ELAPS:PlayMat."""
import argparse
from .. import defines
from ..qt import PlayMat
def main():
    """Parse command-line arguments and launch the PlayMat GUI."""
    arg_parser = argparse.ArgumentParser(
        description="ELAPS PlayMat (Experiment GUI)"
    )
    arg_parser.add_argument(
        "--reset", action="store_true",
        help="reset to default Experiment"
    )
    experiment_help = "An ELAPS Experiment (.%s) or Report (.%s)" % (
        defines.experiment_extension, defines.report_extension
    )
    arg_parser.add_argument("experiment", nargs="?", help=experiment_help)
    args = arg_parser.parse_args()
    # launch the GUI with the (optional) experiment/report to load
    playmat = PlayMat(load=args.experiment, reset=args.reset)
    playmat.start()

if __name__ == "__main__":
    main()
|
dbckz/ansible | refs/heads/devel | lib/ansible/playbook/role/requirement.py | 20 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import shutil
import subprocess
import tempfile
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.playbook.role.definition import RoleDefinition
__all__ = ['RoleRequirement']

# Keys that are meaningful in a normalized role spec dictionary;
# anything else is stripped by RoleRequirement.role_yaml_parse().
VALID_SPEC_KEYS = [
    'name',
    'role',
    'scm',
    'src',
    'version',
]

# Reuse the CLI's global display object when running under the ansible
# CLI; otherwise fall back to a private Display instance.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class RoleRequirement(RoleDefinition):

    """
    Helper class for Galaxy, which is used to parse both dependencies
    specified in meta/main.yml and requirements.yml files.
    """

    def __init__(self):
        pass

    @staticmethod
    def repo_url_to_role_name(repo_url):
        # gets the role name out of a repo like
        # http://git.example.com/repos/repo.git" => "repo"
        if '://' not in repo_url and '@' not in repo_url:
            return repo_url
        trailing_path = repo_url.split('/')[-1]
        if trailing_path.endswith('.git'):
            trailing_path = trailing_path[:-4]
        if trailing_path.endswith('.tar.gz'):
            trailing_path = trailing_path[:-7]
        if ',' in trailing_path:
            trailing_path = trailing_path.split(',')[0]
        return trailing_path

    @staticmethod
    def role_spec_parse(role_spec):
        # takes a repo and a version like
        # git+http://git.example.com/repos/repo.git,v1.0
        # and returns a list of properties such as:
        # {
        #   'scm': 'git',
        #   'src': 'http://git.example.com/repos/repo.git',
        #   'version': 'v1.0',
        #   'name': 'repo'
        # }
        display.deprecated("The comma separated role spec format, use the yaml/explicit format instead. Line that trigger this: %s" % role_spec)

        default_role_versions = dict(git='master', hg='tip')

        role_spec = role_spec.strip()
        role_version = ''
        if role_spec == "" or role_spec.startswith("#"):
            # NOTE(review): blank/comment lines return a 4-tuple while
            # every other path returns a dict -- callers handle both.
            return (None, None, None, None)

        tokens = [s.strip() for s in role_spec.split(',')]

        # assume https://github.com URLs are git+https:// URLs and not
        # tarballs unless they end in '.zip'
        if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
            tokens[0] = 'git+' + tokens[0]

        if '+' in tokens[0]:
            (scm, role_url) = tokens[0].split('+')
        else:
            scm = None
            role_url = tokens[0]

        if len(tokens) >= 2:
            role_version = tokens[1]

        if len(tokens) == 3:
            role_name = tokens[2]
        else:
            role_name = RoleRequirement.repo_url_to_role_name(tokens[0])

        if scm and not role_version:
            role_version = default_role_versions.get(scm, '')

        return dict(scm=scm, src=role_url, version=role_version, name=role_name)

    @staticmethod
    def role_yaml_parse(role):
        # Normalize a requirements.yml entry (string or dict) into a
        # dict containing only VALID_SPEC_KEYS.
        if isinstance(role, string_types):
            name = None
            scm = None
            src = None
            version = None
            if ',' in role:
                if role.count(',') == 1:
                    (src, version) = role.strip().split(',', 1)
                elif role.count(',') == 2:
                    (src, version, name) = role.strip().split(',', 2)
                else:
                    raise AnsibleError("Invalid role line (%s). Proper format is 'role_name[,version[,name]]'" % role)
            else:
                src = role

            if name is None:
                name = RoleRequirement.repo_url_to_role_name(src)
            if '+' in src:
                (scm, src) = src.split('+', 1)
            return dict(name=name, src=src, scm=scm, version=version)

        if 'role' in role:
            name = role['role']
            if ',' in name:
                # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
                role = RoleRequirement.role_spec_parse(role['role'])
            else:
                del role['role']
                role['name'] = name
        else:
            role = role.copy()

            if 'src' in role:
                # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
                if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
                    role["src"] = "git+" + role["src"]

                if '+' in role["src"]:
                    (scm, src) = role["src"].split('+')
                    role["scm"] = scm
                    role["src"] = src

                if 'name' not in role:
                    role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])

            if 'version' not in role:
                role['version'] = ''

            if 'scm' not in role:
                role['scm'] = None

        for key in list(role.keys()):
            if key not in VALID_SPEC_KEYS:
                role.pop(key)

        return role

    @staticmethod
    def scm_archive_role(src, scm='git', name=None, version='HEAD'):
        # Clone the repo into a temp dir, optionally check out `version`
        # (git only), archive it into a named temporary .tar file, and
        # return that file's path.
        if scm not in ['hg', 'git']:
            raise AnsibleError("- scm %s is not currently supported" % scm)
        tempdir = tempfile.mkdtemp()
        clone_cmd = [scm, 'clone', src, name]
        with open('/dev/null', 'w') as devnull:
            try:
                popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
            except (IOError, OSError):
                # was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to match the
                # identical checkout handling below
                raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
            rc = popen.wait()
        if rc != 0:
            raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(clone_cmd), tempdir, rc))

        if scm == 'git' and version:
            checkout_cmd = [scm, 'checkout', version]
            with open('/dev/null', 'w') as devnull:
                try:
                    popen = subprocess.Popen(checkout_cmd, cwd=os.path.join(tempdir, name), stdout=devnull, stderr=devnull)
                except (IOError, OSError):
                    raise AnsibleError("error executing: %s" % " ".join(checkout_cmd))
                rc = popen.wait()
            if rc != 0:
                raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(checkout_cmd), tempdir, rc))

        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
        if scm == 'hg':
            archive_cmd = ['hg', 'archive', '--prefix', "%s/" % name]
            if version:
                archive_cmd.extend(['-r', version])
            archive_cmd.append(temp_file.name)
        if scm == 'git':
            archive_cmd = ['git', 'archive', '--prefix=%s/' % name, '--output=%s' % temp_file.name]
            if version:
                archive_cmd.append(version)
            else:
                archive_cmd.append('HEAD')
        with open('/dev/null', 'w') as devnull:
            popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, name),
                                     stderr=devnull, stdout=devnull)
            rc = popen.wait()
        if rc != 0:
            raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(archive_cmd), tempdir, rc))

        shutil.rmtree(tempdir, ignore_errors=True)
        return temp_file.name
|
mammique/django | refs/heads/tp_alpha | tests/modeltests/fixtures_model_package/tests.py | 58 | from __future__ import unicode_literals
from django.core import management
from django.db import transaction
from django.test import TestCase, TransactionTestCase
from .models import Article, Book
class SampleTestCase(TestCase):
    """Fixture loading for models defined inside an app *package*."""

    fixtures = ['fixture1.json', 'fixture2.json']

    def testClassFixtures(self):
        "Test cases can load fixture objects into models defined in packages"
        self.assertEqual(Article.objects.count(), 3)
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Django conquers world!",
                "Copyright is fine the way it is",
                "Poker has no place on ESPN",
            ],
            lambda a: a.headline
        )
class TestNoInitialDataLoading(TransactionTestCase):
    """syncdb/flush must skip initial_data when load_initial_data=False."""

    def test_syncdb(self):
        # With load_initial_data=False, syncdb must not repopulate the
        # Book table after it has been emptied.
        with transaction.commit_manually():
            Book.objects.all().delete()

            management.call_command(
                'syncdb',
                verbosity=0,
                load_initial_data=False
            )
            self.assertQuerysetEqual(Book.objects.all(), [])
            transaction.rollback()

    def test_flush(self):
        # Test presence of fixture (flush called by TransactionTestCase)
        self.assertQuerysetEqual(
            Book.objects.all(), [
                'Achieving self-awareness of Python programs'
            ],
            lambda a: a.name
        )

        # Flushing with load_initial_data=False must leave the table empty.
        with transaction.commit_manually():
            management.call_command(
                'flush',
                verbosity=0,
                interactive=False,
                commit=False,
                load_initial_data=False
            )
            self.assertQuerysetEqual(Book.objects.all(), [])
            transaction.rollback()
class FixtureTestCase(TestCase):
    """Fixture and initial-data loading for models in an app package."""

    def test_initial_data(self):
        "Fixtures can load initial data into models defined in packages"
        # syncdb introduces 1 initial data object from initial_data.json
        self.assertQuerysetEqual(
            Book.objects.all(), [
                'Achieving self-awareness of Python programs'
            ],
            lambda a: a.name
        )

    def test_loaddata(self):
        "Fixtures can load data into models defined in packages"
        # Load fixture 1. Single JSON file, with two objects
        management.call_command("loaddata", "fixture1.json", verbosity=0, commit=False)
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Time to reform copyright",
                "Poker has no place on ESPN",
            ],
            lambda a: a.headline,
        )

        # Load fixture 2. JSON file imported by default. Overwrites some
        # existing objects
        management.call_command("loaddata", "fixture2.json", verbosity=0, commit=False)
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Django conquers world!",
                "Copyright is fine the way it is",
                "Poker has no place on ESPN",
            ],
            lambda a: a.headline,
        )

        # Load a fixture that doesn't exist -- the data must be unchanged
        management.call_command("loaddata", "unknown.json", verbosity=0, commit=False)
        self.assertQuerysetEqual(
            Article.objects.all(), [
                "Django conquers world!",
                "Copyright is fine the way it is",
                "Poker has no place on ESPN",
            ],
            lambda a: a.headline,
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.