repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
sYnfo/sti-python
refs/heads/master
3.3/test/django-test-app/project/settings.py
51
""" Django settings for project project. Generated by 'django-admin startproject' using Django 1.8.1. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'y*b^6p#z&cm2)8rzgbp2i4k*+rg2h%60l*bmf6hg&ro!z0-ael' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'project.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'project.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
wenyu1001/scrapy
refs/heads/master
tests/test_downloadermiddleware_robotstxt.py
11
from __future__ import absolute_import import re from twisted.internet import reactor, error from twisted.internet.defer import Deferred, DeferredList, maybeDeferred from twisted.python import failure from twisted.trial import unittest from scrapy.downloadermiddlewares.robotstxt import (RobotsTxtMiddleware, logger as mw_module_logger) from scrapy.exceptions import IgnoreRequest, NotConfigured from scrapy.http import Request, Response, TextResponse from scrapy.settings import Settings from tests import mock class RobotsTxtMiddlewareTest(unittest.TestCase): def setUp(self): self.crawler = mock.MagicMock() self.crawler.settings = Settings() self.crawler.engine.download = mock.MagicMock() def tearDown(self): del self.crawler def test_robotstxt_settings(self): self.crawler.settings = Settings() self.crawler.settings.set('USER_AGENT', 'CustomAgent') self.assertRaises(NotConfigured, RobotsTxtMiddleware, self.crawler) def _get_successful_crawler(self): crawler = self.crawler crawler.settings.set('ROBOTSTXT_OBEY', True) ROBOTS = re.sub(b'^\s+(?m)', b'', b''' User-Agent: * Disallow: /admin/ Disallow: /static/ ''') response = TextResponse('http://site.local/robots.txt', body=ROBOTS) def return_response(request, spider): deferred = Deferred() reactor.callFromThread(deferred.callback, response) return deferred crawler.engine.download.side_effect = return_response return crawler def test_robotstxt(self): middleware = RobotsTxtMiddleware(self._get_successful_crawler()) return DeferredList([ self.assertNotIgnored(Request('http://site.local/allowed'), middleware), self.assertIgnored(Request('http://site.local/admin/main'), middleware), self.assertIgnored(Request('http://site.local/static/'), middleware) ], fireOnOneErrback=True) def test_robotstxt_ready_parser(self): middleware = RobotsTxtMiddleware(self._get_successful_crawler()) d = self.assertNotIgnored(Request('http://site.local/allowed'), middleware) d.addCallback(lambda _: self.assertNotIgnored(Request('http://site.local/allowed'), middleware)) return d def test_robotstxt_meta(self): middleware = RobotsTxtMiddleware(self._get_successful_crawler()) meta = {'dont_obey_robotstxt': True} return DeferredList([ self.assertNotIgnored(Request('http://site.local/allowed', meta=meta), middleware), self.assertNotIgnored(Request('http://site.local/admin/main', meta=meta), middleware), self.assertNotIgnored(Request('http://site.local/static/', meta=meta), middleware) ], fireOnOneErrback=True) def _get_garbage_crawler(self): crawler = self.crawler crawler.settings.set('ROBOTSTXT_OBEY', True) response = Response('http://site.local/robots.txt', body=b'GIF89a\xd3\x00\xfe\x00\xa2') def return_response(request, spider): deferred = Deferred() reactor.callFromThread(deferred.callback, response) return deferred crawler.engine.download.side_effect = return_response return crawler def test_robotstxt_garbage(self): # garbage response should be discarded, equal 'allow all' middleware = RobotsTxtMiddleware(self._get_garbage_crawler()) deferred = DeferredList([ self.assertNotIgnored(Request('http://site.local'), middleware), self.assertNotIgnored(Request('http://site.local/allowed'), middleware), self.assertNotIgnored(Request('http://site.local/admin/main'), middleware), self.assertNotIgnored(Request('http://site.local/static/'), middleware) ], fireOnOneErrback=True) return deferred def _get_emptybody_crawler(self): crawler = self.crawler crawler.settings.set('ROBOTSTXT_OBEY', True) response = Response('http://site.local/robots.txt') def return_response(request, spider): deferred = Deferred() reactor.callFromThread(deferred.callback, response) return deferred crawler.engine.download.side_effect = return_response return crawler def test_robotstxt_empty_response(self): # empty response should equal 'allow all' middleware = RobotsTxtMiddleware(self._get_emptybody_crawler()) return DeferredList([ self.assertNotIgnored(Request('http://site.local/allowed'), middleware), self.assertNotIgnored(Request('http://site.local/admin/main'), middleware), self.assertNotIgnored(Request('http://site.local/static/'), middleware) ], fireOnOneErrback=True) def test_robotstxt_error(self): self.crawler.settings.set('ROBOTSTXT_OBEY', True) err = error.DNSLookupError('Robotstxt address not found') def return_failure(request, spider): deferred = Deferred() reactor.callFromThread(deferred.errback, failure.Failure(err)) return deferred self.crawler.engine.download.side_effect = return_failure middleware = RobotsTxtMiddleware(self.crawler) middleware._logerror = mock.MagicMock(side_effect=middleware._logerror) deferred = middleware.process_request(Request('http://site.local'), None) deferred.addCallback(lambda _: self.assertTrue(middleware._logerror.called)) return deferred def test_robotstxt_immediate_error(self): self.crawler.settings.set('ROBOTSTXT_OBEY', True) err = error.DNSLookupError('Robotstxt address not found') def immediate_failure(request, spider): deferred = Deferred() deferred.errback(failure.Failure(err)) return deferred self.crawler.engine.download.side_effect = immediate_failure middleware = RobotsTxtMiddleware(self.crawler) return self.assertNotIgnored(Request('http://site.local'), middleware) def test_ignore_robotstxt_request(self): self.crawler.settings.set('ROBOTSTXT_OBEY', True) def ignore_request(request, spider): deferred = Deferred() reactor.callFromThread(deferred.errback, failure.Failure(IgnoreRequest())) return deferred self.crawler.engine.download.side_effect = ignore_request middleware = RobotsTxtMiddleware(self.crawler) mw_module_logger.error = mock.MagicMock() d = self.assertNotIgnored(Request('http://site.local/allowed'), middleware) d.addCallback(lambda _: self.assertFalse(mw_module_logger.error.called)) return d def assertNotIgnored(self, request, middleware): spider = None # not actually used dfd = maybeDeferred(middleware.process_request, request, spider) dfd.addCallback(self.assertIsNone) return dfd def assertIgnored(self, request, middleware): spider = None # not actually used return self.assertFailure(maybeDeferred(middleware.process_request, request, spider), IgnoreRequest)
googleapis/googleapis-gen
refs/heads/master
google/cloud/osconfig/agentendpoint/v1/osconfig-agentendpoint-v1-py/noxfile.py
1
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import pathlib import shutil import subprocess import sys import nox # type: ignore CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") nox.sessions = [ "unit", "cover", "mypy", "check_lower_bounds" # exclude update_lower_bounds from default "docs", ] @nox.session(python=['3.6', '3.7', '3.8', '3.9']) def unit(session): """Run the unit test suite.""" session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') session.install('-e', '.') session.run( 'py.test', '--quiet', '--cov=google/cloud/osconfig/agentendpoint_v1/', '--cov-config=.coveragerc', '--cov-report=term', '--cov-report=html', os.path.join('tests', 'unit', ''.join(session.posargs)) ) @nox.session(python='3.7') def cover(session): """Run the final coverage report. This outputs the coverage report aggregating coverage from the unit test runs (not system test runs), and then erases coverage data. """ session.install("coverage", "pytest-cov") session.run("coverage", "report", "--show-missing", "--fail-under=100") session.run("coverage", "erase") @nox.session(python=['3.6', '3.7']) def mypy(session): """Run the type checker.""" session.install('mypy', 'types-pkg_resources') session.install('.') session.run( 'mypy', '--explicit-package-bases', 'google', ) @nox.session def update_lower_bounds(session): """Update lower bounds in constraints.txt to match setup.py""" session.install('google-cloud-testutils') session.install('.') session.run( 'lower-bound-checker', 'update', '--package-name', PACKAGE_NAME, '--constraints-file', str(LOWER_BOUND_CONSTRAINTS_FILE), ) @nox.session def check_lower_bounds(session): """Check lower bounds in setup.py are reflected in constraints file""" session.install('google-cloud-testutils') session.install('.') session.run( 'lower-bound-checker', 'check', '--package-name', PACKAGE_NAME, '--constraints-file', str(LOWER_BOUND_CONSTRAINTS_FILE), ) @nox.session(python='3.6') def docs(session): """Build the docs for this library.""" session.install("-e", ".") session.install("sphinx<3.0.0", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( "sphinx-build", "-W", # warnings as errors "-T", # show full traceback on exception "-N", # no colors "-b", "html", "-d", os.path.join("docs", "_build", "doctrees", ""), os.path.join("docs", ""), os.path.join("docs", "_build", "html", ""), )
gtt116/novalog
refs/heads/master
remotelog/admin.py
1
from django.contrib import admin from django.core.urlresolvers import reverse from remotelog import models as remotelog class ApplicationAdmin(admin.ModelAdmin): list_display = ('name', 'slug',) search_fields = ('name', 'slug',) admin.site.register(remotelog.Application, ApplicationAdmin) # date = models.DateTimeField() # remote_ip = models.CharField(max_length=40) # remote_host = models.CharField(max_length=255) # # levelno = models.IntegerField() # levelname = models.CharField(max_length=255) # # name = models.CharField(max_length=255) # module = models.CharField(max_length=255) # filename = models.CharField(max_length=255) # pathname = models.CharField(max_length=255) # funcName = models.CharField(max_length=255) # lineno = models.IntegerField() # # msg = models.TextField() # exc_info = models.TextField() # exc_text = models.TextField() # args = models.TextField(null=True, blank=True) # # threadName = models.CharField(max_length=255) # thread = models.FloatField() # created = models.FloatField() # process = models.IntegerField() # relativeCreated = models.FloatField() # msecs = models.FloatField() class LogMessageAdmin(admin.ModelAdmin): list_display = ('application', 'date', 'remote_ip', 'name', 'levelname', 'short_msg', 'view_link') search_fields = ('remote_ip', 'remote_host', 'name', 'filename', 'funcName', 'levelname', 'short_msg') list_filter = ('application', 'remote_ip', 'levelname', 'name', 'date',) def short_msg(self, obj): return obj.short_msg short_msg.short_description = 'Message' def view_link(self, obj): kwargs = { 'message_id': obj.id, 'app_slug': obj.application.slug, } return '<a href="%s">View</a>' % reverse('view_message', kwargs=kwargs) view_link.short_description = '' view_link.allow_tags = True admin.site.register(remotelog.LogMessage, LogMessageAdmin)
rldotai/deepy
refs/heads/master
deepy/utils/functions.py
3
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import theano import theano.tensor as T import numpy as np import re import copy from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams import logging as loggers logging = loggers.getLogger(__name__) FLOATX = theano.config.floatX EPSILON = T.constant(1.0e-15, dtype=FLOATX) BIG_EPSILON = T.constant(1.0e-7, dtype=FLOATX) if 'DEEPY_SEED' in os.environ: global_seed = int(os.environ['DEEPY_SEED']) logging.info("set global random seed to %d" % global_seed) else: global_seed = 3 global_rand = np.random.RandomState(seed=global_seed) global_theano_rand = RandomStreams(seed=global_seed) def onehot(size, eye): return np.eye(1, size, eye, dtype=FLOATX)[0] def onehot_tensor(i_matrix, vocab_size): """ # batch x time """ dim0, dim1 = i_matrix.shape i_vector = i_matrix.reshape((-1,)) hot_matrix = T.extra_ops.to_one_hot(i_vector, vocab_size).reshape((dim0, dim1, vocab_size)) return hot_matrix # def onehot_tensor(t, r=None): # if r is None: # r = T.max(t) + 1 # # ranges = T.shape_padleft(T.arange(r), t.ndim) # return T.eq(ranges, T.shape_padright(t, 1)) def make_float_matrices(*names): ret = [] for n in names: ret.append(T.matrix(n, dtype=FLOATX)) return ret def make_float_vectors(*names): ret = [] for n in names: ret.append(T.vector(n, dtype=FLOATX)) return ret def monitor_var(value, name="", disabled=False): if disabled: return value else: return theano.printing.Print(name)(value) def monitor_var_sum(value, name="", disabled=False): if disabled: return T.sum(value)*0 else: val = T.sum(theano.printing.Print(name)(value))*T.constant(0.0000001, dtype=FLOATX) return T.cast(val, FLOATX) def back_grad(jacob, err_g): return T.dot(jacob, err_g) # return (jacob.T * err_g).T def replace_graph(node, dct, deepcopy=True): """ Replace nodes in a computational graph (Safe). """ if not hasattr(node, 'owner'): return node if not hasattr(node.owner, 'inputs'): return node if deepcopy: new_node = copy.deepcopy(node) new_node.owner.inputs = copy.copy(node.owner.inputs) node = new_node owner = node.owner for i, elem in enumerate(owner.inputs): if elem in dct: owner.inputs[i] = dct[elem] else: owner.inputs[i] = replace_graph(elem, dct) return node def build_node_name(n): if "owner" not in dir(n) or "inputs" not in dir(n.owner): return str(n) else: op_name = str(n.owner.op) if "{" not in op_name: op_name = "Elemwise{%s}" % op_name if "," in op_name: op_name = re.sub(r"\{([^}]+),[^}]+\}", "{\\1}", op_name) if "_" in op_name: op_name = re.sub(r"\{[^}]+_([^_}]+)\}", "{\\1}", op_name) return "%s(%s)" % (op_name, ",".join([build_node_name(m) for m in n.owner.inputs])) def smart_replace_graph(n, dct, name_map=None, deepcopy=True): """ Replace nodes in a computational graph (Smart). """ if not name_map: name_map = {} for src, dst in dct.items(): name_map[build_node_name(src)] = dst if not hasattr(n, 'owner'): return n if not hasattr(n.owner, 'inputs'): return n if deepcopy: new_node = copy.deepcopy(n) new_node.owner.inputs = copy.copy(n.owner.inputs) n = new_node owner = n.owner for i, elem in enumerate(owner.inputs): if elem in dct: owner.inputs[i] = dct[elem] elif build_node_name(elem) in name_map: owner.inputs[i] = name_map[build_node_name(elem)] else: owner.inputs[i] = smart_replace_graph(elem, dct, name_map, deepcopy) return n class VarMap(): def __init__(self): self.varmap = {} def __get__(self, instance, owner): if instance not in self.varmap: return None else: return self.varmap[instance] def __set__(self, instance, value): self.varmap[instance] = value def __contains__(self, item): return item in self.varmap def update_if_not_existing(self, name, value): if name not in self.varmap: self.varmap[name] = value def get(self, name): return self.varmap[name] def set(self, name, value): self.varmap[name] = value import numpy as np chars = [u" ", u"▁", u"▂", u"▃", u"▄", u"▅", u"▆", u"▇", u"█"] def plot_hinton(arr, max_arr=None): if max_arr == None: max_arr = arr arr = np.array(arr) max_val = max(abs(np.max(max_arr)), abs(np.min(max_arr))) print np.array2string(arr, formatter={'float_kind': lambda x: visual_hinton(x, max_val)}, max_line_width=5000 ) def visual_hinton(val, max_val): if abs(val) == max_val: step = len(chars) - 1 else: step = int(abs(float(val) / max_val) * len(chars)) colourstart = "" colourend = "" if val < 0: colourstart, colourend = '\033[90m', '\033[0m' #bh.internal = colourstart + chars[step] + colourend return colourstart + chars[step] + colourend from theano.compile import ViewOp from theano.gradient import DisconnectedType class DisconnectedGrad(ViewOp): def grad(self, args, g_outs): return [ DisconnectedType()() for g_out in g_outs] def connection_pattern(self, node): return [[False]] disconnected_grad = DisconnectedGrad()
jeffery-do/Vizdoombot
refs/heads/master
doom/lib/python3.5/site-packages/numpy/f2py/capi_maps.py
29
#!/usr/bin/env python """ Copyright 1999,2000 Pearu Peterson all rights reserved, Pearu Peterson <pearu@ioc.ee> Permission to use, modify, and distribute this software is given under the terms of the NumPy License. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. $Date: 2005/05/06 10:57:33 $ Pearu Peterson """ from __future__ import division, absolute_import, print_function __version__ = "$Revision: 1.60 $"[10:-1] from . import __version__ f2py_version = __version__.version import copy import re import os import sys from .crackfortran import markoutercomma from . import cb_rules # The eviroment provided by auxfuncs.py is needed for some calls to eval. # As the needed functions cannot be determined by static inspection of the # code, it is safest to use import * pending a major refactoring of f2py. from .auxfuncs import * __all__ = [ 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map', 'cb_sign2map', 'cb_routsign2map', 'common_sign2map' ] # Numarray and Numeric users should set this False using_newcore = True depargs = [] lcb_map = {} lcb2_map = {} # forced casting: mainly caused by the fact that Python or Numeric # C/APIs do not support the corresponding C types. c2py_map = {'double': 'float', 'float': 'float', # forced casting 'long_double': 'float', # forced casting 'char': 'int', # forced casting 'signed_char': 'int', # forced casting 'unsigned_char': 'int', # forced casting 'short': 'int', # forced casting 'unsigned_short': 'int', # forced casting 'int': 'int', # (forced casting) 'long': 'int', 'long_long': 'long', 'unsigned': 'int', # forced casting 'complex_float': 'complex', # forced casting 'complex_double': 'complex', 'complex_long_double': 'complex', # forced casting 'string': 'string', } c2capi_map = {'double': 'NPY_DOUBLE', 'float': 'NPY_FLOAT', 'long_double': 'NPY_DOUBLE', # forced casting 'char': 'NPY_CHAR', 'unsigned_char': 'NPY_UBYTE', 'signed_char': 'NPY_BYTE', 'short': 'NPY_SHORT', 'unsigned_short': 'NPY_USHORT', 'int': 'NPY_INT', 'unsigned': 'NPY_UINT', 'long': 'NPY_LONG', 'long_long': 'NPY_LONG', # forced casting 'complex_float': 'NPY_CFLOAT', 'complex_double': 'NPY_CDOUBLE', 'complex_long_double': 'NPY_CDOUBLE', # forced casting 'string': 'NPY_CHAR'} # These new maps aren't used anyhere yet, but should be by default # unless building numeric or numarray extensions. if using_newcore: c2capi_map = {'double': 'NPY_DOUBLE', 'float': 'NPY_FLOAT', 'long_double': 'NPY_LONGDOUBLE', 'char': 'NPY_BYTE', 'unsigned_char': 'NPY_UBYTE', 'signed_char': 'NPY_BYTE', 'short': 'NPY_SHORT', 'unsigned_short': 'NPY_USHORT', 'int': 'NPY_INT', 'unsigned': 'NPY_UINT', 'long': 'NPY_LONG', 'unsigned_long': 'NPY_ULONG', 'long_long': 'NPY_LONGLONG', 'unsigned_long_long': 'NPY_ULONGLONG', 'complex_float': 'NPY_CFLOAT', 'complex_double': 'NPY_CDOUBLE', 'complex_long_double': 'NPY_CDOUBLE', # f2py 2e is not ready for NPY_STRING (must set itemisize # etc) 'string': 'NPY_CHAR', #'string':'NPY_STRING' } c2pycode_map = {'double': 'd', 'float': 'f', 'long_double': 'd', # forced casting 'char': '1', 'signed_char': '1', 'unsigned_char': 'b', 'short': 's', 'unsigned_short': 'w', 'int': 'i', 'unsigned': 'u', 'long': 'l', 'long_long': 'L', 'complex_float': 'F', 'complex_double': 'D', 'complex_long_double': 'D', # forced casting 'string': 'c' } if using_newcore: c2pycode_map = {'double': 'd', 'float': 'f', 'long_double': 'g', 'char': 'b', 'unsigned_char': 'B', 'signed_char': 'b', 'short': 'h', 'unsigned_short': 'H', 'int': 'i', 'unsigned': 'I', 'long': 'l', 'unsigned_long': 'L', 'long_long': 'q', 'unsigned_long_long': 'Q', 'complex_float': 'F', 'complex_double': 'D', 'complex_long_double': 'G', 'string': 'S'} c2buildvalue_map = {'double': 'd', 'float': 'f', 'char': 'b', 'signed_char': 'b', 'short': 'h', 'int': 'i', 'long': 'l', 'long_long': 'L', 'complex_float': 'N', 'complex_double': 'N', 'complex_long_double': 'N', 'string': 'z'} if sys.version_info[0] >= 3: # Bytes, not Unicode strings c2buildvalue_map['string'] = 'y' if using_newcore: # c2buildvalue_map=??? pass f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double', '12': 'long_double', '16': 'long_double'}, 'integer': {'': 'int', '1': 'signed_char', '2': 'short', '4': 'int', '8': 'long_long', '-1': 'unsigned_char', '-2': 'unsigned_short', '-4': 'unsigned', '-8': 'unsigned_long_long'}, 'complex': {'': 'complex_float', '8': 'complex_float', '16': 'complex_double', '24': 'complex_long_double', '32': 'complex_long_double'}, 'complexkind': {'': 'complex_float', '4': 'complex_float', '8': 'complex_double', '12': 'complex_long_double', '16': 'complex_long_double'}, 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int', '8': 'long_long'}, 'double complex': {'': 'complex_double'}, 'double precision': {'': 'double'}, 'byte': {'': 'char'}, 'character': {'': 'string'} } if os.path.isfile('.f2py_f2cmap'): # User defined additions to f2cmap_all. # .f2py_f2cmap must contain a dictionary of dictionaries, only. For # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is # interpreted as C 'float'. This feature is useful for F90/95 users if # they use PARAMETERSs in type specifications. try: outmess('Reading .f2py_f2cmap ...\n') f = open('.f2py_f2cmap', 'r') d = eval(f.read(), {}, {}) f.close() for k, d1 in list(d.items()): for k1 in list(d1.keys()): d1[k1.lower()] = d1[k1] d[k.lower()] = d[k] for k in list(d.keys()): if k not in f2cmap_all: f2cmap_all[k] = {} for k1 in list(d[k].keys()): if d[k][k1] in c2py_map: if k1 in f2cmap_all[k]: outmess( "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" % (k, k1, f2cmap_all[k][k1], d[k][k1])) f2cmap_all[k][k1] = d[k][k1] outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k, k1, d[k][k1])) else: errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" % ( k, k1, d[k][k1], d[k][k1], list(c2py_map.keys()))) outmess('Succesfully applied user defined changes from .f2py_f2cmap\n') except Exception as msg: errmess( 'Failed to apply user defined changes from .f2py_f2cmap: %s. Skipping.\n' % (msg)) cformat_map = {'double': '%g', 'float': '%g', 'long_double': '%Lg', 'char': '%d', 'signed_char': '%d', 'unsigned_char': '%hhu', 'short': '%hd', 'unsigned_short': '%hu', 'int': '%d', 'unsigned': '%u', 'long': '%ld', 'unsigned_long': '%lu', 'long_long': '%ld', 'complex_float': '(%g,%g)', 'complex_double': '(%g,%g)', 'complex_long_double': '(%Lg,%Lg)', 'string': '%s', } # Auxiliary functions def getctype(var): """ Determines C type """ ctype = 'void' if isfunction(var): if 'result' in var: a = var['result'] else: a = var['name'] if a in var['vars']: return getctype(var['vars'][a]) else: errmess('getctype: function %s has no return value?!\n' % a) elif issubroutine(var): return ctype elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: typespec = var['typespec'].lower() f2cmap = f2cmap_all[typespec] ctype = f2cmap[''] # default type if 'kindselector' in var: if '*' in var['kindselector']: try: ctype = f2cmap[var['kindselector']['*']] except KeyError: errmess('getctype: "%s %s %s" not supported.\n' % (var['typespec'], '*', var['kindselector']['*'])) elif 'kind' in var['kindselector']: if typespec + 'kind' in f2cmap_all: f2cmap = f2cmap_all[typespec + 'kind'] try: ctype = f2cmap[var['kindselector']['kind']] except KeyError: if typespec in f2cmap_all: f2cmap = f2cmap_all[typespec] try: ctype = f2cmap[str(var['kindselector']['kind'])] except KeyError: errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="<C typespec>")) in %s/.f2py_f2cmap file).\n' % (typespec, var['kindselector']['kind'], ctype, typespec, var['kindselector']['kind'], os.getcwd())) else: if not isexternal(var): errmess( 'getctype: No C-type found in "%s", assuming void.\n' % var) return ctype def getstrlength(var): if isstringfunction(var): if 'result' in var: a = var['result'] else: a = var['name'] if a in var['vars']: return getstrlength(var['vars'][a]) else: errmess('getstrlength: function %s has no return value?!\n' % a) if not isstring(var): errmess( 'getstrlength: expected a signature of a string but got: %s\n' % (repr(var))) len = '1' if 'charselector' in var: a = var['charselector'] if '*' in a: len = a['*'] elif 'len' in a: len = a['len'] if re.match(r'\(\s*([*]|[:])\s*\)', len) or re.match(r'([*]|[:])', len): if isintent_hide(var): errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( repr(var))) len = '-1' return len def getarrdims(a, var, verbose=0): global depargs ret = {} if isstring(var) and not isarray(var): ret['dims'] = getstrlength(var) ret['size'] = ret['dims'] ret['rank'] = '1' elif isscalar(var): ret['size'] = '1' ret['rank'] = '0' ret['dims'] = '' elif isarray(var): dim = copy.copy(var['dimension']) ret['size'] = '*'.join(dim) try: ret['size'] = repr(eval(ret['size'])) except: pass ret['dims'] = ','.join(dim) ret['rank'] = repr(len(dim)) ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1] for i in range(len(dim)): # solve dim for dependecies v = [] if dim[i] in depargs: v = [dim[i]] else: for va in depargs: if re.match(r'.*?\b%s\b.*' % va, dim[i]): v.append(va) for va in v: if depargs.index(va) > depargs.index(a): dim[i] = '*' break ret['setdims'], i = '', -1 for d in dim: i = i + 1 if d not in ['*', ':', '(*)', '(:)']: ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( ret['setdims'], i, d) if ret['setdims']: ret['setdims'] = ret['setdims'][:-1] ret['cbsetdims'], i = '', -1 for d in var['dimension']: i = i + 1 if d not in ['*', ':', '(*)', '(:)']: ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( ret['cbsetdims'], i, d) elif isintent_in(var): outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' % (d)) ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( ret['cbsetdims'], i, 0) elif verbose: errmess( 'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n' % (repr(a), repr(d))) if ret['cbsetdims']: ret['cbsetdims'] = ret['cbsetdims'][:-1] # if not isintent_c(var): # var['dimension'].reverse() return ret def getpydocsign(a, var): global lcb_map if isfunction(var): if 'result' in var: af = var['result'] else: af = var['name'] if af in var['vars']: return getpydocsign(af, var['vars'][af]) else: errmess('getctype: function %s has no return value?!\n' % af) return '', '' sig, sigout = a, a opt = '' if isintent_in(var): opt = 'input' elif isintent_inout(var): opt = 'in/output' out_a = a if isintent_out(var): for k in var['intent']: if k[:4] == 'out=': out_a = k[4:] break init = '' ctype = getctype(var) if hasinitvalue(var): init, showinit = getinit(a, var) init = ', optional\\n Default: %s' % showinit if isscalar(var): if isintent_inout(var): sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], c2pycode_map[ctype], init) else: sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init) sigout = '%s : %s' % (out_a, c2py_map[ctype]) elif isstring(var): if isintent_inout(var): sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % ( a, opt, getstrlength(var), init) else: sig = '%s : %s string(len=%s)%s' % ( a, opt, getstrlength(var), init) sigout = '%s : string(len=%s)' % (out_a, getstrlength(var)) elif isarray(var): dim = var['dimension'] rank = repr(len(dim)) sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, c2pycode_map[ ctype], ','.join(dim), init) if a == out_a: sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ % (a, rank, c2pycode_map[ctype], ','.join(dim)) else: sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) elif isexternal(var): ua = '' if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: ua = lcb2_map[lcb_map[a]]['argname'] if not ua == a: ua = ' => %s' % ua else: ua = '' sig = '%s : call-back function%s' % (a, ua) sigout = sig else: errmess( 'getpydocsign: Could not resolve docsignature for "%s".\\n' % a) return sig, sigout def getarrdocsign(a, var): ctype = getctype(var) if isstring(var) and (not isarray(var)): sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a, getstrlength(var)) elif isscalar(var): sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype], c2pycode_map[ctype],) elif isarray(var): dim = var['dimension'] rank = repr(len(dim)) sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, c2pycode_map[ ctype], ','.join(dim)) return sig def getinit(a, var): if isstring(var): init, showinit = '""', "''" else: init, showinit = '', '' if hasinitvalue(var): init = var['='] showinit = init if iscomplex(var) or iscomplexarray(var): ret = {} try: v = var["="] if ',' in v: ret['init.r'], ret['init.i'] = markoutercomma( v[1:-1]).split('@,@') else: v = eval(v, {}, {}) ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) except: raise ValueError( 'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) if isarray(var): init = '(capi_c.r=%s,capi_c.i=%s,capi_c)' % ( ret['init.r'], ret['init.i']) elif isstring(var): if not init: init, showinit = '""', "''" if init[0] == "'": init = '"%s"' % (init[1:-1].replace('"', '\\"')) if init[0] == '"': showinit = "'%s'" % (init[1:-1]) return init, showinit def sign2map(a, var): """ varname,ctype,atype init,init.r,init.i,pytype vardebuginfo,vardebugshowvalue,varshowvalue varrfromat intent """ global lcb_map, cb_map out_a = a if isintent_out(var): for k in var['intent']: if k[:4] == 'out=': out_a = k[4:] break ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)} intent_flags = [] for f, s in isintent_dict.items(): if f(var): intent_flags.append('F2PY_%s' % s) if intent_flags: # XXX: Evaluate intent_flags here. ret['intent'] = '|'.join(intent_flags) else: ret['intent'] = 'F2PY_INTENT_IN' if isarray(var): ret['varrformat'] = 'N' elif ret['ctype'] in c2buildvalue_map: ret['varrformat'] = c2buildvalue_map[ret['ctype']] else: ret['varrformat'] = 'O' ret['init'], ret['showinit'] = getinit(a, var) if hasinitvalue(var) and iscomplex(var) and not isarray(var): ret['init.r'], ret['init.i'] = markoutercomma( ret['init'][1:-1]).split('@,@') if isexternal(var): ret['cbnamekey'] = a if a in lcb_map: ret['cbname'] = lcb_map[a] ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs'] ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs'] ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr'] ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr'] else: ret['cbname'] = a errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( a, list(lcb_map.keys()))) if isstring(var): ret['length'] = getstrlength(var) if isarray(var): ret = dictappend(ret, getarrdims(a, var)) dim = copy.copy(var['dimension']) if ret['ctype'] in c2capi_map: ret['atype'] = c2capi_map[ret['ctype']] # Debug info if debugcapi(var): il = [isintent_in, 'input', isintent_out, 'output', isintent_inout, 'inoutput', isrequired, 'required', isoptional, 'optional', isintent_hide, 'hidden', iscomplex, 'complex scalar', l_and(isscalar, l_not(iscomplex)), 'scalar', isstring, 'string', isarray, 'array', iscomplexarray, 'complex array', isstringarray, 'string array', iscomplexfunction, 'complex function', l_and(isfunction, l_not(iscomplexfunction)), 'function', isexternal, 'callback', isintent_callback, 'callback', isintent_aux, 'auxiliary', ] rl = [] for i in range(0, len(il), 2): if il[i](var): rl.append(il[i + 1]) if isstring(var): rl.append('slen(%s)=%s' % (a, ret['length'])) if isarray(var): ddim = ','.join( map(lambda x, y: '%s|%s' % (x, y), var['dimension'], dim)) rl.append('dims(%s)' % ddim) if isexternal(var): ret['vardebuginfo'] = 'debug-capi:%s=>%s:%s' % ( a, ret['cbname'], ','.join(rl)) else: ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( ret['ctype'], a, ret['showinit'], ','.join(rl)) if isscalar(var): if ret['ctype'] in cformat_map: ret['vardebugshowvalue'] = 'debug-capi:%s=%s' % ( a, cformat_map[ret['ctype']]) if isstring(var): ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( a, a) if isexternal(var): ret['vardebugshowvalue'] = 'debug-capi:%s=%%p' % (a) if ret['ctype'] in cformat_map: ret['varshowvalue'] = '#name#:%s=%s' % (a, cformat_map[ret['ctype']]) ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) if isstring(var): ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) if hasnote(var): ret['note'] = var['note'] return ret def routsign2map(rout): """ name,NAME,begintitle,endtitle rname,ctype,rformat routdebugshowvalue """ global lcb_map name = rout['name'] fname = getfortranname(rout) ret = {'name': name, 'texname': name.replace('_', '\\_'), 'name_lower': name.lower(), 'NAME': name.upper(), 'begintitle': gentitle(name), 'endtitle': gentitle('end of %s' % name), 'fortranname': fname, 'FORTRANNAME': fname.upper(), 'callstatement': getcallstatement(rout) or '', 'usercode': getusercode(rout) or '', 'usercode1': getusercode1(rout) or '', } if '_' in fname: ret['F_FUNC'] = 'F_FUNC_US' else: ret['F_FUNC'] = 'F_FUNC' if '_' in name: ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' else: ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' lcb_map = {} if 'use' in rout: for u in rout['use'].keys(): if u in cb_rules.cb_map: for un in cb_rules.cb_map[u]: ln = un[0] if 'map' in rout['use'][u]: for k in rout['use'][u]['map'].keys(): if rout['use'][u]['map'][k] == un[0]: ln = k break lcb_map[ln] = un[1] elif 'externals' in rout and rout['externals']: errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( ret['name'], repr(rout['externals']))) ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' if isfunction(rout): if 'result' in rout: a = rout['result'] else: a = rout['name'] ret['rname'] = a ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) ret['ctype'] = getctype(rout['vars'][a]) if hasresultnote(rout): ret['resultnote'] = rout['vars'][a]['note'] rout['vars'][a]['note'] = ['See elsewhere.'] if ret['ctype'] in c2buildvalue_map: ret['rformat'] = c2buildvalue_map[ret['ctype']] else: ret['rformat'] = 'O' errmess('routsign2map: no c2buildvalue key for type %s\n' % (repr(ret['ctype']))) if debugcapi(rout): if ret['ctype'] in cformat_map: ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( a, cformat_map[ret['ctype']]) if isstringfunction(rout): ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( a, a) if isstringfunction(rout): ret['rlength'] = getstrlength(rout['vars'][a]) if ret['rlength'] == '-1': errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( repr(rout['name']))) ret['rlength'] = '10' if hasnote(rout): ret['note'] = rout['note'] rout['note'] = ['See elsewhere.'] return ret def modsign2map(m): """ modulename """ if ismodule(m): ret = {'f90modulename': m['name'], 'F90MODULENAME': m['name'].upper(), 'texf90modulename': m['name'].replace('_', '\\_')} else: ret = {'modulename': m['name'], 'MODULENAME': m['name'].upper(), 'texmodulename': m['name'].replace('_', '\\_')} ret['restdoc'] = getrestdoc(m) or [] if hasnote(m): ret['note'] = m['note'] ret['usercode'] = getusercode(m) or '' ret['usercode1'] = getusercode1(m) or '' if m['body']: ret['interface_usercode'] = getusercode(m['body'][0]) or '' else: ret['interface_usercode'] = '' ret['pymethoddef'] = getpymethoddef(m) or '' if 'coutput' in m: ret['coutput'] = m['coutput'] if 'f2py_wrapper_output' in m: ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] return ret def cb_sign2map(a, var, index=None): ret = {'varname': a} if index is None or 1: # disable 7712 patch ret['varname_i'] = ret['varname'] else: ret['varname_i'] = ret['varname'] + '_' + str(index) ret['ctype'] = getctype(var) if ret['ctype'] in c2capi_map: ret['atype'] = c2capi_map[ret['ctype']] if ret['ctype'] in cformat_map: ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) if isarray(var): ret = dictappend(ret, getarrdims(a, var)) ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) if hasnote(var): ret['note'] = var['note'] var['note'] = ['See elsewhere.'] return ret def cb_routsign2map(rout, um): """ name,begintitle,endtitle,argname ctype,rctype,maxnofargs,nofoptargs,returncptr """ ret = {'name': 'cb_%s_in_%s' % (rout['name'], um), 'returncptr': ''} if isintent_callback(rout): if '_' in rout['name']: F_FUNC = 'F_FUNC_US' else: F_FUNC = 'F_FUNC' ret['callbackname'] = '%s(%s,%s)' \ % (F_FUNC, rout['name'].lower(), rout['name'].upper(), ) ret['static'] = 'extern' else: ret['callbackname'] = ret['name'] ret['static'] = 'static' ret['argname'] = rout['name'] ret['begintitle'] = gentitle(ret['name']) ret['endtitle'] = gentitle('end of %s' % ret['name']) ret['ctype'] = getctype(rout) ret['rctype'] = 'void' if ret['ctype'] == 'string': ret['rctype'] = 'void' else: ret['rctype'] = ret['ctype'] if ret['rctype'] != 'void': if iscomplexfunction(rout): ret['returncptr'] = """ #ifdef F2PY_CB_RETURNCOMPLEX return_value= #endif """ else: ret['returncptr'] = 'return_value=' if ret['ctype'] in cformat_map: ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) if isstringfunction(rout): ret['strlength'] = getstrlength(rout) if isfunction(rout): if 'result' in rout: a = rout['result'] else: a = rout['name'] if hasnote(rout['vars'][a]): ret['note'] = rout['vars'][a]['note'] rout['vars'][a]['note'] = ['See elsewhere.'] ret['rname'] = a ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) if iscomplexfunction(rout): ret['rctype'] = """ #ifdef F2PY_CB_RETURNCOMPLEX #ctype# #else void #endif """ else: if hasnote(rout): ret['note'] = rout['note'] rout['note'] = ['See elsewhere.'] nofargs = 0 nofoptargs = 0 if 'args' in rout and 'vars' in rout: for a in rout['args']: var = rout['vars'][a] if l_or(isintent_in, isintent_inout)(var): nofargs = nofargs + 1 if isoptional(var): nofoptargs = nofoptargs + 1 ret['maxnofargs'] = repr(nofargs) ret['nofoptargs'] = repr(nofoptargs) if hasnote(rout) and isfunction(rout) and 'result' in rout: ret['routnote'] = rout['note'] rout['note'] = ['See elsewhere.'] return ret def common_sign2map(a, var): # obsolute ret = {'varname': a, 'ctype': getctype(var)} if isstringarray(var): ret['ctype'] = 'char' if ret['ctype'] in c2capi_map: ret['atype'] = c2capi_map[ret['ctype']] if ret['ctype'] in cformat_map: ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']]) if isarray(var): ret = dictappend(ret, getarrdims(a, var)) elif isstring(var): ret['size'] = getstrlength(var) ret['rank'] = '1' ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) if hasnote(var): ret['note'] = var['note'] var['note'] = ['See elsewhere.'] # for strings this returns 0-rank but actually is 1-rank ret['arrdocstr'] = getarrdocsign(a, var) return ret
fluxcapacitor/pipeline
refs/heads/master
libs/pipeline_model/tensorflow/core/framework/tensor_slice_pb2.py
1
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: tensorflow/core/framework/tensor_slice.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='tensorflow/core/framework/tensor_slice.proto', package='tensorflow', syntax='proto3', serialized_pb=_b('\n,tensorflow/core/framework/tensor_slice.proto\x12\ntensorflow\"\x80\x01\n\x10TensorSliceProto\x12\x33\n\x06\x65xtent\x18\x01 \x03(\x0b\x32#.tensorflow.TensorSliceProto.Extent\x1a\x37\n\x06\x45xtent\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x10\n\x06length\x18\x02 \x01(\x03H\x00\x42\x0c\n\nhas_lengthB2\n\x18org.tensorflow.frameworkB\x11TensorSliceProtosP\x01\xf8\x01\x01\x62\x06proto3') ) _TENSORSLICEPROTO_EXTENT = _descriptor.Descriptor( name='Extent', full_name='tensorflow.TensorSliceProto.Extent', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='start', full_name='tensorflow.TensorSliceProto.Extent.start', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='length', full_name='tensorflow.TensorSliceProto.Extent.length', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='has_length', full_name='tensorflow.TensorSliceProto.Extent.has_length', index=0, containing_type=None, fields=[]), ], serialized_start=134, serialized_end=189, ) _TENSORSLICEPROTO = _descriptor.Descriptor( name='TensorSliceProto', full_name='tensorflow.TensorSliceProto', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='extent', full_name='tensorflow.TensorSliceProto.extent', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_TENSORSLICEPROTO_EXTENT, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=61, serialized_end=189, ) _TENSORSLICEPROTO_EXTENT.containing_type = _TENSORSLICEPROTO _TENSORSLICEPROTO_EXTENT.oneofs_by_name['has_length'].fields.append( _TENSORSLICEPROTO_EXTENT.fields_by_name['length']) _TENSORSLICEPROTO_EXTENT.fields_by_name['length'].containing_oneof = _TENSORSLICEPROTO_EXTENT.oneofs_by_name['has_length'] _TENSORSLICEPROTO.fields_by_name['extent'].message_type = _TENSORSLICEPROTO_EXTENT DESCRIPTOR.message_types_by_name['TensorSliceProto'] = _TENSORSLICEPROTO _sym_db.RegisterFileDescriptor(DESCRIPTOR) TensorSliceProto = _reflection.GeneratedProtocolMessageType('TensorSliceProto', (_message.Message,), dict( Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), dict( DESCRIPTOR = _TENSORSLICEPROTO_EXTENT, __module__ = 'tensorflow.core.framework.tensor_slice_pb2' # @@protoc_insertion_point(class_scope:tensorflow.TensorSliceProto.Extent) )) , DESCRIPTOR = _TENSORSLICEPROTO, __module__ = 'tensorflow.core.framework.tensor_slice_pb2' # @@protoc_insertion_point(class_scope:tensorflow.TensorSliceProto) )) _sym_db.RegisterMessage(TensorSliceProto) _sym_db.RegisterMessage(TensorSliceProto.Extent) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\021TensorSliceProtosP\001\370\001\001')) try: # THESE ELEMENTS WILL BE DEPRECATED. # Please use the generated *_pb2_grpc.py files instead. import grpc from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities except ImportError: pass # @@protoc_insertion_point(module_scope)
m4rx9/rna-pdb-tools
refs/heads/master
versioneer.py
386
# Version: 0.18 """The Versioneer - like a rocketeer, but for versions. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy * [![Latest Version] (https://pypip.in/version/versioneer/badge.svg?style=flat) ](https://pypi.python.org/pypi/versioneer/) * [![Build Status] (https://travis-ci.org/warner/python-versioneer.png?branch=master) ](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere to your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes. The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github [issues page](https://github.com/warner/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other langauges) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/warner/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and test) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install happens with one version, then the egg_info data is regenerated while a different version is checked out. Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/warner/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. ### Unicode version strings While Versioneer works (and is continually tested) with both Python 2 and Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. Newer releases probably generate unicode version strings on py2. It's not clear that this is wrong, but it may be surprising for applications when then write these strings to a network connection or include them in bytes-oriented APIs like cryptographic checksums. [Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates this question. ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the Creative Commons "Public Domain Dedication" license (CC0-1.0), as described in https://creativecommons.org/publicdomain/zero/1.0/ . """ from __future__ import print_function try: import configparser except ImportError: import ConfigParser as configparser import errno import json import os import re import subprocess import sys class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_root(): """Get the project root directory. We require that all commands are run from the project root, i.e. the directory that contains setup.py, setup.cfg, and versioneer.py . """ root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. me = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(me)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(me), versioneer_py)) except NameError: pass return root def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" # This might raise EnvironmentError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() with open(setup_cfg, "r") as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" cfg.versionfile_source = get(parser, "versionfile_source") cfg.versionfile_build = get(parser, "versionfile_build") cfg.tag_prefix = get(parser, "tag_prefix") if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = "" cfg.parentdir_prefix = get(parser, "parentdir_prefix") cfg.verbose = get(parser, "verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode LONG_VERSION_PY['git'] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%%s*" %% tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%%d" %% pieces["distance"] else: # exception #1 rendered = "0.post.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = open(".gitattributes", "r") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = open(".gitattributes", "a+") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.18) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. import json version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" def get_versions(verbose=False): """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. """ if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. get_keywords_f = handlers.get("get_keywords") from_keywords_f = handlers.get("keywords") if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) if verbose: print("got version from expanded keyword %s" % ver) return ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if verbose: print("got version from file %s %s" % (versionfile_abs, ver)) return ver except NotThisMethod: pass from_vcs_f = handlers.get("pieces_from_vcs") if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, root, verbose) ver = render(pieces, cfg.style) if verbose: print("got version from VCS %s" % ver) return ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) if verbose: print("got version from parentdir %s" % ver) return ver except NotThisMethod: pass if verbose: print("unable to compute version") return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} def get_version(): """Get the short version string for this project.""" return get_versions()["version"] def get_cmdclass(): """Get the custom setuptools/distutils subclasses used by Versioneer.""" if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and # 'easy_install .'), in which subdependencies of the main project are # built (using setup.py bdist_egg) in the same python process. Assume # a main project A and a dependency B, which use different versions # of Versioneer. A's setup.py imports A's Versioneer, leaving it in # sys.modules by the time B's setup.py is executed, causing B to run # with the wrong versioneer. Setuptools wraps the sub-dep builds in a # sandbox that restores sys.modules to it's pre-build state, so the # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. # Also see https://github.com/warner/python-versioneer/issues/52 cmds = {} # we add "version" to both distutils and setuptools from distutils.core import Command class cmd_version(Command): description = "report generated version string" user_options = [] boolean_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) print(" dirty: %s" % vers.get("dirty")) print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py # distutils/install -> distutils/build ->.. # setuptools/bdist_wheel -> distutils/install ->.. # setuptools/bdist_egg -> distutils/install_lib -> build_py # setuptools/install -> bdist_egg ->.. # setuptools/develop -> ? # pip install: # copies source tree to a tempdir before running egg_info/etc # if .git isn't copied too, 'git describe' will fail # then does setup.py bdist_wheel, or sometimes setup.py install # setup.py egg_info -> ? # we override different "build_py" commands for both environments if "setuptools" in sys.modules: from setuptools.command.build_py import build_py as _build_py else: from distutils.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION # "product_version": versioneer.get_version(), # ... class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] if 'py2exe' in sys.modules: # py2exe enabled? try: from py2exe.distutils_buildexe import py2exe as _py2exe # py3 except ImportError: from py2exe.build_exe import py2exe as _py2exe # py2 class cmd_py2exe(_py2exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _py2exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments if "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist else: from distutils.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self): versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer configuration. You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. """ SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. [versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ INIT_PY_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ def do_setup(): """Main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except EnvironmentError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(INIT_PY_SNIPPET) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) ipy = None # Make sure both the top-level "versioneer.py" and versionfile_source # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so # they'll be copied into source distributions. Pip won't be able to # install the package without this. manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: with open(manifest_in, "r") as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) except EnvironmentError: pass # That doesn't cover everything MANIFEST.in can do # (http://docs.python.org/2/distutils/sourcedist.html#commands), so # it might give some false negatives. Appending redundant 'include' # lines is safe, though. if "versioneer.py" not in simple_includes: print(" appending 'versioneer.py' to MANIFEST.in") with open(manifest_in, "a") as f: f.write("include versioneer.py\n") else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: print(" versionfile_source already in MANIFEST.in") # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. do_vcs_install(manifest_in, cfg.versionfile_source, ipy) return 0 def scan_setup_py(): """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": errors = do_setup() errors += scan_setup_py() if errors: sys.exit(1)
GNOME/orca
refs/heads/master
test/keystrokes/firefox/line_nav_roledescriptions.py
1
#!/usr/bin/python from macaroon.playback import * import utils sequence = MacroSequence() #sequence.append(WaitForDocLoad()) sequence.append(PauseAction(5000)) # Work around some new quirk in Gecko that causes this test to fail if # run via the test harness rather than manually. sequence.append(KeyComboAction("<Control>r")) sequence.append(KeyComboAction("<Control>Home")) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("Down")) sequence.append(utils.AssertPresentationAction( "1. Line Down", ["BRAILLE LINE: 'Focus me 1'", " VISIBLE: 'Focus me 1', cursor=1", "SPEECH OUTPUT: 'Focus me 1'"])) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("Down")) sequence.append(utils.AssertPresentationAction( "2. Line Down", ["BRAILLE LINE: 'Focus me 2'", " VISIBLE: 'Focus me 2', cursor=1", "SPEECH OUTPUT: 'Focus me 2 kill switch'"])) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("Down")) sequence.append(utils.AssertPresentationAction( "3. Line Down", ["BRAILLE LINE: 'Focus me 3 push button'", " VISIBLE: 'Focus me 3 push button', cursor=1", "SPEECH OUTPUT: 'Focus me 3 push button'"])) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("Down")) sequence.append(utils.AssertPresentationAction( "4. Line Down", ["BRAILLE LINE: 'Focus me 4 kill switch'", " VISIBLE: 'Focus me 4 kill switch', cursor=1", "SPEECH OUTPUT: 'Focus me 4 kill switch'"])) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("Down")) sequence.append(utils.AssertPresentationAction( "5. Line Down", ["BRAILLE LINE: 'Focus me 5 push button Focus me 6 kill switch'", " VISIBLE: 'Focus me 5 push button Focus me ', cursor=1", "SPEECH OUTPUT: 'Focus me 5 push button'", "SPEECH OUTPUT: 'Focus me 6 kill switch'"])) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("Down")) sequence.append(utils.AssertPresentationAction( "6. Line Down", ["BRAILLE LINE: 'Here are some slides'", " VISIBLE: 'Here are some slides', cursor=1", "SPEECH OUTPUT: 'Presentation slide set'", "SPEECH OUTPUT: 'Here are some slides'"])) sequence.append(utils.AssertionSummaryAction()) sequence.start()
lekum/ansible
refs/heads/devel
contrib/inventory/zabbix.py
119
#!/usr/bin/env python # (c) 2013, Greg Buehler # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ###################################################################### """ Zabbix Server external inventory script. ======================================== Returns hosts and hostgroups from Zabbix Server. Configuration is read from `zabbix.ini`. Tested with Zabbix Server 2.0.6. """ import os, sys import argparse import ConfigParser try: from zabbix_api import ZabbixAPI except: print >> sys.stderr, "Error: Zabbix API library must be installed: pip install zabbix-api." sys.exit(1) try: import json except: import simplejson as json class ZabbixInventory(object): def read_settings(self): config = ConfigParser.SafeConfigParser() config.read(os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini') # server if config.has_option('zabbix', 'server'): self.zabbix_server = config.get('zabbix', 'server') # login if config.has_option('zabbix', 'username'): self.zabbix_username = config.get('zabbix', 'username') if config.has_option('zabbix', 'password'): self.zabbix_password = config.get('zabbix', 'password') def read_cli(self): parser = argparse.ArgumentParser() parser.add_argument('--host') parser.add_argument('--list', action='store_true') self.options = parser.parse_args() def hoststub(self): return { 'hosts': [] } def get_host(self, api, name): data = {} return data def get_list(self, api): hostsData = api.host.get({'output': 'extend', 'selectGroups': 'extend'}) data = {} data[self.defaultgroup] = self.hoststub() for host in hostsData: hostname = host['name'] data[self.defaultgroup]['hosts'].append(hostname) for group in host['groups']: groupname = group['name'] if not groupname in data: data[groupname] = self.hoststub() data[groupname]['hosts'].append(hostname) return data def __init__(self): self.defaultgroup = 'group_all' self.zabbix_server = None self.zabbix_username = None self.zabbix_password = None self.read_settings() self.read_cli() if self.zabbix_server and self.zabbix_username: try: api = ZabbixAPI(server=self.zabbix_server) api.login(user=self.zabbix_username, password=self.zabbix_password) except BaseException, e: print >> sys.stderr, "Error: Could not login to Zabbix server. Check your zabbix.ini." sys.exit(1) if self.options.host: data = self.get_host(api, self.options.host) print json.dumps(data, indent=2) elif self.options.list: data = self.get_list(api) print json.dumps(data, indent=2) else: print >> sys.stderr, "usage: --list ..OR.. --host <hostname>" sys.exit(1) else: print >> sys.stderr, "Error: Configuration of server and credentials are required. See zabbix.ini." sys.exit(1) ZabbixInventory()
kbidarkar/robottelo
refs/heads/master
robottelo/ui/globalparameters.py
2
# -*- encoding: utf-8 -*- """Implements Global Parameters .""" from robottelo.ui.base import Base from robottelo.ui.locators import common_locators, locators from robottelo.ui.navigator import Navigator class GlobalParameters(Base): """Provides the CRUD functionality for Global Parameters.""" def navigate_to_entity(self): """Navigate to Global Parameters entity page""" Navigator(self.browser).go_to_global_parameters() def _search_locator(self): """Specify locator for Global Parameters entity search procedure""" return locators['globalparameters.select'] def create(self, name, value=None, hidden_value=None): """Creates a Global Parameter""" self.click(locators['globalparameters.new']) self.assign_value(locators['globalparameters.name'], name) if value is not None: self.assign_value(locators['globalparameters.value'], value) if hidden_value is not None: self.assign_value( locators['globalparameters.hidden_value'], hidden_value) self.click(common_locators['submit']) def update(self, name, new_name=None, value=None, hidden_value=None): """Updates a Global Parameter""" self.search_and_click(name) if new_name is not None: self.assign_value(locators['globalparameters.name'], new_name) if value is not None: self.assign_value(locators['globalparameters.value'], value) if hidden_value is not None: self.assign_value( locators['globalparameters.hidden_value'], hidden_value) self.click(common_locators['submit'])
nugget/home-assistant
refs/heads/dev
homeassistant/components/gpslogger/device_tracker.py
2
"""Support for the GPSLogger device tracking.""" import logging from homeassistant.components.device_tracker import DOMAIN as \ DEVICE_TRACKER_DOMAIN from homeassistant.components.gpslogger import DOMAIN as GPSLOGGER_DOMAIN, \ TRACKER_UPDATE from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.helpers.typing import HomeAssistantType _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['gpslogger'] DATA_KEY = '{}.{}'.format(GPSLOGGER_DOMAIN, DEVICE_TRACKER_DOMAIN) async def async_setup_entry(hass: HomeAssistantType, entry, async_see): """Configure a dispatcher connection based on a config entry.""" async def _set_location(device, gps_location, battery, accuracy, attrs): """Fire HA event to set location.""" await async_see( dev_id=device, gps=gps_location, battery=battery, gps_accuracy=accuracy, attributes=attrs ) hass.data[DATA_KEY] = async_dispatcher_connect( hass, TRACKER_UPDATE, _set_location ) return True async def async_unload_entry(hass: HomeAssistantType, entry): """Unload the config entry and remove the dispatcher connection.""" hass.data[DATA_KEY]() return True
ShangtongZhang/DeepRL
refs/heads/master
deep_rl/agent/DQN_agent.py
1
####################################################################### # Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) # # Permission given to modify the code as long as you keep this # # declaration at the top # ####################################################################### from ..network import * from ..component import * from ..utils import * import time from .BaseAgent import * class DQNActor(BaseActor): def __init__(self, config): BaseActor.__init__(self, config) self.config = config self.start() def compute_q(self, prediction): q_values = to_np(prediction['q']) return q_values def _transition(self): if self._state is None: self._state = self._task.reset() config = self.config if config.noisy_linear: self._network.reset_noise() with config.lock: prediction = self._network(config.state_normalizer(self._state)) q_values = self.compute_q(prediction) if config.noisy_linear: epsilon = 0 elif self._total_steps < config.exploration_steps: epsilon = 1 else: epsilon = config.random_action_prob() action = epsilon_greedy(epsilon, q_values) next_state, reward, done, info = self._task.step(action) entry = [self._state, action, reward, next_state, done, info] self._total_steps += 1 self._state = next_state return entry class DQNAgent(BaseAgent): def __init__(self, config): BaseAgent.__init__(self, config) self.config = config config.lock = mp.Lock() self.replay = config.replay_fn() self.actor = DQNActor(config) self.network = config.network_fn() self.network.share_memory() self.target_network = config.network_fn() self.target_network.load_state_dict(self.network.state_dict()) self.optimizer = config.optimizer_fn(self.network.parameters()) self.actor.set_network(self.network) self.total_steps = 0 def close(self): close_obj(self.replay) close_obj(self.actor) def eval_step(self, state): self.config.state_normalizer.set_read_only() state = self.config.state_normalizer(state) q = self.network(state)['q'] action = to_np(q.argmax(-1)) self.config.state_normalizer.unset_read_only() return action def reduce_loss(self, loss): return loss.pow(2).mul(0.5).mean() def compute_loss(self, transitions): config = self.config states = self.config.state_normalizer(transitions.state) next_states = self.config.state_normalizer(transitions.next_state) with torch.no_grad(): q_next = self.target_network(next_states)['q'].detach() if self.config.double_q: best_actions = torch.argmax(self.network(next_states)['q'], dim=-1) q_next = q_next.gather(1, best_actions.unsqueeze(-1)).squeeze(1) else: q_next = q_next.max(1)[0] masks = tensor(transitions.mask) rewards = tensor(transitions.reward) q_target = rewards + self.config.discount ** config.n_step * q_next * masks actions = tensor(transitions.action).long() q = self.network(states)['q'] q = q.gather(1, actions.unsqueeze(-1)).squeeze(-1) loss = q_target - q return loss def step(self): config = self.config transitions = self.actor.step() for states, actions, rewards, next_states, dones, info in transitions: self.record_online_return(info) self.total_steps += 1 self.replay.feed(dict( state=np.array([s[-1] if isinstance(s, LazyFrames) else s for s in states]), action=actions, reward=[config.reward_normalizer(r) for r in rewards], mask=1 - np.asarray(dones, dtype=np.int32), )) if self.total_steps > self.config.exploration_steps: transitions = self.replay.sample() if config.noisy_linear: self.target_network.reset_noise() self.network.reset_noise() loss = self.compute_loss(transitions) if isinstance(transitions, PrioritizedTransition): priorities = loss.abs().add(config.replay_eps).pow(config.replay_alpha) idxs = tensor(transitions.idx).long() self.replay.update_priorities(zip(to_np(idxs), to_np(priorities))) sampling_probs = tensor(transitions.sampling_prob) weights = sampling_probs.mul(sampling_probs.size(0)).add(1e-6).pow(-config.replay_beta()) weights = weights / weights.max() loss = loss.mul(weights) loss = self.reduce_loss(loss) self.optimizer.zero_grad() loss.backward() nn.utils.clip_grad_norm_(self.network.parameters(), self.config.gradient_clip) with config.lock: self.optimizer.step() if self.total_steps / self.config.sgd_update_frequency % \ self.config.target_network_update_freq == 0: self.target_network.load_state_dict(self.network.state_dict())
da1z/intellij-community
refs/heads/master
python/testData/stubs/ImportedNamedTupleFields.py
38
from collections import namedtuple from b import fields nt = namedtuple("name", fields)
angelblue05/Embytest.Kodi
refs/heads/master
resources/lib/mutagen/__init__.py
5
# -*- coding: utf-8 -*- # Copyright (C) 2005 Michael Urman # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. """Mutagen aims to be an all purpose multimedia tagging library. :: import mutagen.[format] metadata = mutagen.[format].Open(filename) `metadata` acts like a dictionary of tags in the file. Tags are generally a list of string-like values, but may have additional methods available depending on tag or format. They may also be entirely different objects for certain keys, again depending on format. """ from mutagen._util import MutagenError from mutagen._file import FileType, StreamInfo, File from mutagen._tags import Metadata, PaddingInfo version = (1, 31) """Version tuple.""" version_string = ".".join(map(str, version)) """Version string.""" MutagenError FileType StreamInfo File Metadata PaddingInfo
luisgg/iteexe
refs/heads/master
twisted/internet/tksupport.py
20
# Copyright (c) 2001-2004 Twisted Matrix Laboratories. # See LICENSE for details. """This module integrates Tkinter with twisted.internet's mainloop. API Stability: semi-stable Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>} To use, do:: | tksupport.install(rootWidget) and then run your reactor as usual - do *not* call Tk's mainloop(), use Twisted's regular mechanism for running the event loop. Likewise, to stop your program you will need to stop Twisted's event loop. For example, if you want closing your root widget to stop Twisted:: | root.protocol('WM_DELETE_WINDOW', reactor.stop) """ # system imports import Tkinter, tkSimpleDialog, tkMessageBox # twisted imports from twisted.python import log from twisted.internet import task _task = None def install(widget, ms=10, reactor=None): """Install a Tkinter.Tk() object into the reactor.""" installTkFunctions() global _task _task = task.LoopingCall(widget.update) _task.start(ms / 1000.0, False) def uninstall(): """Remove the root Tk widget from the reactor. Call this before destroy()ing the root widget. """ global _task _task.stop() _task = None def installTkFunctions(): import twisted.python.util twisted.python.util.getPassword = getPassword def getPassword(prompt = '', confirm = 0): while 1: try1 = tkSimpleDialog.askstring('Password Dialog', prompt, show='*') if not confirm: return try1 try2 = tkSimpleDialog.askstring('Password Dialog', 'Confirm Password', show='*') if try1 == try2: return try1 else: tkMessageBox.showerror('Password Mismatch', 'Passwords did not match, starting over') __all__ = ["install", "uninstall"]
SteveDiamond/cvxpy
refs/heads/master
cvxpy/reductions/solvers/qp_solvers/cplex_qpif.py
1
import cvxpy.settings as s import cvxpy.interface as intf from cvxpy.reductions import Solution from cvxpy.reductions.solvers.qp_solvers.qp_solver import QpSolver from cvxpy.reductions.solvers.conic_solvers.cplex_conif import ( get_status, hide_solver_output, set_parameters ) import numpy as np def constrain_cplex_infty(v): ''' Limit values of vector v between +/- infinity as defined in the CPLEX package ''' import cplex as cpx n = len(v) for i in range(n): if v[i] >= cpx.infinity: v[i] = cpx.infinity if v[i] <= -cpx.infinity: v[i] = -cpx.infinity class CPLEX(QpSolver): """QP interface for the CPLEX solver""" MIP_CAPABLE = True def name(self): return s.CPLEX def import_solver(self): import cplex cplex def invert(self, results, inverse_data): model = results["model"] attr = {} if "cputime" in results: attr[s.SOLVE_TIME] = results["cputime"] attr[s.NUM_ITERS] = \ int(model.solution.progress.get_num_barrier_iterations()) \ if not inverse_data[CPLEX.IS_MIP] \ else 0 status = get_status(model) if status in s.SOLUTION_PRESENT: # Get objective value opt_val = model.solution.get_objective_value() + \ inverse_data[s.OFFSET] # Get solution x = np.array(model.solution.get_values()) primal_vars = { CPLEX.VAR_ID: intf.DEFAULT_INTF.const_to_matrix(np.array(x)) } # Only add duals if not a MIP. dual_vars = None if not inverse_data[CPLEX.IS_MIP]: y = -np.array(model.solution.get_dual_values()) dual_vars = {CPLEX.DUAL_VAR_ID: y} else: primal_vars = None dual_vars = None opt_val = np.inf if status == s.UNBOUNDED: opt_val = -np.inf return Solution(status, opt_val, primal_vars, dual_vars, attr) def solve_via_data(self, data, warm_start, verbose, solver_opts, solver_cache=None): import cplex as cpx P = data[s.P].tocsr() # Convert matrix to csr format q = data[s.Q] A = data[s.A].tocsr() # Convert A matrix to csr format b = data[s.B] F = data[s.F].tocsr() # Convert F matrix to csr format g = data[s.G] n_var = data['n_var'] n_eq = data['n_eq'] n_ineq = data['n_ineq'] # Constrain values between bounds constrain_cplex_infty(b) constrain_cplex_infty(g) # Define CPLEX problem model = cpx.Cplex() # Minimize problem model.objective.set_sense(model.objective.sense.minimize) # Add variables and linear objective var_idx = list(model.variables.add(obj=q, lb=-cpx.infinity*np.ones(n_var), ub=cpx.infinity*np.ones(n_var))) # Constrain binary/integer variables if present for i in data[s.BOOL_IDX]: model.variables.set_types(var_idx[i], model.variables.type.binary) for i in data[s.INT_IDX]: model.variables.set_types(var_idx[i], model.variables.type.integer) # Add constraints lin_expr, rhs = [], [] for i in range(n_eq): # Add equalities start = A.indptr[i] end = A.indptr[i+1] lin_expr.append([A.indices[start:end].tolist(), A.data[start:end].tolist()]) rhs.append(b[i]) if lin_expr: model.linear_constraints.add(lin_expr=lin_expr, senses=["E"] * len(lin_expr), rhs=rhs) lin_expr, rhs = [], [] for i in range(n_ineq): # Add inequalities start = F.indptr[i] end = F.indptr[i+1] lin_expr.append([F.indices[start:end].tolist(), F.data[start:end].tolist()]) rhs.append(g[i]) if lin_expr: model.linear_constraints.add(lin_expr=lin_expr, senses=["L"] * len(lin_expr), rhs=rhs) # Set quadratic Cost if P.count_nonzero(): # Only if quadratic form is not null qmat = [] for i in range(n_var): start = P.indptr[i] end = P.indptr[i+1] qmat.append([P.indices[start:end].tolist(), P.data[start:end].tolist()]) model.objective.set_quadratic(qmat) # Set verbosity if not verbose: hide_solver_output(model) # Set parameters set_parameters(model, solver_opts) # Solve problem results_dict = {} try: start = model.get_time() model.solve() end = model.get_time() results_dict["cputime"] = end - start except Exception: # Error in the solution results_dict["status"] = s.SOLVER_ERROR results_dict["model"] = model return results_dict
umitproject/tease-o-matic
refs/heads/master
django/contrib/flatpages/models.py
410
from django.db import models from django.contrib.sites.models import Site from django.utils.translation import ugettext_lazy as _ class FlatPage(models.Model): url = models.CharField(_('URL'), max_length=100, db_index=True) title = models.CharField(_('title'), max_length=200) content = models.TextField(_('content'), blank=True) enable_comments = models.BooleanField(_('enable comments')) template_name = models.CharField(_('template name'), max_length=70, blank=True, help_text=_("Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use 'flatpages/default.html'.")) registration_required = models.BooleanField(_('registration required'), help_text=_("If this is checked, only logged-in users will be able to view the page.")) sites = models.ManyToManyField(Site) class Meta: db_table = 'django_flatpage' verbose_name = _('flat page') verbose_name_plural = _('flat pages') ordering = ('url',) def __unicode__(self): return u"%s -- %s" % (self.url, self.title) def get_absolute_url(self): return self.url
jondo/shogun
refs/heads/develop
examples/undocumented/python_modular/tools/load.py
39
#!/usr/bin/env python from numpy import double, fromfile, loadtxt class LoadMatrix: def __init__(self): pass def load_numbers(self, filename): return loadtxt(filename).T def load_dna(self, filename): fh=open(filename, 'r'); matrix=[] for line in fh: matrix.append(line[:-1]) return matrix def load_cubes(self, filename): fh=open(filename, 'r'); matrix=[] for line in fh: matrix.append(line.split(' ')[0][:-1]) fh.close() return matrix def load_labels(self, filename): return fromfile(filename, dtype=double, sep=' ')
Ramalus/herovoices
refs/heads/master
.buildozer/venv/lib/python2.7/site-packages/setuptools/version.py
20
__version__ = '17.0'
robinfourdeux/check-linux-by-ssh
refs/heads/master
check_load_average_by_ssh.py
4
#!/usr/bin/env python2 # Copyright (C) 2013: # Gabes Jean, naparuba@gmail.com # Pasche Sebastien, sebastien.pasche@leshop.ch # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # ''' This script is a check for lookup at load average over ssh without having an agent on the other side ''' import os import sys import optparse # Ok try to load our directory to load the plugin utils. my_dir = os.path.dirname(__file__) sys.path.insert(0, my_dir) try: import schecks except ImportError: print "ERROR : this plugin needs the local schecks.py lib. Please install it" sys.exit(2) VERSION = "0.1" DEFAULT_WARNING = '1,1,1' DEFAULT_CRITICAL = '2,2,2' def get_load(client): # We are looking for a line like #0.19 0.17 0.15 1/616 3634 4 # l1 l5 l15 _ _ nb_cpus raw = r"""echo "$(cat /proc/loadavg) $(grep -E '^CPU|^processor' < /proc/cpuinfo | wc -l)" """ stdin, stdout, stderr = client.exec_command(raw) line = [l for l in stdout][0].strip() load1, load5, load15, _, _, nb_cpus = (line.split(' ')) load1 = float(load1) load5 = float(load5) load15 = float(load15) nb_cpus = int(nb_cpus) # Before return, close the client client.close() return load1, load5, load15, nb_cpus parser = optparse.OptionParser( "%prog [options]", version="%prog " + VERSION) parser.add_option('-H', '--hostname', dest="hostname", help='Hostname to connect to') parser.add_option('-p', '--port', dest="port", type="int", default=22, help='SSH port to connect to. Default : 22') parser.add_option('-i', '--ssh-key', dest="ssh_key_file", help='SSH key file to use. By default will take ~/.ssh/id_rsa.') parser.add_option('-u', '--user', dest="user", help='remote use to use. By default shinken.') parser.add_option('-P', '--passphrase', dest="passphrase", help='SSH key passphrase. By default will use void') parser.add_option('-w', '--warning', dest="warning", help='Warning value for load average, as 3 values, for 1m,5m,15m. Default : 1,1,1') parser.add_option('-c', '--critical', dest="critical", help='Critical value for load average, as 3 values, for 1m,5m,15m. Default : 2,2,2') parser.add_option('-C', '--cpu-based', action='store_true', dest="cpu_based", help='Set the warning/critical number of cpu based values. For example ' '1,1,1 will warn if the load if over the number of CPUs. ' 'Default : False') if __name__ == '__main__': # Ok first job : parse args opts, args = parser.parse_args() if args: parser.error("Does not accept any argument.") port = opts.port hostname = opts.hostname or '' ssh_key_file = opts.ssh_key_file or os.path.expanduser('~/.ssh/id_rsa') user = opts.user or 'shinken' passphrase = opts.passphrase or '' # Try to get numeic warning/critical values s_warning = opts.warning or DEFAULT_WARNING s_critical = opts.critical or DEFAULT_CRITICAL # For warning/critical : or we got a int triplet, or a float*nb_cpu values cpu_based = opts.cpu_based or False if s_warning.count(',') != 2 or s_critical.count(',') != 2: print "Error: warning and/or critical values do not match type. Please fix it (-w and -c)" sys.exit(2) warning = [float(v) for v in s_warning.split(',')] critical = [float(v) for v in s_critical.split(',')] # Ok now connect, and try to get values for memory client = schecks.connect(hostname, port, ssh_key_file, passphrase, user) load1, load5, load15, nb_cpus = get_load(client) # Two cases : cpu_based_load or not. For CPU the real warning is based on warning*nb_cpu status = 0 w1, w5, w15 = tuple(warning) c1, c5, c15 = tuple(critical) # Look if warning < critical if c1 < w1 or c5 < w5 or c15 < w15: print "Error: your critical values should be lower than your warning ones. Please fix it (-w and -c)" sys.exit(2) ratio = 1 if cpu_based: ratio = nb_cpus # First warning if status == 0 and load1 >= w1*ratio: status = 1 if status == 0 and load5 >= w5*ratio: status = 1 if status == 0 and load15 >= w15*ratio: status = 1 # Then critical if load1 >= c1*ratio: status = 2 if load5 >= c5*ratio: status = 2 if load15 >= c15*ratio: status = 2 perfdata = '' perfdata += ' load1=%.2f;%.2f;%.2f;;' % (load1, w1*ratio, c1*ratio) perfdata += ' load5=%.2f;%.2f;%.2f;;' % (load5, w5*ratio, c5*ratio) perfdata += ' load15=%.2f;%.2f;%.2f;;' % (load15, w15*ratio, c15*ratio) # And compare to limits s_load = '%.2f,%.2f,%.2f' % (load1, load5, load15) if status == 2: print "Critical: load average is too high %s | %s" % (s_load, perfdata) sys.exit(2) if status == 1: print "Warning: load average is very high %s | %s" % (s_load, perfdata) sys.exit(1) print "Ok: load average is good %s | %s" % (s_load, perfdata) sys.exit(0)
jmiller-boundless/ol3
refs/heads/master
bin/BeautifulSoup.py
5
"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup parses a (possibly invalid) XML or HTML document into a tree representation. It provides methods and Pythonic idioms that make it easy to navigate, search, and modify the tree. A well-formed XML/HTML document yields a well-formed data structure. An ill-formed XML/HTML document yields a correspondingly ill-formed data structure. If your document is only locally well-formed, you can use this library to find and process the well-formed part of it. The BeautifulSoup class Beautiful Soup works with Python 2.2 and up. It has no external dependencies, but you'll have more success at converting data to UTF-8 if you also install these three packages: * chardet, for auto-detecting character encodings http://chardet.feedparser.org/ * cjkcodecs and iconv_codec, which add more encodings to the ones supported by stock Python. http://cjkpython.i18n.org/ Beautiful Soup defines classes for two main parsing strategies: * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific language that kind of looks like XML. * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid or invalid. This class has web browser-like heuristics for obtaining a sensible parse tree in the face of common HTML errors. Beautiful Soup also defines a class (UnicodeDammit) for autodetecting the encoding of an HTML or XML document, and converting it to Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/documentation.html """ from __future__ import generators __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "3.0.4" __copyright__ = "Copyright (c) 2004-2007 Leonard Richardson" __license__ = "PSF" from sgmllib import SGMLParser, SGMLParseError import codecs import types import re import sgmllib try: from htmlentitydefs import name2codepoint except ImportError: name2codepoint = {} #This hack makes Beautiful Soup able to parse XML with namespaces sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') DEFAULT_OUTPUT_ENCODING = "utf-8" # First, the classes that represent markup elements. class PageElement: """Contains the navigational information for some part of the page (either a tag or a piece of text)""" def setup(self, parent=None, previous=None): """Sets up the initial relations between this element and other elements.""" self.parent = parent self.previous = previous self.next = None self.previousSibling = None self.nextSibling = None if self.parent and self.parent.contents: self.previousSibling = self.parent.contents[-1] self.previousSibling.nextSibling = self def replaceWith(self, replaceWith): oldParent = self.parent myIndex = self.parent.contents.index(self) if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent: # We're replacing this element with one of its siblings. index = self.parent.contents.index(replaceWith) if index and index < myIndex: # Furthermore, it comes before this element. That # means that when we extract it, the index of this # element will change. myIndex = myIndex - 1 self.extract() oldParent.insert(myIndex, replaceWith) def extract(self): """Destructively rips this element out of the tree.""" if self.parent: try: self.parent.contents.remove(self) except ValueError: pass #Find the two elements that would be next to each other if #this element (and any children) hadn't been parsed. Connect #the two. lastChild = self._lastRecursiveChild() nextElement = lastChild.next if self.previous: self.previous.next = nextElement if nextElement: nextElement.previous = self.previous self.previous = None lastChild.next = None self.parent = None if self.previousSibling: self.previousSibling.nextSibling = self.nextSibling if self.nextSibling: self.nextSibling.previousSibling = self.previousSibling self.previousSibling = self.nextSibling = None def _lastRecursiveChild(self): "Finds the last element beneath this object to be parsed." lastChild = self while hasattr(lastChild, 'contents') and lastChild.contents: lastChild = lastChild.contents[-1] return lastChild def insert(self, position, newChild): if (isinstance(newChild, basestring) or isinstance(newChild, unicode)) \ and not isinstance(newChild, NavigableString): newChild = NavigableString(newChild) position = min(position, len(self.contents)) if hasattr(newChild, 'parent') and newChild.parent != None: # We're 'inserting' an element that's already one # of this object's children. if newChild.parent == self: index = self.find(newChild) if index and index < position: # Furthermore we're moving it further down the # list of this object's children. That means that # when we extract this element, our target index # will jump down one. position = position - 1 newChild.extract() newChild.parent = self previousChild = None if position == 0: newChild.previousSibling = None newChild.previous = self else: previousChild = self.contents[position-1] newChild.previousSibling = previousChild newChild.previousSibling.nextSibling = newChild newChild.previous = previousChild._lastRecursiveChild() if newChild.previous: newChild.previous.next = newChild newChildsLastElement = newChild._lastRecursiveChild() if position >= len(self.contents): newChild.nextSibling = None parent = self parentsNextSibling = None while not parentsNextSibling: parentsNextSibling = parent.nextSibling parent = parent.parent if not parent: # This is the last element in the document. break if parentsNextSibling: newChildsLastElement.next = parentsNextSibling else: newChildsLastElement.next = None else: nextChild = self.contents[position] newChild.nextSibling = nextChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild newChildsLastElement.next = nextChild if newChildsLastElement.next: newChildsLastElement.next.previous = newChildsLastElement self.contents.insert(position, newChild) def findNext(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findAllNext, name, attrs, text, **kwargs) def findAllNext(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear before after Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextGenerator) def findNextSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears after this Tag in the document.""" return self._findOne(self.findNextSiblings, name, attrs, text, **kwargs) def findNextSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear after this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.nextSiblingGenerator, **kwargs) fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x def findPrevious(self, name=None, attrs={}, text=None, **kwargs): """Returns the first item that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) def findAllPrevious(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns all items that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousGenerator, **kwargs) fetchPrevious = findAllPrevious # Compatibility with pre-3.x def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): """Returns the closest sibling to this Tag that matches the given criteria and appears before this Tag in the document.""" return self._findOne(self.findPreviousSiblings, name, attrs, text, **kwargs) def findPreviousSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousSiblingGenerator, **kwargs) fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x def findParent(self, name=None, attrs={}, **kwargs): """Returns the closest parent of this Tag that matches the given criteria.""" # NOTE: We can't use _findOne because findParents takes a different # set of arguments. r = None l = self.findParents(name, attrs, 1) if l: r = l[0] return r def findParents(self, name=None, attrs={}, limit=None, **kwargs): """Returns the parents of this Tag that match the given criteria.""" return self._findAll(name, attrs, None, limit, self.parentGenerator, **kwargs) fetchParents = findParents # Compatibility with pre-3.x #These methods do the real heavy lifting. def _findOne(self, method, name, attrs, text, **kwargs): r = None l = method(name, attrs, text, 1, **kwargs) if l: r = l[0] return r def _findAll(self, name, attrs, text, limit, generator, **kwargs): "Iterates over a generator looking for things that match." if isinstance(name, SoupStrainer): strainer = name else: # Build a SoupStrainer strainer = SoupStrainer(name, attrs, text, **kwargs) results = ResultSet(strainer) g = generator() while True: try: i = g.next() except StopIteration: break if i: found = strainer.search(i) if found: results.append(found) if limit and len(results) >= limit: break return results #These Generators can be used to navigate starting from both #NavigableStrings and Tags. def nextGenerator(self): i = self while i: i = i.next yield i def nextSiblingGenerator(self): i = self while i: i = i.nextSibling yield i def previousGenerator(self): i = self while i: i = i.previous yield i def previousSiblingGenerator(self): i = self while i: i = i.previousSibling yield i def parentGenerator(self): i = self while i: i = i.parent yield i # Utility methods def substituteEncoding(self, str, encoding=None): encoding = encoding or "utf-8" return str.replace("%SOUP-ENCODING%", encoding) def toEncoding(self, s, encoding=None): """Encodes an object to a string in some encoding, or to Unicode. .""" if isinstance(s, unicode): if encoding: s = s.encode(encoding) elif isinstance(s, str): if encoding: s = s.encode(encoding) else: s = unicode(s) else: if encoding: s = self.toEncoding(str(s), encoding) else: s = unicode(s) return s class NavigableString(unicode, PageElement): def __getattr__(self, attr): """text.string gives you text. This is for backwards compatibility for Navigable*String, but for CData* it lets you get the string without the CData wrapper.""" if attr == 'string': return self else: raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) def __unicode__(self): return self.__str__(None) def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): if encoding: return self.encode(encoding) else: return self class CData(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding) class ProcessingInstruction(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): output = self if "%SOUP-ENCODING%" in output: output = self.substituteEncoding(output, encoding) return "<?%s?>" % self.toEncoding(output, encoding) class Comment(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!--%s-->" % NavigableString.__str__(self, encoding) class Declaration(NavigableString): def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING): return "<!%s>" % NavigableString.__str__(self, encoding) class Tag(PageElement): """Represents a found HTML tag with its attributes and contents.""" XML_SPECIAL_CHARS_TO_ENTITIES = { "'" : "squot", '"' : "quote", "&" : "amp", "<" : "lt", ">" : "gt" } def __init__(self, parser, name, attrs=None, parent=None, previous=None): "Basic constructor." # We don't actually store the parser object: that lets extracted # chunks be garbage-collected self.parserClass = parser.__class__ self.isSelfClosing = parser.isSelfClosingTag(name) self.name = name if attrs == None: attrs = [] self.attrs = attrs self.contents = [] self.setup(parent, previous) self.hidden = False self.containsSubstitutions = False def get(self, key, default=None): """Returns the value of the 'key' attribute for the tag, or the value given for 'default' if it doesn't have that attribute.""" return self._getAttrMap().get(key, default) def has_key(self, key): return self._getAttrMap().has_key(key) def __getitem__(self, key): """tag[key] returns the value of the 'key' attribute for the tag, and throws an exception if it's not there.""" return self._getAttrMap()[key] def __iter__(self): "Iterating over a tag iterates over its contents." return iter(self.contents) def __len__(self): "The length of a tag is the length of its list of contents." return len(self.contents) def __contains__(self, x): return x in self.contents def __nonzero__(self): "A tag is non-None even if it has no contents." return True def __setitem__(self, key, value): """Setting tag[key] sets the value of the 'key' attribute for the tag.""" self._getAttrMap() self.attrMap[key] = value found = False for i in range(0, len(self.attrs)): if self.attrs[i][0] == key: self.attrs[i] = (key, value) found = True if not found: self.attrs.append((key, value)) self._getAttrMap()[key] = value def __delitem__(self, key): "Deleting tag[key] deletes all 'key' attributes for the tag." for item in self.attrs: if item[0] == key: self.attrs.remove(item) #We don't break because bad HTML can define the same #attribute multiple times. self._getAttrMap() if self.attrMap.has_key(key): del self.attrMap[key] def __call__(self, *args, **kwargs): """Calling a tag like a function is the same as calling its findAll() method. Eg. tag('a') returns a list of all the A tags found within this tag.""" return apply(self.findAll, args, kwargs) def __getattr__(self, tag): #print "Getattr %s.%s" % (self.__class__, tag) if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: return self.find(tag[:-3]) elif tag.find('__') != 0: return self.find(tag) def __eq__(self, other): """Returns true iff this tag has the same name, the same attributes, and the same contents (recursively) as the given tag. NOTE: right now this will return false if two tags have the same attributes in a different order. Should this be fixed?""" if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): return False for i in range(0, len(self.contents)): if self.contents[i] != other.contents[i]: return False return True def __ne__(self, other): """Returns true iff this tag is not identical to the other tag, as defined in __eq__.""" return not self == other def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING): """Renders this tag as a string.""" return self.__str__(encoding) def __unicode__(self): return self.__str__(None) def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Returns a string or Unicode representation of this tag and its contents. To get Unicode, pass None for encoding. NOTE: since Python's HTML parser consumes whitespace, this method is not certain to reproduce the whitespace present in the original string.""" encodedName = self.toEncoding(self.name, encoding) attrs = [] if self.attrs: for key, val in self.attrs: fmt = '%s="%s"' if isString(val): if self.containsSubstitutions and '%SOUP-ENCODING%' in val: val = self.substituteEncoding(val, encoding) # The attribute value either: # # * Contains no embedded double quotes or single quotes. # No problem: we enclose it in double quotes. # * Contains embedded single quotes. No problem: # double quotes work here too. # * Contains embedded double quotes. No problem: # we enclose it in single quotes. # * Embeds both single _and_ double quotes. This # can't happen naturally, but it can happen if # you modify an attribute value after parsing # the document. Now we have a bit of a # problem. We solve it by enclosing the # attribute in single quotes, and escaping any # embedded single quotes to XML entities. if '"' in val: fmt = "%s='%s'" # This can't happen naturally, but it can happen # if you modify an attribute value after parsing. if "'" in val: val = val.replace("'", "&squot;") # Now we're okay w/r/t quotes. But the attribute # value might also contain angle brackets, or # ampersands that aren't part of entities. We need # to escape those to XML entities too. val = re.sub("([<>]|&(?![^\s]+;))", lambda x: "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";", val) attrs.append(fmt % (self.toEncoding(key, encoding), self.toEncoding(val, encoding))) close = '' closeTag = '' if self.isSelfClosing: close = ' /' else: closeTag = '</%s>' % encodedName indentTag, indentContents = 0, 0 if prettyPrint: indentTag = indentLevel space = (' ' * (indentTag-1)) indentContents = indentTag + 1 contents = self.renderContents(encoding, prettyPrint, indentContents) if self.hidden: s = contents else: s = [] attributeString = '' if attrs: attributeString = ' ' + ' '.join(attrs) if prettyPrint: s.append(space) s.append('<%s%s%s>' % (encodedName, attributeString, close)) if prettyPrint: s.append("\n") s.append(contents) if prettyPrint and contents and contents[-1] != "\n": s.append("\n") if prettyPrint and closeTag: s.append(space) s.append(closeTag) if prettyPrint and closeTag and self.nextSibling: s.append("\n") s = ''.join(s) return s def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING): return self.__str__(encoding, True) def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, prettyPrint=False, indentLevel=0): """Renders the contents of this tag as a string in the given encoding. If encoding is None, returns a Unicode string..""" s=[] for c in self: text = None if isinstance(c, NavigableString): text = c.__str__(encoding) elif isinstance(c, Tag): s.append(c.__str__(encoding, prettyPrint, indentLevel)) if text and prettyPrint: text = text.strip() if text: if prettyPrint: s.append(" " * (indentLevel-1)) s.append(text) if prettyPrint: s.append("\n") return ''.join(s) #Soup methods def find(self, name=None, attrs={}, recursive=True, text=None, **kwargs): """Return only the first child of this Tag matching the given criteria.""" r = None l = self.findAll(name, attrs, recursive, text, 1, **kwargs) if l: r = l[0] return r findChild = find def findAll(self, name=None, attrs={}, recursive=True, text=None, limit=None, **kwargs): """Extracts a list of Tag objects that match the given criteria. You can specify the name of the Tag and any attributes you want the Tag to have. The value of a key-value pair in the 'attrs' map can be a string, a list of strings, a regular expression object, or a callable that takes a string and returns whether or not the string matches for some custom definition of 'matches'. The same is true of the tag name.""" generator = self.recursiveChildGenerator if not recursive: generator = self.childGenerator return self._findAll(name, attrs, text, limit, generator, **kwargs) findChildren = findAll # Pre-3.x compatibility methods first = find fetch = findAll def fetchText(self, text=None, recursive=True, limit=None): return self.findAll(text=text, recursive=recursive, limit=limit) def firstText(self, text=None, recursive=True): return self.find(text=text, recursive=recursive) #Utility methods def append(self, tag): """Appends the given tag to the contents of this tag.""" self.contents.append(tag) #Private methods def _getAttrMap(self): """Initializes a map representation of this tag's attributes, if not already initialized.""" if not getattr(self, 'attrMap'): self.attrMap = {} for (key, value) in self.attrs: self.attrMap[key] = value return self.attrMap #Generator methods def childGenerator(self): for i in range(0, len(self.contents)): yield self.contents[i] raise StopIteration def recursiveChildGenerator(self): stack = [(self, 0)] while stack: tag, start = stack.pop() if isinstance(tag, Tag): for i in range(start, len(tag.contents)): a = tag.contents[i] yield a if isinstance(a, Tag) and tag.contents: if i < len(tag.contents) - 1: stack.append((tag, i+1)) stack.append((a, 0)) break raise StopIteration # Next, a couple classes to represent queries and their results. class SoupStrainer: """Encapsulates a number of ways of matching a markup element (tag or text).""" def __init__(self, name=None, attrs={}, text=None, **kwargs): self.name = name if isString(attrs): kwargs['class'] = attrs attrs = None if kwargs: if attrs: attrs = attrs.copy() attrs.update(kwargs) else: attrs = kwargs self.attrs = attrs self.text = text def __str__(self): if self.text: return self.text else: return "%s|%s" % (self.name, self.attrs) def searchTag(self, markupName=None, markupAttrs={}): found = None markup = None if isinstance(markupName, Tag): markup = markupName markupAttrs = markup callFunctionWithTagData = callable(self.name) \ and not isinstance(markupName, Tag) if (not self.name) \ or callFunctionWithTagData \ or (markup and self._matches(markup, self.name)) \ or (not markup and self._matches(markupName, self.name)): if callFunctionWithTagData: match = self.name(markupName, markupAttrs) else: match = True markupAttrMap = None for attr, matchAgainst in self.attrs.items(): if not markupAttrMap: if hasattr(markupAttrs, 'get'): markupAttrMap = markupAttrs else: markupAttrMap = {} for k,v in markupAttrs: markupAttrMap[k] = v attrValue = markupAttrMap.get(attr) if not self._matches(attrValue, matchAgainst): match = False break if match: if markup: found = markup else: found = markupName return found def search(self, markup): #print 'looking for %s in %s' % (self, markup) found = None # If given a list of items, scan it for a text element that # matches. if isList(markup) and not isinstance(markup, Tag): for element in markup: if isinstance(element, NavigableString) \ and self.search(element): found = element break # If it's a Tag, make sure its name or attributes match. # Don't bother with Tags if we're searching for text. elif isinstance(markup, Tag): if not self.text: found = self.searchTag(markup) # If it's text, make sure the text matches. elif isinstance(markup, NavigableString) or \ isString(markup): if self._matches(markup, self.text): found = markup else: raise Exception, "I don't know how to match against a %s" \ % markup.__class__ return found def _matches(self, markup, matchAgainst): #print "Matching %s against %s" % (markup, matchAgainst) result = False if matchAgainst == True and type(matchAgainst) == types.BooleanType: result = markup != None elif callable(matchAgainst): result = matchAgainst(markup) else: #Custom match methods take the tag as an argument, but all #other ways of matching match the tag name as a string. if isinstance(markup, Tag): markup = markup.name if markup and not isString(markup): markup = unicode(markup) #Now we know that chunk is either a string, or None. if hasattr(matchAgainst, 'match'): # It's a regexp object. result = markup and matchAgainst.search(markup) elif isList(matchAgainst): result = markup in matchAgainst elif hasattr(matchAgainst, 'items'): result = markup.has_key(matchAgainst) elif matchAgainst and isString(markup): if isinstance(markup, unicode): matchAgainst = unicode(matchAgainst) else: matchAgainst = str(matchAgainst) if not result: result = matchAgainst == markup return result class ResultSet(list): """A ResultSet is just a list that keeps track of the SoupStrainer that created it.""" def __init__(self, source): list.__init__([]) self.source = source # Now, some helper functions. def isList(l): """Convenience method that works with all 2.x versions of Python to determine whether or not something is listlike.""" return hasattr(l, '__iter__') \ or (type(l) in (types.ListType, types.TupleType)) def isString(s): """Convenience method that works with all 2.x versions of Python to determine whether or not something is stringlike.""" try: return isinstance(s, unicode) or isintance(s, basestring) except NameError: return isinstance(s, str) def buildTagMap(default, *args): """Turns a list of maps, lists, or scalars into a single map. Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and NESTING_RESET_TAGS maps out of lists and partial maps.""" built = {} for portion in args: if hasattr(portion, 'items'): #It's a map. Merge it. for k,v in portion.items(): built[k] = v elif isList(portion): #It's a list. Map each item to the default. for k in portion: built[k] = default else: #It's a scalar. Map it to the default. built[portion] = default return built # Now, the parser classes. class BeautifulStoneSoup(Tag, SGMLParser): """This class contains the basic parser and search code. It defines a parser that knows nothing about tag behavior except for the following: You can't close a tag without closing all the tags it encloses. That is, "<foo><bar></foo>" actually means "<foo><bar></bar></foo>". [Another possible explanation is "<foo><bar /></foo>", but since this class defines no SELF_CLOSING_TAGS, it will never use that explanation.] This class is useful for parsing XML or made-up markup languages, or when BeautifulSoup makes an assumption counter to what you were expecting.""" XML_ENTITY_LIST = {} for i in Tag.XML_SPECIAL_CHARS_TO_ENTITIES.values(): XML_ENTITY_LIST[i] = True SELF_CLOSING_TAGS = {} NESTABLE_TAGS = {} RESET_NESTING_TAGS = {} QUOTE_TAGS = {} MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'), lambda x: x.group(1) + ' />'), (re.compile('<!\s+([^<>]*)>'), lambda x: '<!' + x.group(1) + '>') ] ROOT_TAG_NAME = u'[document]' HTML_ENTITIES = "html" XML_ENTITIES = "xml" def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None, markupMassage=True, smartQuotesTo=XML_ENTITIES, convertEntities=None, selfClosingTags=None): """The Soup object is initialized as the 'root tag', and the provided markup (which can be a string or a file-like object) is fed into the underlying parser. sgmllib will process most bad HTML, and the BeautifulSoup class has some tricks for dealing with some HTML that kills sgmllib, but Beautiful Soup can nonetheless choke or lose data if your data uses self-closing tags or declarations incorrectly. By default, Beautiful Soup uses regexes to sanitize input, avoiding the vast majority of these problems. If the problems don't apply to you, pass in False for markupMassage, and you'll get better performance. The default parser massage techniques fix the two most common instances of invalid HTML that choke sgmllib: <br/> (No space between name of closing tag and tag close) <! --Comment--> (Extraneous whitespace in declaration) You can pass in a custom list of (RE object, replace method) tuples to get Beautiful Soup to scrub your input the way you want.""" self.parseOnlyThese = parseOnlyThese self.fromEncoding = fromEncoding self.smartQuotesTo = smartQuotesTo self.convertEntities = convertEntities if self.convertEntities: # It doesn't make sense to convert encoded characters to # entities even while you're converting entities to Unicode. # Just convert it all to Unicode. self.smartQuotesTo = None self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags) SGMLParser.__init__(self) if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() self.markup = markup self.markupMassage = markupMassage try: self._feed() except StopParsing: pass self.markup = None # The markup can now be GCed def _feed(self, inDocumentEncoding=None): # Convert the document to Unicode. markup = self.markup if isinstance(markup, unicode): if not hasattr(self, 'originalEncoding'): self.originalEncoding = None else: dammit = UnicodeDammit\ (markup, [self.fromEncoding, inDocumentEncoding], smartQuotesTo=self.smartQuotesTo) markup = dammit.unicode self.originalEncoding = dammit.originalEncoding if markup: if self.markupMassage: if not isList(self.markupMassage): self.markupMassage = self.MARKUP_MASSAGE for fix, m in self.markupMassage: markup = fix.sub(m, markup) self.reset() SGMLParser.feed(self, markup) # Close out any unfinished strings and close all the open tags. self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def __getattr__(self, methodName): """This method routes method call requests to either the SGMLParser superclass or the Tag superclass, depending on the method name.""" #print "__getattr__ called on %s.%s" % (self.__class__, methodName) if methodName.find('start_') == 0 or methodName.find('end_') == 0 \ or methodName.find('do_') == 0: return SGMLParser.__getattr__(self, methodName) elif methodName.find('__') != 0: return Tag.__getattr__(self, methodName) else: raise AttributeError def isSelfClosingTag(self, name): """Returns true iff the given string is the name of a self-closing tag according to this parser.""" return self.SELF_CLOSING_TAGS.has_key(name) \ or self.instanceSelfClosingTags.has_key(name) def reset(self): Tag.__init__(self, self, self.ROOT_TAG_NAME) self.hidden = 1 SGMLParser.reset(self) self.currentData = [] self.currentTag = None self.tagStack = [] self.quoteStack = [] self.pushTag(self) def popTag(self): tag = self.tagStack.pop() # Tags with just one string-owning child get the child as a # 'string' property, so that soup.tag.string is shorthand for # soup.tag.contents[0] if len(self.currentTag.contents) == 1 and \ isinstance(self.currentTag.contents[0], NavigableString): self.currentTag.string = self.currentTag.contents[0] #print "Pop", tag.name if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): #print "Push", tag.name if self.currentTag: self.currentTag.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] def endData(self, containerClass=NavigableString): if self.currentData: currentData = ''.join(self.currentData) if not currentData.strip(): if '\n' in currentData: currentData = '\n' else: currentData = ' ' self.currentData = [] if self.parseOnlyThese and len(self.tagStack) <= 1 and \ (not self.parseOnlyThese.text or \ not self.parseOnlyThese.search(currentData)): return o = containerClass(currentData) o.setup(self.currentTag, self.previous) if self.previous: self.previous.next = o self.previous = o self.currentTag.contents.append(o) def _popToTag(self, name, inclusivePop=True): """Pops the tag stack up to and including the most recent instance of the given tag. If inclusivePop is false, pops the tag stack up to but *not* including the most recent instqance of the given tag.""" #print "Popping to %s" % name if name == self.ROOT_TAG_NAME: return numPops = 0 mostRecentTag = None for i in range(len(self.tagStack)-1, 0, -1): if name == self.tagStack[i].name: numPops = len(self.tagStack)-i break if not inclusivePop: numPops = numPops - 1 for i in range(0, numPops): mostRecentTag = self.popTag() return mostRecentTag def _smartPop(self, name): """We need to pop up to the previous tag of this type, unless one of this tag's nesting reset triggers comes between this tag and the previous tag of this type, OR unless this tag is a generic nesting trigger and another generic nesting trigger comes between this tag and the previous tag of this type. Examples: <p>Foo<b>Bar<p> should pop to 'p', not 'b'. <p>Foo<table>Bar<p> should pop to 'table', not 'p'. <p>Foo<table><tr>Bar<p> should pop to 'tr', not 'p'. <p>Foo<b>Bar<p> should pop to 'p', not 'b'. <li><ul><li> *<li>* should pop to 'ul', not the first 'li'. <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr' <td><tr><td> *<td>* should pop to 'tr', not the first 'td' """ nestingResetTriggers = self.NESTABLE_TAGS.get(name) isNestable = nestingResetTriggers != None isResetNesting = self.RESET_NESTING_TAGS.has_key(name) popTo = None inclusive = True for i in range(len(self.tagStack)-1, 0, -1): p = self.tagStack[i] if (not p or p.name == name) and not isNestable: #Non-nestable tags get popped to the top or to their #last occurrence. popTo = name break if (nestingResetTriggers != None and p.name in nestingResetTriggers) \ or (nestingResetTriggers == None and isResetNesting and self.RESET_NESTING_TAGS.has_key(p.name)): #If we encounter one of the nesting reset triggers #peculiar to this tag, or we encounter another tag #that causes nesting to reset, pop up to but not #including that tag. popTo = p.name inclusive = False break p = p.parent if popTo: self._popToTag(popTo, inclusive) def unknown_starttag(self, name, attrs, selfClosing=0): #print "Start tag %s: %s" % (name, attrs) if self.quoteStack: #This is not a real tag. #print "<%s> is not real!" % name attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs)) self.handle_data('<%s%s>' % (name, attrs)) return self.endData() if not self.isSelfClosingTag(name) and not selfClosing: self._smartPop(name) if self.parseOnlyThese and len(self.tagStack) <= 1 \ and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)): return tag = Tag(self, name, attrs, self.currentTag, self.previous) if self.previous: self.previous.next = tag self.previous = tag self.pushTag(tag) if selfClosing or self.isSelfClosingTag(name): self.popTag() if name in self.QUOTE_TAGS: #print "Beginning quote (%s)" % name self.quoteStack.append(name) self.literal = 1 return tag def unknown_endtag(self, name): #print "End tag %s" % name if self.quoteStack and self.quoteStack[-1] != name: #This is not a real end tag. #print "</%s> is not real!" % name self.handle_data('</%s>' % name) return self.endData() self._popToTag(name) if self.quoteStack and self.quoteStack[-1] == name: self.quoteStack.pop() self.literal = (len(self.quoteStack) > 0) def handle_data(self, data): self.currentData.append(data) def _toStringSubclass(self, text, subclass): """Adds a certain piece of text to the tree as a NavigableString subclass.""" self.endData() self.handle_data(text) self.endData(subclass) def handle_pi(self, text): """Handle a processing instruction as a ProcessingInstruction object, possibly one with a %SOUP-ENCODING% slot into which an encoding will be plugged later.""" if text[:3] == "xml": text = "xml version='1.0' encoding='%SOUP-ENCODING%'" self._toStringSubclass(text, ProcessingInstruction) def handle_comment(self, text): "Handle comments as Comment objects." self._toStringSubclass(text, Comment) def handle_charref(self, ref): "Handle character references as data." if self.convertEntities in [self.HTML_ENTITIES, self.XML_ENTITIES]: data = unichr(int(ref)) else: data = '&#%s;' % ref self.handle_data(data) def handle_entityref(self, ref): """Handle entity references as data, possibly converting known HTML entity references to the corresponding Unicode characters.""" data = None if self.convertEntities == self.HTML_ENTITIES or \ (self.convertEntities == self.XML_ENTITIES and \ self.XML_ENTITY_LIST.get(ref)): try: data = unichr(name2codepoint[ref]) except KeyError: pass if not data: data = '&%s;' % ref self.handle_data(data) def handle_decl(self, data): "Handle DOCTYPEs and the like as Declaration objects." self._toStringSubclass(data, Declaration) def parse_declaration(self, i): """Treat a bogus SGML declaration as raw data. Treat a CDATA declaration as a CData object.""" j = None if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) data = self.rawdata[i+9:k] j = k+3 self._toStringSubclass(data, CData) else: try: j = SGMLParser.parse_declaration(self, i) except SGMLParseError: toHandle = self.rawdata[i:] self.handle_data(toHandle) j = i + len(toHandle) return j class BeautifulSoup(BeautifulStoneSoup): """This parser knows the following facts about HTML: * Some tags have no closing tag and should be interpreted as being closed as soon as they are encountered. * The text inside some tags (ie. 'script') may contain tags which are not really part of the document and which should be parsed as text, not tags. If you want to parse the text as tags, you can always fetch it and parse it explicitly. * Tag nesting rules: Most tags can't be nested at all. For instance, the occurrence of a <p> tag should implicitly close the previous <p> tag. <p>Para1<p>Para2 should be transformed into: <p>Para1</p><p>Para2 Some tags can be nested arbitrarily. For instance, the occurrence of a <blockquote> tag should _not_ implicitly close the previous <blockquote> tag. Alice said: <blockquote>Bob said: <blockquote>Blah should NOT be transformed into: Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah Some tags can be nested, but the nesting is reset by the interposition of other tags. For instance, a <tr> tag should implicitly close the previous <tr> tag within the same <table>, but not close a <tr> tag in another table. <table><tr>Blah<tr>Blah should be transformed into: <table><tr>Blah</tr><tr>Blah but, <tr>Blah<table><tr>Blah should NOT be transformed into <tr>Blah<table></tr><tr>Blah Differing assumptions about tag nesting rules are a major source of problems with the BeautifulSoup class. If BeautifulSoup is not treating as nestable a tag your page author treats as nestable, try ICantBelieveItsBeautifulSoup, MinimalSoup, or BeautifulStoneSoup before writing your own subclass.""" def __init__(self, *args, **kwargs): if not kwargs.has_key('smartQuotesTo'): kwargs['smartQuotesTo'] = self.HTML_ENTITIES BeautifulStoneSoup.__init__(self, *args, **kwargs) SELF_CLOSING_TAGS = buildTagMap(None, ['br' , 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base']) QUOTE_TAGS = {'script': None} #According to the HTML standard, each of these inline tags can #contain another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', 'center'] #According to the HTML standard, these block tags can contain #another tag of the same type. Furthermore, it's common #to actually use these tags this way. NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del'] #Lists can contain other lists, but there are restrictions. NESTABLE_LIST_TAGS = { 'ol' : [], 'ul' : [], 'li' : ['ul', 'ol'], 'dl' : [], 'dd' : ['dl'], 'dt' : ['dl'] } #Tables can contain other tables, but there are restrictions. NESTABLE_TABLE_TAGS = {'table' : [], 'tr' : ['table', 'tbody', 'tfoot', 'thead'], 'td' : ['tr'], 'th' : ['tr'], 'thead' : ['table'], 'tbody' : ['table'], 'tfoot' : ['table'], } NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre'] #If one of these tags is encountered, all tags up to the next tag of #this type are popped. RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript', NON_NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS, NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) # Used to detect the charset in a META tag; see start_meta CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)") def start_meta(self, attrs): """Beautiful Soup can detect a charset included in a META tag, try to convert the document to that charset, and re-parse the document from the beginning.""" httpEquiv = None contentType = None contentTypeIndex = None tagNeedsEncodingSubstitution = False for i in range(0, len(attrs)): key, value = attrs[i] key = key.lower() if key == 'http-equiv': httpEquiv = value elif key == 'content': contentType = value contentTypeIndex = i if httpEquiv and contentType: # It's an interesting meta tag. match = self.CHARSET_RE.search(contentType) if match: if getattr(self, 'declaredHTMLEncoding') or \ (self.originalEncoding == self.fromEncoding): # This is our second pass through the document, or # else an encoding was specified explicitly and it # worked. Rewrite the meta tag. newAttr = self.CHARSET_RE.sub\ (lambda(match):match.group(1) + "%SOUP-ENCODING%", value) attrs[contentTypeIndex] = (attrs[contentTypeIndex][0], newAttr) tagNeedsEncodingSubstitution = True else: # This is our first pass through the document. # Go through it again with the new information. newCharset = match.group(3) if newCharset and newCharset != self.originalEncoding: self.declaredHTMLEncoding = newCharset self._feed(self.declaredHTMLEncoding) raise StopParsing tag = self.unknown_starttag("meta", attrs) if tag and tagNeedsEncodingSubstitution: tag.containsSubstitutions = True class StopParsing(Exception): pass class ICantBelieveItsBeautifulSoup(BeautifulSoup): """The BeautifulSoup class is oriented towards skipping over common HTML errors like unclosed tags. However, sometimes it makes errors of its own. For instance, consider this fragment: <b>Foo<b>Bar</b></b> This is perfectly valid (if bizarre) HTML. However, the BeautifulSoup class will implicitly close the first b tag when it encounters the second 'b'. It will think the author wrote "<b>Foo<b>Bar", and didn't close the first 'b' tag, because there's no real-world reason to bold something that's already bold. When it encounters '</b></b>' it will close two more 'b' tags, for a grand total of three tags closed instead of two. This can throw off the rest of your document structure. The same is true of a number of other tags, listed below. It's much more common for someone to forget to close a 'b' tag than to actually use nested 'b' tags, and the BeautifulSoup class handles the common case. This class handles the not-co-common case: where you can't believe someone wrote what they did, but it's valid HTML and BeautifulSoup screwed up by assuming it wouldn't be.""" I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', 'big'] I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript'] NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) class MinimalSoup(BeautifulSoup): """The MinimalSoup class is for parsing HTML that contains pathologically bad markup. It makes no assumptions about tag nesting, but it does know which tags are self-closing, that <script> tags contain Javascript and should not be parsed, that META tags may contain encoding information, and so on. This also makes it better for subclassing than BeautifulStoneSoup or BeautifulSoup.""" RESET_NESTING_TAGS = buildTagMap('noscript') NESTABLE_TAGS = {} class BeautifulSOAP(BeautifulStoneSoup): """This class will push a tag with only a single string child into the tag's parent as an attribute. The attribute's name is the tag name, and the value is the string child. An example should give the flavor of the change: <foo><bar>baz</bar></foo> => <foo bar="baz"><bar>baz</bar></foo> You can then access fooTag['bar'] instead of fooTag.barTag.string. This is, of course, useful for scraping structures that tend to use subelements instead of attributes, such as SOAP messages. Note that it modifies its input, so don't print the modified version out. I'm not sure how many people really want to use this class; let me know if you do. Mainly I like the name.""" def popTag(self): if len(self.tagStack) > 1: tag = self.tagStack[-1] parent = self.tagStack[-2] parent._getAttrMap() if (isinstance(tag, Tag) and len(tag.contents) == 1 and isinstance(tag.contents[0], NavigableString) and not parent.attrMap.has_key(tag.name)): parent[tag.name] = tag.contents[0] BeautifulStoneSoup.popTag(self) #Enterprise class names! It has come to our attention that some people #think the names of the Beautiful Soup parser classes are too silly #and "unprofessional" for use in enterprise screen-scraping. We feel #your pain! For such-minded folk, the Beautiful Soup Consortium And #All-Night Kosher Bakery recommends renaming this file to #"RobustParser.py" (or, in cases of extreme enterprisness, #"RobustParserBeanInterface.class") and using the following #enterprise-friendly class aliases: class RobustXMLParser(BeautifulStoneSoup): pass class RobustHTMLParser(BeautifulSoup): pass class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup): pass class RobustInsanelyWackAssHTMLParser(MinimalSoup): pass class SimplifyingSOAPParser(BeautifulSOAP): pass ###################################################### # # Bonus library: Unicode, Dammit # # This class forces XML data into a standard format (usually to UTF-8 # or Unicode). It is heavily based on code from Mark Pilgrim's # Universal Feed Parser. It does not rewrite the XML or HTML to # reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi # (XML) and BeautifulSoup.start_meta (HTML). # Autodetects character encodings. # Download from http://chardet.feedparser.org/ try: import chardet # import chardet.constants # chardet.constants._debug = 1 except: chardet = None chardet = None # cjkcodecs and iconv_codec make Python know about more character encodings. # Both are available from http://cjkpython.i18n.org/ # They're built in if you use Python 2.4. try: import cjkcodecs.aliases except: pass try: import iconv_codec except: pass class UnicodeDammit: """A class for detecting the encoding of a *ML document and converting it to a Unicode string. If the source encoding is windows-1252, can replace MS smart quotes with their HTML or XML equivalents.""" # This dictionary maps commonly seen values for "charset" in HTML # meta tags to the corresponding Python codec names. It only covers # values that aren't in Python's aliases and can't be determined # by the heuristics in find_codec. CHARSET_ALIASES = { "macintosh" : "mac-roman", "x-sjis" : "shift-jis" } def __init__(self, markup, overrideEncodings=[], smartQuotesTo='xml'): self.markup, documentEncoding, sniffedEncoding = \ self._detectEncoding(markup) self.smartQuotesTo = smartQuotesTo self.triedEncodings = [] if markup == '' or isinstance(markup, unicode): self.originalEncoding = None self.unicode = unicode(markup) return u = None for proposedEncoding in overrideEncodings: u = self._convertFrom(proposedEncoding) if u: break if not u: for proposedEncoding in (documentEncoding, sniffedEncoding): u = self._convertFrom(proposedEncoding) if u: break # If no luck and we have auto-detection library, try that: if not u and chardet and not isinstance(self.markup, unicode): u = self._convertFrom(chardet.detect(self.markup)['encoding']) # As a last resort, try utf-8 and windows-1252: if not u: for proposed_encoding in ("utf-8", "windows-1252"): u = self._convertFrom(proposed_encoding) if u: break self.unicode = u if not u: self.originalEncoding = None def _subMSChar(self, orig): """Changes a MS smart quote character to an XML or HTML entity.""" sub = self.MS_CHARS.get(orig) if type(sub) == types.TupleType: if self.smartQuotesTo == 'xml': sub = '&#x%s;' % sub[1] else: sub = '&%s;' % sub[0] return sub def _convertFrom(self, proposed): proposed = self.find_codec(proposed) if not proposed or proposed in self.triedEncodings: return None self.triedEncodings.append(proposed) markup = self.markup # Convert smart quotes to HTML if coming from an encoding # that might have them. if self.smartQuotesTo and proposed.lower() in("windows-1252", "iso-8859-1", "iso-8859-2"): markup = re.compile("([\x80-\x9f])").sub \ (lambda(x): self._subMSChar(x.group(1)), markup) try: # print "Trying to convert document to %s" % proposed u = self._toUnicode(markup, proposed) self.markup = u self.originalEncoding = proposed except Exception, e: # print "That didn't work!" # print e return None #print "Correct encoding: %s" % proposed return self.markup def _toUnicode(self, data, encoding): '''Given a string and its encoding, decodes the string into Unicode. %encoding is a string recognized by encodings.aliases''' # strip Byte Order Mark (if present) if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16be' data = data[2:] elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ and (data[2:4] != '\x00\x00'): encoding = 'utf-16le' data = data[2:] elif data[:3] == '\xef\xbb\xbf': encoding = 'utf-8' data = data[3:] elif data[:4] == '\x00\x00\xfe\xff': encoding = 'utf-32be' data = data[4:] elif data[:4] == '\xff\xfe\x00\x00': encoding = 'utf-32le' data = data[4:] newdata = unicode(data, encoding) return newdata def _detectEncoding(self, xml_data): """Given a document, tries to detect its XML encoding.""" xml_encoding = sniffed_xml_encoding = None try: if xml_data[:4] == '\x4c\x6f\xa7\x94': # EBCDIC xml_data = self._ebcdic_to_ascii(xml_data) elif xml_data[:4] == '\x00\x3c\x00\x3f': # UTF-16BE sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \ and (xml_data[2:4] != '\x00\x00'): # UTF-16BE with BOM sniffed_xml_encoding = 'utf-16be' xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x3f\x00': # UTF-16LE sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \ (xml_data[2:4] != '\x00\x00'): # UTF-16LE with BOM sniffed_xml_encoding = 'utf-16le' xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') elif xml_data[:4] == '\x00\x00\x00\x3c': # UTF-32BE sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') elif xml_data[:4] == '\x3c\x00\x00\x00': # UTF-32LE sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') elif xml_data[:4] == '\x00\x00\xfe\xff': # UTF-32BE with BOM sniffed_xml_encoding = 'utf-32be' xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') elif xml_data[:4] == '\xff\xfe\x00\x00': # UTF-32LE with BOM sniffed_xml_encoding = 'utf-32le' xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') elif xml_data[:3] == '\xef\xbb\xbf': # UTF-8 with BOM sniffed_xml_encoding = 'utf-8' xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') else: sniffed_xml_encoding = 'ascii' pass xml_encoding_match = re.compile \ ('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')\ .match(xml_data) except: xml_encoding_match = None if xml_encoding_match: xml_encoding = xml_encoding_match.groups()[0].lower() if sniffed_xml_encoding and \ (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): xml_encoding = sniffed_xml_encoding return xml_data, xml_encoding, sniffed_xml_encoding def find_codec(self, charset): return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ or (charset and self._codec(charset.replace("-", ""))) \ or (charset and self._codec(charset.replace("-", "_"))) \ or charset def _codec(self, charset): if not charset: return charset codec = None try: codecs.lookup(charset) codec = charset except LookupError: pass return codec EBCDIC_TO_ASCII_MAP = None def _ebcdic_to_ascii(self, s): c = self.__class__ if not c.EBCDIC_TO_ASCII_MAP: emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200, 201,202,106,107,108,109,110,111,112,113,114,203,204,205, 206,207,208,209,126,115,116,117,118,119,120,121,122,210, 211,212,213,214,215,216,217,218,219,220,221,222,223,224, 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72, 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81, 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89, 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57, 250,251,252,253,254,255) import string c.EBCDIC_TO_ASCII_MAP = string.maketrans( \ ''.join(map(chr, range(256))), ''.join(map(chr, emap))) return s.translate(c.EBCDIC_TO_ASCII_MAP) MS_CHARS = { '\x80' : ('euro', '20AC'), '\x81' : ' ', '\x82' : ('sbquo', '201A'), '\x83' : ('fnof', '192'), '\x84' : ('bdquo', '201E'), '\x85' : ('hellip', '2026'), '\x86' : ('dagger', '2020'), '\x87' : ('Dagger', '2021'), '\x88' : ('circ', '2C6'), '\x89' : ('permil', '2030'), '\x8A' : ('Scaron', '160'), '\x8B' : ('lsaquo', '2039'), '\x8C' : ('OElig', '152'), '\x8D' : '?', '\x8E' : ('#x17D', '17D'), '\x8F' : '?', '\x90' : '?', '\x91' : ('lsquo', '2018'), '\x92' : ('rsquo', '2019'), '\x93' : ('ldquo', '201C'), '\x94' : ('rdquo', '201D'), '\x95' : ('bull', '2022'), '\x96' : ('ndash', '2013'), '\x97' : ('mdash', '2014'), '\x98' : ('tilde', '2DC'), '\x99' : ('trade', '2122'), '\x9a' : ('scaron', '161'), '\x9b' : ('rsaquo', '203A'), '\x9c' : ('oelig', '153'), '\x9d' : '?', '\x9e' : ('#x17E', '17E'), '\x9f' : ('Yuml', ''),} ####################################################################### #By default, act as an HTML pretty-printer. if __name__ == '__main__': import sys soup = BeautifulSoup(sys.stdin.read()) print soup.prettify()
Renji/weevely3
refs/heads/master
modules/file/cd.py
16
from core.vectors import PhpCode, ModuleExec from core.module import Module from core import messages from core.loggers import log import random class Cd(Module): """Change current working directory.""" aliases = [ 'cd' ] def init(self): self.register_info( { 'author': [ 'Emilio Pinna' ], 'license': 'GPLv3' } ) self.register_arguments([ { 'name' : 'dir', 'help' : 'Target folder', 'nargs' : '?' } ]) def run(self): # When no folder is specified, change folder to SCRIPT_NAME to # simulate the bash behaviour. If not available, use current dir. if not self.args.get('dir'): script_folder = ModuleExec( 'system_info', [ '-info', 'script_folder' ] ).load_result_or_run( result_name = 'script_folder' ) self.args['dir'] = script_folder if script_folder else '.' # The execution and result storage is done manually cause # no result has to be stored if the execution fails. This # is not simple to implement using # self.vectors.get_result(.., store_result). folder = PhpCode("""@chdir('${dir}')&&print(@getcwd());""", "chdir").run( self.args ) if folder: self._store_result('cwd', folder) else: log.warning( messages.module_file_cd.failed_directory_change_to_s % (self.args['dir']) ) def run_alias(self, line, cmd): # Run this alias independently from the shell_sh status return self.run_cmdline(line)
wllmtrng/wllmtrng.github.io-src
refs/heads/master
fabfile.py
19
from fabric.api import * import fabric.contrib.project as project import os import shutil import sys import SocketServer from pelican.server import ComplexHTTPRequestHandler # Local path configuration (can be absolute or relative to fabfile) env.deploy_path = 'output' DEPLOY_PATH = env.deploy_path # Remote server configuration production = 'root@localhost:22' dest_path = '/var/www' # Rackspace Cloud Files configuration settings env.cloudfiles_username = 'my_rackspace_username' env.cloudfiles_api_key = 'my_rackspace_api_key' env.cloudfiles_container = 'my_cloudfiles_container' # Github Pages configuration env.github_pages_branch = "master" # Port for `serve` PORT = 8000 def clean(): """Remove generated files""" if os.path.isdir(DEPLOY_PATH): shutil.rmtree(DEPLOY_PATH) os.makedirs(DEPLOY_PATH) def build(): """Build local version of site""" local('pelican -s pelicanconf.py') def rebuild(): """`build` with the delete switch""" local('pelican -d -s pelicanconf.py') def regenerate(): """Automatically regenerate site upon file modification""" local('pelican -r -s pelicanconf.py') def serve(): """Serve site at http://localhost:8000/""" os.chdir(env.deploy_path) class AddressReuseTCPServer(SocketServer.TCPServer): allow_reuse_address = True server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler) sys.stderr.write('Serving on port {0} ...\n'.format(PORT)) server.serve_forever() def reserve(): """`build`, then `serve`""" build() serve() def preview(): """Build production version of site""" local('pelican -s publishconf.py') def cf_upload(): """Publish to Rackspace Cloud Files""" rebuild() with lcd(DEPLOY_PATH): local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 ' '-U {cloudfiles_username} ' '-K {cloudfiles_api_key} ' 'upload -c {cloudfiles_container} .'.format(**env)) @hosts(production) def publish(): """Publish to production via rsync""" local('pelican -s publishconf.py') project.rsync_project( remote_dir=dest_path, exclude=".DS_Store", local_dir=DEPLOY_PATH.rstrip('/') + '/', delete=True, extra_opts='-c', ) def gh_pages(): """Publish to GitHub Pages""" rebuild() local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env))
redhat-openstack/neutron
refs/heads/f22-patches
neutron/plugins/ml2/models.py
8
# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from sqlalchemy import orm from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import portbindings BINDING_PROFILE_LEN = 4095 class NetworkSegment(model_base.BASEV2, models_v2.HasId): """Represent persistent state of a network segment. A network segment is a portion of a neutron network with a specific physical realization. A neutron network can consist of one or more segments. """ __tablename__ = 'ml2_network_segments' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), nullable=False) network_type = sa.Column(sa.String(32), nullable=False) physical_network = sa.Column(sa.String(64)) segmentation_id = sa.Column(sa.Integer) is_dynamic = sa.Column(sa.Boolean, default=False, nullable=False, server_default=sa.sql.false()) class PortBinding(model_base.BASEV2): """Represent binding-related state of a port. A port binding stores the port attributes required for the portbindings extension, as well as internal ml2 state such as which MechanismDriver and which segment are used by the port binding. """ __tablename__ = 'ml2_port_bindings' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) host = sa.Column(sa.String(255), nullable=False, default='', server_default='') vnic_type = sa.Column(sa.String(64), nullable=False, default=portbindings.VNIC_NORMAL, server_default=portbindings.VNIC_NORMAL) profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False, default='', server_default='') vif_type = sa.Column(sa.String(64), nullable=False) vif_details = sa.Column(sa.String(4095), nullable=False, default='', server_default='') driver = sa.Column(sa.String(64)) segment = sa.Column(sa.String(36), sa.ForeignKey('ml2_network_segments.id', ondelete="SET NULL")) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly load port bindings port = orm.relationship( models_v2.Port, backref=orm.backref("port_binding", lazy='joined', uselist=False, cascade='delete')) class DVRPortBinding(model_base.BASEV2): """Represent binding-related state of a DVR port. Port binding for all the ports associated to a DVR identified by router_id. """ __tablename__ = 'ml2_dvr_port_bindings' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) host = sa.Column(sa.String(255), nullable=False, primary_key=True) router_id = sa.Column(sa.String(36), nullable=True) vif_type = sa.Column(sa.String(64), nullable=False) vif_details = sa.Column(sa.String(4095), nullable=False, default='', server_default='') vnic_type = sa.Column(sa.String(64), nullable=False, default=portbindings.VNIC_NORMAL, server_default=portbindings.VNIC_NORMAL) profile = sa.Column(sa.String(BINDING_PROFILE_LEN), nullable=False, default='', server_default='') cap_port_filter = sa.Column(sa.Boolean, nullable=False) driver = sa.Column(sa.String(64)) segment = sa.Column(sa.String(36), sa.ForeignKey('ml2_network_segments.id', ondelete="SET NULL")) status = sa.Column(sa.String(16), nullable=False) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly load port bindings port = orm.relationship( models_v2.Port, backref=orm.backref("dvr_port_binding", lazy='joined', uselist=False, cascade='delete'))
tomduijf/home-assistant
refs/heads/master
tests/components/switch/test_mqtt.py
3
""" tests.components.switch.test_mqtt ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tests mqtt switch. """ import unittest from homeassistant.const import STATE_ON, STATE_OFF import homeassistant.core as ha import homeassistant.components.switch as switch from tests.common import mock_mqtt_component, fire_mqtt_message class TestSensorMQTT(unittest.TestCase): """ Test the MQTT switch. """ def setUp(self): # pylint: disable=invalid-name self.hass = ha.HomeAssistant() self.mock_publish = mock_mqtt_component(self.hass) def tearDown(self): # pylint: disable=invalid-name """ Stop down stuff we started. """ self.hass.stop() def test_controlling_state_via_topic(self): self.assertTrue(switch.setup(self.hass, { 'switch': { 'platform': 'mqtt', 'name': 'test', 'state_topic': 'state-topic', 'command_topic': 'command-topic', 'payload_on': 'beer on', 'payload_off': 'beer off' } })) state = self.hass.states.get('switch.test') self.assertEqual(STATE_OFF, state.state) fire_mqtt_message(self.hass, 'state-topic', 'beer on') self.hass.pool.block_till_done() state = self.hass.states.get('switch.test') self.assertEqual(STATE_ON, state.state) fire_mqtt_message(self.hass, 'state-topic', 'beer off') self.hass.pool.block_till_done() state = self.hass.states.get('switch.test') self.assertEqual(STATE_OFF, state.state) def test_sending_mqtt_commands_and_optimistic(self): self.assertTrue(switch.setup(self.hass, { 'switch': { 'platform': 'mqtt', 'name': 'test', 'command_topic': 'command-topic', 'payload_on': 'beer on', 'payload_off': 'beer off', 'qos': 2 } })) state = self.hass.states.get('switch.test') self.assertEqual(STATE_OFF, state.state) switch.turn_on(self.hass, 'switch.test') self.hass.pool.block_till_done() self.assertEqual(('command-topic', 'beer on', 2), self.mock_publish.mock_calls[-1][1]) state = self.hass.states.get('switch.test') self.assertEqual(STATE_ON, state.state) switch.turn_off(self.hass, 'switch.test') self.hass.pool.block_till_done() self.assertEqual(('command-topic', 'beer off', 2), self.mock_publish.mock_calls[-1][1]) state = self.hass.states.get('switch.test') self.assertEqual(STATE_OFF, state.state)
anthgur/servo
refs/heads/master
tests/wpt/web-platform-tests/workers/baseurl/beta/worker.py
241
def main(request, response): return (302, "Moved"), [("Location", "../gamma/worker.js")], "postMessage('executed redirecting script');"
jdereus/labman
refs/heads/master
labcontrol/db/tests/__init__.py
7
# ---------------------------------------------------------------------------- # Copyright (c) 2017-, LabControl development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ----------------------------------------------------------------------------
bkirui/odoo
refs/heads/8.0
openerp/conf/__init__.py
442
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## """ Library-wide configuration variables. For now, configuration code is in openerp.tools.config. It is in mainly unprocessed form, e.g. addons_path is a string with commas-separated paths. The aim is to have code related to configuration (command line parsing, configuration file loading and saving, ...) in this module and provide real Python variables, e.g. addons_paths is really a list of paths. To initialize properly this module, openerp.tools.config.parse_config() must be used. """ import deprecation # Paths to search for OpenERP addons. addons_paths = [] # List of server-wide modules to load. Those modules are supposed to provide # features not necessarily tied to a particular database. This is in contrast # to modules that are always bound to a specific database when they are # installed (i.e. the majority of OpenERP addons). This is set with the --load # command-line option. server_wide_modules = [] # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
cchurch/ansible
refs/heads/devel
test/units/modules/net_tools/nios/test_nios_mx_record.py
68
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.modules.net_tools.nios import nios_mx_record from ansible.module_utils.net_tools.nios import api from units.compat.mock import patch, MagicMock, Mock from .test_nios_module import TestNiosModule, load_fixture class TestNiosMXRecordModule(TestNiosModule): module = nios_mx_record def setUp(self): super(TestNiosMXRecordModule, self).setUp() self.module = MagicMock(name='ansible.modules.net_tools.nios.nios_mx_record.WapiModule') self.module.check_mode = False self.module.params = {'provider': None} self.mock_wapi = patch('ansible.modules.net_tools.nios.nios_mx_record.WapiModule') self.exec_command = self.mock_wapi.start() self.mock_wapi_run = patch('ansible.modules.net_tools.nios.nios_mx_record.WapiModule.run') self.mock_wapi_run.start() self.load_config = self.mock_wapi_run.start() def tearDown(self): super(TestNiosMXRecordModule, self).tearDown() self.mock_wapi.stop() self.mock_wapi_run.stop() def _get_wapi(self, test_object): wapi = api.WapiModule(self.module) wapi.get_object = Mock(name='get_object', return_value=test_object) wapi.create_object = Mock(name='create_object') wapi.update_object = Mock(name='update_object') wapi.delete_object = Mock(name='delete_object') return wapi def load_fixtures(self, commands=None): self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None) self.load_config.return_value = dict(diff=None, session='session') def test_nios_mx_record_create(self): self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible.com', 'mx': 'mailhost.ansible.com', 'preference': 0, 'comment': None, 'extattrs': None} test_object = None test_spec = { "name": {"ib_req": True}, "mx": {"ib_req": True}, "preference": {"ib_req": True}, "comment": {}, "extattrs": {} } wapi = self._get_wapi(test_object) print("WAPI: ", wapi) res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(), 'mx': 'mailhost.ansible.com', 'preference': 0}) def test_nios_mx_record_update_comment(self): self.module.params = {'provider': None, 'state': 'present', 'name': 'ansible.com', 'mx': 'mailhost.ansible.com', 'preference': 0, 'comment': 'updated comment', 'extattrs': None} test_object = [ { "comment": "test comment", "_ref": "mxrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true", "name": "ansible.com", "mx": "mailhost.ansible.com", "preference": 0, "extattrs": {} } ] test_spec = { "name": {"ib_req": True}, "mx": {"ib_req": True}, "preference": {"ib_req": True}, "comment": {}, "extattrs": {} } wapi = self._get_wapi(test_object) res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) def test_nios_mx_record_remove(self): self.module.params = {'provider': None, 'state': 'absent', 'name': 'ansible.com', 'mx': 'mailhost.ansible.com', 'preference': 0, 'comment': None, 'extattrs': None} ref = "mxrecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false" test_object = [{ "comment": "test comment", "_ref": ref, "name": "ansible.com", "mx": "mailhost.ansible.com", "extattrs": {'Site': {'value': 'test'}} }] test_spec = { "name": {"ib_req": True}, "mx": {"ib_req": True}, "preference": {"ib_req": True}, "comment": {}, "extattrs": {} } wapi = self._get_wapi(test_object) res = wapi.run('testobject', test_spec) self.assertTrue(res['changed']) wapi.delete_object.assert_called_once_with(ref)
yzl0083/orange
refs/heads/master
Orange/testing/regression/tests_20/modules_kmeans-cmp-init.py
6
import orange import orngClustering import random data_names = ["iris.tab", "housing.tab", "vehicle.tab"] data_sets = [orange.ExampleTable(name) for name in data_names] print "%10s %3s %3s %3s" % ("", "Rnd", "Div", "HC") for data, name in zip(data_sets, data_names): random.seed(42) km_random = orngClustering.KMeans(data, centroids=3) km_diversity = orngClustering.KMeans(data, centroids=3, \ initialization=orngClustering.kmeans_init_diversity) km_hc = orngClustering.KMeans(data, centroids=3, \ initialization=orngClustering.KMeans_init_hierarchicalClustering(n=100)) print "%10s %3d %3d %3d" % (name, km_random.iteration, km_diversity.iteration, km_hc.iteration)
aronsky/home-assistant
refs/heads/dev
homeassistant/components/binary_sensor/axis.py
3
""" Support for Axis binary sensors. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/binary_sensor.axis/ """ from datetime import timedelta import logging from homeassistant.components.axis import AxisDeviceEvent from homeassistant.components.binary_sensor import BinarySensorDevice from homeassistant.const import CONF_TRIGGER_TIME from homeassistant.helpers.event import track_point_in_utc_time from homeassistant.util.dt import utcnow DEPENDENCIES = ['axis'] _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Axis binary devices.""" add_entities([AxisBinarySensor(hass, discovery_info)], True) class AxisBinarySensor(AxisDeviceEvent, BinarySensorDevice): """Representation of a binary Axis event.""" def __init__(self, hass, event_config): """Initialize the Axis binary sensor.""" self.hass = hass self._state = False self._delay = event_config[CONF_TRIGGER_TIME] self._timer = None AxisDeviceEvent.__init__(self, event_config) @property def is_on(self): """Return true if event is active.""" return self._state def update(self): """Get the latest data and update the state.""" self._state = self.axis_event.is_tripped def _update_callback(self): """Update the sensor's state, if needed.""" self.update() if self._timer is not None: self._timer() self._timer = None if self._delay > 0 and not self.is_on: # Set timer to wait until updating the state def _delay_update(now): """Timer callback for sensor update.""" _LOGGER.debug("%s called delayed (%s sec) update", self._name, self._delay) self.schedule_update_ha_state() self._timer = None self._timer = track_point_in_utc_time( self.hass, _delay_update, utcnow() + timedelta(seconds=self._delay)) else: self.schedule_update_ha_state()
radiasoft/radtrack
refs/heads/master
radtrack/ui/__init__.py
17
__author__ = 'David'
sgerhart/ansible
refs/heads/maintenance_policy_module
lib/ansible/modules/cloud/amazon/aws_batch_job_definition.py
40
#!/usr/bin/python # Copyright (c) 2017 Jon Meran <jonathan.meran@sonos.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: aws_batch_job_definition short_description: Manage AWS Batch Job Definitions description: - This module allows the management of AWS Batch Job Definitions. It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions. version_added: "2.5" author: Jon Meran (@jonmer85) options: job_definition_arn: description: - The arn for the job definition job_definition_name: description: - The name for the job definition required: true state: description: - Describes the desired state. required: true default: "present" choices: ["present", "absent"] type: description: - The type of job definition required: true parameters: description: - Default parameter substitution placeholders to set in the job definition. Parameters are specified as a key-value pair mapping. Parameters in a SubmitJob request override any corresponding parameter defaults from the job definition. image: description: - The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with `` repository-url /image <colon>tag ``. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run. vcpus: description: - The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. memory: description: - The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. command: description: - The command that is passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd. job_role_arn: description: - The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions. volumes: description: - A list of data volumes used in a job. List of dictionaries. suboptions: host: description: - The contents of the host parameter determine whether your data volume persists on the host container instance and where it is stored. If the host parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data is not guaranteed to persist after the containers associated with it stop running. This is a dictionary with one property, sourcePath - The path on the host container instance that is presented to the container. If this parameter is empty,then the Docker daemon has assigned a host path for you. If the host parameter contains a sourcePath file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the sourcePath value does not exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported. name: description: - The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints. environment: description: - The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run. List of dictionaries. suboptions: name: description: - The name of the key value pair. For environment variables, this is the name of the environment variable. value: description: - The value of the key value pair. For environment variables, this is the value of the environment variable. mount_points: description: - The mount points for data volumes in your container. This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run. List of dictionaries. suboptions: containerPath: description: - The path on the container at which to mount the host volume. readOnly: description: - If this value is true , the container has read-only access to the volume; otherwise, the container can write to the volume. The default value is false. sourceVolume: description: - The name of the volume to mount. readonly_root_filesystem: description: - When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run. privileged: description: - When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run. ulimits: description: - A list of ulimits to set in the container. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run. List of dictionaries. suboptions: hardLimit: description: - The hard limit for the ulimit type. name: description: - The type of the ulimit. softLimit: description: - The soft limit for the ulimit type. user: description: - The user name to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run. attempts: description: - Retry strategy - The number of times to move a job to the RUNNABLE status. You may specify between 1 and 10 attempts. If attempts is greater than one, the job is retried if it fails until it has moved to RUNNABLE that many times. requirements: - boto3 extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' --- - hosts: localhost gather_facts: no vars: state: present tasks: - name: My Batch Job Definition batch_job_definition: job_definition_name: My Batch Job Definition state: present type: container parameters: Param1: Val1 Param2: Val2 image: <Docker Image URL> vcpus: 1 memory: 512 command: - python - run_my_script.py - arg1 job_role_arn: <Job Role ARN> attempts: 3 register: job_definition_create_result - name: show results debug: var=job_definition_create_result ''' RETURN = ''' --- output: description: "returns what action was taken, whether something was changed, invocation and response" returned: always sample: batch_job_definition_action: none changed: false response: job_definition_arn: "arn:aws:batch:...." job_definition_name: <name> status: INACTIVE type: container type: dict ''' from ansible.module_utils._text import to_native from ansible.module_utils.aws.batch import AWSConnection, cc, set_api_params from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3 from ansible.module_utils.ec2 import camel_dict_to_snake_dict import traceback try: from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError except ImportError: pass # Handled by HAS_BOTO3 # --------------------------------------------------------------------------------------------------- # # Helper Functions & classes # # --------------------------------------------------------------------------------------------------- # logger = logging.getLogger() # logging.basicConfig(filename='ansible_debug.log') # logger.setLevel(logging.DEBUG) def validate_params(module, aws): """ Performs basic parameter validation. :param module: :param aws: :return: """ return # --------------------------------------------------------------------------------------------------- # # Batch Job Definition functions # # --------------------------------------------------------------------------------------------------- def get_current_job_definition(module, connection): try: environments = connection.client().describe_job_definitions( jobDefinitionName=module.params['job_definition_name'] ) if len(environments['jobDefinitions']) > 0: latest_revision = max(map(lambda d: d['revision'], environments['jobDefinitions'])) latest_definition = next((x for x in environments['jobDefinitions'] if x['revision'] == latest_revision), None) return latest_definition return None except ClientError: return None def create_job_definition(module, aws): """ Adds a Batch job definition :param module: :param aws: :return: """ client = aws.client('batch') changed = False # set API parameters api_params = set_api_params(module, get_base_params()) container_properties_params = set_api_params(module, get_container_property_params()) retry_strategy_params = set_api_params(module, get_retry_strategy_params()) api_params['retryStrategy'] = retry_strategy_params api_params['containerProperties'] = container_properties_params try: if not module.check_mode: client.register_job_definition(**api_params) changed = True except (ClientError, ParamValidationError, MissingParametersError) as e: module.fail_json(msg='Error registering job definition: {0}'.format(to_native(e)), exception=traceback.format_exc()) return changed def get_retry_strategy_params(): return 'attempts', def get_container_property_params(): return ('image', 'vcpus', 'memory', 'command', 'job_role_arn', 'volumes', 'environment', 'mount_points', 'readonly_root_filesystem', 'privileged', 'ulimits', 'user') def get_base_params(): return 'job_definition_name', 'type', 'parameters' def get_compute_environment_order_list(module): compute_environment_order_list = [] for ceo in module.params['compute_environment_order']: compute_environment_order_list.append(dict(order=ceo['order'], computeEnvironment=ceo['compute_environment'])) return compute_environment_order_list def remove_job_definition(module, aws): """ Remove a Batch job definition :param module: :param aws: :return: """ client = aws.client('batch') changed = False try: if not module.check_mode: client.deregister_job_definition(jobDefinition=module.params['job_definition_arn']) changed = True except (ClientError, ParamValidationError, MissingParametersError) as e: module.fail_json(msg='Error removing job definition: {0}'.format(to_native(e)), exception=traceback.format_exc()) return changed def job_definition_equal(module, current_definition): equal = True for param in get_base_params(): if module.params.get(param) != current_definition.get(cc(param)): equal = False break for param in get_container_property_params(): if module.params.get(param) != current_definition.get('containerProperties').get(cc(param)): equal = False break for param in get_retry_strategy_params(): if module.params.get(param) != current_definition.get('retryStrategy').get(cc(param)): equal = False break return equal def manage_state(module, aws): changed = False current_state = 'absent' state = module.params['state'] job_definition_name = module.params['job_definition_name'] action_taken = 'none' response = None check_mode = module.check_mode # check if the job definition exists current_job_definition = get_current_job_definition(module, aws) if current_job_definition: current_state = 'present' if state == 'present': if current_state == 'present': # check if definition has changed and register a new version if necessary if not job_definition_equal(module, current_job_definition): create_job_definition(module, aws) action_taken = 'updated with new version' changed = True else: # Create Job definition changed = create_job_definition(module, aws) action_taken = 'added' response = get_current_job_definition(module, aws) if not response: module.fail_json(msg='Unable to get job definition information after creating/updating') else: if current_state == 'present': # remove the Job definition changed = remove_job_definition(module, aws) action_taken = 'deregistered' return dict(changed=changed, batch_job_definition_action=action_taken, response=response) # --------------------------------------------------------------------------------------------------- # # MAIN # # --------------------------------------------------------------------------------------------------- def main(): """ Main entry point. :return dict: ansible facts """ argument_spec = ec2_argument_spec() argument_spec.update( dict( state=dict(required=False, default='present', choices=['present', 'absent']), job_definition_name=dict(required=True), job_definition_arn=dict(), type=dict(required=True), parameters=dict(type='dict'), image=dict(required=True), vcpus=dict(type='int', required=True), memory=dict(type='int', required=True), command=dict(type='list', default=[]), job_role_arn=dict(), volumes=dict(type='list', default=[]), environment=dict(type='list', default=[]), mount_points=dict(type='list', default=[]), readonly_root_filesystem=dict(), privileged=dict(), ulimits=dict(type='list', default=[]), user=dict(), attempts=dict(type='int'), region=dict(aliases=['aws_region', 'ec2_region']) ) ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True ) # validate dependencies if not HAS_BOTO3: module.fail_json(msg='boto3 is required for this module.') aws = AWSConnection(module, ['batch']) validate_params(module, aws) results = manage_state(module, aws) module.exit_json(**camel_dict_to_snake_dict(results)) if __name__ == '__main__': main()
firerszd/kbengine
refs/heads/master
kbe/src/lib/python/Tools/clinic/clinic.py
30
#!/usr/bin/env python3 # # Argument Clinic # Copyright 2012-2013 by Larry Hastings. # Licensed to the PSF under a contributor agreement. # import abc import ast import atexit import collections import contextlib import copy import cpp import functools import hashlib import inspect import io import itertools import os import pprint import re import shlex import string import sys import tempfile import textwrap import traceback import uuid # TODO: # # soon: # # * allow mixing any two of {positional-only, positional-or-keyword, # keyword-only} # * dict constructor uses positional-only and keyword-only # * max and min use positional only with an optional group # and keyword-only # version = '1' _empty = inspect._empty _void = inspect._void NoneType = type(None) class Unspecified: def __repr__(self): return '<Unspecified>' unspecified = Unspecified() class Null: def __repr__(self): return '<Null>' NULL = Null() class Unknown: def __repr__(self): return '<Unknown>' unknown = Unknown() def _text_accumulator(): text = [] def output(): s = ''.join(text) text.clear() return s return text, text.append, output def text_accumulator(): """ Creates a simple text accumulator / joiner. Returns a pair of callables: append, output "append" appends a string to the accumulator. "output" returns the contents of the accumulator joined together (''.join(accumulator)) and empties the accumulator. """ text, append, output = _text_accumulator() return append, output def warn_or_fail(fail=False, *args, filename=None, line_number=None): joined = " ".join([str(a) for a in args]) add, output = text_accumulator() if fail: add("Error") else: add("Warning") if clinic: if filename is None: filename = clinic.filename if getattr(clinic, 'block_parser', None) and (line_number is None): line_number = clinic.block_parser.line_number if filename is not None: add(' in file "' + filename + '"') if line_number is not None: add(" on line " + str(line_number)) add(':\n') add(joined) print(output()) if fail: sys.exit(-1) def warn(*args, filename=None, line_number=None): return warn_or_fail(False, *args, filename=filename, line_number=line_number) def fail(*args, filename=None, line_number=None): return warn_or_fail(True, *args, filename=filename, line_number=line_number) def quoted_for_c_string(s): for old, new in ( ('\\', '\\\\'), # must be first! ('"', '\\"'), ("'", "\\'"), ): s = s.replace(old, new) return s def c_repr(s): return '"' + s + '"' is_legal_c_identifier = re.compile('^[A-Za-z_][A-Za-z0-9_]*$').match def is_legal_py_identifier(s): return all(is_legal_c_identifier(field) for field in s.split('.')) # identifiers that are okay in Python but aren't a good idea in C. # so if they're used Argument Clinic will add "_value" to the end # of the name in C. c_keywords = set(""" asm auto break case char const continue default do double else enum extern float for goto if inline int long register return short signed sizeof static struct switch typedef typeof union unsigned void volatile while """.strip().split()) def ensure_legal_c_identifier(s): # for now, just complain if what we're given isn't legal if not is_legal_c_identifier(s): fail("Illegal C identifier: {}".format(s)) # but if we picked a C keyword, pick something else if s in c_keywords: return s + "_value" return s def rstrip_lines(s): text, add, output = _text_accumulator() for line in s.split('\n'): add(line.rstrip()) add('\n') text.pop() return output() def linear_format(s, **kwargs): """ Perform str.format-like substitution, except: * The strings substituted must be on lines by themselves. (This line is the "source line".) * If the substitution text is empty, the source line is removed in the output. * If the field is not recognized, the original line is passed unmodified through to the output. * If the substitution text is not empty: * Each line of the substituted text is indented by the indent of the source line. * A newline will be added to the end. """ add, output = text_accumulator() for line in s.split('\n'): indent, curly, trailing = line.partition('{') if not curly: add(line) add('\n') continue name, curl, trailing = trailing.partition('}') if not curly or name not in kwargs: add(line) add('\n') continue if trailing: fail("Text found after {" + name + "} block marker! It must be on a line by itself.") if indent.strip(): fail("Non-whitespace characters found before {" + name + "} block marker! It must be on a line by itself.") value = kwargs[name] if not value: continue value = textwrap.indent(rstrip_lines(value), indent) add(value) add('\n') return output()[:-1] def indent_all_lines(s, prefix): """ Returns 's', with 'prefix' prepended to all lines. If the last line is empty, prefix is not prepended to it. (If s is blank, returns s unchanged.) (textwrap.indent only adds to non-blank lines.) """ split = s.split('\n') last = split.pop() final = [] for line in split: final.append(prefix) final.append(line) final.append('\n') if last: final.append(prefix) final.append(last) return ''.join(final) def suffix_all_lines(s, suffix): """ Returns 's', with 'suffix' appended to all lines. If the last line is empty, suffix is not appended to it. (If s is blank, returns s unchanged.) """ split = s.split('\n') last = split.pop() final = [] for line in split: final.append(line) final.append(suffix) final.append('\n') if last: final.append(last) final.append(suffix) return ''.join(final) def version_splitter(s): """Splits a version string into a tuple of integers. The following ASCII characters are allowed, and employ the following conversions: a -> -3 b -> -2 c -> -1 (This permits Python-style version strings such as "1.4b3".) """ version = [] accumulator = [] def flush(): if not accumulator: raise ValueError('Unsupported version string: ' + repr(s)) version.append(int(''.join(accumulator))) accumulator.clear() for c in s: if c.isdigit(): accumulator.append(c) elif c == '.': flush() elif c in 'abc': flush() version.append('abc'.index(c) - 3) else: raise ValueError('Illegal character ' + repr(c) + ' in version string ' + repr(s)) flush() return tuple(version) def version_comparitor(version1, version2): iterator = itertools.zip_longest(version_splitter(version1), version_splitter(version2), fillvalue=0) for i, (a, b) in enumerate(iterator): if a < b: return -1 if a > b: return 1 return 0 class CRenderData: def __init__(self): # The C statements to declare variables. # Should be full lines with \n eol characters. self.declarations = [] # The C statements required to initialize the variables before the parse call. # Should be full lines with \n eol characters. self.initializers = [] # The C statements needed to dynamically modify the values # parsed by the parse call, before calling the impl. self.modifications = [] # The entries for the "keywords" array for PyArg_ParseTuple. # Should be individual strings representing the names. self.keywords = [] # The "format units" for PyArg_ParseTuple. # Should be individual strings that will get self.format_units = [] # The varargs arguments for PyArg_ParseTuple. self.parse_arguments = [] # The parameter declarations for the impl function. self.impl_parameters = [] # The arguments to the impl function at the time it's called. self.impl_arguments = [] # For return converters: the name of the variable that # should receive the value returned by the impl. self.return_value = "return_value" # For return converters: the code to convert the return # value from the parse function. This is also where # you should check the _return_value for errors, and # "goto exit" if there are any. self.return_conversion = [] # The C statements required to clean up after the impl call. self.cleanup = [] class FormatCounterFormatter(string.Formatter): """ This counts how many instances of each formatter "replacement string" appear in the format string. e.g. after evaluating "string {a}, {b}, {c}, {a}" the counts dict would now look like {'a': 2, 'b': 1, 'c': 1} """ def __init__(self): self.counts = collections.Counter() def get_value(self, key, args, kwargs): self.counts[key] += 1 return '' class Language(metaclass=abc.ABCMeta): start_line = "" body_prefix = "" stop_line = "" checksum_line = "" def __init__(self, filename): pass @abc.abstractmethod def render(self, clinic, signatures): pass def parse_line(self, line): pass def validate(self): def assert_only_one(attr, *additional_fields): """ Ensures that the string found at getattr(self, attr) contains exactly one formatter replacement string for each valid field. The list of valid fields is ['dsl_name'] extended by additional_fields. e.g. self.fmt = "{dsl_name} {a} {b}" # this passes self.assert_only_one('fmt', 'a', 'b') # this fails, the format string has a {b} in it self.assert_only_one('fmt', 'a') # this fails, the format string doesn't have a {c} in it self.assert_only_one('fmt', 'a', 'b', 'c') # this fails, the format string has two {a}s in it, # it must contain exactly one self.fmt2 = '{dsl_name} {a} {a}' self.assert_only_one('fmt2', 'a') """ fields = ['dsl_name'] fields.extend(additional_fields) line = getattr(self, attr) fcf = FormatCounterFormatter() fcf.format(line) def local_fail(should_be_there_but_isnt): if should_be_there_but_isnt: fail("{} {} must contain {{{}}} exactly once!".format( self.__class__.__name__, attr, name)) else: fail("{} {} must not contain {{{}}}!".format( self.__class__.__name__, attr, name)) for name, count in fcf.counts.items(): if name in fields: if count > 1: local_fail(True) else: local_fail(False) for name in fields: if fcf.counts.get(name) != 1: local_fail(True) assert_only_one('start_line') assert_only_one('stop_line') field = "arguments" if "{arguments}" in self.checksum_line else "checksum" assert_only_one('checksum_line', field) class PythonLanguage(Language): language = 'Python' start_line = "#/*[{dsl_name} input]" body_prefix = "#" stop_line = "#[{dsl_name} start generated code]*/" checksum_line = "#/*[{dsl_name} end generated code: {arguments}]*/" def permute_left_option_groups(l): """ Given [1, 2, 3], should yield: () (3,) (2, 3) (1, 2, 3) """ yield tuple() accumulator = [] for group in reversed(l): accumulator = list(group) + accumulator yield tuple(accumulator) def permute_right_option_groups(l): """ Given [1, 2, 3], should yield: () (1,) (1, 2) (1, 2, 3) """ yield tuple() accumulator = [] for group in l: accumulator.extend(group) yield tuple(accumulator) def permute_optional_groups(left, required, right): """ Generator function that computes the set of acceptable argument lists for the provided iterables of argument groups. (Actually it generates a tuple of tuples.) Algorithm: prefer left options over right options. If required is empty, left must also be empty. """ required = tuple(required) result = [] if not required: assert not left accumulator = [] counts = set() for r in permute_right_option_groups(right): for l in permute_left_option_groups(left): t = l + required + r if len(t) in counts: continue counts.add(len(t)) accumulator.append(t) accumulator.sort(key=len) return tuple(accumulator) def strip_leading_and_trailing_blank_lines(s): lines = s.rstrip().split('\n') while lines: line = lines[0] if line.strip(): break del lines[0] return '\n'.join(lines) @functools.lru_cache() def normalize_snippet(s, *, indent=0): """ Reformats s: * removes leading and trailing blank lines * ensures that it does not end with a newline * dedents so the first nonwhite character on any line is at column "indent" """ s = strip_leading_and_trailing_blank_lines(s) s = textwrap.dedent(s) if indent: s = textwrap.indent(s, ' ' * indent) return s class CLanguage(Language): body_prefix = "#" language = 'C' start_line = "/*[{dsl_name} input]" body_prefix = "" stop_line = "[{dsl_name} start generated code]*/" checksum_line = "/*[{dsl_name} end generated code: {arguments}]*/" def __init__(self, filename): super().__init__(filename) self.cpp = cpp.Monitor(filename) self.cpp.fail = fail def parse_line(self, line): self.cpp.writeline(line) def render(self, clinic, signatures): function = None for o in signatures: if isinstance(o, Function): if function: fail("You may specify at most one function per block.\nFound a block containing at least two:\n\t" + repr(function) + " and " + repr(o)) function = o return self.render_function(clinic, function) def docstring_for_c_string(self, f): text, add, output = _text_accumulator() # turn docstring into a properly quoted C string for line in f.docstring.split('\n'): add('"') add(quoted_for_c_string(line)) add('\\n"\n') text.pop() add('"') return ''.join(text) def output_templates(self, f): parameters = list(f.parameters.values()) assert parameters assert isinstance(parameters[0].converter, self_converter) del parameters[0] converters = [p.converter for p in parameters] has_option_groups = parameters and (parameters[0].group or parameters[-1].group) default_return_converter = (not f.return_converter or f.return_converter.type == 'PyObject *') positional = parameters and (parameters[-1].kind == inspect.Parameter.POSITIONAL_ONLY) all_boring_objects = False # yes, this will be false if there are 0 parameters, it's fine first_optional = len(parameters) for i, p in enumerate(parameters): c = p.converter if type(c) != object_converter: break if c.format_unit != 'O': break if p.default is not unspecified: first_optional = min(first_optional, i) else: all_boring_objects = True new_or_init = f.kind in (METHOD_NEW, METHOD_INIT) meth_o = (len(parameters) == 1 and parameters[0].kind == inspect.Parameter.POSITIONAL_ONLY and not converters[0].is_optional() and isinstance(converters[0], object_converter) and converters[0].format_unit == 'O' and not new_or_init) # we have to set these things before we're done: # # docstring_prototype # docstring_definition # impl_prototype # methoddef_define # parser_prototype # parser_definition # impl_definition # cpp_if # cpp_endif # methoddef_ifndef return_value_declaration = "PyObject *return_value = NULL;" methoddef_define = normalize_snippet(""" #define {methoddef_name} \\ {{"{name}", (PyCFunction){c_basename}, {methoddef_flags}, {c_basename}__doc__}}, """) if new_or_init and not f.docstring: docstring_prototype = docstring_definition = '' else: docstring_prototype = normalize_snippet(""" PyDoc_VAR({c_basename}__doc__); """) docstring_definition = normalize_snippet(""" PyDoc_STRVAR({c_basename}__doc__, {docstring}); """) impl_definition = normalize_snippet(""" static {impl_return_type} {c_basename}_impl({impl_parameters}) """) impl_prototype = parser_prototype = parser_definition = None parser_prototype_keyword = normalize_snippet(""" static PyObject * {c_basename}({self_type}{self_name}, PyObject *args, PyObject *kwargs) """) parser_prototype_varargs = normalize_snippet(""" static PyObject * {c_basename}({self_type}{self_name}, PyObject *args) """) # parser_body_fields remembers the fields passed in to the # previous call to parser_body. this is used for an awful hack. parser_body_fields = () def parser_body(prototype, *fields): nonlocal parser_body_fields add, output = text_accumulator() add(prototype) parser_body_fields = fields fields = list(fields) fields.insert(0, normalize_snippet(""" {{ {return_value_declaration} {declarations} {initializers} """) + "\n") # just imagine--your code is here in the middle fields.append(normalize_snippet(""" {modifications} {return_value} = {c_basename}_impl({impl_arguments}); {return_conversion} {exit_label} {cleanup} return return_value; }} """)) for field in fields: add('\n') add(field) return output() def insert_keywords(s): return linear_format(s, declarations="static char *_keywords[] = {{{keywords}, NULL}};\n{declarations}") if not parameters: # no parameters, METH_NOARGS flags = "METH_NOARGS" parser_prototype = normalize_snippet(""" static PyObject * {c_basename}({self_type}{self_name}, PyObject *Py_UNUSED(ignored)) """) parser_definition = parser_prototype if default_return_converter: parser_definition = parser_prototype + '\n' + normalize_snippet(""" {{ return {c_basename}_impl({impl_arguments}); }} """) else: parser_definition = parser_body(parser_prototype) elif meth_o: flags = "METH_O" meth_o_prototype = normalize_snippet(""" static PyObject * {c_basename}({impl_parameters}) """) if default_return_converter: # maps perfectly to METH_O, doesn't need a return converter. # so we skip making a parse function # and call directly into the impl function. impl_prototype = parser_prototype = parser_definition = '' impl_definition = meth_o_prototype else: # SLIGHT HACK # use impl_parameters for the parser here! parser_prototype = meth_o_prototype parser_definition = parser_body(parser_prototype) elif has_option_groups: # positional parameters with option groups # (we have to generate lots of PyArg_ParseTuple calls # in a big switch statement) flags = "METH_VARARGS" parser_prototype = parser_prototype_varargs parser_definition = parser_body(parser_prototype, ' {option_group_parsing}') elif positional and all_boring_objects: # positional-only, but no option groups, # and nothing but normal objects: # PyArg_UnpackTuple! flags = "METH_VARARGS" parser_prototype = parser_prototype_varargs parser_definition = parser_body(parser_prototype, normalize_snippet(""" if (!PyArg_UnpackTuple(args, "{name}", {unpack_min}, {unpack_max}, {parse_arguments})) goto exit; """, indent=4)) elif positional: # positional-only, but no option groups # we only need one call to PyArg_ParseTuple flags = "METH_VARARGS" parser_prototype = parser_prototype_varargs parser_definition = parser_body(parser_prototype, normalize_snippet(""" if (!PyArg_ParseTuple(args, "{format_units}:{name}", {parse_arguments})) goto exit; """, indent=4)) else: # positional-or-keyword arguments flags = "METH_VARARGS|METH_KEYWORDS" parser_prototype = parser_prototype_keyword body = normalize_snippet(""" if (!PyArg_ParseTupleAndKeywords(args, kwargs, "{format_units}:{name}", _keywords, {parse_arguments})) goto exit; """, indent=4) parser_definition = parser_body(parser_prototype, normalize_snippet(""" if (!PyArg_ParseTupleAndKeywords(args, kwargs, "{format_units}:{name}", _keywords, {parse_arguments})) goto exit; """, indent=4)) parser_definition = insert_keywords(parser_definition) if new_or_init: methoddef_define = '' if f.kind == METHOD_NEW: parser_prototype = parser_prototype_keyword else: return_value_declaration = "int return_value = -1;" parser_prototype = normalize_snippet(""" static int {c_basename}({self_type}{self_name}, PyObject *args, PyObject *kwargs) """) fields = list(parser_body_fields) parses_positional = 'METH_NOARGS' not in flags parses_keywords = 'METH_KEYWORDS' in flags if parses_keywords: assert parses_positional if not parses_keywords: fields.insert(0, normalize_snippet(""" if ({self_type_check}!_PyArg_NoKeywords("{name}", kwargs)) goto exit; """, indent=4)) if not parses_positional: fields.insert(0, normalize_snippet(""" if ({self_type_check}!_PyArg_NoPositional("{name}", args)) goto exit; """, indent=4)) parser_definition = parser_body(parser_prototype, *fields) if parses_keywords: parser_definition = insert_keywords(parser_definition) if f.methoddef_flags: flags += '|' + f.methoddef_flags methoddef_define = methoddef_define.replace('{methoddef_flags}', flags) methoddef_ifndef = '' conditional = self.cpp.condition() if not conditional: cpp_if = cpp_endif = '' else: cpp_if = "#if " + conditional cpp_endif = "#endif /* " + conditional + " */" if methoddef_define: methoddef_ifndef = normalize_snippet(""" #ifndef {methoddef_name} #define {methoddef_name} #endif /* !defined({methoddef_name}) */ """) # add ';' to the end of parser_prototype and impl_prototype # (they mustn't be None, but they could be an empty string.) assert parser_prototype is not None if parser_prototype: assert not parser_prototype.endswith(';') parser_prototype += ';' if impl_prototype is None: impl_prototype = impl_definition if impl_prototype: impl_prototype += ";" parser_definition = parser_definition.replace("{return_value_declaration}", return_value_declaration) d = { "docstring_prototype" : docstring_prototype, "docstring_definition" : docstring_definition, "impl_prototype" : impl_prototype, "methoddef_define" : methoddef_define, "parser_prototype" : parser_prototype, "parser_definition" : parser_definition, "impl_definition" : impl_definition, "cpp_if" : cpp_if, "cpp_endif" : cpp_endif, "methoddef_ifndef" : methoddef_ifndef, } # make sure we didn't forget to assign something, # and wrap each non-empty value in \n's d2 = {} for name, value in d.items(): assert value is not None, "got a None value for template " + repr(name) if value: value = '\n' + value + '\n' d2[name] = value return d2 @staticmethod def group_to_variable_name(group): adjective = "left_" if group < 0 else "right_" return "group_" + adjective + str(abs(group)) def render_option_group_parsing(self, f, template_dict): # positional only, grouped, optional arguments! # can be optional on the left or right. # here's an example: # # [ [ [ A1 A2 ] B1 B2 B3 ] C1 C2 ] D1 D2 D3 [ E1 E2 E3 [ F1 F2 F3 ] ] # # Here group D are required, and all other groups are optional. # (Group D's "group" is actually None.) # We can figure out which sets of arguments we have based on # how many arguments are in the tuple. # # Note that you need to count up on both sides. For example, # you could have groups C+D, or C+D+E, or C+D+E+F. # # What if the number of arguments leads us to an ambiguous result? # Clinic prefers groups on the left. So in the above example, # five arguments would map to B+C, not C+D. add, output = text_accumulator() parameters = list(f.parameters.values()) if isinstance(parameters[0].converter, self_converter): del parameters[0] groups = [] group = None left = [] right = [] required = [] last = unspecified for p in parameters: group_id = p.group if group_id != last: last = group_id group = [] if group_id < 0: left.append(group) elif group_id == 0: group = required else: right.append(group) group.append(p) count_min = sys.maxsize count_max = -1 add("switch (PyTuple_GET_SIZE(args)) {{\n") for subset in permute_optional_groups(left, required, right): count = len(subset) count_min = min(count_min, count) count_max = max(count_max, count) if count == 0: add(""" case 0: break; """) continue group_ids = {p.group for p in subset} # eliminate duplicates d = {} d['count'] = count d['name'] = f.name d['groups'] = sorted(group_ids) d['format_units'] = "".join(p.converter.format_unit for p in subset) parse_arguments = [] for p in subset: p.converter.parse_argument(parse_arguments) d['parse_arguments'] = ", ".join(parse_arguments) group_ids.discard(0) lines = [self.group_to_variable_name(g) + " = 1;" for g in group_ids] lines = "\n".join(lines) s = """ case {count}: if (!PyArg_ParseTuple(args, "{format_units}:{name}", {parse_arguments})) goto exit; {group_booleans} break; """[1:] s = linear_format(s, group_booleans=lines) s = s.format_map(d) add(s) add(" default:\n") s = ' PyErr_SetString(PyExc_TypeError, "{} requires {} to {} arguments");\n' add(s.format(f.full_name, count_min, count_max)) add(' goto exit;\n') add("}}") template_dict['option_group_parsing'] = output() def render_function(self, clinic, f): if not f: return "" add, output = text_accumulator() data = CRenderData() assert f.parameters, "We should always have a 'self' at this point!" parameters = f.render_parameters converters = [p.converter for p in parameters] templates = self.output_templates(f) f_self = parameters[0] selfless = parameters[1:] assert isinstance(f_self.converter, self_converter), "No self parameter in " + repr(f.full_name) + "!" last_group = 0 first_optional = len(selfless) positional = selfless and selfless[-1].kind == inspect.Parameter.POSITIONAL_ONLY new_or_init = f.kind in (METHOD_NEW, METHOD_INIT) default_return_converter = (not f.return_converter or f.return_converter.type == 'PyObject *') has_option_groups = False # offset i by -1 because first_optional needs to ignore self for i, p in enumerate(parameters, -1): c = p.converter if (i != -1) and (p.default is not unspecified): first_optional = min(first_optional, i) # insert group variable group = p.group if last_group != group: last_group = group if group: group_name = self.group_to_variable_name(group) data.impl_arguments.append(group_name) data.declarations.append("int " + group_name + " = 0;") data.impl_parameters.append("int " + group_name) has_option_groups = True c.render(p, data) if has_option_groups and (not positional): fail("You cannot use optional groups ('[' and ']')\nunless all parameters are positional-only ('/').") # HACK # when we're METH_O, but have a custom return converter, # we use "impl_parameters" for the parsing function # because that works better. but that means we must # supress actually declaring the impl's parameters # as variables in the parsing function. but since it's # METH_O, we have exactly one anyway, so we know exactly # where it is. if ("METH_O" in templates['methoddef_define'] and not default_return_converter): data.declarations.pop(0) template_dict = {} full_name = f.full_name template_dict['full_name'] = full_name if new_or_init: name = f.cls.name else: name = f.name template_dict['name'] = name if f.c_basename: c_basename = f.c_basename else: fields = full_name.split(".") if fields[-1] == '__new__': fields.pop() c_basename = "_".join(fields) template_dict['c_basename'] = c_basename methoddef_name = "{}_METHODDEF".format(c_basename.upper()) template_dict['methoddef_name'] = methoddef_name template_dict['docstring'] = self.docstring_for_c_string(f) template_dict['self_name'] = template_dict['self_type'] = template_dict['self_type_check'] = '' f_self.converter.set_template_dict(template_dict) f.return_converter.render(f, data) template_dict['impl_return_type'] = f.return_converter.type template_dict['declarations'] = "\n".join(data.declarations) template_dict['initializers'] = "\n\n".join(data.initializers) template_dict['modifications'] = '\n\n'.join(data.modifications) template_dict['keywords'] = '"' + '", "'.join(data.keywords) + '"' template_dict['format_units'] = ''.join(data.format_units) template_dict['parse_arguments'] = ', '.join(data.parse_arguments) template_dict['impl_parameters'] = ", ".join(data.impl_parameters) template_dict['impl_arguments'] = ", ".join(data.impl_arguments) template_dict['return_conversion'] = "".join(data.return_conversion).rstrip() template_dict['cleanup'] = "".join(data.cleanup) template_dict['return_value'] = data.return_value # used by unpack tuple code generator ignore_self = -1 if isinstance(converters[0], self_converter) else 0 unpack_min = first_optional unpack_max = len(selfless) template_dict['unpack_min'] = str(unpack_min) template_dict['unpack_max'] = str(unpack_max) if has_option_groups: self.render_option_group_parsing(f, template_dict) for name, destination in clinic.field_destinations.items(): template = templates[name] if has_option_groups: template = linear_format(template, option_group_parsing=template_dict['option_group_parsing']) template = linear_format(template, declarations=template_dict['declarations'], return_conversion=template_dict['return_conversion'], initializers=template_dict['initializers'], modifications=template_dict['modifications'], cleanup=template_dict['cleanup'], ) # Only generate the "exit:" label # if we have any gotos need_exit_label = "goto exit;" in template template = linear_format(template, exit_label="exit:" if need_exit_label else '' ) s = template.format_map(template_dict) if clinic.line_prefix: s = indent_all_lines(s, clinic.line_prefix) if clinic.line_suffix: s = suffix_all_lines(s, clinic.line_suffix) destination.append(s) return clinic.get_destination('block').dump() @contextlib.contextmanager def OverrideStdioWith(stdout): saved_stdout = sys.stdout sys.stdout = stdout try: yield finally: assert sys.stdout is stdout sys.stdout = saved_stdout def create_regex(before, after, word=True, whole_line=True): """Create an re object for matching marker lines.""" group_re = "\w+" if word else ".+" pattern = r'{}({}){}' if whole_line: pattern = '^' + pattern + '$' pattern = pattern.format(re.escape(before), group_re, re.escape(after)) return re.compile(pattern) class Block: r""" Represents a single block of text embedded in another file. If dsl_name is None, the block represents verbatim text, raw original text from the file, in which case "input" will be the only non-false member. If dsl_name is not None, the block represents a Clinic block. input is always str, with embedded \n characters. input represents the original text from the file; if it's a Clinic block, it is the original text with the body_prefix and redundant leading whitespace removed. dsl_name is either str or None. If str, it's the text found on the start line of the block between the square brackets. signatures is either list or None. If it's a list, it may only contain clinic.Module, clinic.Class, and clinic.Function objects. At the moment it should contain at most one of each. output is either str or None. If str, it's the output from this block, with embedded '\n' characters. indent is either str or None. It's the leading whitespace that was found on every line of input. (If body_prefix is not empty, this is the indent *after* removing the body_prefix.) preindent is either str or None. It's the whitespace that was found in front of every line of input *before* the "body_prefix" (see the Language object). If body_prefix is empty, preindent must always be empty too. To illustrate indent and preindent: Assume that '_' represents whitespace. If the block processed was in a Python file, and looked like this: ____#/*[python] ____#__for a in range(20): ____#____print(a) ____#[python]*/ "preindent" would be "____" and "indent" would be "__". """ def __init__(self, input, dsl_name=None, signatures=None, output=None, indent='', preindent=''): assert isinstance(input, str) self.input = input self.dsl_name = dsl_name self.signatures = signatures or [] self.output = output self.indent = indent self.preindent = preindent def __repr__(self): dsl_name = self.dsl_name or "text" def summarize(s): s = repr(s) if len(s) > 30: return s[:26] + "..." + s[0] return s return "".join(( "<Block ", dsl_name, " input=", summarize(self.input), " output=", summarize(self.output), ">")) class BlockParser: """ Block-oriented parser for Argument Clinic. Iterator, yields Block objects. """ def __init__(self, input, language, *, verify=True): """ "input" should be a str object with embedded \n characters. "language" should be a Language object. """ language.validate() self.input = collections.deque(reversed(input.splitlines(keepends=True))) self.block_start_line_number = self.line_number = 0 self.language = language before, _, after = language.start_line.partition('{dsl_name}') assert _ == '{dsl_name}' self.find_start_re = create_regex(before, after, whole_line=False) self.start_re = create_regex(before, after) self.verify = verify self.last_checksum_re = None self.last_dsl_name = None self.dsl_name = None self.first_block = True def __iter__(self): return self def __next__(self): while True: if not self.input: raise StopIteration if self.dsl_name: return_value = self.parse_clinic_block(self.dsl_name) self.dsl_name = None self.first_block = False return return_value block = self.parse_verbatim_block() if self.first_block and not block.input: continue self.first_block = False return block def is_start_line(self, line): match = self.start_re.match(line.lstrip()) return match.group(1) if match else None def _line(self): self.line_number += 1 line = self.input.pop() self.language.parse_line(line) return line def parse_verbatim_block(self): add, output = text_accumulator() self.block_start_line_number = self.line_number while self.input: line = self._line() dsl_name = self.is_start_line(line) if dsl_name: self.dsl_name = dsl_name break add(line) return Block(output()) def parse_clinic_block(self, dsl_name): input_add, input_output = text_accumulator() self.block_start_line_number = self.line_number + 1 stop_line = self.language.stop_line.format(dsl_name=dsl_name) body_prefix = self.language.body_prefix.format(dsl_name=dsl_name) def is_stop_line(line): # make sure to recognize stop line even if it # doesn't end with EOL (it could be the very end of the file) if not line.startswith(stop_line): return False remainder = line[len(stop_line):] return (not remainder) or remainder.isspace() # consume body of program while self.input: line = self._line() if is_stop_line(line) or self.is_start_line(line): break if body_prefix: line = line.lstrip() assert line.startswith(body_prefix) line = line[len(body_prefix):] input_add(line) # consume output and checksum line, if present. if self.last_dsl_name == dsl_name: checksum_re = self.last_checksum_re else: before, _, after = self.language.checksum_line.format(dsl_name=dsl_name, arguments='{arguments}').partition('{arguments}') assert _ == '{arguments}' checksum_re = create_regex(before, after, word=False) self.last_dsl_name = dsl_name self.last_checksum_re = checksum_re # scan forward for checksum line output_add, output_output = text_accumulator() arguments = None while self.input: line = self._line() match = checksum_re.match(line.lstrip()) arguments = match.group(1) if match else None if arguments: break output_add(line) if self.is_start_line(line): break output = output_output() if arguments: d = {} for field in shlex.split(arguments): name, equals, value = field.partition('=') if not equals: fail("Mangled Argument Clinic marker line: {!r}".format(line)) d[name.strip()] = value.strip() if self.verify: if 'input' in d: checksum = d['output'] input_checksum = d['input'] else: checksum = d['checksum'] input_checksum = None computed = compute_checksum(output, len(checksum)) if checksum != computed: fail("Checksum mismatch!\nExpected: {}\nComputed: {}\n" "Suggested fix: remove all generated code including " "the end marker,\n" "or use the '-f' option." .format(checksum, computed)) else: # put back output output_lines = output.splitlines(keepends=True) self.line_number -= len(output_lines) self.input.extend(reversed(output_lines)) output = None return Block(input_output(), dsl_name, output=output) class BlockPrinter: def __init__(self, language, f=None): self.language = language self.f = f or io.StringIO() def print_block(self, block): input = block.input output = block.output dsl_name = block.dsl_name write = self.f.write assert not ((dsl_name == None) ^ (output == None)), "you must specify dsl_name and output together, dsl_name " + repr(dsl_name) if not dsl_name: write(input) return write(self.language.start_line.format(dsl_name=dsl_name)) write("\n") body_prefix = self.language.body_prefix.format(dsl_name=dsl_name) if not body_prefix: write(input) else: for line in input.split('\n'): write(body_prefix) write(line) write("\n") write(self.language.stop_line.format(dsl_name=dsl_name)) write("\n") input = ''.join(block.input) output = ''.join(block.output) if output: if not output.endswith('\n'): output += '\n' write(output) arguments="output={} input={}".format(compute_checksum(output, 16), compute_checksum(input, 16)) write(self.language.checksum_line.format(dsl_name=dsl_name, arguments=arguments)) write("\n") def write(self, text): self.f.write(text) class Destination: def __init__(self, name, type, clinic, *args): self.name = name self.type = type self.clinic = clinic valid_types = ('buffer', 'file', 'suppress', 'two-pass') if type not in valid_types: fail("Invalid destination type " + repr(type) + " for " + name + " , must be " + ', '.join(valid_types)) extra_arguments = 1 if type == "file" else 0 if len(args) < extra_arguments: fail("Not enough arguments for destination " + name + " new " + type) if len(args) > extra_arguments: fail("Too many arguments for destination " + name + " new " + type) if type =='file': d = {} filename = clinic.filename d['path'] = filename dirname, basename = os.path.split(filename) if not dirname: dirname = '.' d['dirname'] = dirname d['basename'] = basename d['basename_root'], d['basename_extension'] = os.path.splitext(filename) self.filename = args[0].format_map(d) if type == 'two-pass': self.id = None self.text, self.append, self._dump = _text_accumulator() def __repr__(self): if self.type == 'file': file_repr = " " + repr(self.filename) else: file_repr = '' return "".join(("<Destination ", self.name, " ", self.type, file_repr, ">")) def clear(self): if self.type != 'buffer': fail("Can't clear destination" + self.name + " , it's not of type buffer") self.text.clear() def dump(self): if self.type == 'two-pass': if self.id is None: self.id = str(uuid.uuid4()) return self.id fail("You can only dump a two-pass buffer exactly once!") return self._dump() # maps strings to Language objects. # "languages" maps the name of the language ("C", "Python"). # "extensions" maps the file extension ("c", "py"). languages = { 'C': CLanguage, 'Python': PythonLanguage } extensions = { name: CLanguage for name in "c cc cpp cxx h hh hpp hxx".split() } extensions['py'] = PythonLanguage # maps strings to callables. # these callables must be of the form: # def foo(name, default, *, ...) # The callable may have any number of keyword-only parameters. # The callable must return a CConverter object. # The callable should not call builtins.print. converters = {} # maps strings to callables. # these callables follow the same rules as those for "converters" above. # note however that they will never be called with keyword-only parameters. legacy_converters = {} # maps strings to callables. # these callables must be of the form: # def foo(*, ...) # The callable may have any number of keyword-only parameters. # The callable must return a CConverter object. # The callable should not call builtins.print. return_converters = {} clinic = None class Clinic: presets_text = """ preset block everything block docstring_prototype suppress parser_prototype suppress cpp_if suppress cpp_endif suppress methoddef_ifndef buffer preset original everything block docstring_prototype suppress parser_prototype suppress cpp_if suppress cpp_endif suppress methoddef_ifndef buffer preset file everything file docstring_prototype suppress parser_prototype suppress impl_definition block preset buffer everything buffer docstring_prototype suppress impl_prototype suppress parser_prototype suppress impl_definition block preset partial-buffer everything buffer docstring_prototype block impl_prototype suppress methoddef_define block parser_prototype block impl_definition block preset two-pass everything buffer docstring_prototype two-pass impl_prototype suppress methoddef_define two-pass parser_prototype two-pass impl_definition block """ def __init__(self, language, printer=None, *, force=False, verify=True, filename=None): # maps strings to Parser objects. # (instantiated from the "parsers" global.) self.parsers = {} self.language = language if printer: fail("Custom printers are broken right now") self.printer = printer or BlockPrinter(language) self.verify = verify self.force = force self.filename = filename self.modules = collections.OrderedDict() self.classes = collections.OrderedDict() self.functions = [] self.line_prefix = self.line_suffix = '' self.destinations = {} self.add_destination("block", "buffer") self.add_destination("suppress", "suppress") self.add_destination("buffer", "buffer") self.add_destination("two-pass", "two-pass") if filename: self.add_destination("file", "file", "{dirname}/clinic/{basename}.h") d = self.destinations.get self.field_destinations = collections.OrderedDict(( ('cpp_if', d('suppress')), ('docstring_prototype', d('suppress')), ('docstring_definition', d('block')), ('methoddef_define', d('block')), ('impl_prototype', d('block')), ('parser_prototype', d('suppress')), ('parser_definition', d('block')), ('cpp_endif', d('suppress')), ('methoddef_ifndef', d('buffer')), ('impl_definition', d('block')), )) self.field_destinations_stack = [] self.presets = {} preset = None for line in self.presets_text.strip().split('\n'): line = line.strip() if not line: continue name, value = line.split() if name == 'preset': self.presets[value] = preset = collections.OrderedDict() continue destination = self.get_destination(value) if name == 'everything': for name in self.field_destinations: preset[name] = destination continue assert name in self.field_destinations preset[name] = destination global clinic clinic = self def get_destination(self, name, default=unspecified): d = self.destinations.get(name) if not d: if default is not unspecified: return default fail("Destination does not exist: " + repr(name)) return d def add_destination(self, name, type, *args): if name in self.destinations: fail("Destination already exists: " + repr(name)) self.destinations[name] = Destination(name, type, self, *args) def parse(self, input): printer = self.printer self.block_parser = BlockParser(input, self.language, verify=self.verify) for block in self.block_parser: dsl_name = block.dsl_name if dsl_name: if dsl_name not in self.parsers: assert dsl_name in parsers, "No parser to handle {!r} block.".format(dsl_name) self.parsers[dsl_name] = parsers[dsl_name](self) parser = self.parsers[dsl_name] try: parser.parse(block) except Exception: fail('Exception raised during parsing:\n' + traceback.format_exc().rstrip()) printer.print_block(block) second_pass_replacements = {} for name, destination in self.destinations.items(): if destination.type == 'suppress': continue output = destination._dump() if destination.type == 'two-pass': if destination.id: second_pass_replacements[destination.id] = output elif output: fail("Two-pass buffer " + repr(name) + " not empty at end of file!") continue if output: block = Block("", dsl_name="clinic", output=output) if destination.type == 'buffer': block.input = "dump " + name + "\n" warn("Destination buffer " + repr(name) + " not empty at end of file, emptying.") printer.write("\n") printer.print_block(block) continue if destination.type == 'file': try: dirname = os.path.dirname(destination.filename) try: os.makedirs(dirname) except FileExistsError: if not os.path.isdir(dirname): fail("Can't write to destination {}, " "can't make directory {}!".format( destination.filename, dirname)) if self.verify: with open(destination.filename, "rt") as f: parser_2 = BlockParser(f.read(), language=self.language) blocks = list(parser_2) if (len(blocks) != 1) or (blocks[0].input != 'preserve\n'): fail("Modified destination file " + repr(destination.filename) + ", not overwriting!") except FileNotFoundError: pass block.input = 'preserve\n' printer_2 = BlockPrinter(self.language) printer_2.print_block(block) with open(destination.filename, "wt") as f: f.write(printer_2.f.getvalue()) continue text = printer.f.getvalue() if second_pass_replacements: printer_2 = BlockPrinter(self.language) parser_2 = BlockParser(text, self.language) changed = False for block in parser_2: if block.dsl_name: for id, replacement in second_pass_replacements.items(): if id in block.output: changed = True block.output = block.output.replace(id, replacement) printer_2.print_block(block) if changed: text = printer_2.f.getvalue() return text def _module_and_class(self, fields): """ fields should be an iterable of field names. returns a tuple of (module, class). the module object could actually be self (a clinic object). this function is only ever used to find the parent of where a new class/module should go. """ in_classes = False parent = module = self cls = None so_far = [] for field in fields: so_far.append(field) if not in_classes: child = parent.modules.get(field) if child: parent = module = child continue in_classes = True if not hasattr(parent, 'classes'): return module, cls child = parent.classes.get(field) if not child: fail('Parent class or module ' + '.'.join(so_far) + " does not exist.") cls = parent = child return module, cls def parse_file(filename, *, force=False, verify=True, output=None, encoding='utf-8'): extension = os.path.splitext(filename)[1][1:] if not extension: fail("Can't extract file type for file " + repr(filename)) try: language = extensions[extension](filename) except KeyError: fail("Can't identify file type for file " + repr(filename)) with open(filename, 'r', encoding=encoding) as f: raw = f.read() # exit quickly if there are no clinic markers in the file find_start_re = BlockParser("", language).find_start_re if not find_start_re.search(raw): return clinic = Clinic(language, force=force, verify=verify, filename=filename) cooked = clinic.parse(raw) if (cooked == raw) and not force: return directory = os.path.dirname(filename) or '.' with tempfile.TemporaryDirectory(prefix="clinic", dir=directory) as tmpdir: bytes = cooked.encode(encoding) tmpfilename = os.path.join(tmpdir, os.path.basename(filename)) with open(tmpfilename, "wb") as f: f.write(bytes) os.replace(tmpfilename, output or filename) def compute_checksum(input, length=None): input = input or '' s = hashlib.sha1(input.encode('utf-8')).hexdigest() if length: s = s[:length] return s class PythonParser: def __init__(self, clinic): pass def parse(self, block): s = io.StringIO() with OverrideStdioWith(s): exec(block.input) block.output = s.getvalue() class Module: def __init__(self, name, module=None): self.name = name self.module = self.parent = module self.modules = collections.OrderedDict() self.classes = collections.OrderedDict() self.functions = [] def __repr__(self): return "<clinic.Module " + repr(self.name) + " at " + str(id(self)) + ">" class Class: def __init__(self, name, module=None, cls=None, typedef=None, type_object=None): self.name = name self.module = module self.cls = cls self.typedef = typedef self.type_object = type_object self.parent = cls or module self.classes = collections.OrderedDict() self.functions = [] def __repr__(self): return "<clinic.Class " + repr(self.name) + " at " + str(id(self)) + ">" unsupported_special_methods = set(""" __abs__ __add__ __and__ __bytes__ __call__ __complex__ __delitem__ __divmod__ __eq__ __float__ __floordiv__ __ge__ __getattr__ __getattribute__ __getitem__ __gt__ __hash__ __iadd__ __iand__ __idivmod__ __ifloordiv__ __ilshift__ __imod__ __imul__ __index__ __int__ __invert__ __ior__ __ipow__ __irshift__ __isub__ __iter__ __itruediv__ __ixor__ __le__ __len__ __lshift__ __lt__ __mod__ __mul__ __neg__ __new__ __next__ __or__ __pos__ __pow__ __radd__ __rand__ __rdivmod__ __repr__ __rfloordiv__ __rlshift__ __rmod__ __rmul__ __ror__ __round__ __rpow__ __rrshift__ __rshift__ __rsub__ __rtruediv__ __rxor__ __setattr__ __setitem__ __str__ __sub__ __truediv__ __xor__ """.strip().split()) INVALID, CALLABLE, STATIC_METHOD, CLASS_METHOD, METHOD_INIT, METHOD_NEW = """ INVALID, CALLABLE, STATIC_METHOD, CLASS_METHOD, METHOD_INIT, METHOD_NEW """.replace(",", "").strip().split() class Function: """ Mutable duck type for inspect.Function. docstring - a str containing * embedded line breaks * text outdented to the left margin * no trailing whitespace. It will always be true that (not docstring) or ((not docstring[0].isspace()) and (docstring.rstrip() == docstring)) """ def __init__(self, parameters=None, *, name, module, cls=None, c_basename=None, full_name=None, return_converter, return_annotation=_empty, docstring=None, kind=CALLABLE, coexist=False, docstring_only=False): self.parameters = parameters or collections.OrderedDict() self.return_annotation = return_annotation self.name = name self.full_name = full_name self.module = module self.cls = cls self.parent = cls or module self.c_basename = c_basename self.return_converter = return_converter self.docstring = docstring or '' self.kind = kind self.coexist = coexist self.self_converter = None # docstring_only means "don't generate a machine-readable # signature, just a normal docstring". it's True for # functions with optional groups because we can't represent # those accurately with inspect.Signature in 3.4. self.docstring_only = docstring_only self.rendered_parameters = None __render_parameters__ = None @property def render_parameters(self): if not self.__render_parameters__: self.__render_parameters__ = l = [] for p in self.parameters.values(): p = p.copy() p.converter.pre_render() l.append(p) return self.__render_parameters__ @property def methoddef_flags(self): if self.kind in (METHOD_INIT, METHOD_NEW): return None flags = [] if self.kind == CLASS_METHOD: flags.append('METH_CLASS') elif self.kind == STATIC_METHOD: flags.append('METH_STATIC') else: assert self.kind == CALLABLE, "unknown kind: " + repr(self.kind) if self.coexist: flags.append('METH_COEXIST') return '|'.join(flags) def __repr__(self): return '<clinic.Function ' + self.name + '>' def copy(self, **overrides): kwargs = { 'name': self.name, 'module': self.module, 'parameters': self.parameters, 'cls': self.cls, 'c_basename': self.c_basename, 'full_name': self.full_name, 'return_converter': self.return_converter, 'return_annotation': self.return_annotation, 'docstring': self.docstring, 'kind': self.kind, 'coexist': self.coexist, 'docstring_only': self.docstring_only, } kwargs.update(overrides) f = Function(**kwargs) parameters = collections.OrderedDict() for name, value in f.parameters.items(): value = value.copy(function=f) parameters[name] = value f.parameters = parameters return f class Parameter: """ Mutable duck type of inspect.Parameter. """ def __init__(self, name, kind, *, default=_empty, function, converter, annotation=_empty, docstring=None, group=0): self.name = name self.kind = kind self.default = default self.function = function self.converter = converter self.annotation = annotation self.docstring = docstring or '' self.group = group def __repr__(self): return '<clinic.Parameter ' + self.name + '>' def is_keyword_only(self): return self.kind == inspect.Parameter.KEYWORD_ONLY def is_positional_only(self): return self.kind == inspect.Parameter.POSITIONAL_ONLY def copy(self, **overrides): kwargs = { 'name': self.name, 'kind': self.kind, 'default':self.default, 'function': self.function, 'converter': self.converter, 'annotation': self.annotation, 'docstring': self.docstring, 'group': self.group, } kwargs.update(overrides) if 'converter' not in overrides: converter = copy.copy(self.converter) converter.function = kwargs['function'] kwargs['converter'] = converter return Parameter(**kwargs) class LandMine: # try to access any def __init__(self, message): self.__message__ = message def __repr__(self): return '<LandMine ' + repr(self.__message__) + ">" def __getattribute__(self, name): if name in ('__repr__', '__message__'): return super().__getattribute__(name) # raise RuntimeError(repr(name)) fail("Stepped on a land mine, trying to access attribute " + repr(name) + ":\n" + self.__message__) def add_c_converter(f, name=None): if not name: name = f.__name__ if not name.endswith('_converter'): return f name = name[:-len('_converter')] converters[name] = f return f def add_default_legacy_c_converter(cls): # automatically add converter for default format unit # (but without stomping on the existing one if it's already # set, in case you subclass) if ((cls.format_unit not in ('O&', '')) and (cls.format_unit not in legacy_converters)): legacy_converters[cls.format_unit] = cls return cls def add_legacy_c_converter(format_unit, **kwargs): """ Adds a legacy converter. """ def closure(f): if not kwargs: added_f = f else: added_f = functools.partial(f, **kwargs) if format_unit: legacy_converters[format_unit] = added_f return f return closure class CConverterAutoRegister(type): def __init__(cls, name, bases, classdict): add_c_converter(cls) add_default_legacy_c_converter(cls) class CConverter(metaclass=CConverterAutoRegister): """ For the init function, self, name, function, and default must be keyword-or-positional parameters. All other parameters must be keyword-only. """ # The C name to use for this variable. name = None # The Python name to use for this variable. py_name = None # The C type to use for this variable. # 'type' should be a Python string specifying the type, e.g. "int". # If this is a pointer type, the type string should end with ' *'. type = None # The Python default value for this parameter, as a Python value. # Or the magic value "unspecified" if there is no default. # Or the magic value "unknown" if this value is a cannot be evaluated # at Argument-Clinic-preprocessing time (but is presumed to be valid # at runtime). default = unspecified # If not None, default must be isinstance() of this type. # (You can also specify a tuple of types.) default_type = None # "default" converted into a C value, as a string. # Or None if there is no default. c_default = None # "default" converted into a Python value, as a string. # Or None if there is no default. py_default = None # The default value used to initialize the C variable when # there is no default, but not specifying a default may # result in an "uninitialized variable" warning. This can # easily happen when using option groups--although # properly-written code won't actually use the variable, # the variable does get passed in to the _impl. (Ah, if # only dataflow analysis could inline the static function!) # # This value is specified as a string. # Every non-abstract subclass should supply a valid value. c_ignored_default = 'NULL' # The C converter *function* to be used, if any. # (If this is not None, format_unit must be 'O&'.) converter = None # Should Argument Clinic add a '&' before the name of # the variable when passing it into the _impl function? impl_by_reference = False # Should Argument Clinic add a '&' before the name of # the variable when passing it into PyArg_ParseTuple (AndKeywords)? parse_by_reference = True ############################################################# ############################################################# ## You shouldn't need to read anything below this point to ## ## write your own converter functions. ## ############################################################# ############################################################# # The "format unit" to specify for this variable when # parsing arguments using PyArg_ParseTuple (AndKeywords). # Custom converters should always use the default value of 'O&'. format_unit = 'O&' # What encoding do we want for this variable? Only used # by format units starting with 'e'. encoding = None # Should this object be required to be a subclass of a specific type? # If not None, should be a string representing a pointer to a # PyTypeObject (e.g. "&PyUnicode_Type"). # Only used by the 'O!' format unit (and the "object" converter). subclass_of = None # Do we want an adjacent '_length' variable for this variable? # Only used by format units ending with '#'. length = False # Should we show this parameter in the generated # __text_signature__? This is *almost* always True. # (It's only False for __new__, __init__, and METH_STATIC functions.) show_in_signature = True # Overrides the name used in a text signature. # The name used for a "self" parameter must be one of # self, type, or module; however users can set their own. # This lets the self_converter overrule the user-settable # name, *just* for the text signature. # Only set by self_converter. signature_name = None # keep in sync with self_converter.__init__! def __init__(self, name, py_name, function, default=unspecified, *, c_default=None, py_default=None, annotation=unspecified, **kwargs): self.name = name self.py_name = py_name if default is not unspecified: if self.default_type and not isinstance(default, (self.default_type, Unknown)): if isinstance(self.default_type, type): types_str = self.default_type.__name__ else: types_str = ', '.join((cls.__name__ for cls in self.default_type)) fail("{}: default value {!r} for field {} is not of type {}".format( self.__class__.__name__, default, name, types_str)) self.default = default if c_default: self.c_default = c_default if py_default: self.py_default = py_default if annotation != unspecified: fail("The 'annotation' parameter is not currently permitted.") # this is deliberate, to prevent you from caching information # about the function in the init. # (that breaks if we get cloned.) # so after this change we will noisily fail. self.function = LandMine("Don't access members of self.function inside converter_init!") self.converter_init(**kwargs) self.function = function def converter_init(self): pass def is_optional(self): return (self.default is not unspecified) def _render_self(self, parameter, data): self.parameter = parameter original_name = self.name name = ensure_legal_c_identifier(original_name) # impl_arguments s = ("&" if self.impl_by_reference else "") + name data.impl_arguments.append(s) if self.length: data.impl_arguments.append(self.length_name()) # impl_parameters data.impl_parameters.append(self.simple_declaration(by_reference=self.impl_by_reference)) if self.length: data.impl_parameters.append("Py_ssize_clean_t " + self.length_name()) def _render_non_self(self, parameter, data): self.parameter = parameter original_name = self.name name = ensure_legal_c_identifier(original_name) # declarations d = self.declaration() data.declarations.append(d) # initializers initializers = self.initialize() if initializers: data.initializers.append('/* initializers for ' + name + ' */\n' + initializers.rstrip()) # modifications modifications = self.modify() if modifications: data.modifications.append('/* modifications for ' + name + ' */\n' + modifications.rstrip()) # keywords data.keywords.append(parameter.name) # format_units if self.is_optional() and '|' not in data.format_units: data.format_units.append('|') if parameter.is_keyword_only() and '$' not in data.format_units: data.format_units.append('$') data.format_units.append(self.format_unit) # parse_arguments self.parse_argument(data.parse_arguments) # cleanup cleanup = self.cleanup() if cleanup: data.cleanup.append('/* Cleanup for ' + name + ' */\n' + cleanup.rstrip() + "\n") def render(self, parameter, data): """ parameter is a clinic.Parameter instance. data is a CRenderData instance. """ self._render_self(parameter, data) self._render_non_self(parameter, data) def length_name(self): """Computes the name of the associated "length" variable.""" if not self.length: return None return ensure_legal_c_identifier(self.name) + "_length" # Why is this one broken out separately? # For "positional-only" function parsing, # which generates a bunch of PyArg_ParseTuple calls. def parse_argument(self, list): assert not (self.converter and self.encoding) if self.format_unit == 'O&': assert self.converter list.append(self.converter) if self.encoding: list.append(c_repr(self.encoding)) elif self.subclass_of: list.append(self.subclass_of) legal_name = ensure_legal_c_identifier(self.name) s = ("&" if self.parse_by_reference else "") + legal_name list.append(s) if self.length: list.append("&" + self.length_name()) # # All the functions after here are intended as extension points. # def simple_declaration(self, by_reference=False): """ Computes the basic declaration of the variable. Used in computing the prototype declaration and the variable declaration. """ prototype = [self.type] if by_reference or not self.type.endswith('*'): prototype.append(" ") if by_reference: prototype.append('*') prototype.append(ensure_legal_c_identifier(self.name)) return "".join(prototype) def declaration(self): """ The C statement to declare this variable. """ declaration = [self.simple_declaration()] default = self.c_default if not default and self.parameter.group: default = self.c_ignored_default if default: declaration.append(" = ") declaration.append(default) declaration.append(";") if self.length: declaration.append('\nPy_ssize_clean_t ') declaration.append(self.length_name()) declaration.append(';') s = "".join(declaration) # double up curly-braces, this string will be used # as part of a format_map() template later s = s.replace("{", "{{") s = s.replace("}", "}}") return s def initialize(self): """ The C statements required to set up this variable before parsing. Returns a string containing this code indented at column 0. If no initialization is necessary, returns an empty string. """ return "" def modify(self): """ The C statements required to modify this variable after parsing. Returns a string containing this code indented at column 0. If no initialization is necessary, returns an empty string. """ return "" def cleanup(self): """ The C statements required to clean up after this variable. Returns a string containing this code indented at column 0. If no cleanup is necessary, returns an empty string. """ return "" def pre_render(self): """ A second initialization function, like converter_init, called just before rendering. You are permitted to examine self.function here. """ pass class bool_converter(CConverter): type = 'int' default_type = bool format_unit = 'p' c_ignored_default = '0' def converter_init(self): if self.default is not unspecified: self.default = bool(self.default) self.c_default = str(int(self.default)) class char_converter(CConverter): type = 'char' default_type = str format_unit = 'c' c_ignored_default = "'\0'" def converter_init(self): if isinstance(self.default, str) and (len(self.default) != 1): fail("char_converter: illegal default value " + repr(self.default)) @add_legacy_c_converter('B', bitwise=True) class unsigned_char_converter(CConverter): type = 'unsigned char' default_type = int format_unit = 'b' c_ignored_default = "'\0'" def converter_init(self, *, bitwise=False): if bitwise: self.format_unit = 'B' class byte_converter(unsigned_char_converter): pass class short_converter(CConverter): type = 'short' default_type = int format_unit = 'h' c_ignored_default = "0" class unsigned_short_converter(CConverter): type = 'unsigned short' default_type = int format_unit = 'H' c_ignored_default = "0" def converter_init(self, *, bitwise=False): if not bitwise: fail("Unsigned shorts must be bitwise (for now).") @add_legacy_c_converter('C', types='str') class int_converter(CConverter): type = 'int' default_type = int format_unit = 'i' c_ignored_default = "0" def converter_init(self, *, types='int'): if types == 'str': self.format_unit = 'C' elif types != 'int': fail("int_converter: illegal 'types' argument") class unsigned_int_converter(CConverter): type = 'unsigned int' default_type = int format_unit = 'I' c_ignored_default = "0" def converter_init(self, *, bitwise=False): if not bitwise: fail("Unsigned ints must be bitwise (for now).") class long_converter(CConverter): type = 'long' default_type = int format_unit = 'l' c_ignored_default = "0" class unsigned_long_converter(CConverter): type = 'unsigned long' default_type = int format_unit = 'k' c_ignored_default = "0" def converter_init(self, *, bitwise=False): if not bitwise: fail("Unsigned longs must be bitwise (for now).") class PY_LONG_LONG_converter(CConverter): type = 'PY_LONG_LONG' default_type = int format_unit = 'L' c_ignored_default = "0" class unsigned_PY_LONG_LONG_converter(CConverter): type = 'unsigned PY_LONG_LONG' default_type = int format_unit = 'K' c_ignored_default = "0" def converter_init(self, *, bitwise=False): if not bitwise: fail("Unsigned PY_LONG_LONGs must be bitwise (for now).") class Py_ssize_t_converter(CConverter): type = 'Py_ssize_t' default_type = int format_unit = 'n' c_ignored_default = "0" class float_converter(CConverter): type = 'float' default_type = float format_unit = 'f' c_ignored_default = "0.0" class double_converter(CConverter): type = 'double' default_type = float format_unit = 'd' c_ignored_default = "0.0" class Py_complex_converter(CConverter): type = 'Py_complex' default_type = complex format_unit = 'D' c_ignored_default = "{0.0, 0.0}" class object_converter(CConverter): type = 'PyObject *' format_unit = 'O' def converter_init(self, *, converter=None, type=None, subclass_of=None): if converter: if subclass_of: fail("object: Cannot pass in both 'converter' and 'subclass_of'") self.format_unit = 'O&' self.converter = converter elif subclass_of: self.format_unit = 'O!' self.subclass_of = subclass_of if type is not None: self.type = type @add_legacy_c_converter('s#', length=True) @add_legacy_c_converter('y', types="bytes") @add_legacy_c_converter('y#', types="bytes", length=True) @add_legacy_c_converter('z', nullable=True) @add_legacy_c_converter('z#', nullable=True, length=True) class str_converter(CConverter): type = 'const char *' default_type = (str, Null, NoneType) format_unit = 's' def converter_init(self, *, encoding=None, types="str", length=False, nullable=False, zeroes=False): types = set(types.strip().split()) bytes_type = set(("bytes",)) str_type = set(("str",)) all_3_type = set(("bytearray",)) | bytes_type | str_type is_bytes = types == bytes_type is_str = types == str_type is_all_3 = types == all_3_type self.length = bool(length) format_unit = None if encoding: self.encoding = encoding if is_str and not (length or zeroes or nullable): format_unit = 'es' elif is_all_3 and not (length or zeroes or nullable): format_unit = 'et' elif is_str and length and zeroes and not nullable: format_unit = 'es#' elif is_all_3 and length and not (nullable or zeroes): format_unit = 'et#' if format_unit.endswith('#'): fail("Sorry: code using format unit ", repr(format_unit), "probably doesn't work properly yet.\nGive Larry your test case and he'll it.") # TODO set pointer to NULL # TODO add cleanup for buffer pass else: if zeroes: fail("str_converter: illegal combination of arguments (zeroes is only legal with an encoding)") if is_bytes and not (nullable or length): format_unit = 'y' elif is_bytes and length and not nullable: format_unit = 'y#' elif is_str and not (nullable or length): format_unit = 's' elif is_str and length and not nullable: format_unit = 's#' elif is_str and nullable and not length: format_unit = 'z' elif is_str and nullable and length: format_unit = 'z#' if not format_unit: fail("str_converter: illegal combination of arguments") self.format_unit = format_unit class PyBytesObject_converter(CConverter): type = 'PyBytesObject *' format_unit = 'S' class PyByteArrayObject_converter(CConverter): type = 'PyByteArrayObject *' format_unit = 'Y' class unicode_converter(CConverter): type = 'PyObject *' default_type = (str, Null, NoneType) format_unit = 'U' @add_legacy_c_converter('u#', length=True) @add_legacy_c_converter('Z', nullable=True) @add_legacy_c_converter('Z#', nullable=True, length=True) class Py_UNICODE_converter(CConverter): type = 'Py_UNICODE *' default_type = (str, Null, NoneType) format_unit = 'u' def converter_init(self, *, nullable=False, length=False): format_unit = 'Z' if nullable else 'u' if length: format_unit += '#' self.length = True self.format_unit = format_unit # # We define three string conventions for buffer types in the 'types' argument: # 'buffer' : any object supporting the buffer interface # 'rwbuffer': any object supporting the buffer interface, but must be writeable # 'robuffer': any object supporting the buffer interface, but must not be writeable # @add_legacy_c_converter('s*', types='str bytes bytearray buffer') @add_legacy_c_converter('z*', types='str bytes bytearray buffer', nullable=True) @add_legacy_c_converter('w*', types='bytearray rwbuffer') class Py_buffer_converter(CConverter): type = 'Py_buffer' format_unit = 'y*' impl_by_reference = True c_ignored_default = "{NULL, NULL}" def converter_init(self, *, types='bytes bytearray buffer', nullable=False): if self.default not in (unspecified, None): fail("The only legal default value for Py_buffer is None.") self.c_default = self.c_ignored_default types = set(types.strip().split()) bytes_type = set(('bytes',)) bytearray_type = set(('bytearray',)) buffer_type = set(('buffer',)) rwbuffer_type = set(('rwbuffer',)) robuffer_type = set(('robuffer',)) str_type = set(('str',)) bytes_bytearray_buffer_type = bytes_type | bytearray_type | buffer_type format_unit = None if types == (str_type | bytes_bytearray_buffer_type): format_unit = 's*' if not nullable else 'z*' else: if nullable: fail('Py_buffer_converter: illegal combination of arguments (nullable=True)') elif types == (bytes_bytearray_buffer_type): format_unit = 'y*' elif types == (bytearray_type | rwbuffer_type): format_unit = 'w*' if not format_unit: fail("Py_buffer_converter: illegal combination of arguments") self.format_unit = format_unit def cleanup(self): name = ensure_legal_c_identifier(self.name) return "".join(["if (", name, ".obj)\n PyBuffer_Release(&", name, ");\n"]) def correct_name_for_self(f): if f.kind in (CALLABLE, METHOD_INIT): if f.cls: return "PyObject *", "self" return "PyModuleDef *", "module" if f.kind == STATIC_METHOD: return "void *", "null" if f.kind in (CLASS_METHOD, METHOD_NEW): return "PyTypeObject *", "type" raise RuntimeError("Unhandled type of function f: " + repr(f.kind)) def required_type_for_self_for_parser(f): type, _ = correct_name_for_self(f) if f.kind in (METHOD_INIT, METHOD_NEW, STATIC_METHOD, CLASS_METHOD): return type return None class self_converter(CConverter): """ A special-case converter: this is the default converter used for "self". """ type = None format_unit = '' def converter_init(self, *, type=None): self.specified_type = type def pre_render(self): f = self.function default_type, default_name = correct_name_for_self(f) self.signature_name = default_name self.type = self.specified_type or self.type or default_type kind = self.function.kind new_or_init = kind in (METHOD_NEW, METHOD_INIT) if (kind == STATIC_METHOD) or new_or_init: self.show_in_signature = False # tp_new (METHOD_NEW) functions are of type newfunc: # typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *); # PyTypeObject is a typedef for struct _typeobject. # # tp_init (METHOD_INIT) functions are of type initproc: # typedef int (*initproc)(PyObject *, PyObject *, PyObject *); # # All other functions generated by Argument Clinic are stored in # PyMethodDef structures, in the ml_meth slot, which is of type PyCFunction: # typedef PyObject *(*PyCFunction)(PyObject *, PyObject *); # However! We habitually cast these functions to PyCFunction, # since functions that accept keyword arguments don't fit this signature # but are stored there anyway. So strict type equality isn't important # for these functions. # # So: # # * The name of the first parameter to the impl and the parsing function will always # be self.name. # # * The type of the first parameter to the impl will always be of self.type. # # * If the function is neither tp_new (METHOD_NEW) nor tp_init (METHOD_INIT): # * The type of the first parameter to the parsing function is also self.type. # This means that if you step into the parsing function, your "self" parameter # is of the correct type, which may make debugging more pleasant. # # * Else if the function is tp_new (METHOD_NEW): # * The type of the first parameter to the parsing function is "PyTypeObject *", # so the type signature of the function call is an exact match. # * If self.type != "PyTypeObject *", we cast the first parameter to self.type # in the impl call. # # * Else if the function is tp_init (METHOD_INIT): # * The type of the first parameter to the parsing function is "PyObject *", # so the type signature of the function call is an exact match. # * If self.type != "PyObject *", we cast the first parameter to self.type # in the impl call. @property def parser_type(self): return required_type_for_self_for_parser(self.function) or self.type def render(self, parameter, data): """ parameter is a clinic.Parameter instance. data is a CRenderData instance. """ if self.function.kind == STATIC_METHOD: return self._render_self(parameter, data) if self.type != self.parser_type: # insert cast to impl_argument[0], aka self. # we know we're in the first slot in all the CRenderData lists, # because we render parameters in order, and self is always first. assert len(data.impl_arguments) == 1 assert data.impl_arguments[0] == self.name data.impl_arguments[0] = '(' + self.type + ")" + data.impl_arguments[0] def set_template_dict(self, template_dict): template_dict['self_name'] = self.name template_dict['self_type'] = self.parser_type kind = self.function.kind cls = self.function.cls if ((kind in (METHOD_NEW, METHOD_INIT)) and cls and cls.typedef): if kind == METHOD_NEW: passed_in_type = self.name else: passed_in_type = 'Py_TYPE({})'.format(self.name) line = '({passed_in_type} == {type_object}) &&\n ' d = { 'type_object': self.function.cls.type_object, 'passed_in_type': passed_in_type } template_dict['self_type_check'] = line.format_map(d) def add_c_return_converter(f, name=None): if not name: name = f.__name__ if not name.endswith('_return_converter'): return f name = name[:-len('_return_converter')] return_converters[name] = f return f class CReturnConverterAutoRegister(type): def __init__(cls, name, bases, classdict): add_c_return_converter(cls) class CReturnConverter(metaclass=CReturnConverterAutoRegister): # The C type to use for this variable. # 'type' should be a Python string specifying the type, e.g. "int". # If this is a pointer type, the type string should end with ' *'. type = 'PyObject *' # The Python default value for this parameter, as a Python value. # Or the magic value "unspecified" if there is no default. default = None def __init__(self, *, py_default=None, **kwargs): self.py_default = py_default try: self.return_converter_init(**kwargs) except TypeError as e: s = ', '.join(name + '=' + repr(value) for name, value in kwargs.items()) sys.exit(self.__class__.__name__ + '(' + s + ')\n' + str(e)) def return_converter_init(self): pass def declare(self, data, name="_return_value"): line = [] add = line.append add(self.type) if not self.type.endswith('*'): add(' ') add(name + ';') data.declarations.append(''.join(line)) data.return_value = name def err_occurred_if(self, expr, data): data.return_conversion.append('if (({}) && PyErr_Occurred())\n goto exit;\n'.format(expr)) def err_occurred_if_null_pointer(self, variable, data): data.return_conversion.append('if ({} == NULL)\n goto exit;\n'.format(variable)) def render(self, function, data): """ function is a clinic.Function instance. data is a CRenderData instance. """ pass add_c_return_converter(CReturnConverter, 'object') class NoneType_return_converter(CReturnConverter): def render(self, function, data): self.declare(data) data.return_conversion.append(''' if (_return_value != Py_None) goto exit; return_value = Py_None; Py_INCREF(Py_None); '''.strip()) class bool_return_converter(CReturnConverter): type = 'int' def render(self, function, data): self.declare(data) self.err_occurred_if("_return_value == -1", data) data.return_conversion.append('return_value = PyBool_FromLong((long)_return_value);\n') class long_return_converter(CReturnConverter): type = 'long' conversion_fn = 'PyLong_FromLong' cast = '' def render(self, function, data): self.declare(data) self.err_occurred_if("_return_value == -1", data) data.return_conversion.append( ''.join(('return_value = ', self.conversion_fn, '(', self.cast, '_return_value);\n'))) class int_return_converter(long_return_converter): type = 'int' cast = '(long)' class init_return_converter(long_return_converter): """ Special return converter for __init__ functions. """ type = 'int' cast = '(long)' def render(self, function, data): pass class unsigned_long_return_converter(long_return_converter): type = 'unsigned long' conversion_fn = 'PyLong_FromUnsignedLong' class unsigned_int_return_converter(unsigned_long_return_converter): type = 'unsigned int' cast = '(unsigned long)' class Py_ssize_t_return_converter(long_return_converter): type = 'Py_ssize_t' conversion_fn = 'PyLong_FromSsize_t' class size_t_return_converter(long_return_converter): type = 'size_t' conversion_fn = 'PyLong_FromSize_t' class double_return_converter(CReturnConverter): type = 'double' cast = '' def render(self, function, data): self.declare(data) self.err_occurred_if("_return_value == -1.0", data) data.return_conversion.append( 'return_value = PyFloat_FromDouble(' + self.cast + '_return_value);\n') class float_return_converter(double_return_converter): type = 'float' cast = '(double)' class DecodeFSDefault_return_converter(CReturnConverter): type = 'char *' def render(self, function, data): self.declare(data) self.err_occurred_if_null_pointer("_return_value", data) data.return_conversion.append( 'return_value = PyUnicode_DecodeFSDefault(_return_value);\n') class IndentStack: def __init__(self): self.indents = [] self.margin = None def _ensure(self): if not self.indents: fail('IndentStack expected indents, but none are defined.') def measure(self, line): """ Returns the length of the line's margin. """ if '\t' in line: fail('Tab characters are illegal in the Argument Clinic DSL.') stripped = line.lstrip() if not len(stripped): # we can't tell anything from an empty line # so just pretend it's indented like our current indent self._ensure() return self.indents[-1] return len(line) - len(stripped) def infer(self, line): """ Infer what is now the current margin based on this line. Returns: 1 if we have indented (or this is the first margin) 0 if the margin has not changed -N if we have dedented N times """ indent = self.measure(line) margin = ' ' * indent if not self.indents: self.indents.append(indent) self.margin = margin return 1 current = self.indents[-1] if indent == current: return 0 if indent > current: self.indents.append(indent) self.margin = margin return 1 # indent < current if indent not in self.indents: fail("Illegal outdent.") outdent_count = 0 while indent != current: self.indents.pop() current = self.indents[-1] outdent_count -= 1 self.margin = margin return outdent_count @property def depth(self): """ Returns how many margins are currently defined. """ return len(self.indents) def indent(self, line): """ Indents a line by the currently defined margin. """ return self.margin + line def dedent(self, line): """ Dedents a line by the currently defined margin. (The inverse of 'indent'.) """ margin = self.margin indent = self.indents[-1] if not line.startswith(margin): fail('Cannot dedent, line does not start with the previous margin:') return line[indent:] class DSLParser: def __init__(self, clinic): self.clinic = clinic self.directives = {} for name in dir(self): # functions that start with directive_ are added to directives _, s, key = name.partition("directive_") if s: self.directives[key] = getattr(self, name) # functions that start with at_ are too, with an @ in front _, s, key = name.partition("at_") if s: self.directives['@' + key] = getattr(self, name) self.reset() def reset(self): self.function = None self.state = self.state_dsl_start self.parameter_indent = None self.keyword_only = False self.group = 0 self.parameter_state = self.ps_start self.seen_positional_with_default = False self.indent = IndentStack() self.kind = CALLABLE self.coexist = False self.parameter_continuation = '' self.preserve_output = False def directive_version(self, required): global version if version_comparitor(version, required) < 0: fail("Insufficient Clinic version!\n Version: " + version + "\n Required: " + required) def directive_module(self, name): fields = name.split('.') new = fields.pop() module, cls = self.clinic._module_and_class(fields) if cls: fail("Can't nest a module inside a class!") if name in module.classes: fail("Already defined module " + repr(name) + "!") m = Module(name, module) module.modules[name] = m self.block.signatures.append(m) def directive_class(self, name, typedef, type_object): fields = name.split('.') in_classes = False parent = self name = fields.pop() so_far = [] module, cls = self.clinic._module_and_class(fields) parent = cls or module if name in parent.classes: fail("Already defined class " + repr(name) + "!") c = Class(name, module, cls, typedef, type_object) parent.classes[name] = c self.block.signatures.append(c) def directive_set(self, name, value): if name not in ("line_prefix", "line_suffix"): fail("unknown variable", repr(name)) value = value.format_map({ 'block comment start': '/*', 'block comment end': '*/', }) self.clinic.__dict__[name] = value def directive_destination(self, name, command, *args): if command == 'new': self.clinic.add_destination(name, *args) return if command == 'clear': self.clinic.get_destination(name).clear() fail("unknown destination command", repr(command)) def directive_output(self, field, destination=''): fd = self.clinic.field_destinations if field == "preset": preset = self.clinic.presets.get(destination) if not preset: fail("Unknown preset " + repr(destination) + "!") fd.update(preset) return if field == "push": self.clinic.field_destinations_stack.append(fd.copy()) return if field == "pop": if not self.clinic.field_destinations_stack: fail("Can't 'output pop', stack is empty!") previous_fd = self.clinic.field_destinations_stack.pop() fd.update(previous_fd) return # secret command for debugging! if field == "print": self.block.output.append(pprint.pformat(fd)) self.block.output.append('\n') return d = self.clinic.get_destination(destination) if field == "everything": for name in list(fd): fd[name] = d return if field not in fd: fail("Invalid field " + repr(field) + ", must be one of:\n preset push pop print everything " + " ".join(fd)) fd[field] = d def directive_dump(self, name): self.block.output.append(self.clinic.get_destination(name).dump()) def directive_print(self, *args): self.block.output.append(' '.join(args)) self.block.output.append('\n') def directive_preserve(self): if self.preserve_output: fail("Can't have preserve twice in one block!") self.preserve_output = True def at_classmethod(self): if self.kind is not CALLABLE: fail("Can't set @classmethod, function is not a normal callable") self.kind = CLASS_METHOD def at_staticmethod(self): if self.kind is not CALLABLE: fail("Can't set @staticmethod, function is not a normal callable") self.kind = STATIC_METHOD def at_coexist(self): if self.coexist: fail("Called @coexist twice!") self.coexist = True def parse(self, block): self.reset() self.block = block self.saved_output = self.block.output block.output = [] block_start = self.clinic.block_parser.line_number lines = block.input.split('\n') for line_number, line in enumerate(lines, self.clinic.block_parser.block_start_line_number): if '\t' in line: fail('Tab characters are illegal in the Clinic DSL.\n\t' + repr(line), line_number=block_start) self.state(line) self.next(self.state_terminal) self.state(None) block.output.extend(self.clinic.language.render(clinic, block.signatures)) if self.preserve_output: if block.output: fail("'preserve' only works for blocks that don't produce any output!") block.output = self.saved_output @staticmethod def ignore_line(line): # ignore comment-only lines if line.lstrip().startswith('#'): return True # Ignore empty lines too # (but not in docstring sections!) if not line.strip(): return True return False @staticmethod def calculate_indent(line): return len(line) - len(line.strip()) def next(self, state, line=None): # real_print(self.state.__name__, "->", state.__name__, ", line=", line) self.state = state if line is not None: self.state(line) def state_dsl_start(self, line): # self.block = self.ClinicOutputBlock(self) if self.ignore_line(line): return # is it a directive? fields = shlex.split(line) directive_name = fields[0] directive = self.directives.get(directive_name, None) if directive: try: directive(*fields[1:]) except TypeError as e: fail(str(e)) return self.next(self.state_modulename_name, line) def state_modulename_name(self, line): # looking for declaration, which establishes the leftmost column # line should be # modulename.fnname [as c_basename] [-> return annotation] # square brackets denote optional syntax. # # alternatively: # modulename.fnname [as c_basename] = modulename.existing_fn_name # clones the parameters and return converter from that # function. you can't modify them. you must enter a # new docstring. # # (but we might find a directive first!) # # this line is permitted to start with whitespace. # we'll call this number of spaces F (for "function"). if not line.strip(): return self.indent.infer(line) # are we cloning? before, equals, existing = line.rpartition('=') if equals: full_name, _, c_basename = before.partition(' as ') full_name = full_name.strip() c_basename = c_basename.strip() existing = existing.strip() if (is_legal_py_identifier(full_name) and (not c_basename or is_legal_c_identifier(c_basename)) and is_legal_py_identifier(existing)): # we're cloning! fields = [x.strip() for x in existing.split('.')] function_name = fields.pop() module, cls = self.clinic._module_and_class(fields) for existing_function in (cls or module).functions: if existing_function.name == function_name: break else: existing_function = None if not existing_function: print("class", cls, "module", module, "existing", existing) print("cls. functions", cls.functions) fail("Couldn't find existing function " + repr(existing) + "!") fields = [x.strip() for x in full_name.split('.')] function_name = fields.pop() module, cls = self.clinic._module_and_class(fields) if not (existing_function.kind == self.kind and existing_function.coexist == self.coexist): fail("'kind' of function and cloned function don't match! (@classmethod/@staticmethod/@coexist)") self.function = existing_function.copy(name=function_name, full_name=full_name, module=module, cls=cls, c_basename=c_basename, docstring='') self.block.signatures.append(self.function) (cls or module).functions.append(self.function) self.next(self.state_function_docstring) return line, _, returns = line.partition('->') full_name, _, c_basename = line.partition(' as ') full_name = full_name.strip() c_basename = c_basename.strip() or None if not is_legal_py_identifier(full_name): fail("Illegal function name: {}".format(full_name)) if c_basename and not is_legal_c_identifier(c_basename): fail("Illegal C basename: {}".format(c_basename)) return_converter = None if returns: ast_input = "def x() -> {}: pass".format(returns) module = None try: module = ast.parse(ast_input) except SyntaxError: pass if not module: fail("Badly-formed annotation for " + full_name + ": " + returns) try: name, legacy, kwargs = self.parse_converter(module.body[0].returns) if legacy: fail("Legacy converter {!r} not allowed as a return converter" .format(name)) if name not in return_converters: fail("No available return converter called " + repr(name)) return_converter = return_converters[name](**kwargs) except ValueError: fail("Badly-formed annotation for " + full_name + ": " + returns) fields = [x.strip() for x in full_name.split('.')] function_name = fields.pop() module, cls = self.clinic._module_and_class(fields) fields = full_name.split('.') if fields[-1] == '__new__': if (self.kind != CLASS_METHOD) or (not cls): fail("__new__ must be a class method!") self.kind = METHOD_NEW elif fields[-1] == '__init__': if (self.kind != CALLABLE) or (not cls): fail("__init__ must be a normal method, not a class or static method!") self.kind = METHOD_INIT if not return_converter: return_converter = init_return_converter() elif fields[-1] in unsupported_special_methods: fail(fields[-1] + " is a special method and cannot be converted to Argument Clinic! (Yet.)") if not return_converter: return_converter = CReturnConverter() if not module: fail("Undefined module used in declaration of " + repr(full_name.strip()) + ".") self.function = Function(name=function_name, full_name=full_name, module=module, cls=cls, c_basename=c_basename, return_converter=return_converter, kind=self.kind, coexist=self.coexist) self.block.signatures.append(self.function) # insert a self converter automatically type, name = correct_name_for_self(self.function) kwargs = {} if cls and type == "PyObject *": kwargs['type'] = cls.typedef sc = self.function.self_converter = self_converter(name, name, self.function, **kwargs) p_self = Parameter(sc.name, inspect.Parameter.POSITIONAL_ONLY, function=self.function, converter=sc) self.function.parameters[sc.name] = p_self (cls or module).functions.append(self.function) self.next(self.state_parameters_start) # Now entering the parameters section. The rules, formally stated: # # * All lines must be indented with spaces only. # * The first line must be a parameter declaration. # * The first line must be indented. # * This first line establishes the indent for parameters. # * We'll call this number of spaces P (for "parameter"). # * Thenceforth: # * Lines indented with P spaces specify a parameter. # * Lines indented with > P spaces are docstrings for the previous # parameter. # * We'll call this number of spaces D (for "docstring"). # * All subsequent lines indented with >= D spaces are stored as # part of the per-parameter docstring. # * All lines will have the first D spaces of the indent stripped # before they are stored. # * It's illegal to have a line starting with a number of spaces X # such that P < X < D. # * A line with < P spaces is the first line of the function # docstring, which ends processing for parameters and per-parameter # docstrings. # * The first line of the function docstring must be at the same # indent as the function declaration. # * It's illegal to have any line in the parameters section starting # with X spaces such that F < X < P. (As before, F is the indent # of the function declaration.) # # Also, currently Argument Clinic places the following restrictions on groups: # * Each group must contain at least one parameter. # * Each group may contain at most one group, which must be the furthest # thing in the group from the required parameters. (The nested group # must be the first in the group when it's before the required # parameters, and the last thing in the group when after the required # parameters.) # * There may be at most one (top-level) group to the left or right of # the required parameters. # * You must specify a slash, and it must be after all parameters. # (In other words: either all parameters are positional-only, # or none are.) # # Said another way: # * Each group must contain at least one parameter. # * All left square brackets before the required parameters must be # consecutive. (You can't have a left square bracket followed # by a parameter, then another left square bracket. You can't # have a left square bracket, a parameter, a right square bracket, # and then a left square bracket.) # * All right square brackets after the required parameters must be # consecutive. # # These rules are enforced with a single state variable: # "parameter_state". (Previously the code was a miasma of ifs and # separate boolean state variables.) The states are: # # [ [ a, b, ] c, ] d, e, f=3, [ g, h, [ i ] ] / <- line # 01 2 3 4 5 6 7 <- state transitions # # 0: ps_start. before we've seen anything. legal transitions are to 1 or 3. # 1: ps_left_square_before. left square brackets before required parameters. # 2: ps_group_before. in a group, before required parameters. # 3: ps_required. required parameters, positional-or-keyword or positional-only # (we don't know yet). (renumber left groups!) # 4: ps_optional. positional-or-keyword or positional-only parameters that # now must have default values. # 5: ps_group_after. in a group, after required parameters. # 6: ps_right_square_after. right square brackets after required parameters. # 7: ps_seen_slash. seen slash. ps_start, ps_left_square_before, ps_group_before, ps_required, \ ps_optional, ps_group_after, ps_right_square_after, ps_seen_slash = range(8) def state_parameters_start(self, line): if self.ignore_line(line): return # if this line is not indented, we have no parameters if not self.indent.infer(line): return self.next(self.state_function_docstring, line) self.parameter_continuation = '' return self.next(self.state_parameter, line) def to_required(self): """ Transition to the "required" parameter state. """ if self.parameter_state != self.ps_required: self.parameter_state = self.ps_required for p in self.function.parameters.values(): p.group = -p.group def state_parameter(self, line): if self.parameter_continuation: line = self.parameter_continuation + ' ' + line.lstrip() self.parameter_continuation = '' if self.ignore_line(line): return assert self.indent.depth == 2 indent = self.indent.infer(line) if indent == -1: # we outdented, must be to definition column return self.next(self.state_function_docstring, line) if indent == 1: # we indented, must be to new parameter docstring column return self.next(self.state_parameter_docstring_start, line) line = line.rstrip() if line.endswith('\\'): self.parameter_continuation = line[:-1] return line = line.lstrip() if line in ('*', '/', '[', ']'): self.parse_special_symbol(line) return if self.parameter_state in (self.ps_start, self.ps_required): self.to_required() elif self.parameter_state == self.ps_left_square_before: self.parameter_state = self.ps_group_before elif self.parameter_state == self.ps_group_before: if not self.group: self.to_required() elif self.parameter_state in (self.ps_group_after, self.ps_optional): pass else: fail("Function " + self.function.name + " has an unsupported group configuration. (Unexpected state " + str(self.parameter_state) + ".a)") # handle "as" for parameters too c_name = None name, have_as_token, trailing = line.partition(' as ') if have_as_token: name = name.strip() if ' ' not in name: fields = trailing.strip().split(' ') if not fields: fail("Invalid 'as' clause!") c_name = fields[0] if c_name.endswith(':'): name += ':' c_name = c_name[:-1] fields[0] = name line = ' '.join(fields) base, equals, default = line.rpartition('=') if not equals: base = default default = None module = None try: ast_input = "def x({}): pass".format(base) module = ast.parse(ast_input) except SyntaxError: try: # the last = was probably inside a function call, like # i: int(nullable=True) # so assume there was no actual default value. default = None ast_input = "def x({}): pass".format(line) module = ast.parse(ast_input) except SyntaxError: pass if not module: fail("Function " + self.function.name + " has an invalid parameter declaration:\n\t" + line) function_args = module.body[0].args parameter = function_args.args[0] parameter_name = parameter.arg name, legacy, kwargs = self.parse_converter(parameter.annotation) if not default: if self.parameter_state == self.ps_optional: fail("Can't have a parameter without a default (" + repr(parameter_name) + ")\nafter a parameter with a default!") value = unspecified if 'py_default' in kwargs: fail("You can't specify py_default without specifying a default value!") else: if self.parameter_state == self.ps_required: self.parameter_state = self.ps_optional default = default.strip() bad = False ast_input = "x = {}".format(default) bad = False try: module = ast.parse(ast_input) if 'c_default' not in kwargs: # we can only represent very simple data values in C. # detect whether default is okay, via a blacklist # of disallowed ast nodes. class DetectBadNodes(ast.NodeVisitor): bad = False def bad_node(self, node): self.bad = True # inline function call visit_Call = bad_node # inline if statement ("x = 3 if y else z") visit_IfExp = bad_node # comprehensions and generator expressions visit_ListComp = visit_SetComp = bad_node visit_DictComp = visit_GeneratorExp = bad_node # literals for advanced types visit_Dict = visit_Set = bad_node visit_List = visit_Tuple = bad_node # "starred": "a = [1, 2, 3]; *a" visit_Starred = bad_node # allow ellipsis, for now # visit_Ellipsis = bad_node blacklist = DetectBadNodes() blacklist.visit(module) bad = blacklist.bad else: # if they specify a c_default, we can be more lenient about the default value. # but at least make an attempt at ensuring it's a valid expression. try: value = eval(default) if value == unspecified: fail("'unspecified' is not a legal default value!") except NameError: pass # probably a named constant except Exception as e: fail("Malformed expression given as default value\n" "{!r} caused {!r}".format(default, e)) if bad: fail("Unsupported expression as default value: " + repr(default)) expr = module.body[0].value # mild hack: explicitly support NULL as a default value if isinstance(expr, ast.Name) and expr.id == 'NULL': value = NULL py_default = 'None' c_default = "NULL" elif (isinstance(expr, ast.BinOp) or (isinstance(expr, ast.UnaryOp) and not isinstance(expr.operand, ast.Num))): c_default = kwargs.get("c_default") if not (isinstance(c_default, str) and c_default): fail("When you specify an expression (" + repr(default) + ") as your default value,\nyou MUST specify a valid c_default.") py_default = default value = unknown elif isinstance(expr, ast.Attribute): a = [] n = expr while isinstance(n, ast.Attribute): a.append(n.attr) n = n.value if not isinstance(n, ast.Name): fail("Unsupported default value " + repr(default) + " (looked like a Python constant)") a.append(n.id) py_default = ".".join(reversed(a)) c_default = kwargs.get("c_default") if not (isinstance(c_default, str) and c_default): fail("When you specify a named constant (" + repr(py_default) + ") as your default value,\nyou MUST specify a valid c_default.") try: value = eval(py_default) except NameError: value = unknown else: value = ast.literal_eval(expr) py_default = repr(value) if isinstance(value, (bool, None.__class__)): c_default = "Py_" + py_default elif isinstance(value, str): c_default = c_repr(value) else: c_default = py_default except SyntaxError as e: fail("Syntax error: " + repr(e.text)) except (ValueError, AttributeError): value = unknown c_default = kwargs.get("c_default") py_default = default if not (isinstance(c_default, str) and c_default): fail("When you specify a named constant (" + repr(py_default) + ") as your default value,\nyou MUST specify a valid c_default.") kwargs.setdefault('c_default', c_default) kwargs.setdefault('py_default', py_default) dict = legacy_converters if legacy else converters legacy_str = "legacy " if legacy else "" if name not in dict: fail('{} is not a valid {}converter'.format(name, legacy_str)) # if you use a c_name for the parameter, we just give that name to the converter # but the parameter object gets the python name converter = dict[name](c_name or parameter_name, parameter_name, self.function, value, **kwargs) kind = inspect.Parameter.KEYWORD_ONLY if self.keyword_only else inspect.Parameter.POSITIONAL_OR_KEYWORD if isinstance(converter, self_converter): if len(self.function.parameters) == 1: if (self.parameter_state != self.ps_required): fail("A 'self' parameter cannot be marked optional.") if value is not unspecified: fail("A 'self' parameter cannot have a default value.") if self.group: fail("A 'self' parameter cannot be in an optional group.") kind = inspect.Parameter.POSITIONAL_ONLY self.parameter_state = self.ps_start self.function.parameters.clear() else: fail("A 'self' parameter, if specified, must be the very first thing in the parameter block.") p = Parameter(parameter_name, kind, function=self.function, converter=converter, default=value, group=self.group) if parameter_name in self.function.parameters: fail("You can't have two parameters named " + repr(parameter_name) + "!") self.function.parameters[parameter_name] = p def parse_converter(self, annotation): if isinstance(annotation, ast.Str): return annotation.s, True, {} if isinstance(annotation, ast.Name): return annotation.id, False, {} if not isinstance(annotation, ast.Call): fail("Annotations must be either a name, a function call, or a string.") name = annotation.func.id kwargs = {node.arg: ast.literal_eval(node.value) for node in annotation.keywords} return name, False, kwargs def parse_special_symbol(self, symbol): if self.parameter_state == self.ps_seen_slash: fail("Function " + self.function.name + " specifies " + symbol + " after /, which is unsupported.") if symbol == '*': if self.keyword_only: fail("Function " + self.function.name + " uses '*' more than once.") self.keyword_only = True elif symbol == '[': if self.parameter_state in (self.ps_start, self.ps_left_square_before): self.parameter_state = self.ps_left_square_before elif self.parameter_state in (self.ps_required, self.ps_group_after): self.parameter_state = self.ps_group_after else: fail("Function " + self.function.name + " has an unsupported group configuration. (Unexpected state " + str(self.parameter_state) + ".b)") self.group += 1 self.function.docstring_only = True elif symbol == ']': if not self.group: fail("Function " + self.function.name + " has a ] without a matching [.") if not any(p.group == self.group for p in self.function.parameters.values()): fail("Function " + self.function.name + " has an empty group.\nAll groups must contain at least one parameter.") self.group -= 1 if self.parameter_state in (self.ps_left_square_before, self.ps_group_before): self.parameter_state = self.ps_group_before elif self.parameter_state in (self.ps_group_after, self.ps_right_square_after): self.parameter_state = self.ps_right_square_after else: fail("Function " + self.function.name + " has an unsupported group configuration. (Unexpected state " + str(self.parameter_state) + ".c)") elif symbol == '/': # ps_required and ps_optional are allowed here, that allows positional-only without option groups # to work (and have default values!) if (self.parameter_state not in (self.ps_required, self.ps_optional, self.ps_right_square_after, self.ps_group_before)) or self.group: fail("Function " + self.function.name + " has an unsupported group configuration. (Unexpected state " + str(self.parameter_state) + ".d)") if self.keyword_only: fail("Function " + self.function.name + " mixes keyword-only and positional-only parameters, which is unsupported.") self.parameter_state = self.ps_seen_slash # fixup preceeding parameters for p in self.function.parameters.values(): if (p.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD and not isinstance(p.converter, self_converter)): fail("Function " + self.function.name + " mixes keyword-only and positional-only parameters, which is unsupported.") p.kind = inspect.Parameter.POSITIONAL_ONLY def state_parameter_docstring_start(self, line): self.parameter_docstring_indent = len(self.indent.margin) assert self.indent.depth == 3 return self.next(self.state_parameter_docstring, line) # every line of the docstring must start with at least F spaces, # where F > P. # these F spaces will be stripped. def state_parameter_docstring(self, line): stripped = line.strip() if stripped.startswith('#'): return indent = self.indent.measure(line) if indent < self.parameter_docstring_indent: self.indent.infer(line) assert self.indent.depth < 3 if self.indent.depth == 2: # back to a parameter return self.next(self.state_parameter, line) assert self.indent.depth == 1 return self.next(self.state_function_docstring, line) assert self.function.parameters last_parameter = next(reversed(list(self.function.parameters.values()))) new_docstring = last_parameter.docstring if new_docstring: new_docstring += '\n' if stripped: new_docstring += self.indent.dedent(line) last_parameter.docstring = new_docstring # the final stanza of the DSL is the docstring. def state_function_docstring(self, line): if self.group: fail("Function " + self.function.name + " has a ] without a matching [.") stripped = line.strip() if stripped.startswith('#'): return new_docstring = self.function.docstring if new_docstring: new_docstring += "\n" if stripped: line = self.indent.dedent(line).rstrip() else: line = '' new_docstring += line self.function.docstring = new_docstring def format_docstring(self): f = self.function new_or_init = f.kind in (METHOD_NEW, METHOD_INIT) if new_or_init and not f.docstring: # don't render a docstring at all, no signature, nothing. return f.docstring text, add, output = _text_accumulator() parameters = f.render_parameters ## ## docstring first line ## if new_or_init: # classes get *just* the name of the class # not __new__, not __init__, and not module.classname assert f.cls add(f.cls.name) else: add(f.name) add('(') # populate "right_bracket_count" field for every parameter assert parameters, "We should always have a self parameter. " + repr(f) assert isinstance(parameters[0].converter, self_converter) parameters[0].right_bracket_count = 0 parameters_after_self = parameters[1:] if parameters_after_self: # for now, the only way Clinic supports positional-only parameters # is if all of them are positional-only... # # ... except for self! self is always positional-only. positional_only_parameters = [p.kind == inspect.Parameter.POSITIONAL_ONLY for p in parameters_after_self] if parameters_after_self[0].kind == inspect.Parameter.POSITIONAL_ONLY: assert all(positional_only_parameters) for p in parameters: p.right_bracket_count = abs(p.group) else: # don't put any right brackets around non-positional-only parameters, ever. for p in parameters_after_self: p.right_bracket_count = 0 right_bracket_count = 0 def fix_right_bracket_count(desired): nonlocal right_bracket_count s = '' while right_bracket_count < desired: s += '[' right_bracket_count += 1 while right_bracket_count > desired: s += ']' right_bracket_count -= 1 return s need_slash = False added_slash = False need_a_trailing_slash = False # we only need a trailing slash: # * if this is not a "docstring_only" signature # * and if the last *shown* parameter is # positional only if not f.docstring_only: for p in reversed(parameters): if not p.converter.show_in_signature: continue if p.is_positional_only(): need_a_trailing_slash = True break added_star = False first_parameter = True last_p = parameters[-1] line_length = len(''.join(text)) indent = " " * line_length def add_parameter(text): nonlocal line_length nonlocal first_parameter if first_parameter: s = text first_parameter = False else: s = ' ' + text if line_length + len(s) >= 72: add('\n') add(indent) line_length = len(indent) s = text line_length += len(s) add(s) for p in parameters: if not p.converter.show_in_signature: continue assert p.name is_self = isinstance(p.converter, self_converter) if is_self and f.docstring_only: # this isn't a real machine-parsable signature, # so let's not print the "self" parameter continue if p.is_positional_only(): need_slash = not f.docstring_only elif need_slash and not (added_slash or p.is_positional_only()): added_slash = True add_parameter('/,') if p.is_keyword_only() and not added_star: added_star = True add_parameter('*,') p_add, p_output = text_accumulator() p_add(fix_right_bracket_count(p.right_bracket_count)) if isinstance(p.converter, self_converter): # annotate first parameter as being a "self". # # if inspect.Signature gets this function, # and it's already bound, the self parameter # will be stripped off. # # if it's not bound, it should be marked # as positional-only. # # note: we don't print "self" for __init__, # because this isn't actually the signature # for __init__. (it can't be, __init__ doesn't # have a docstring.) if this is an __init__ # (or __new__), then this signature is for # calling the class to contruct a new instance. p_add('$') name = p.converter.signature_name or p.name p_add(name) if p.converter.is_optional(): p_add('=') value = p.converter.py_default if not value: value = repr(p.converter.default) p_add(value) if (p != last_p) or need_a_trailing_slash: p_add(',') add_parameter(p_output()) add(fix_right_bracket_count(0)) if need_a_trailing_slash: add_parameter('/') add(')') # PEP 8 says: # # The Python standard library will not use function annotations # as that would result in a premature commitment to a particular # annotation style. Instead, the annotations are left for users # to discover and experiment with useful annotation styles. # # therefore this is commented out: # # if f.return_converter.py_default: # add(' -> ') # add(f.return_converter.py_default) if not f.docstring_only: add("\n--\n") docstring_first_line = output() # now fix up the places where the brackets look wrong docstring_first_line = docstring_first_line.replace(', ]', ',] ') # okay. now we're officially building the "parameters" section. # create substitution text for {parameters} spacer_line = False for p in parameters: if not p.docstring.strip(): continue if spacer_line: add('\n') else: spacer_line = True add(" ") add(p.name) add('\n') add(textwrap.indent(rstrip_lines(p.docstring.rstrip()), " ")) parameters = output() if parameters: parameters += '\n' ## ## docstring body ## docstring = f.docstring.rstrip() lines = [line.rstrip() for line in docstring.split('\n')] # Enforce the summary line! # The first line of a docstring should be a summary of the function. # It should fit on one line (80 columns? 79 maybe?) and be a paragraph # by itself. # # Argument Clinic enforces the following rule: # * either the docstring is empty, # * or it must have a summary line. # # Guido said Clinic should enforce this: # http://mail.python.org/pipermail/python-dev/2013-June/127110.html if len(lines) >= 2: if lines[1]: fail("Docstring for " + f.full_name + " does not have a summary line!\n" + "Every non-blank function docstring must start with\n" + "a single line summary followed by an empty line.") elif len(lines) == 1: # the docstring is only one line right now--the summary line. # add an empty line after the summary line so we have space # between it and the {parameters} we're about to add. lines.append('') parameters_marker_count = len(docstring.split('{parameters}')) - 1 if parameters_marker_count > 1: fail('You may not specify {parameters} more than once in a docstring!') if not parameters_marker_count: # insert after summary line lines.insert(2, '{parameters}') # insert at front of docstring lines.insert(0, docstring_first_line) docstring = "\n".join(lines) add(docstring) docstring = output() docstring = linear_format(docstring, parameters=parameters) docstring = docstring.rstrip() return docstring def state_terminal(self, line): """ Called when processing the block is done. """ assert not line if not self.function: return if self.keyword_only: values = self.function.parameters.values() if not values: no_parameter_after_star = True else: last_parameter = next(reversed(list(values))) no_parameter_after_star = last_parameter.kind != inspect.Parameter.KEYWORD_ONLY if no_parameter_after_star: fail("Function " + self.function.name + " specifies '*' without any parameters afterwards.") # remove trailing whitespace from all parameter docstrings for name, value in self.function.parameters.items(): if not value: continue value.docstring = value.docstring.rstrip() self.function.docstring = self.format_docstring() # maps strings to callables. # the callable should return an object # that implements the clinic parser # interface (__init__ and parse). # # example parsers: # "clinic", handles the Clinic DSL # "python", handles running Python code # parsers = {'clinic' : DSLParser, 'python': PythonParser} clinic = None def main(argv): import sys if sys.version_info.major < 3 or sys.version_info.minor < 3: sys.exit("Error: clinic.py requires Python 3.3 or greater.") import argparse cmdline = argparse.ArgumentParser() cmdline.add_argument("-f", "--force", action='store_true') cmdline.add_argument("-o", "--output", type=str) cmdline.add_argument("-v", "--verbose", action='store_true') cmdline.add_argument("--converters", action='store_true') cmdline.add_argument("--make", action='store_true') cmdline.add_argument("filename", type=str, nargs="*") ns = cmdline.parse_args(argv) if ns.converters: if ns.filename: print("Usage error: can't specify --converters and a filename at the same time.") print() cmdline.print_usage() sys.exit(-1) converters = [] return_converters = [] ignored = set(""" add_c_converter add_c_return_converter add_default_legacy_c_converter add_legacy_c_converter """.strip().split()) module = globals() for name in module: for suffix, ids in ( ("_return_converter", return_converters), ("_converter", converters), ): if name in ignored: continue if name.endswith(suffix): ids.append((name, name[:-len(suffix)])) break print() print("Legacy converters:") legacy = sorted(legacy_converters) print(' ' + ' '.join(c for c in legacy if c[0].isupper())) print(' ' + ' '.join(c for c in legacy if c[0].islower())) print() for title, attribute, ids in ( ("Converters", 'converter_init', converters), ("Return converters", 'return_converter_init', return_converters), ): print(title + ":") longest = -1 for name, short_name in ids: longest = max(longest, len(short_name)) for name, short_name in sorted(ids, key=lambda x: x[1].lower()): cls = module[name] callable = getattr(cls, attribute, None) if not callable: continue signature = inspect.signature(callable) parameters = [] for parameter_name, parameter in signature.parameters.items(): if parameter.kind == inspect.Parameter.KEYWORD_ONLY: if parameter.default != inspect.Parameter.empty: s = '{}={!r}'.format(parameter_name, parameter.default) else: s = parameter_name parameters.append(s) print(' {}({})'.format(short_name, ', '.join(parameters))) print() print("All converters also accept (c_default=None, py_default=None, annotation=None).") print("All return converters also accept (py_default=None).") sys.exit(0) if ns.make: if ns.output or ns.filename: print("Usage error: can't use -o or filenames with --make.") print() cmdline.print_usage() sys.exit(-1) for root, dirs, files in os.walk('.'): for rcs_dir in ('.svn', '.git', '.hg', 'build'): if rcs_dir in dirs: dirs.remove(rcs_dir) for filename in files: if not (filename.endswith('.c') or filename.endswith('.h')): continue path = os.path.join(root, filename) if ns.verbose: print(path) parse_file(path, force=ns.force, verify=not ns.force) return if not ns.filename: cmdline.print_usage() sys.exit(-1) if ns.output and len(ns.filename) > 1: print("Usage error: can't use -o with multiple filenames.") print() cmdline.print_usage() sys.exit(-1) for filename in ns.filename: if ns.verbose: print(filename) parse_file(filename, output=ns.output, force=ns.force, verify=not ns.force) if __name__ == "__main__": sys.exit(main(sys.argv[1:]))
lukauskas/scipy
refs/heads/master
scipy/special/tests/test_mpmath.py
40
""" Test Scipy functions versus mpmath, if available. """ from __future__ import division, print_function, absolute_import import sys import time from distutils.version import LooseVersion import numpy as np from numpy.testing import dec, run_module_suite, assert_ from numpy import pi import scipy.special as sc from scipy._lib.six import reraise, with_metaclass from scipy._lib._testutils import knownfailure_overridable from scipy.special._testutils import FuncData, assert_func_equal try: import mpmath except ImportError: try: import sympy.mpmath as mpmath except ImportError: mpmath = None def mpmath_check(min_ver): if mpmath is None: return dec.skipif(True, "mpmath is not installed") return dec.skipif(LooseVersion(mpmath.__version__) < LooseVersion(min_ver), "mpmath version >= %s required" % min_ver) #------------------------------------------------------------------------------ # expi #------------------------------------------------------------------------------ @mpmath_check('0.10') def test_expi_complex(): dataset = [] for r in np.logspace(-99, 2, 10): for p in np.linspace(0, 2*np.pi, 30): z = r*np.exp(1j*p) dataset.append((z, complex(mpmath.ei(z)))) dataset = np.array(dataset, dtype=np.complex_) FuncData(sc.expi, dataset, 0, 1).check() #------------------------------------------------------------------------------ # hyp2f1 #------------------------------------------------------------------------------ @mpmath_check('0.14') def test_hyp2f1_strange_points(): pts = [ (2,-1,-1,0.7), (2,-2,-2,0.7), ] kw = dict(eliminate=True) dataset = [p + (float(mpmath.hyp2f1(*p, **kw)),) for p in pts] dataset = np.array(dataset, dtype=np.float_) FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() @mpmath_check('0.13') def test_hyp2f1_real_some_points(): pts = [ (1,2,3,0), (1./3, 2./3, 5./6, 27./32), (1./4, 1./2, 3./4, 80./81), (2,-2,-3,3), (2,-3,-2,3), (2,-1.5,-1.5,3), (1,2,3,0), (0.7235, -1, -5, 0.3), (0.25, 1./3, 2, 0.999), (0.25, 1./3, 2, -1), (2,3,5,0.99), (3./2,-0.5,3,0.99), (2,2.5,-3.25,0.999), (-8, 18.016500331508873, 10.805295997850628, 0.90875647507000001), (-10,900,-10.5,0.99), (-10,900,10.5,0.99), (-1,2,1,1.0), (-1,2,1,-1.0), (-3,13,5,1.0), (-3,13,5,-1.0), (0.5, 1 - 270.5, 1.5, 0.999**2), # from issue 1561 ] dataset = [p + (float(mpmath.hyp2f1(*p)),) for p in pts] dataset = np.array(dataset, dtype=np.float_) olderr = np.seterr(invalid='ignore') try: FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() finally: np.seterr(**olderr) @mpmath_check('0.14') def test_hyp2f1_some_points_2(): # Taken from mpmath unit tests -- this point failed for mpmath 0.13 but # was fixed in their SVN since then pts = [ (112, (51,10), (-9,10), -0.99999), (10,-900,10.5,0.99), (10,-900,-10.5,0.99), ] def fev(x): if isinstance(x, tuple): return float(x[0]) / x[1] else: return x dataset = [tuple(map(fev, p)) + (float(mpmath.hyp2f1(*p)),) for p in pts] dataset = np.array(dataset, dtype=np.float_) FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check() @mpmath_check('0.13') def test_hyp2f1_real_some(): dataset = [] for a in [-10, -5, -1.8, 1.8, 5, 10]: for b in [-2.5, -1, 1, 7.4]: for c in [-9, -1.8, 5, 20.4]: for z in [-10, -1.01, -0.99, 0, 0.6, 0.95, 1.5, 10]: try: v = float(mpmath.hyp2f1(a, b, c, z)) except: continue dataset.append((a, b, c, z, v)) dataset = np.array(dataset, dtype=np.float_) olderr = np.seterr(invalid='ignore') try: FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-9, ignore_inf_sign=True).check() finally: np.seterr(**olderr) @mpmath_check('0.12') @dec.slow def test_hyp2f1_real_random(): dataset = [] npoints = 500 dataset = np.zeros((npoints, 5), np.float_) np.random.seed(1234) dataset[:,0] = np.random.pareto(1.5, npoints) dataset[:,1] = np.random.pareto(1.5, npoints) dataset[:,2] = np.random.pareto(1.5, npoints) dataset[:,3] = 2*np.random.rand(npoints) - 1 dataset[:,0] *= (-1)**np.random.randint(2, npoints) dataset[:,1] *= (-1)**np.random.randint(2, npoints) dataset[:,2] *= (-1)**np.random.randint(2, npoints) for ds in dataset: if mpmath.__version__ < '0.14': # mpmath < 0.14 fails for c too much smaller than a, b if abs(ds[:2]).max() > abs(ds[2]): ds[2] = abs(ds[:2]).max() ds[4] = float(mpmath.hyp2f1(*tuple(ds[:4]))) FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-9).check() #------------------------------------------------------------------------------ # erf (complex) #------------------------------------------------------------------------------ @mpmath_check('0.14') def test_erf_complex(): # need to increase mpmath precision for this test old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec try: mpmath.mp.dps = 70 x1, y1 = np.meshgrid(np.linspace(-10, 1, 31), np.linspace(-10, 1, 11)) x2, y2 = np.meshgrid(np.logspace(-80, .8, 31), np.logspace(-80, .8, 11)) points = np.r_[x1.ravel(),x2.ravel()] + 1j*np.r_[y1.ravel(),y2.ravel()] assert_func_equal(sc.erf, lambda x: complex(mpmath.erf(x)), points, vectorized=False, rtol=1e-13) assert_func_equal(sc.erfc, lambda x: complex(mpmath.erfc(x)), points, vectorized=False, rtol=1e-13) finally: mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec #------------------------------------------------------------------------------ # lpmv #------------------------------------------------------------------------------ @mpmath_check('0.15') def test_lpmv(): pts = [] for x in [-0.99, -0.557, 1e-6, 0.132, 1]: pts.extend([ (1, 1, x), (1, -1, x), (-1, 1, x), (-1, -2, x), (1, 1.7, x), (1, -1.7, x), (-1, 1.7, x), (-1, -2.7, x), (1, 10, x), (1, 11, x), (3, 8, x), (5, 11, x), (-3, 8, x), (-5, 11, x), (3, -8, x), (5, -11, x), (-3, -8, x), (-5, -11, x), (3, 8.3, x), (5, 11.3, x), (-3, 8.3, x), (-5, 11.3, x), (3, -8.3, x), (5, -11.3, x), (-3, -8.3, x), (-5, -11.3, x), ]) def mplegenp(nu, mu, x): if mu == int(mu) and x == 1: # mpmath 0.17 gets this wrong if mu == 0: return 1 else: return 0 return mpmath.legenp(nu, mu, x) dataset = [p + (mplegenp(p[1], p[0], p[2]),) for p in pts] dataset = np.array(dataset, dtype=np.float_) def evf(mu, nu, x): return sc.lpmv(mu.astype(int), nu, x) olderr = np.seterr(invalid='ignore') try: FuncData(evf, dataset, (0,1,2), 3, rtol=1e-10, atol=1e-14).check() finally: np.seterr(**olderr) #------------------------------------------------------------------------------ # beta #------------------------------------------------------------------------------ @mpmath_check('0.15') def test_beta(): np.random.seed(1234) b = np.r_[np.logspace(-200, 200, 4), np.logspace(-10, 10, 4), np.logspace(-1, 1, 4), np.arange(-10, 11, 1), np.arange(-10, 11, 1) + 0.5, -1, -2.3, -3, -100.3, -10003.4] a = b ab = np.array(np.broadcast_arrays(a[:,None], b[None,:])).reshape(2, -1).T old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec try: mpmath.mp.dps = 400 assert_func_equal(sc.beta, lambda a, b: float(mpmath.beta(a, b)), ab, vectorized=False, rtol=1e-10, ignore_inf_sign=True) assert_func_equal( sc.betaln, lambda a, b: float(mpmath.log(abs(mpmath.beta(a, b)))), ab, vectorized=False, rtol=1e-10) finally: mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec #------------------------------------------------------------------------------ # Machinery for systematic tests #------------------------------------------------------------------------------ class Arg(object): """ Generate a set of numbers on the real axis, concentrating on 'interesting' regions and covering all orders of magnitude. """ def __init__(self, a=-np.inf, b=np.inf, inclusive_a=True, inclusive_b=True): self.a = a self.b = b self.inclusive_a = inclusive_a self.inclusive_b = inclusive_b if self.a == -np.inf: self.a = -np.finfo(float).max/2 if self.b == np.inf: self.b = np.finfo(float).max/2 def values(self, n): """Return an array containing approximatively `n` numbers.""" n1 = max(2, int(0.3*n)) n2 = max(2, int(0.2*n)) n3 = max(8, n - n1 - n2) v1 = np.linspace(-1, 1, n1) v2 = np.r_[np.linspace(-10, 10, max(0, n2-4)), -9, -5.5, 5.5, 9] if self.a >= 0 and self.b > 0: v3 = np.r_[ np.logspace(-30, -1, 2 + n3//4), np.logspace(5, np.log10(self.b), 1 + n3//4), ] v4 = np.logspace(1, 5, 1 + n3//2) elif self.a < 0 and self.b > 0: v3 = np.r_[ np.logspace(-30, -1, 2 + n3//8), np.logspace(5, np.log10(self.b), 1 + n3//8), -np.logspace(-30, -1, 2 + n3//8), -np.logspace(5, np.log10(-self.a), 1 + n3//8) ] v4 = np.r_[ np.logspace(1, 5, 1 + n3//4), -np.logspace(1, 5, 1 + n3//4) ] elif self.b < 0: v3 = np.r_[ -np.logspace(-30, -1, 2 + n3//4), -np.logspace(5, np.log10(-self.b), 1 + n3//4), ] v4 = -np.logspace(1, 5, 1 + n3//2) else: v3 = [] v4 = [] v = np.r_[v1, v2, v3, v4, 0] if self.inclusive_a: v = v[v >= self.a] else: v = v[v > self.a] if self.inclusive_b: v = v[v <= self.b] else: v = v[v < self.b] return np.unique(v) class FixedArg(object): def __init__(self, values): self._values = np.asarray(values) def values(self, n): return self._values class ComplexArg(object): def __init__(self, a=complex(-np.inf, -np.inf), b=complex(np.inf, np.inf)): self.real = Arg(a.real, b.real) self.imag = Arg(a.imag, b.imag) def values(self, n): m = max(2, int(np.sqrt(n))) x = self.real.values(m) y = self.imag.values(m) return (x[:,None] + 1j*y[None,:]).ravel() class IntArg(object): def __init__(self, a=-1000, b=1000): self.a = a self.b = b def values(self, n): v1 = Arg(self.a, self.b).values(max(1 + n//2, n-5)).astype(int) v2 = np.arange(-5, 5) v = np.unique(np.r_[v1, v2]) v = v[(v >= self.a) & (v < self.b)] return v class MpmathData(object): def __init__(self, scipy_func, mpmath_func, arg_spec, name=None, dps=None, prec=None, n=5000, rtol=1e-7, atol=1e-300, ignore_inf_sign=False): self.scipy_func = scipy_func self.mpmath_func = mpmath_func self.arg_spec = arg_spec self.dps = dps self.prec = prec self.n = n self.rtol = rtol self.atol = atol self.ignore_inf_sign = ignore_inf_sign if isinstance(self.arg_spec, np.ndarray): self.is_complex = np.issubdtype(self.arg_spec.dtype, np.complexfloating) else: self.is_complex = any([isinstance(arg, ComplexArg) for arg in self.arg_spec]) self.ignore_inf_sign = ignore_inf_sign if not name or name == '<lambda>': name = getattr(scipy_func, '__name__', None) if not name or name == '<lambda>': name = getattr(mpmath_func, '__name__', None) self.name = name def check(self): np.random.seed(1234) # Generate values for the arguments if isinstance(self.arg_spec, np.ndarray): argarr = self.arg_spec.copy() else: num_args = len(self.arg_spec) ms = np.asarray([1.5 if isinstance(arg, ComplexArg) else 1.0 for arg in self.arg_spec]) ms = (self.n**(ms/sum(ms))).astype(int) + 1 argvals = [] for arg, m in zip(self.arg_spec, ms): argvals.append(arg.values(m)) argarr = np.array(np.broadcast_arrays(*np.ix_(*argvals))).reshape(num_args, -1).T # Check old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec try: if self.dps is not None: dps_list = [self.dps] else: dps_list = [20] if self.prec is not None: mpmath.mp.prec = self.prec # Proper casting of mpmath input and output types. Using # native mpmath types as inputs gives improved precision # in some cases. if np.issubdtype(argarr.dtype, np.complexfloating): pytype = complex mptype = lambda x: mpmath.mpc(complex(x)) else: mptype = lambda x: mpmath.mpf(float(x)) def pytype(x): if abs(x.imag) > 1e-16*(1 + abs(x.real)): return np.nan else: return float(x.real) # Try out different dps until one (or none) works for j, dps in enumerate(dps_list): mpmath.mp.dps = dps try: assert_func_equal(self.scipy_func, lambda *a: pytype(self.mpmath_func(*map(mptype, a))), argarr, vectorized=False, rtol=self.rtol, atol=self.atol, ignore_inf_sign=self.ignore_inf_sign, nan_ok=True) break except AssertionError: if j >= len(dps_list)-1: reraise(*sys.exc_info()) finally: mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec def __repr__(self): if self.is_complex: return "<MpmathData: %s (complex)>" % (self.name,) else: return "<MpmathData: %s>" % (self.name,) def assert_mpmath_equal(*a, **kw): d = MpmathData(*a, **kw) d.check() def nonfunctional_tooslow(func): return dec.skipif(True, " Test not yet functional (too slow), needs more work.")(func) class _SystematicMeta(type): """ Metaclass which decorates all of the test_* methods with - @mpmath_check(...) - @dec.slow """ mpmath_min_version = '0.17' def __new__(cls, cls_name, bases, dct): for name, item in list(dct.items()): if name.startswith('test_'): item = dec.slow(item) item = mpmath_check(cls.mpmath_min_version)(item) dct[name] = item return type.__new__(cls, cls_name, bases, dct) #------------------------------------------------------------------------------ # Dealing with mpmath quirks #------------------------------------------------------------------------------ def _trace_args(func): def tofloat(x): if isinstance(x, mpmath.mpc): return complex(x) else: return float(x) def wrap(*a, **kw): sys.stderr.write("%r: " % (tuple(map(tofloat, a)),)) sys.stderr.flush() try: r = func(*a, **kw) sys.stderr.write("-> %r" % r) finally: sys.stderr.write("\n") sys.stderr.flush() return r return wrap try: import posix import signal POSIX = ('setitimer' in dir(signal)) except ImportError: POSIX = False class _TimeoutError(Exception): pass def _time_limited(timeout=0.5, return_val=np.nan, use_sigalrm=True): """ Decorator for setting a timeout for pure-Python functions. If the function does not return within `timeout` seconds, the value `return_val` is returned instead. On POSIX this uses SIGALRM by default. On non-POSIX, settrace is used. Do not use this with threads: the SIGALRM implementation does probably not work well. The settrace implementation only traces the current thread. The settrace implementation slows down execution speed. Slowdown by a factor around 10 is probably typical. """ if POSIX and use_sigalrm: def sigalrm_handler(signum, frame): raise _TimeoutError() def deco(func): def wrap(*a, **kw): old_handler = signal.signal(signal.SIGALRM, sigalrm_handler) signal.setitimer(signal.ITIMER_REAL, timeout) try: return func(*a, **kw) except _TimeoutError: return return_val finally: signal.setitimer(signal.ITIMER_REAL, 0) signal.signal(signal.SIGALRM, old_handler) return wrap else: def deco(func): def wrap(*a, **kw): start_time = time.time() def trace(frame, event, arg): if time.time() - start_time > timeout: raise _TimeoutError() return None # turn off tracing except at function calls sys.settrace(trace) try: return func(*a, **kw) except _TimeoutError: sys.settrace(None) return return_val finally: sys.settrace(None) return wrap return deco def _exception_to_nan(func): """Decorate function to return nan if it raises an exception""" def wrap(*a, **kw): try: return func(*a, **kw) except Exception: return np.nan return wrap def _inf_to_nan(func): """Decorate function to return nan if it returns inf""" def wrap(*a, **kw): v = func(*a, **kw) if not np.isfinite(v): return np.nan return v return wrap #------------------------------------------------------------------------------ # Systematic tests #------------------------------------------------------------------------------ HYPERKW = dict(maxprec=200, maxterms=200) class TestSystematic(with_metaclass(_SystematicMeta, object)): def test_airyai(self): # oscillating function, limit range assert_mpmath_equal(lambda z: sc.airy(z)[0], mpmath.airyai, [Arg(-1e8, 1e8)], rtol=1e-5) assert_mpmath_equal(lambda z: sc.airy(z)[0], mpmath.airyai, [Arg(-1e3, 1e3)]) def test_airyai_complex(self): assert_mpmath_equal(lambda z: sc.airy(z)[0], mpmath.airyai, [ComplexArg()]) def test_airyai_prime(self): # oscillating function, limit range assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z: mpmath.airyai(z, derivative=1), [Arg(-1e8, 1e8)], rtol=1e-5) assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z: mpmath.airyai(z, derivative=1), [Arg(-1e3, 1e3)]) def test_airyai_prime_complex(self): assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z: mpmath.airyai(z, derivative=1), [ComplexArg()]) def test_airybi(self): # oscillating function, limit range assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z: mpmath.airybi(z), [Arg(-1e8, 1e8)], rtol=1e-5) assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z: mpmath.airybi(z), [Arg(-1e3, 1e3)]) def test_airybi_complex(self): assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z: mpmath.airybi(z), [ComplexArg()]) def test_airybi_prime(self): # oscillating function, limit range assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z: mpmath.airybi(z, derivative=1), [Arg(-1e8, 1e8)], rtol=1e-5) assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z: mpmath.airybi(z, derivative=1), [Arg(-1e3, 1e3)]) def test_airybi_prime_complex(self): assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z: mpmath.airybi(z, derivative=1), [ComplexArg()]) def test_bei(self): assert_mpmath_equal(sc.bei, _exception_to_nan(lambda z: mpmath.bei(0, z, **HYPERKW)), [Arg(-1e3, 1e3)]) def test_ber(self): assert_mpmath_equal(sc.ber, _exception_to_nan(lambda z: mpmath.ber(0, z, **HYPERKW)), [Arg(-1e3, 1e3)]) def test_bernoulli(self): assert_mpmath_equal(lambda n: sc.bernoulli(int(n))[int(n)], lambda n: float(mpmath.bernoulli(int(n))), [IntArg(0, 13000)], rtol=1e-9, n=13000) def test_besseli(self): assert_mpmath_equal(sc.iv, _exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)), [Arg(-1e100, 1e100), Arg()], atol=1e-270) def test_besseli_complex(self): assert_mpmath_equal(lambda v, z: sc.iv(v.real, z), _exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)), [Arg(-1e100, 1e100), ComplexArg()]) def test_besselj(self): assert_mpmath_equal(sc.jv, _exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)), [Arg(-1e100, 1e100), Arg(-1e3, 1e3)], ignore_inf_sign=True) # loss of precision at large arguments due to oscillation assert_mpmath_equal(sc.jv, _exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)), [Arg(-1e100, 1e100), Arg(-1e8, 1e8)], ignore_inf_sign=True, rtol=1e-5) def test_besselj_complex(self): assert_mpmath_equal(lambda v, z: sc.jv(v.real, z), _exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)), [Arg(), ComplexArg()]) def test_besselk(self): def mpbesselk(v, x): r = float(mpmath.besselk(v, x, **HYPERKW)) if abs(r) > 1e305: # overflowing to inf a bit earlier is OK r = np.inf * np.sign(r) if abs(v) == abs(x) and abs(r) == np.inf and abs(x) > 1: # wrong result (kv(x,x) -> 0 for x > 1), # try with higher dps old_dps = mpmath.mp.dps mpmath.mp.dps = 200 try: r = float(mpmath.besselk(v, x, **HYPERKW)) finally: mpmath.mp.dps = old_dps return r assert_mpmath_equal(sc.kv, _exception_to_nan(mpbesselk), [Arg(-1e100, 1e100), Arg()]) def test_besselk_int(self): assert_mpmath_equal(sc.kn, _exception_to_nan(lambda v, z: mpmath.besselk(v, z, **HYPERKW)), [IntArg(-1000, 1000), Arg()]) def test_besselk_complex(self): assert_mpmath_equal(lambda v, z: sc.kv(v.real, z), _exception_to_nan(lambda v, z: mpmath.besselk(v, z, **HYPERKW)), [Arg(-1e100, 1e100), ComplexArg()]) def test_bessely(self): def mpbessely(v, x): r = float(mpmath.bessely(v, x, **HYPERKW)) if abs(r) > 1e305: # overflowing to inf a bit earlier is OK r = np.inf * np.sign(r) if abs(r) == 0 and x == 0: # invalid result from mpmath, point x=0 is a divergence return np.nan return r assert_mpmath_equal(sc.yv, _exception_to_nan(mpbessely), [Arg(-1e100, 1e100), Arg(-1e8, 1e8)], n=5000) def test_bessely_complex(self): def mpbessely(v, x): r = complex(mpmath.bessely(v, x, **HYPERKW)) if abs(r) > 1e305: # overflowing to inf a bit earlier is OK olderr = np.seterr(invalid='ignore') try: r = np.inf * np.sign(r) finally: np.seterr(**olderr) return r assert_mpmath_equal(lambda v, z: sc.yv(v.real, z), _exception_to_nan(mpbessely), [Arg(), ComplexArg()], n=15000) def test_bessely_int(self): def mpbessely(v, x): r = float(mpmath.bessely(v, x)) if abs(r) == 0 and x == 0: # invalid result from mpmath, point x=0 is a divergence return np.nan return r assert_mpmath_equal(lambda v, z: sc.yn(int(v), z), _exception_to_nan(mpbessely), [IntArg(-1000, 1000), Arg(-1e8, 1e8)]) def test_beta(self): bad_points = [] def beta(a, b, nonzero=False): if a < -1e12 or b < -1e12: # Function is defined here only at integers, but due # to loss of precision this is numerically # ill-defined. Don't compare values here. return np.nan if (a < 0 or b < 0) and (abs(float(a + b)) % 1) == 0: # close to a zero of the function: mpmath and scipy # will not round here the same, so the test needs to be # run with an absolute tolerance if nonzero: bad_points.append((float(a), float(b))) return np.nan return mpmath.beta(a, b) assert_mpmath_equal(sc.beta, lambda a, b: beta(a, b, nonzero=True), [Arg(), Arg()], dps=400, ignore_inf_sign=True) assert_mpmath_equal(sc.beta, beta, np.array(bad_points), dps=400, ignore_inf_sign=True, atol=1e-14) def test_betainc(self): assert_mpmath_equal(sc.betainc, _time_limited()(_exception_to_nan(lambda a, b, x: mpmath.betainc(a, b, 0, x, regularized=True))), [Arg(), Arg(), Arg()]) def test_binom(self): bad_points = [] def binomial(n, k, nonzero=False): if abs(k) > 1e8*(abs(n) + 1): # The binomial is rapidly oscillating in this region, # and the function is numerically ill-defined. Don't # compare values here. return np.nan if n < k and abs(float(n-k) - np.round(float(n-k))) < 1e-15: # close to a zero of the function: mpmath and scipy # will not round here the same, so the test needs to be # run with an absolute tolerance if nonzero: bad_points.append((float(n), float(k))) return np.nan return mpmath.binomial(n, k) assert_mpmath_equal(sc.binom, lambda n, k: binomial(n, k, nonzero=True), [Arg(), Arg()], dps=400) assert_mpmath_equal(sc.binom, binomial, np.array(bad_points), dps=400, atol=1e-14) def test_chebyt_int(self): assert_mpmath_equal(lambda n, x: sc.eval_chebyt(int(n), x), _exception_to_nan(lambda n, x: mpmath.chebyt(n, x, **HYPERKW)), [IntArg(), Arg()], dps=50) @knownfailure_overridable("some cases in hyp2f1 not fully accurate") def test_chebyt(self): assert_mpmath_equal(sc.eval_chebyt, lambda n, x: _time_limited()(_exception_to_nan(mpmath.chebyt))(n, x, **HYPERKW), [Arg(-101, 101), Arg()], n=10000) def test_chebyu_int(self): assert_mpmath_equal(lambda n, x: sc.eval_chebyu(int(n), x), _exception_to_nan(lambda n, x: mpmath.chebyu(n, x, **HYPERKW)), [IntArg(), Arg()], dps=50) @knownfailure_overridable("some cases in hyp2f1 not fully accurate") def test_chebyu(self): assert_mpmath_equal(sc.eval_chebyu, lambda n, x: _time_limited()(_exception_to_nan(mpmath.chebyu))(n, x, **HYPERKW), [Arg(-101, 101), Arg()]) def test_chi(self): def chi(x): return sc.shichi(x)[1] assert_mpmath_equal(chi, mpmath.chi, [Arg()]) # check asymptotic series cross-over assert_mpmath_equal(chi, mpmath.chi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])]) def test_ci(self): def ci(x): return sc.sici(x)[1] # oscillating function: limit range assert_mpmath_equal(ci, mpmath.ci, [Arg(-1e8, 1e8)]) def test_digamma(self): assert_mpmath_equal(sc.digamma, _exception_to_nan(mpmath.digamma), [Arg()], dps=50) @knownfailure_overridable() def test_digamma_complex(self): assert_mpmath_equal(sc.digamma, _time_limited()(_exception_to_nan(mpmath.digamma)), [ComplexArg()], n=200) def test_e1(self): assert_mpmath_equal(sc.exp1, mpmath.e1, [Arg()]) def test_exprel(self): assert_mpmath_equal(sc.exprel, lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'), [Arg(a=-np.log(np.finfo(np.double).max), b=np.log(np.finfo(np.double).max))]) assert_mpmath_equal(sc.exprel, lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'), np.array([1e-12, 1e-24, 0, 1e12, 1e24, np.inf]), rtol=1e-11) assert_(np.isinf(sc.exprel(np.inf))) assert_(sc.exprel(-np.inf) == 0) def test_e1_complex(self): # E_1 oscillates as Im[z] -> +- inf, so limit range assert_mpmath_equal(sc.exp1, mpmath.e1, [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], rtol=1e-11) # Check cross-over reqion assert_mpmath_equal(sc.exp1, mpmath.e1, (np.linspace(-50, 50, 171)[:,None] + np.r_[0, np.logspace(-3, 2, 61), -np.logspace(-3, 2, 11)]*1j ).ravel(), rtol=1e-11) assert_mpmath_equal(sc.exp1, mpmath.e1, (np.linspace(-50, -35, 10000) + 0j), rtol=1e-11) def test_ei(self): assert_mpmath_equal(sc.expi, mpmath.ei, [Arg()], rtol=1e-11) def test_ei_complex(self): # Ei oscillates as Im[z] -> +- inf, so limit range assert_mpmath_equal(sc.expi, mpmath.ei, [ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))], rtol=1e-9) def test_ellipe(self): assert_mpmath_equal(sc.ellipe, mpmath.ellipe, [Arg(b=1.0)]) def test_ellipeinc(self): assert_mpmath_equal(sc.ellipeinc, mpmath.ellipe, [Arg(-1e3, 1e3), Arg(b=1.0)]) def test_ellipeinc_largephi(self): assert_mpmath_equal(sc.ellipeinc, mpmath.ellipe, [Arg(), Arg()]) def test_ellipf(self): assert_mpmath_equal(sc.ellipkinc, mpmath.ellipf, [Arg(-1e3, 1e3), Arg()]) def test_ellipf_largephi(self): assert_mpmath_equal(sc.ellipkinc, mpmath.ellipf, [Arg(), Arg()]) def test_ellipk(self): assert_mpmath_equal(sc.ellipk, mpmath.ellipk, [Arg(b=1.0)]) assert_mpmath_equal(sc.ellipkm1, lambda m: mpmath.ellipk(1 - m), [Arg(a=0.0)], dps=400) def test_ellipkinc(self): def ellipkinc(phi, m): return mpmath.ellippi(0, phi, m) assert_mpmath_equal(sc.ellipkinc, ellipkinc, [Arg(-1e3, 1e3), Arg(b=1.0)], ignore_inf_sign=True) def test_ellipkinc_largephi(self): def ellipkinc(phi, m): return mpmath.ellippi(0, phi, m) assert_mpmath_equal(sc.ellipkinc, ellipkinc, [Arg(), Arg(b=1.0)], ignore_inf_sign=True) def test_ellipfun_sn(self): # Oscillating function --- limit range of first argument; the # loss of precision there is an expected numerical feature # rather than an actual bug assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[0], lambda u, m: mpmath.ellipfun("sn", u=u, m=m), [Arg(-1e6, 1e6), Arg(a=0, b=1)], atol=1e-20) def test_ellipfun_cn(self): # see comment in ellipfun_sn assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[1], lambda u, m: mpmath.ellipfun("cn", u=u, m=m), [Arg(-1e6, 1e6), Arg(a=0, b=1)], atol=1e-20) def test_ellipfun_dn(self): # see comment in ellipfun_sn assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[2], lambda u, m: mpmath.ellipfun("dn", u=u, m=m), [Arg(-1e6, 1e6), Arg(a=0, b=1)], atol=1e-20) def test_erf(self): assert_mpmath_equal(sc.erf, lambda z: mpmath.erf(z), [Arg()]) def test_erf_complex(self): assert_mpmath_equal(sc.erf, lambda z: mpmath.erf(z), [ComplexArg()], n=200) def test_erfc(self): assert_mpmath_equal(sc.erfc, _exception_to_nan(lambda z: mpmath.erfc(z)), [Arg()]) def test_erfc_complex(self): assert_mpmath_equal(sc.erfc, _exception_to_nan(lambda z: mpmath.erfc(z)), [ComplexArg()], n=200) def test_erfi(self): assert_mpmath_equal(sc.erfi, mpmath.erfi, [Arg()], n=200) def test_erfi_complex(self): assert_mpmath_equal(sc.erfi, mpmath.erfi, [ComplexArg()], n=200) def test_eulernum(self): assert_mpmath_equal(lambda n: sc.euler(n)[-1], mpmath.eulernum, [IntArg(1, 10000)], n=10000) @knownfailure_overridable("spurious(?) inf for negative x") def test_expint(self): assert_mpmath_equal(sc.expn, _exception_to_nan(mpmath.expint), [IntArg(0, 100), Arg()]) def test_fresnels(self): def fresnels(x): return sc.fresnel(x)[0] assert_mpmath_equal(fresnels, mpmath.fresnels, [Arg()]) def test_fresnelc(self): def fresnelc(x): return sc.fresnel(x)[1] assert_mpmath_equal(fresnelc, mpmath.fresnelc, [Arg()]) def test_gamma(self): assert_mpmath_equal(sc.gamma, _exception_to_nan(mpmath.gamma), [Arg()]) @dec.knownfailureif(True, "BUG: special.gammainc(1e20, 1e20) never returns") def test_gammainc(self): assert_mpmath_equal(sc.gammainc, _exception_to_nan( lambda z, b: mpmath.gammainc(z, b=b)/mpmath.gamma(z)), [Arg(a=0), Arg(a=0)]) @knownfailure_overridable() def test_gegenbauer(self): assert_mpmath_equal(sc.eval_gegenbauer, _exception_to_nan(mpmath.gegenbauer), [Arg(-1e3, 1e3), Arg(), Arg()]) def test_gegenbauer_int(self): # Redefine functions to deal with numerical + mpmath issues def gegenbauer(n, a, x): # Avoid overflow at large `a` (mpmath would need an even larger # dps to handle this correctly, so just skip this region) if abs(a) > 1e100: return np.nan # Deal with n=0, n=1 correctly; mpmath 0.17 doesn't do these # always correctly if n == 0: r = 1.0 elif n == 1: r = 2*a*x else: r = mpmath.gegenbauer(n, a, x) # Mpmath 0.17 gives wrong results (spurious zero) in some cases, so # compute the value by perturbing the result if float(r) == 0 and n <= 1-a and a < -1 and float(a) == int(float(a)): r = mpmath.gegenbauer(n, a + mpmath.mpf('1e-50'), x) # Differing overflow thresholds in scipy vs. mpmath if abs(r) > 1e270: return np.inf return r def sc_gegenbauer(n, a, x): r = sc.eval_gegenbauer(int(n), a, x) # Differing overflow thresholds in scipy vs. mpmath if abs(r) > 1e270: return np.inf return r assert_mpmath_equal(sc_gegenbauer, _exception_to_nan(gegenbauer), [IntArg(0, 100), Arg(), Arg()], n=40000, dps=100, ignore_inf_sign=True) # Check the small-x expansion assert_mpmath_equal(sc_gegenbauer, _exception_to_nan(gegenbauer), [IntArg(0, 100), Arg(), FixedArg(np.logspace(-30, -4, 30))], dps=100, ignore_inf_sign=True) @knownfailure_overridable() def test_gegenbauer_complex(self): assert_mpmath_equal(lambda n, a, x: sc.eval_gegenbauer(int(n), a.real, x), _exception_to_nan(mpmath.gegenbauer), [IntArg(0, 100), Arg(), ComplexArg()]) @nonfunctional_tooslow def test_gegenbauer_complex_general(self): assert_mpmath_equal(lambda n, a, x: sc.eval_gegenbauer(n.real, a.real, x), _exception_to_nan(mpmath.gegenbauer), [Arg(-1e3, 1e3), Arg(), ComplexArg()]) def test_hankel1(self): assert_mpmath_equal(sc.hankel1, _exception_to_nan(lambda v, x: mpmath.hankel1(v, x, **HYPERKW)), [Arg(-1e20, 1e20), Arg()]) def test_hankel2(self): assert_mpmath_equal(sc.hankel2, _exception_to_nan(lambda v, x: mpmath.hankel2(v, x, **HYPERKW)), [Arg(-1e20, 1e20), Arg()]) @knownfailure_overridable("issues at intermediately large orders") def test_hermite(self): assert_mpmath_equal(lambda n, x: sc.eval_hermite(int(n), x), _exception_to_nan(mpmath.hermite), [IntArg(0, 10000), Arg()]) # hurwitz: same as zeta @nonfunctional_tooslow def test_hyp0f1(self): assert_mpmath_equal(sc.hyp0f1, _exception_to_nan(lambda a, x: mpmath.hyp0f1(a, x, **HYPERKW)), [Arg(), Arg()]) @nonfunctional_tooslow def test_hyp0f1_complex(self): assert_mpmath_equal(lambda a, z: sc.hyp0f1(a.real, z), _exception_to_nan(lambda a, x: mpmath.hyp0f1(a, x, **HYPERKW)), [Arg(), ComplexArg()]) @knownfailure_overridable() def test_hyp1f1(self): assert_mpmath_equal(_inf_to_nan(sc.hyp1f1), _exception_to_nan(lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)), [Arg(-1e5, 1e5), Arg(-1e5, 1e5), Arg()], n=2000) @knownfailure_overridable() def test_hyp1f1_complex(self): assert_mpmath_equal(_inf_to_nan(lambda a, b, x: sc.hyp1f1(a.real, b.real, x)), _exception_to_nan(lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)), [Arg(-1e3, 1e3), Arg(-1e3, 1e3), ComplexArg()], n=2000) @knownfailure_overridable() def test_hyp1f2(self): def hyp1f2(a, b, c, x): v, err = sc.hyp1f2(a, b, c, x) if abs(err) > max(1, abs(v)) * 1e-7: return np.nan return v assert_mpmath_equal(hyp1f2, _exception_to_nan(lambda a, b, c, x: mpmath.hyp1f2(a, b, c, x, **HYPERKW)), [Arg(), Arg(), Arg(), Arg()], n=20000) @knownfailure_overridable() def test_hyp2f0(self): def hyp2f0(a, b, x): v, err = sc.hyp2f0(a, b, x, 1) if abs(err) > max(1, abs(v)) * 1e-7: return np.nan return v assert_mpmath_equal(hyp2f0, lambda a, b, x: _time_limited(0.1)(_exception_to_nan(_trace_args(mpmath.hyp2f0)))( a, b, x, **HYPERKW), [Arg(), Arg(), Arg()]) @knownfailure_overridable("spurious inf (or inf with wrong sign) for some argument values") def test_hyp2f1(self): assert_mpmath_equal(sc.hyp2f1, _exception_to_nan(lambda a, b, c, x: mpmath.hyp2f1(a, b, c, x, **HYPERKW)), [Arg(), Arg(), Arg(), Arg()]) @nonfunctional_tooslow def test_hyp2f1_complex(self): # Scipy's hyp2f1 seems to have performance and accuracy problems assert_mpmath_equal(lambda a, b, c, x: sc.hyp2f1(a.real, b.real, c.real, x), _exception_to_nan(lambda a, b, c, x: mpmath.hyp2f1(a, b, c, x, **HYPERKW)), [Arg(-1e2, 1e2), Arg(-1e2, 1e2), Arg(-1e2, 1e2), ComplexArg()], n=10) @knownfailure_overridable() def test_hyperu(self): assert_mpmath_equal(sc.hyperu, _exception_to_nan(lambda a, b, x: mpmath.hyperu(a, b, x, **HYPERKW)), [Arg(), Arg(), Arg()]) def test_j0(self): # The Bessel function at large arguments is j0(x) ~ cos(x + phi)/sqrt(x) # and at large arguments the phase of the cosine loses precision. # # This is numerically expected behavior, so we compare only up to # 1e8 = 1e15 * 1e-7 assert_mpmath_equal(sc.j0, mpmath.j0, [Arg(-1e3, 1e3)]) assert_mpmath_equal(sc.j0, mpmath.j0, [Arg(-1e8, 1e8)], rtol=1e-5) def test_j1(self): # See comment in test_j0 assert_mpmath_equal(sc.j1, mpmath.j1, [Arg(-1e3, 1e3)]) assert_mpmath_equal(sc.j1, mpmath.j1, [Arg(-1e8, 1e8)], rtol=1e-5) @knownfailure_overridable() def test_jacobi(self): assert_mpmath_equal(sc.eval_jacobi, _exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)), [Arg(), Arg(), Arg(), Arg()]) assert_mpmath_equal(lambda n, b, c, x: sc.eval_jacobi(int(n), b, c, x), _exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)), [IntArg(), Arg(), Arg(), Arg()]) def test_jacobi_int(self): # Redefine functions to deal with numerical + mpmath issues def jacobi(n, a, b, x): # Mpmath does not handle n=0 case always correctly if n == 0: return 1.0 return mpmath.jacobi(n, a, b, x) assert_mpmath_equal(lambda n, a, b, x: sc.eval_jacobi(int(n), a, b, x), lambda n, a, b, x: _exception_to_nan(jacobi)(n, a, b, x, **HYPERKW), [IntArg(), Arg(), Arg(), Arg()], n=20000, dps=50) def test_kei(self): def kei(x): if x == 0: # work around mpmath issue at x=0 return -pi/4 return _exception_to_nan(mpmath.kei)(0, x, **HYPERKW) assert_mpmath_equal(sc.kei, kei, [Arg(-1e30, 1e30)], n=1000) def test_ker(self): assert_mpmath_equal(sc.ker, _exception_to_nan(lambda x: mpmath.ker(0, x, **HYPERKW)), [Arg(-1e30, 1e30)], n=1000) @nonfunctional_tooslow def test_laguerre(self): assert_mpmath_equal(_trace_args(sc.eval_laguerre), lambda n, x: _exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW), [Arg(), Arg()]) def test_laguerre_int(self): assert_mpmath_equal(lambda n, x: sc.eval_laguerre(int(n), x), lambda n, x: _exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW), [IntArg(), Arg()], n=20000) def test_lambertw(self): assert_mpmath_equal(lambda x, k: sc.lambertw(x, int(k)), lambda x, k: mpmath.lambertw(x, int(k)), [Arg(), IntArg(0, 10)]) @nonfunctional_tooslow def test_legendre(self): assert_mpmath_equal(sc.eval_legendre, mpmath.legendre, [Arg(), Arg()]) def test_legendre_int(self): assert_mpmath_equal(lambda n, x: sc.eval_legendre(int(n), x), lambda n, x: _exception_to_nan(mpmath.legendre)(n, x, **HYPERKW), [IntArg(), Arg()], n=20000) # Check the small-x expansion assert_mpmath_equal(lambda n, x: sc.eval_legendre(int(n), x), lambda n, x: _exception_to_nan(mpmath.legendre)(n, x, **HYPERKW), [IntArg(), FixedArg(np.logspace(-30, -4, 20))]) def test_legenp(self): def lpnm(n, m, z): try: v = sc.lpmn(m, n, z)[0][-1,-1] except ValueError: return np.nan if abs(v) > 1e306: # harmonize overflow to inf v = np.inf * np.sign(v.real) return v def lpnm_2(n, m, z): v = sc.lpmv(m, n, z) if abs(v) > 1e306: # harmonize overflow to inf v = np.inf * np.sign(v.real) return v def legenp(n, m, z): if (z == 1 or z == -1) and int(n) == n: # Special case (mpmath may give inf, we take the limit by # continuity) if m == 0: if n < 0: n = -n - 1 return mpmath.power(mpmath.sign(z), n) else: return 0 if abs(z) < 1e-15: # mpmath has bad performance here return np.nan typ = 2 if abs(z) < 1 else 3 v = _exception_to_nan(mpmath.legenp)(n, m, z, type=typ) if abs(v) > 1e306: # harmonize overflow to inf v = mpmath.inf * mpmath.sign(v.real) return v assert_mpmath_equal(lpnm, legenp, [IntArg(-100, 100), IntArg(-100, 100), Arg()]) assert_mpmath_equal(lpnm_2, legenp, [IntArg(-100, 100), Arg(-100, 100), Arg(-1, 1)]) def test_legenp_complex_2(self): def clpnm(n, m, z): try: return sc.clpmn(m.real, n.real, z, type=2)[0][-1,-1] except ValueError: return np.nan def legenp(n, m, z): if abs(z) < 1e-15: # mpmath has bad performance here return np.nan return _exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=2) # mpmath is quite slow here x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3]) y = np.array([-1e3, -0.5, 0.5, 1.3]) z = (x[:,None] + 1j*y[None,:]).ravel() assert_mpmath_equal(clpnm, legenp, [FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg(z)], rtol=1e-6, n=500) def test_legenp_complex_3(self): def clpnm(n, m, z): try: return sc.clpmn(m.real, n.real, z, type=3)[0][-1,-1] except ValueError: return np.nan def legenp(n, m, z): if abs(z) < 1e-15: # mpmath has bad performance here return np.nan return _exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=3) # mpmath is quite slow here x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3]) y = np.array([-1e3, -0.5, 0.5, 1.3]) z = (x[:,None] + 1j*y[None,:]).ravel() assert_mpmath_equal(clpnm, legenp, [FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg(z)], rtol=1e-6, n=500) @knownfailure_overridable("apparently picks wrong function at |z| > 1") def test_legenq(self): def lqnm(n, m, z): return sc.lqmn(m, n, z)[0][-1,-1] def legenq(n, m, z): if abs(z) < 1e-15: # mpmath has bad performance here return np.nan return _exception_to_nan(mpmath.legenq)(n, m, z, type=2) assert_mpmath_equal(lqnm, legenq, [IntArg(0, 100), IntArg(0, 100), Arg()]) @nonfunctional_tooslow def test_legenq_complex(self): def lqnm(n, m, z): return sc.lqmn(int(m.real), int(n.real), z)[0][-1,-1] def legenq(n, m, z): if abs(z) < 1e-15: # mpmath has bad performance here return np.nan return _exception_to_nan(mpmath.legenq)(int(n.real), int(m.real), z, type=2) assert_mpmath_equal(lqnm, legenq, [IntArg(0, 100), IntArg(0, 100), ComplexArg()], n=100) @knownfailure_overridable() def test_pcfd(self): def pcfd(v, x): return sc.pbdv(v, x)[0] assert_mpmath_equal(pcfd, _exception_to_nan(lambda v, x: mpmath.pcfd(v, x, **HYPERKW)), [Arg(), Arg()]) @knownfailure_overridable("it's not the same as the mpmath function --- maybe different definition?") def test_pcfv(self): def pcfv(v, x): return sc.pbvv(v, x)[0] assert_mpmath_equal(pcfv, lambda v, x: _time_limited()(_exception_to_nan(mpmath.pcfv))(v, x, **HYPERKW), [Arg(), Arg()], n=1000) @knownfailure_overridable() def test_pcfw(self): def pcfw(a, x): return sc.pbwa(a, x)[0] assert_mpmath_equal(pcfw, lambda v, x: _time_limited()(_exception_to_nan(mpmath.pcfw))(v, x, **HYPERKW), [Arg(), Arg()], dps=50, n=1000) @knownfailure_overridable("issues at large arguments (atol OK, rtol not) and <eps-close to z=0") def test_polygamma(self): assert_mpmath_equal(sc.polygamma, _time_limited()(_exception_to_nan(mpmath.polygamma)), [IntArg(0, 1000), Arg()]) def test_rgamma(self): def rgamma(x): if x < -8000: return np.inf else: v = mpmath.rgamma(x) return v assert_mpmath_equal(sc.rgamma, rgamma, [Arg()], ignore_inf_sign=True) def test_rf(self): def mppoch(a, m): # deal with cases where the result in double precision # hits exactly a non-positive integer, but the # corresponding extended-precision mpf floats don't if float(a + m) == int(a + m) and float(a + m) <= 0: a = mpmath.mpf(a) m = int(a + m) - a return mpmath.rf(a, m) assert_mpmath_equal(sc.poch, mppoch, [Arg(), Arg()], dps=400) def test_shi(self): def shi(x): return sc.shichi(x)[0] assert_mpmath_equal(shi, mpmath.shi, [Arg()]) # check asymptotic series cross-over assert_mpmath_equal(shi, mpmath.shi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])]) def test_si(self): def si(x): return sc.sici(x)[0] assert_mpmath_equal(si, mpmath.si, [Arg()]) def test_spherharm(self): def spherharm(l, m, theta, phi): if m > l: return np.nan return sc.sph_harm(m, l, phi, theta) assert_mpmath_equal(spherharm, mpmath.spherharm, [IntArg(0, 100), IntArg(0, 100), Arg(a=0, b=pi), Arg(a=0, b=2*pi)], atol=1e-8, n=6000, dps=150) def test_struveh(self): assert_mpmath_equal(sc.struve, _exception_to_nan(mpmath.struveh), [Arg(-1e4, 1e4), Arg(0, 1e4)], rtol=5e-10) def test_struvel(self): def mp_struvel(v, z): if v < 0 and z < -v and abs(v) > 1000: # larger DPS needed for correct results old_dps = mpmath.mp.dps try: mpmath.mp.dps = 300 return mpmath.struvel(v, z) finally: mpmath.mp.dps = old_dps return mpmath.struvel(v, z) assert_mpmath_equal(sc.modstruve, _exception_to_nan(mp_struvel), [Arg(-1e4, 1e4), Arg(0, 1e4)], rtol=5e-10, ignore_inf_sign=True) def test_zeta(self): assert_mpmath_equal(sc.zeta, _exception_to_nan(mpmath.zeta), [Arg(a=1, b=1e10, inclusive_a=False), Arg(a=0, inclusive_a=False)]) def test_boxcox(self): def mp_boxcox(x, lmbda): x = mpmath.mp.mpf(x) lmbda = mpmath.mp.mpf(lmbda) if lmbda == 0: return mpmath.mp.log(x) else: return mpmath.mp.powm1(x, lmbda) / lmbda assert_mpmath_equal(sc.boxcox, _exception_to_nan(mp_boxcox), [Arg(a=0, inclusive_a=False), Arg()], n=200, dps=60, rtol=1e-13) def test_boxcox1p(self): def mp_boxcox1p(x, lmbda): x = mpmath.mp.mpf(x) lmbda = mpmath.mp.mpf(lmbda) one = mpmath.mp.mpf(1) if lmbda == 0: return mpmath.mp.log(one + x) else: return mpmath.mp.powm1(one + x, lmbda) / lmbda assert_mpmath_equal(sc.boxcox1p, _exception_to_nan(mp_boxcox1p), [Arg(a=-1, inclusive_a=False), Arg()], n=200, dps=60, rtol=1e-13) if __name__ == "__main__": run_module_suite()
pepitogithub/PythonScripts
refs/heads/master
Fractaloides.py
1
import os import time import pygame import Aplicacion from fractions import gcd from datetime import datetime from paletaColores import Paleta from Menu import * from Variable import * from validador import * from Funcionesfractales import * #------------------------------------------------ #--------------- TODO --------------------------- #------------------------------------------------ # 1) EJECUCION EN PARARELO # 2) IMPLEMENTAR LOGGING # 3) MOSTRAR TIEMPOS DE EJECUCION: # 3.1) CADA CIERTA CANTIDAD DE TIEMPO MOSTRAR EL PROGRESO. # 3.2) CADA UN 25% LOGUEAR PROGRESO LOGRADO. # 4) TESTER DE VEOLICDAD # 4.1) CREAR UNA FUNCION QUE DEVUELVA SIEMPRE MAXIMAS ITERACIONES- PARA UN SET FIJO DE VARIABLES TOMAR TIEMPO. # 4.2) IDEM 4.1) PERO CON FUNCION QUE DEVUELVA MINIMAS ITERACIONES. # 5) SCHEDULER - PARA CREAR LISTAS DE EJECUCION. #------------------------------------------------ #------------------------------------------------ #------------------------------------------------ def graficarPorcion(self,funcion,desdex,hastax,alto,planoComplejo,grilla,params): pixelArrayTemporal = [] for x in range(desdex,hastax): pixelArrayTemporal.append([]) for y in range(0,alto): # valor = funcion.calcular(planoComplejo[x][y],params) # pixelArrayTemporal[x-desdex].append(grilla[valor-1]) print "asd" return pixelArrayTemporal class Fractales(Aplicacion.Aplicacion): """ FILLME """ def iniciar(self,**args): # self.listaFunciones= Funcion.listado # Diccionario con el listado de funciones. self.listaFunciones = Funciones() # Acciones scheduleables. self.scheduleables =[ ["graficar",self.graficar], ["foto",self.foto], ["sesion",self.sesion], ["ascii",self.ascii], ["modificarX",self.modifEjeX], ["modificarY",self.modifEjeY], ["modificarZoom",self.modifZoom], ["modificarParametro",self.modifParametro], ["modificarNorma",self.modifNorma], ["modificarExponente",self.modifExponente], ["modificarColor",self.modifListaColor], ["modificarFuncion",self.modifFuncion], ["volver"] ] # variables de programa self.xmin = 0.0 # minimo valor del plano complejo, en el eje x self.xmax = 0.0 # maximo valor del plano complejo, en el eje x self.ymin = 0j # minimo valor del plano complejo, en el eje y self.ymax = 0j # maximo valor del plano complejo, en el eje y self.planoComplejo = [] # matriz donde se guardan todos los puntos del plano complejo. self.sesionPasosMaximos = 256 # variable para definir el maximo nivel de iteraciones en la funcion sesion. self.formatos = [".jpg",".bmp",".png"] # Formatos disponibles para imagenes. self.elapsedTime = 0.0 #variables de usuario self.vars["funcion"] = Variable(self.listaFunciones.obtenerFuncion(args["funcion"]),self.modifFuncion,orden=0) self.vars["parametro"] = Variable(args["parametro"],self.modifGenerico,flags={"iterable":True},orden=1) self.vars["norma"] = Variable(args["norma"],self.modifGenerico,minimo=0.0,flags={"iterable":True},orden=2) self.vars["exponente"] = Variable(args["exponente"],self.modifGenerico,flags={"iterable":True},orden=3) self.vars["resolucion"] = Variable(args["resolucion"],self.modifResolucion,minimo=2,flags={"iterable":True},orden=4) self.vars["listaColores"] = Variable(args["listaColores"],self.modifListaColor,orden=5) self.vars["zoom"] = Variable(args["zoom"],self.modifZoom,minimo=0,flags={"iterable":True},orden=6) self.vars["deltax"] = Variable(args["deltax"],self.modifEjeX,flags={"iterable":True},orden=7) self.vars["deltay"] = Variable(args["deltay"],self.modifEjeY,flags={"iterable":True},orden=8) self.vars["extension"] = Variable(args["extension"],self.modifValoresPosibles,valoresPosibles=self.formatos,orden=9) self.vars["asciiFile"] = Variable("asciiOut.txt",self.modifGenerico,orden=10) self.vars["outFile"] = Variable(self.appNombre + "Out.txt",self.modifPath,orden=11) #Paleta de colores self.paleta = Paleta(self.vars["listaColores"].valor,self.vars["resolucion"].valor) #Paleta de colores para manejar el pintado de las funciones. #Items del Menu self.agregarMenu(0,Leaf("graficar","genera la salida en la pantalla",self.graficar)) self.agregarMenu(1,Leaf("toAscii","",self.ascii)) self.agregarMenu(2,Leaf("Foto","Foto Tomada",self.foto)) self.agregarMenu(3,Leaf("Sesion","Secuencia de fotos iterando algunas variables",self.sesion)) self.agregarMenu(4,Nodo("Scheduler","Ejecucion por lotes",Leaf("Ejecutar Scheduler","",self.ejecutarScheduler),Leaf("Cargar Scheduler","",self.cargarScheduler))) self.agregarMenu(5,Leaf("tiempos de ejecucion","Mediciones de tiempo de la ultima ejecucion",self.tiempos)) #Funciones que se ejecutan luego de llamar a Modificar. self.agregarPostFunciones(self.calcularBounds,self.graficar,self.foto) self.actualizarTamanoPantalla() self.calcularBounds() self.graficar() def calcularBounds(self): # Este es el mapeo de pixeles al plano complejo. # por ej: X/[0,199] -> [-4,4] # Y/[0,149] -> [-3i,3i] # tambien incluye los corrimientos y el zoom. # xmin = (-(ancho/mcd) * zoom) + deltax # xmax = ((ancho/mcd) * zoom) + deltax # ymin = ((alto/mcd) * zoom) + deltay # ymax = ((-alto/mcd) * zoom) + deltay mcd = gcd(self.ancho,self.alto) self.xmin = (-(self.ancho/mcd))*self.vars["zoom"].valor + self.vars["deltax"].valor self.xmax = (self.ancho/mcd)*self.vars["zoom"].valor + self.vars["deltax"].valor self.ymin = (-(self.alto/mcd))*self.vars["zoom"].valor + self.vars["deltay"].valor self.ymax = (self.alto/mcd)*self.vars["zoom"].valor + self.vars["deltay"].valor deltax = abs(self.xmin - self.xmax) / (float(self.ancho) - 1) deltay = abs(self.ymin - self.ymax) / (float(self.alto) - 1) self.planoComplejo = [] for x in range(0,self.ancho): self.planoComplejo.append([]) for y in range(0,self.alto): self.planoComplejo[x].append(self.convertirPC(x,y,deltax,deltay)) def cargarScheduler(self): opciones = [schedule[0] for schedule in self.scheduleables] self.enumerarLista(opciones) eleccion = validador.seleccionar(opciones) if(eleccion != "volver"): print "schedule!" def ejecutarScheduler(self): print "schedule!" def tiempos(self,*tiempoTotal): if (len(tiempoTotal) == 0): tiempoTotal = [self.elapsedTime] print "-----------------------------------------------------------------" print "Tiempo Total: " + str(tiempoTotal[0])[:12] print "Funcion: " + str(self.vars["funcion"].valor) print "dimensiones: " + str(self.vars["ratio"].valor[0]*self.vars["factorRatio"].valor) + "x" + str(self.vars["ratio"].valor[1]*self.vars["factorRatio"].valor) print "resolucion: " + str(self.vars["resolucion"].valor) print "-----------------------------------------------------------------" self.log("-----------------------------------------------------------------") self.log("Tiempo Total: " + str(tiempoTotal[0])[:12]) self.log("Funcion: " + str(self.vars["funcion"].valor)) self.log("dimensiones: " + str(self.vars["ratio"].valor[0]*self.vars["factorRatio"].valor) + "x" + str(self.vars["ratio"].valor[1]*self.vars["factorRatio"].valor)) self.log("resolucion: " + str(self.vars["resolucion"].valor)) self.log("-----------------------------------------------------------------") def convertirPC(self,x,y,deltax,deltay): # convertir de pixel a complejo # def convertirPC(self,x,y):# convertir de pixel a complejo # equis = self.xmin + (x * (abs(self.xmin - self.xmax) / (float(self.ancho) - 1))) # ygrie = self.ymin + (y * (abs(self.ymin - self.ymax) / (float(self.alto) - 1))) equis = self.xmin + (x * deltax) ygrie = self.ymin + (y * deltay) return complex(equis,ygrie) def graficar(self): # primero que todo se obtienen los valores concretos para los parametros de la funcion a ejecutar # luego para cada pixel de la pantalla: # primero se convierte el pixel con coordenadas [x,y] a su valor en el plano complejo. # despues se ejecuta la funcion seteada para ese punto y los parametros seteados(obtenidos en el primer paso). # el color se obtiene desde la grilla de la paleta de colores. # por ultimo el pixel se acutaliza con el nuevo color. #recojo los parametros para pasarle a la funcion params = [] for key in self.vars["funcion"].valor.parametros: params.append(self.vars[key].valor) #medicion de tiempo startTime = time.time() cuarto = self.ancho/4 #Esto es para hacer el pasaje de pixel a complejo, esta aca para que no se hagan tantas cuentas en el loop. deltax = abs(self.xmin - self.xmax) / (float(self.ancho) - 1) deltay = abs(self.ymin - self.ymax) / (float(self.alto) - 1) # version 1 #loop Principal del metodo, barre todo el plano calculando las intensidades # for x in range(0,self.ancho): # for y in range(0,self.alto): # complejo = self.convertirPC(x,y,deltax,deltay) # # complejo = self.convertirPC(x,y) # valor = self.vars["funcion"].valor.calcular(complejo,params) # self.pixelArray[x,y] = self.paleta.grilla[valor-1] #en grilla tengo el valor de color para la intensidad=valor-1 # version 2 # [[self.cargarPixelArray(x,y,self.paleta.grilla[self.vars["funcion"].valor.calcular(self.convertirPC(x,y,deltax,deltay),params) - 1]) for y in rangoy] for x in rangox] # version 3 # rangox = range(0,self.ancho) # rangoy = range(0,self.alto) # [[self.cargarPixelArray(x,y,self.paleta.grilla[self.vars["funcion"].valor.calcular(self.planoComplejo[x][y],params) - 1]) for y in rangoy] for x in rangox] # version 3 con fors por ahora esta es la mas rapida. for x in range(0,self.ancho): for y in range(0,self.alto): valor = self.vars["funcion"].valor.calcular(self.planoComplejo[x][y],params) self.pixelArray[x,y] = self.paleta.grilla[valor-1] #en grilla tengo el valor de color para la intensidad=valor-1 #medicion de tiempo endTime = time.time() self.elapsedTime = endTime-startTime self.actualizarPantalla() #self.foto() self.tiempos(self.elapsedTime) def cargarPixelArray(self,x,y,color): self.pixelArray[x,y] = color def ascii(self): #setear resolucion a 16 aux = self.vars["resolucion"].valor self.modifResolucion("resolucion",16) caracteres = [".",".",".",".",".",".",".",".",".",".",".",".",".",".",".","#"] archivo = open(self.vars["filesPath"].valor + "\\" + self.vars["outFile"].valor,"w") variables = str(self.vars) timestamp = str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) archivo.write(variables + "\n"+ timestamp + "\n") self.log(variables) params = [] for key in self.vars["funcion"].valor.parametros: params.append(self.vars[key].valor) # for x in range(0,self.ancho): # for y in range(0,self.alto): # complejo = self.convertirPC(y,x) # valor = self.vars["funcion"].valor.calcular(complejo,params) # archivo.write(caracteres[valor-1]) # archivo.write("\n") for x in range(0,self.ancho): for y in range(0,self.alto): valor = self.vars["funcion"].valor.calcular(self.planoComplejo[y][x],params) archivo.write(caracteres[valor-1]) archivo.write("\n") self.modifResolucion("resolucion",aux) archivo.close() def foto(self): panta = pantall = self.screen.copy() params = str(self.vars["funcion"].valor) + "-p" + str(self.vars["parametro"].valor) +"-e" + str(self.vars["exponente"].valor) +"-n" + str(self.vars["norma"].valor) +"-dx" + str(self.vars["deltax"].valor) +"-dy" + str(self.vars["deltay"].valor) +"-z" + str(self.vars["zoom"].valor) +"-r" + str(self.vars["resolucion"].valor) nombre = self.generarNombreValido(self.vars["filesPath"].valor + "\\"+ params + self.vars["extension"].valor) pygame.image.save(panta, nombre) self.log("foto Tomada",nombre,str(self.vars)) def sesion(self): print "Sesion de fotos" volver = False seguir = True primera = True listado = [] # en listado me voy a guardar tuplas de 3, donde cada una tiene la key de la variable a iterar, valor incial y el salto que se da. # listado = [[norma,2,1],[parametro,3j,1+1j]] pasos = 0 # lista por comprension de variables iterables # Ordena la lista de variables por su flag de orden, y se queda con las que tengan flag iterable y sea true. disponibles = [variable[0] for variable in sorted(self.vars.items(),key=lambda x: x[1].orden) if "iterable" in variable[1].flags.keys() and variable[1].flags["iterable"]] while (seguir): print "variable a iterar?" self.enumerarLista(disponibles + ["Volver"]) variable = validador.seleccionar(disponibles + ["Volver"]) if(variable == "Volver"): seguir = False volver = True else: print "valor actual:" print str(self.vars[variable].valor) + "\n" print "desde:" # desde = validador.ingresar(type(self.vars[variable].valor),validador.entre,self.vars[variable].minimo,self.vars[variable].maximo) desde = validador.ingresarVariable(self.vars[variable]) print "hasta:" # hasta = validador.ingresar(type(self.vars[variable].valor),validador.entre,self.vars[variable].minimo,self.vars[variable].maximo) hasta = validador.ingresarVariable(self.vars[variable]) if(primera): print "en cuantos pasos:" pasos = validador.ingresar(int,validador.entre,2,self.sesionPasosMaximos) primera = False salto = (hasta - desde) / (pasos - 1) listado.append([variable, desde, salto]) disponibles.remove(variable) if (len(disponibles) == 0): seguir = False else: print "otra variable?" seguir = validador.ingresarSINO() if(not volver): # Genero el nombre de la Carpeta nombreCarpeta = self.vars["filesPath"].valor + "\\Sesion-" + str(self.vars["funcion"].valor) for item in listado: # item[1] + item[2] * (pasos-1) = HASTA! nombreCarpeta += "_" + str(item[0]) + str(item[1]) + "-" + str(item[1] + item[2] * (pasos-1)) nombreCarpeta = self.generarNombreValido(nombreCarpeta) if not os.path.isdir(nombreCarpeta): os.mkdir(nombreCarpeta) os.chdir(nombreCarpeta) #medicion de tiempo startTime = time.time() #Loop de la sesion for i in range(0,pasos): for key in listado: var = key[0] desdeaux = key[1] saltoaux = key[2] self.vars[var].modificador(var,desdeaux + (saltoaux * i)) # self.vars[var].valor = desdeaux + (saltoaux * i) self.graficar() self.foto() print str(i+1) + "/" + str(pasos) #medicion de tiempo endTime = time.time() self.log("-----------------------------------------------------------------") self.log("--- Sesion ------------------------------------------------------") print "-----------------------------------------------------------------" print "--- Sesion ------------------------------------------------------" self.tiempos(endTime - startTime) os.chdir(self.vars["filesPath"].valor) def modifNorma(self,key,*params): if(len(params) == 0): self.vars["norma"] = validador.ingresarVariable(self.vars["norma"]) def modifExponente(self,key,*params): if(len(params) == 0): self.vars["exponente"] = validador.ingresarVariable(self.vars["exponente"]) def modifParametro(self,key,*params): if(len(params) == 0): self.vars["parametro"] = validador.ingresarVariable(self.vars["parametro"]) def modifZoom(self,key,*params): if(len(params) == 0): self.vars["zoom"].valor = validador.ingresarVariable(self.vars["zoom"]) else: self.vars["zoom"].valor = float(params[0]) self.calcularBounds() def modifEjeX(self,key,*params): if(len(params) == 0): print "valores positivos dezplazan la imagen hacia izquierda y valores negativos hacia la derecha" print "corrimiento en x" self.vars["deltax"].valor = validador.ingresarVariable(self.vars["deltax"])# obtengo el delta en x else: self.vars["deltax"].valor = float(params[0]) self.calcularBounds() def modifEjeY(self,key,*params): if(len(params) == 0): print "valores positivos dezplazan la imagen hacia arriba y valores negativos hacia la abajo" print "corrimiento en y" self.vars["deltay"].valor = validador.ingresarVariable(self.vars["deltay"])# obtengo el delta en y else: self.vars["deltay"].valor = float(params[0]) self.calcularBounds() def modifColor(self,key,*params): if(len(params) == 0): self.vars[key].valor = self.pickColor() else: self.vars[key].valor = params[0] def pickColor(self): print "rojo:" r= validador.ingresar(int,validador.entre,0,255) print "verde:" g = validador.ingresar(int,validador.entre,0,255) print "azul:" b = validador.ingresar(int,validador.entre,0,255) return [r,g,b] def modifListaColor(self,key,*params): #Lista Colores if(len(params) == 0): self.vars["listaColores"].valor = [] print "-- Lista de colores--" print "Como armar la lista de colores:" print "En este modo se van a setear una cantidad de colores en puntos de la paleta, todos los puntos intermedios son interpolados linealmente." print "Para ello se eligen de a dos colores RGB y hasta donde abarca ese tramo." print "si la paleta no es llenada hasta el final, el ultimo color ingresado sera el que complete la misma, de igual modo si por error se ingresa un valor hasta mas grande que el que permite la paleta, este se trunca." print "resolucion:", self.vars["resolucion"].valor seguir = True previo = 0 while(seguir): print "Color Desde:" colord = self.pickColor() print "Color Hasta:" colorh = self.pickColor() print "Hasta:" hasta = validador.ingresar(int,validador.entre,previo,self.vars["resolucion"].valor) print "otro tramo?" resp = validador.ingresarSINO() if (resp): # SI quiero otro color if(hasta == self.vars["resolucion"].valor): seguir = False else: # NO quiero otro color if(hasta < self.vars["resolucion"].valor): hasta = self.vars["resolucion"].valor seguir = False color = [colord,colorh,hasta] self.vars["listaColores"].valor.append(color) # print self.vars["listaColores"].valor # self.paleta.setear(self.vars["listaColores"].valor) else: self.vars["listaColores"].valor = params[0] print self.vars["listaColores"].valor self.paleta.setear(self.vars["listaColores"].valor) def modifResolucion(self,key,*params): if(len(params) == 0): self.vars["resolucion"].valor = validador.ingresarVariable(self.vars["resolucion"]) else: self.vars["resolucion"].valor = int(params[0]) self.vars["listaColores"].valor = self.paleta.ajustarResolucion(self.vars["listaColores"].valor, self.vars["resolucion"].valor) def modifTamano(self,key,*params): #ajustar el tamano pantalla, hay que ingresar la relacion de aspecto. # por ejemplo, 4:3 factorRatio = 2 nos da una pantalla de 8 x 6 pixeles if(len(params) == 0): print "---relacion de aspecto de la pantalla---" print "se debe ingresar cuantos pixeles de ancho por cuantos de alto por ej 4:3, 16:9" print "Luego el factor multiplica ambos terminos (al 4 y al 3 por ej) para dar la resolucion final de la pantalla" print "ancho:" ancho = validador.ingresar(int,validador.mayor,0) print "alto:" alto = validador.ingresar(int,validador.mayor,0) else: ancho = int(params[0]) alto = int(params[1]) mcd = gcd(ancho,alto) self.vars["ratio"].valor[0] = ancho/mcd self.vars["ratio"].valor[1] = alto/mcd if(len(params) == 0): print "factor:" self.vars["factorRatio"].valor = validador.ingresarVariable(self.vars["factorRatio"]) else: self.vars["factorRatio"].valor = int(params[2]) def modifFuncion(self,key,*params): if(len(params) == 0): self.enumerarLista(self.listaFunciones.nombres+["volver"]) funcion = validador.seleccionar(self.listaFunciones.nombres + ["volver"]) if(funcion !="volver"): self.vars["funcion"].valor = self.listaFunciones.obtenerFuncion(funcion) else: self.vars["funcion"].valor = self.listaFunciones.obtenerFuncion(params[0]) def ayuda(self): self.espaciador() print "PROXIMAMENTE MANUAL DE INSTRUCCIONES E INTRODCCION A GEOMETRIA" print "FRACTAL." self.espaciador() def salirPrint(self): print "--- \\m/ ---" if __name__ == '__main__': a = Fractales("Fractaloides","2.0.0",True, ratio=[1,1], factorRatio=100, zoom=1.0, deltax=0.0, deltay=0.0, resolucion=15, norma=2.0, exponente=2.0+0j, parametro=0j, funcion="mandelbrot", colorFondo=[0,0,0], listaColores=[[[0,0,0],[0,0,255],15]], extension=".png") a.menuPrincipal() # Constantes divertidas: # --- (julia) (1+0j) # --- (julia) (0+1j) # --- (julia) (0.5+0.5j) # --- (julia) (-1.3+0.00525j) # --- (julia) (-0.72-0.196j) # --- (julia) (-0.8-0.2j) !!! # --- (julia) (0.4+0.3j) # --- (julia) (0.7+0.5j) # --- (juli4) (-0.8-0.2j) # --- (julia5) (0.718+0.5j) !!! # --- (julia5) (0.71898+0.5j) !!!
Dhivyap/ansible
refs/heads/devel
lib/ansible/module_utils/facts/system/fips.py
232
# Determine if a system is in 'fips' mode # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.module_utils.facts.utils import get_file_content from ansible.module_utils.facts.collector import BaseFactCollector class FipsFactCollector(BaseFactCollector): name = 'fips' _fact_ids = set() def collect(self, module=None, collected_facts=None): # NOTE: this is populated even if it is not set fips_facts = {} fips_facts['fips'] = False data = get_file_content('/proc/sys/crypto/fips_enabled') if data and data == '1': fips_facts['fips'] = True return fips_facts
JesusZapata/asdcust
refs/heads/10.0
asdcust/tests/__init__.py
1
# coding: utf-8 # Copyright 2016 Vauxoo (https://www.vauxoo.com) <info@vauxoo.com> # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
OpenEdgeComputing/elijah-openstack
refs/heads/master
dashboard/instances/panel.py
21
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.utils.translation import ugettext_lazy as _ import horizon from openstack_dashboard.dashboards.project import dashboard class Instances(horizon.Panel): name = _("Instances") slug = 'instances' dashboard.Project.register(Instances)
umbra2/Calc
refs/heads/master
calc_0.1.3.3.py
1
""" <one line to give the program's name and a brief idea of what it does.> Copyright (C) 2015 Sadovnikov Dmitriy Pavlovich mail: umbra2@mail.ru This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ # -*- coding: cp1251 -*- import time """Описание функций""" def ver(): print 'ver 0.1.3.3' def lang(): #функция выбора языка """Английский массив""" eng_text = ['Enter languge(Russian - 1, English - 2)', 'Enter X and Y', 'Enter menu number', 'To continue press 2, to EXIT press 1', 'Some thing is wrong... try agane and press 2, or press 1 to EXIT', 'Answare = ', ' change the languge press 3', 'Press ENTER to exit', 'ver 0.1.4'] """Русский массив""" rus_text = ["Выбите язык(Русский - 1, Английский - 2)", "Введите X и Y", "Выберите пункт меню", "Для продолжения нажмите 2, для ВЫХОДА нажмите 1", "Что-то случилось... для продолжения нажмите 2, или 1 для ВЫХОДА", "Ответ = ", "для смены языка нажмите 3"] def choose_lang(int_lang, eng_text, rus_text): int_lang = input() if int_lang == 1: text = rus_text elif int_lang == 2: text = eng_text else: print 'error/ошибка' choose_lang(int_lang, eng_text, rus_text) return text int_lang = 0 print eng_text[0] print rus_text[0] text = choose_lang(int_lang, eng_text, rus_text) return text def menu(text): #функция, показывающая меню калькулятора print text[2] print '1)x+y' print '2)x-y' print '3)x*y' print '4)x/y' def calc(x,y,text): #функция, выполняющая вычисления, в соответствии с выбранным пользователем пунктом oper = int(input()) if (oper == 1): ans = x + y elif oper == 2: ans = x-y elif oper == 3: ans = x*y elif oper == 4: ans = x/y else: error(text) print text[5], ans def enter_num(text): #функция ввода X и Y print text[1] x = float(input()) y = float(input()) if y == 0 or x >= 4294967296 or y >= 4294967296: error(text) enter_num(text) else: prog_calc(x,y,text) def error(text): #функция вывода ошибки print text[4], text[6] err = input() if err == 2: enter_num(text) elif err == 3: prog() def exit(text): #функция выхода print text[3], text[6] exi = input() if exi == 2: enter_num(text) elif exi == 3: prog() elif exi == 1: pass def prog_calc(x,y,text): #функция калькулятора menu(text) calc(x,y,text) exit(text) def prog(): #тело программы text = lang() enter_num(text) """Конец описания функций""" ver() prog()
MartinThoma/PyMySQL
refs/heads/master
pymysql/tests/test_optionfile.py
31
from pymysql.optionfile import Parser from unittest import TestCase from pymysql._compat import PY2 try: from cStringIO import StringIO except ImportError: from io import StringIO __all__ = ['TestParser'] _cfg_file = (r""" [default] string = foo quoted = "bar" single_quoted = 'foobar' """) class TestParser(TestCase): def test_string(self): parser = Parser() if PY2: parser.readfp(StringIO(_cfg_file)) else: parser.read_file(StringIO(_cfg_file)) self.assertEqual(parser.get("default", "string"), "foo") self.assertEqual(parser.get("default", "quoted"), "bar") self.assertEqual(parser.get("default", "single_quoted"), "foobar")
collective/eden
refs/heads/master
private/templates/CRMT/controllers.py
2
# -*- coding: utf-8 -*- from urllib import urlencode try: import json # try stdlib (Python 2.6) except ImportError: try: import simplejson as json # try external module except: import gluon.contrib.simplejson as json # fallback to pure-Python module from gluon import current, URL from gluon.html import * #from gluon.storage import Storage from s3 import ICON, S3FilterForm, S3FilterString, S3OptionsFilter, FS, S3URLQuery, S3Summary, s3_auth_user_represent_name, S3CustomController THEME = "CRMT" # ============================================================================= class index(S3CustomController): """ Custom Home Page """ def __call__(self): T = current.T db = current.db s3db = current.s3db request = current.request response = current.response s3 = response.s3 output = {} output["title"] = response.title = current.deployment_settings.get_system_name() # Map auth = current.auth is_logged_in = auth.is_logged_in() callback = None if is_logged_in: # Show the User's Coalition's Polygon org_group_id = auth.user.org_group_id if org_group_id: # Lookup Coalition Name table = s3db.org_group row = db(table.id == org_group_id).select(table.name, limitby=(0, 1) ).first() if row: callback = '''S3.gis.show_map(); var layer,layers=S3.gis.maps.default_map.layers; for(var i=0,len=layers.length;i<len;i++){ layer=layers[i]; if(layer.name=='%s'){layer.setVisibility(true)}}''' % row.name if not callback: # Show all Coalition Polygons callback = '''S3.gis.show_map(); var layer,layers=S3.gis.maps.default_map.layers; for(var i=0,len=layers.length;i<len;i++){ layer=layers[i]; if(layer.name=='All Coalitions'){layer.setVisibility(true)}} ''' gis = current.gis config = gis.get_config() config.zoom = 8 map = gis.show_map(width=770, height=295, callback=callback, catalogue_layers=True, collapsed=True, save=False, ) output["map"] = map # Description of available data from s3db.cms import S3CMS for item in response.menu: item["cms"] = S3CMS.resource_content(module = item["c"], resource = item["f"]) # Site Activity Log resource = s3db.resource("s3_audit") resource.add_filter(FS("~.method") != "delete") orderby = "s3_audit.timestmp desc" list_fields = ["id", "method", "timestmp", "user_id", "tablename", "record_id", ] #current.deployment_settings.ui.customise_s3_audit() db.s3_audit.user_id.represent = s3_auth_user_represent_name list_id = "log" datalist, numrows, ids = resource.datalist(fields=list_fields, start=None, limit=4, list_id=list_id, orderby=orderby, layout=s3.render_log) # Placeholder filter_form = DIV(_class="filter_form") if numrows == 0: # Empty table or just no match? from s3 import S3CRUD table = resource.table if "deleted" in table: available_records = db(table.deleted != True) else: available_records = db(table._id > 0) if available_records.select(table._id, limitby=(0, 1)).first(): msg = DIV(S3CRUD.crud_string(resource.tablename, "msg_no_match"), _class="empty") else: msg = DIV(S3CRUD.crud_string(resource.tablename, "msg_list_empty"), _class="empty") data = msg else: # Render the list ajaxurl = URL(c="default", f="audit", args="datalist_f.dl") popup_url = URL(c="default", f="audit", args="datalist.popup") dl = datalist.html(ajaxurl=ajaxurl, pagesize=4, popup_url=popup_url, popup_title=T("Updates"), ) data = dl if is_logged_in and org_group_id: # Add a Filter filter_widgets = [S3OptionsFilter("user_id$org_group_id", label = "", # Can't just use "" as this is then omitted from rendering options = {"*": T("All"), org_group_id: T("My Community"), }, cols = 2, multiple = False, ), ] filter_submit_url = URL(c="default", f="index") filter_ajax_url = URL(c="default", f="audit", args=["filter.options"]) filter_form = S3FilterForm(filter_widgets, filter_manager = False, formstyle = filter_formstyle, clear = False, submit = True, ajax = True, url = filter_submit_url, ajaxurl = filter_ajax_url, _class = "filter-form", _id = "%s-filter-form" % list_id ) filter_form = filter_form.html(resource, request.get_vars, target=list_id, ) output["updates"] = data output["filter_form"] = filter_form # Add JavaScript appname = request.application debug = s3.debug scripts_append = s3.scripts.append if debug: # Infinite Scroll doesn't make sense here, but currently required by dataLists.js scripts_append("/%s/static/scripts/jquery.infinitescroll.js" % appname) scripts_append("/%s/static/scripts/jquery.viewport.js" % appname) scripts_append("/%s/static/scripts/S3/s3.dataLists.js" % appname) else: scripts_append("/%s/static/scripts/S3/s3.dataLists.min.js" % appname) self._view(THEME, "index.html") return output # ============================================================================= def filter_formstyle(row_id, label, widget, comment, hidden=False): """ Custom Formstyle for FilterForm @param row_id: HTML id for the row @param label: the label @param widget: the form widget @param comment: the comment @param hidden: whether the row should initially be hidden or not """ if hidden: _class = "advanced hide" else: _class= "" if label: return DIV(label, widget, _id=row_id, _class=_class) else: return DIV(widget, _id=row_id, _class=_class) # ============================================================================= class filters(S3CustomController): """ Custom controller to manage saved filters """ def __call__(self): """ Main entry point """ # Authorization (user must be logged in) auth = current.auth permissions = auth.permission if not auth.user: permissions.fail() fmt = permissions.format if current.request.env.request_method == "POST" and fmt != "dl": return self.update() pe_id = auth.user.pe_id s3 = current.response.s3 # Filter f = FS("pe_id") == pe_id s3.filter = f # List Fields current.s3db.configure("pr_filter", list_fields = ["title", "resource", "url", "query"], list_layout = self.render_filter, orderby = "resource") # Page length s3.dl_pagelength = 10 # Data list current.request.args = ["datalist.%s" % fmt] output = current.rest_controller("pr", "filter", list_ajaxurl = URL(f="index", args="filters.dl")) # Title and view T = current.T if fmt != "dl": output["title"] = T("Saved Filters") self._view(THEME, "filters.html") # Script for inline-editing of filter title options = {"cssclass": "jeditable-input", "tooltip": str(T("Click to edit"))} script = '''$('.jeditable').editable('%s',%s)''' % \ (URL(args="filters"), json.dumps(options)) s3.jquery_ready.append(script) return output # ------------------------------------------------------------------------- @classmethod def render_filter(cls, list_id, item_id, resource, rfields, record): """ Custom dataList item renderer for 'Saved Filters' @param list_id: the HTML ID of the list @param item_id: the HTML ID of the item @param resource: the S3Resource to render @param rfields: the S3ResourceFields to render @param record: the record as dict """ record_id = record["pr_filter.id"] item_class = "thumbnail" raw = record._row resource_name = raw["pr_filter.resource"] resource = current.s3db.resource(resource_name) T = current.T # Resource title crud_strings = current.response.s3.crud_strings.get(resource.tablename) if crud_strings: resource_name = crud_strings.title_list else: resource_name = string.capwords(resource.name, "_") # Filter title title = record["pr_filter.title"] # Filter Query and Summary URLs fstring = S3FilterString(resource, raw["pr_filter.query"]) query = fstring.represent() links = cls.summary_urls(resource, raw["pr_filter.url"], fstring.get_vars) actions = [] if links: if "map" in links: actions.append(A(ICON("globe"), _title=T("Open Map"), _href=links["map"])) if "table" in links: actions.append(A(ICON("list"), _title=T("Open Table"), _href=links["table"])) if "chart" in links: actions.append(A(ICON("list"), _title=T("Open Chart"), _href=links["chart"])) # Render the item item = DIV(DIV(DIV(actions, _class="action-bar fleft"), SPAN(T("%(resource)s Filter") % \ dict(resource=resource_name), _class="card-title"), DIV(A(ICON("delete"), _title=T("Delete this Filter"), _class="dl-item-delete"), _class="edit-bar fright"), _class="card-header"), DIV(DIV(H5(title, _id="filter-title-%s" % record_id, _class="media-heading jeditable"), DIV(query), _class="media-body"), _class="media"), _class=item_class, _id=item_id) return item # ------------------------------------------------------------------------- def update(self): """ Simple ajax method to update a saved filter title """ post_vars = current.request.post_vars record_id = post_vars["id"].rsplit("-", 1)[-1] new_title = post_vars["value"] if new_title: ftable = current.s3db.pr_filter success = current.db(ftable.id==record_id) \ .update(title=new_title) else: success = False if success: return new_title else: raise HTTP(400) # ------------------------------------------------------------------------- @staticmethod def summary_urls(resource, url, filters): """ Construct the summary tabs URLs for a saved filter. @param resource: the S3Resource @param url: the filter page URL @param filters: the filter GET vars """ links = {} if not url: return links get_vars = S3URLQuery.parse_url(url) get_vars.pop("t", None) get_vars.pop("w", None) get_vars.update(filters) list_vars = [] for (k, v) in get_vars.items(): if v is None: continue values = v if type(v) is list else [v] for value in values: if value is not None: list_vars.append((k, value)) base_url = url.split("?", 1)[0] summary_config = S3Summary._get_config(resource) tab_idx = 0 for section in summary_config: if section.get("common"): continue section_id = section["name"] tab_vars = list_vars + [("t", str(tab_idx))] links[section["name"]] = "%s?%s" % (base_url, urlencode(tab_vars)) tab_idx += 1 return links # END =========================================================================
RobinQuetin/CAIRIS-web
refs/heads/develop
cairis/cairis/DimensionListBox.py
1
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import wx import armid import ARM class DimensionListBox(wx.ListBox): def __init__(self,parent,winId,boxSize,dimensionTable,dp): wx.ListBox.__init__(self,parent,winId,size=boxSize) self.dbProxy = dp self.theDimensionTable = dimensionTable self.theDimMenu = wx.Menu() self.theDimMenu.Append(armid.DIMLIST_MENUADD_ID,'Add') self.theDimMenu.Append(armid.DIMLIST_MENUDELETE_ID,'Delete') self.Bind(wx.EVT_RIGHT_DOWN,self.OnRightDown) wx.EVT_MENU(self.theDimMenu,armid.DIMLIST_MENUADD_ID,self.onAddDimension) wx.EVT_MENU(self.theDimMenu,armid.DIMLIST_MENUDELETE_ID,self.onDeleteDimension) def OnRightDown(self,evt): self.PopupMenu(self.theDimMenu) def onAddDimension(self,evt): dimensions = self.dbProxy.getDimensionNames(self.theDimensionTable) from DimensionNameDialog import DimensionNameDialog dlg = DimensionNameDialog(self,self.theDimensionTable,dimensions,'Add') if (dlg.ShowModal() == armid.DIMNAME_BUTTONACTION_ID): for additionalDimension in dlg.dimensionNames(): self.Append(additionalDimension) def onDeleteDimension(self,evt): idx = self.GetSelection() if (idx == -1): errorText = 'No ' + self.theDimensionTable + ' selected' errorLabel = 'Delete ' + self.theDimensionTable dlg = wx.MessageDialog(self,errorText,errorLabel,wx.OK) dlg.ShowModal() dlg.Destroy() else: selectedValue = self.GetSelection() self.Delete(selectedValue)
step-up-health/step-up-backend
refs/heads/master
viz.py
1
import os import json import hashlib def get_data_path(): if not 'OPENSHIFT_DATA_DIR' in os.environ: return '../data/data.json' else: return os.path.join(os.environ['OPENSHIFT_DATA_DIR'], 'data.json') def get_data(): if not os.path.isfile(get_data_path()): with open(get_data_path(), 'w') as fh: fh.write("{}") data = json.load(open(get_data_path(), 'r')) return data def weird_hash(data): hashed = hashlib.md5() hashed.update(data.encode('utf-8')) digest = hashed.hexdigest() uppercase_offset = ord('A') - ord('0') for x in range(ord('0'), ord('9') + 1): digest = digest.replace(chr(x), chr(x + uppercase_offset)) return digest out = 'graph main {\n' dot_usernames = '' dot_relations = '' data = get_data() for k in data: user = data[k] username = user['username'] dot_usernames += weird_hash(k) + '[label="' + weird_hash(k)[:5] + '"]' + '\n' if not 'friends' in user: continue for friend in user['friends']: if not (weird_hash(friend) + '--' + weird_hash(k) + '\n') in dot_relations: dot_relations += weird_hash(k) + '--' + \ weird_hash(friend) + '\n' out += dot_usernames out += dot_relations out += '}' print(out)
theatrus/statsite
refs/heads/master
integ/test_stdin.py
2
import os import os.path import socket import textwrap import shutil import subprocess import contextlib import sys import tempfile import time import random try: import pytest except ImportError: print >> sys.stderr, "Integ tests require pytests!" sys.exit(1) @pytest.fixture def servers(request): "Returns a new APIHandler with a filter manager" # Create tmpdir and delete after tmpdir = tempfile.mkdtemp() # Make the command output = "%s/output" % tmpdir cmd = "cat >> %s" % output # Write the configuration config_path = os.path.join(tmpdir, "config.cfg") conf = """[statsite] flush_interval = 1 port = 0 udp_port = 0 parse_stdin = yes [sink_stream_default] command = %s [histogram1] prefix=has_hist min=10 max=90 width=10 """ % (cmd) open(config_path, "w").write(conf) # Start the process proc = subprocess.Popen(['./statsite', '-f', config_path], stdin=subprocess.PIPE) proc.poll() assert proc.returncode is None # Define a cleanup handler def cleanup(): try: proc.kill() proc.wait() shutil.rmtree(tmpdir) except: print proc pass request.addfinalizer(cleanup) # Return the connection return proc.stdin, output def wait_file(path, timeout=15): "Waits on a file to be make" start = time.time() while not os.path.isfile(path) and time.time() - start < timeout: time.sleep(0.1) if not os.path.isfile(path): raise Exception("Timed out waiting for file %s" % path) while os.path.getsize(path) == 0 and time.time() - start < timeout: time.sleep(0.1) class TestInteg(object): def test_kv(self, servers): "Tests adding kv pairs" server, output = servers server.write("tubez:100|kv\n") wait_file(output) now = time.time() out = open(output).read() assert out in ("kv.tubez|100.000000|%d\n" % now, "kv.tubez|100.000000|%d\n" % (now - 1)) def test_gauges(self, servers): "Tests adding gauges" server, output = servers server.write("g1:1|g\n") server.write("g1:50|g\n") wait_file(output) now = time.time() out = open(output).read() assert "gauges.g1|50.000000|%d\n" % now in out assert "gauges.g1.sum|51.000000|%d\n" % now in out assert "gauges.g1.mean|25.500000|%d\n" % now in out assert "gauges.g1.min|1.000000|%d\n" % now in out assert "gauges.g1.max|50.000000|%d\n" % now in out def test_gauges_delta(self, servers): "Tests adding gauges" server, output = servers server.write("gd:+50|g\n") server.write("gd:+50|g\n") wait_file(output) now = time.time() out = open(output).read() assert "gauges.gd|100.000000|%d\n" % now in out assert "gauges.gd.sum|100.000000|%d\n" % now in out assert "gauges.gd.mean|50.000000|%d\n" % now in out assert "gauges.gd.min|50.000000|%d\n" % now in out assert "gauges.gd.max|50.000000|%d\n" % now in out def test_gauges_delta_neg(self, servers): "Tests adding gauges" server, output = servers server.write("gd:-50|g\n") server.write("gd:-50|g\n") wait_file(output) now = time.time() out = open(output).read() assert "gauges.gd|-100.000000|%d\n" % now in out assert "gauges.gd.sum|-100.000000|%d\n" % now in out assert "gauges.gd.mean|-50.000000|%d\n" % now in out assert "gauges.gd.min|-50.000000|%d\n" % now in out assert "gauges.gd.max|-50.000000|%d\n" % now in out def test_counters(self, servers): "Tests adding kv pairs" server, output = servers server.write("foobar:100|c\n") server.write("foobar:200|c\n") server.write("foobar:300|c\n") wait_file(output) now = time.time() out = open(output).read() assert out in ("counts.foobar|600.000000|%d\n" % (now), "counts.foobar|600.000000|%d\n" % (now - 1)) def test_counters_sample(self, servers): "Tests adding kv pairs" server, output = servers server.write("foobar:100|c|@0.1\n") server.write("foobar:200|c|@0.1\n") server.write("foobar:300|c|@0.1\n") wait_file(output) now = time.time() out = open(output).read() assert out in ("counts.foobar|6000.000000|%d\n" % (now), "counts.foobar|6000.000000|%d\n" % (now - 1)) def test_meters(self, servers): "Tests adding kv pairs" server, output = servers msg = "" for x in xrange(100): msg += "noobs:%d|ms\n" % x server.write(msg) wait_file(output) out = open(output).read() assert "timers.noobs.mean|49.500000" in out assert "timers.noobs.lower|0.000000" in out assert "timers.noobs.upper|99.000000" in out assert "timers.noobs.count|100" in out assert "timers.noobs.median|49.000000" in out assert "timers.noobs.p95|95.000000" in out assert "timers.noobs.p99|99.000000" in out def test_histogram(self, servers): "Tests adding keys with histograms" server, output = servers msg = "" for x in xrange(100): msg += "has_hist.test:%d|ms\n" % x server.write(msg) wait_file(output) out = open(output).read() assert "timers.has_hist.test.histogram.bin_<10.00|10" in out assert "timers.has_hist.test.histogram.bin_10.00|10" in out assert "timers.has_hist.test.histogram.bin_20.00|10" in out assert "timers.has_hist.test.histogram.bin_30.00|10" in out assert "timers.has_hist.test.histogram.bin_40.00|10" in out assert "timers.has_hist.test.histogram.bin_50.00|10" in out assert "timers.has_hist.test.histogram.bin_60.00|10" in out assert "timers.has_hist.test.histogram.bin_70.00|10" in out assert "timers.has_hist.test.histogram.bin_80.00|10" in out assert "timers.has_hist.test.histogram.bin_>90.00|10" in out def test_sets(self, servers): "Tests adding kv pairs" server, output = servers server.write("zip:foo|s\n") server.write("zip:bar|s\n") server.write("zip:baz|s\n") wait_file(output) now = time.time() out = open(output).read() assert out in ("sets.zip|3|%d\n" % now, "sets.zip|3|%d\n" % (now - 1)) if __name__ == "__main__": sys.exit(pytest.main(args="-k TestInteg."))
chylli/phantomjs
refs/heads/master
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/file_lock.py
125
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """This class helps to lock files exclusively across processes.""" import logging import os import sys import time _log = logging.getLogger(__name__) class FileLock(object): def __init__(self, lock_file_path, max_wait_time_sec=20): self._lock_file_path = lock_file_path self._lock_file_descriptor = None self._max_wait_time_sec = max_wait_time_sec def _create_lock(self): if sys.platform == 'win32': import msvcrt msvcrt.locking(self._lock_file_descriptor, msvcrt.LK_NBLCK, 32) else: import fcntl fcntl.flock(self._lock_file_descriptor, fcntl.LOCK_EX | fcntl.LOCK_NB) def _remove_lock(self): if sys.platform == 'win32': import msvcrt msvcrt.locking(self._lock_file_descriptor, msvcrt.LK_UNLCK, 32) else: import fcntl fcntl.flock(self._lock_file_descriptor, fcntl.LOCK_UN) def acquire_lock(self): self._lock_file_descriptor = os.open(self._lock_file_path, os.O_TRUNC | os.O_CREAT) start_time = time.time() while True: try: self._create_lock() return True except IOError: if time.time() - start_time > self._max_wait_time_sec: _log.debug("File locking failed: %s" % str(sys.exc_info())) os.close(self._lock_file_descriptor) self._lock_file_descriptor = None return False # There's no compelling reason to spin hard here, so sleep for a bit. time.sleep(0.01) def release_lock(self): try: if self._lock_file_descriptor: self._remove_lock() os.close(self._lock_file_descriptor) self._lock_file_descriptor = None os.unlink(self._lock_file_path) except (IOError, OSError): _log.debug("Warning in release lock: %s" % str(sys.exc_info()))
Elandril/SickRage
refs/heads/master
lib/sqlalchemy/sql/compiler.py
76
# sql/compiler.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Base SQL and DDL compiler implementations. Classes provided include: :class:`.compiler.SQLCompiler` - renders SQL strings :class:`.compiler.DDLCompiler` - renders DDL (data definition language) strings :class:`.compiler.GenericTypeCompiler` - renders type specification strings. To generate user-defined SQL strings, see :doc:`/ext/compiler`. """ import re from . import schema, sqltypes, operators, functions, \ util as sql_util, visitors, elements, selectable, base from .. import util, exc import decimal import itertools import operator RESERVED_WORDS = set([ 'all', 'analyse', 'analyze', 'and', 'any', 'array', 'as', 'asc', 'asymmetric', 'authorization', 'between', 'binary', 'both', 'case', 'cast', 'check', 'collate', 'column', 'constraint', 'create', 'cross', 'current_date', 'current_role', 'current_time', 'current_timestamp', 'current_user', 'default', 'deferrable', 'desc', 'distinct', 'do', 'else', 'end', 'except', 'false', 'for', 'foreign', 'freeze', 'from', 'full', 'grant', 'group', 'having', 'ilike', 'in', 'initially', 'inner', 'intersect', 'into', 'is', 'isnull', 'join', 'leading', 'left', 'like', 'limit', 'localtime', 'localtimestamp', 'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset', 'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps', 'placing', 'primary', 'references', 'right', 'select', 'session_user', 'set', 'similar', 'some', 'symmetric', 'table', 'then', 'to', 'trailing', 'true', 'union', 'unique', 'user', 'using', 'verbose', 'when', 'where']) LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I) ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in range(0, 10)]).union(['$']) BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE) BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]+)(?![:\w\$])', re.UNICODE) BIND_TEMPLATES = { 'pyformat': "%%(%(name)s)s", 'qmark': "?", 'format': "%%s", 'numeric': ":[_POSITION]", 'named': ":%(name)s" } REQUIRED = util.symbol('REQUIRED', """ Placeholder for the value within a :class:`.BindParameter` which is required to be present when the statement is passed to :meth:`.Connection.execute`. This symbol is typically used when a :func:`.expression.insert` or :func:`.expression.update` statement is compiled without parameter values present. """) OPERATORS = { # binary operators.and_: ' AND ', operators.or_: ' OR ', operators.add: ' + ', operators.mul: ' * ', operators.sub: ' - ', operators.div: ' / ', operators.mod: ' % ', operators.truediv: ' / ', operators.neg: '-', operators.lt: ' < ', operators.le: ' <= ', operators.ne: ' != ', operators.gt: ' > ', operators.ge: ' >= ', operators.eq: ' = ', operators.concat_op: ' || ', operators.between_op: ' BETWEEN ', operators.match_op: ' MATCH ', operators.in_op: ' IN ', operators.notin_op: ' NOT IN ', operators.comma_op: ', ', operators.from_: ' FROM ', operators.as_: ' AS ', operators.is_: ' IS ', operators.isnot: ' IS NOT ', operators.collate: ' COLLATE ', # unary operators.exists: 'EXISTS ', operators.distinct_op: 'DISTINCT ', operators.inv: 'NOT ', # modifiers operators.desc_op: ' DESC', operators.asc_op: ' ASC', operators.nullsfirst_op: ' NULLS FIRST', operators.nullslast_op: ' NULLS LAST', } FUNCTIONS = { functions.coalesce: 'coalesce%(expr)s', functions.current_date: 'CURRENT_DATE', functions.current_time: 'CURRENT_TIME', functions.current_timestamp: 'CURRENT_TIMESTAMP', functions.current_user: 'CURRENT_USER', functions.localtime: 'LOCALTIME', functions.localtimestamp: 'LOCALTIMESTAMP', functions.random: 'random%(expr)s', functions.sysdate: 'sysdate', functions.session_user: 'SESSION_USER', functions.user: 'USER' } EXTRACT_MAP = { 'month': 'month', 'day': 'day', 'year': 'year', 'second': 'second', 'hour': 'hour', 'doy': 'doy', 'minute': 'minute', 'quarter': 'quarter', 'dow': 'dow', 'week': 'week', 'epoch': 'epoch', 'milliseconds': 'milliseconds', 'microseconds': 'microseconds', 'timezone_hour': 'timezone_hour', 'timezone_minute': 'timezone_minute' } COMPOUND_KEYWORDS = { selectable.CompoundSelect.UNION: 'UNION', selectable.CompoundSelect.UNION_ALL: 'UNION ALL', selectable.CompoundSelect.EXCEPT: 'EXCEPT', selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL', selectable.CompoundSelect.INTERSECT: 'INTERSECT', selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL' } class Compiled(object): """Represent a compiled SQL or DDL expression. The ``__str__`` method of the ``Compiled`` object should produce the actual text of the statement. ``Compiled`` objects are specific to their underlying database dialect, and also may or may not be specific to the columns referenced within a particular set of bind parameters. In no case should the ``Compiled`` object be dependent on the actual values of those bind parameters, even though it may reference those values as defaults. """ def __init__(self, dialect, statement, bind=None, compile_kwargs=util.immutabledict()): """Construct a new ``Compiled`` object. :param dialect: ``Dialect`` to compile against. :param statement: ``ClauseElement`` to be compiled. :param bind: Optional Engine or Connection to compile this statement against. :param compile_kwargs: additional kwargs that will be passed to the initial call to :meth:`.Compiled.process`. .. versionadded:: 0.8 """ self.dialect = dialect self.bind = bind if statement is not None: self.statement = statement self.can_execute = statement.supports_execution self.string = self.process(self.statement, **compile_kwargs) @util.deprecated("0.7", ":class:`.Compiled` objects now compile " "within the constructor.") def compile(self): """Produce the internal string representation of this element. """ pass def _execute_on_connection(self, connection, multiparams, params): return connection._execute_compiled(self, multiparams, params) @property def sql_compiler(self): """Return a Compiled that is capable of processing SQL expressions. If this compiler is one, it would likely just return 'self'. """ raise NotImplementedError() def process(self, obj, **kwargs): return obj._compiler_dispatch(self, **kwargs) def __str__(self): """Return the string text of the generated SQL or DDL.""" return self.string or '' def construct_params(self, params=None): """Return the bind params for this compiled object. :param params: a dict of string/object pairs whose values will override bind values compiled in to the statement. """ raise NotImplementedError() @property def params(self): """Return the bind params for this compiled object.""" return self.construct_params() def execute(self, *multiparams, **params): """Execute this compiled object.""" e = self.bind if e is None: raise exc.UnboundExecutionError( "This Compiled object is not bound to any Engine " "or Connection.") return e._execute_compiled(self, multiparams, params) def scalar(self, *multiparams, **params): """Execute this compiled object and return the result's scalar value.""" return self.execute(*multiparams, **params).scalar() class TypeCompiler(object): """Produces DDL specification for TypeEngine objects.""" def __init__(self, dialect): self.dialect = dialect def process(self, type_): return type_._compiler_dispatch(self) class _CompileLabel(visitors.Visitable): """lightweight label object which acts as an expression.Label.""" __visit_name__ = 'label' __slots__ = 'element', 'name' def __init__(self, col, name, alt_names=()): self.element = col self.name = name self._alt_names = (col,) + alt_names @property def proxy_set(self): return self.element.proxy_set @property def type(self): return self.element.type class SQLCompiler(Compiled): """Default implementation of Compiled. Compiles ClauseElements into SQL strings. Uses a similar visit paradigm as visitors.ClauseVisitor but implements its own traversal. """ extract_map = EXTRACT_MAP compound_keywords = COMPOUND_KEYWORDS isdelete = isinsert = isupdate = False """class-level defaults which can be set at the instance level to define if this Compiled instance represents INSERT/UPDATE/DELETE """ returning = None """holds the "returning" collection of columns if the statement is CRUD and defines returning columns either implicitly or explicitly """ returning_precedes_values = False """set to True classwide to generate RETURNING clauses before the VALUES or WHERE clause (i.e. MSSQL) """ render_table_with_column_in_update_from = False """set to True classwide to indicate the SET clause in a multi-table UPDATE statement should qualify columns with the table name (i.e. MySQL only) """ ansi_bind_rules = False """SQL 92 doesn't allow bind parameters to be used in the columns clause of a SELECT, nor does it allow ambiguous expressions like "? = ?". A compiler subclass can set this flag to False if the target driver/DB enforces this """ def __init__(self, dialect, statement, column_keys=None, inline=False, **kwargs): """Construct a new ``DefaultCompiler`` object. dialect Dialect to be used statement ClauseElement to be compiled column_keys a list of column names to be compiled into an INSERT or UPDATE statement. """ self.column_keys = column_keys # compile INSERT/UPDATE defaults/sequences inlined (no pre- # execute) self.inline = inline or getattr(statement, 'inline', False) # a dictionary of bind parameter keys to BindParameter # instances. self.binds = {} # a dictionary of BindParameter instances to "compiled" names # that are actually present in the generated SQL self.bind_names = util.column_dict() # stack which keeps track of nested SELECT statements self.stack = [] # relates label names in the final SQL to a tuple of local # column/label name, ColumnElement object (if any) and # TypeEngine. ResultProxy uses this for type processing and # column targeting self.result_map = {} # true if the paramstyle is positional self.positional = dialect.positional if self.positional: self.positiontup = [] self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle] self.ctes = None # an IdentifierPreparer that formats the quoting of identifiers self.preparer = dialect.identifier_preparer self.label_length = dialect.label_length \ or dialect.max_identifier_length # a map which tracks "anonymous" identifiers that are created on # the fly here self.anon_map = util.PopulateDict(self._process_anon) # a map which tracks "truncated" names based on # dialect.label_length or dialect.max_identifier_length self.truncated_names = {} Compiled.__init__(self, dialect, statement, **kwargs) if self.positional and dialect.paramstyle == 'numeric': self._apply_numbered_params() @util.memoized_instancemethod def _init_cte_state(self): """Initialize collections related to CTEs only if a CTE is located, to save on the overhead of these collections otherwise. """ # collect CTEs to tack on top of a SELECT self.ctes = util.OrderedDict() self.ctes_by_name = {} self.ctes_recursive = False if self.positional: self.cte_positional = [] def _apply_numbered_params(self): poscount = itertools.count(1) self.string = re.sub( r'\[_POSITION\]', lambda m: str(util.next(poscount)), self.string) @util.memoized_property def _bind_processors(self): return dict( (key, value) for key, value in ((self.bind_names[bindparam], bindparam.type._cached_bind_processor(self.dialect)) for bindparam in self.bind_names) if value is not None ) def is_subquery(self): return len(self.stack) > 1 @property def sql_compiler(self): return self def construct_params(self, params=None, _group_number=None, _check=True): """return a dictionary of bind parameter keys and values""" if params: pd = {} for bindparam, name in self.bind_names.items(): if bindparam.key in params: pd[name] = params[bindparam.key] elif name in params: pd[name] = params[name] elif _check and bindparam.required: if _group_number: raise exc.InvalidRequestError( "A value is required for bind parameter %r, " "in parameter group %d" % (bindparam.key, _group_number)) else: raise exc.InvalidRequestError( "A value is required for bind parameter %r" % bindparam.key) else: pd[name] = bindparam.effective_value return pd else: pd = {} for bindparam in self.bind_names: if _check and bindparam.required: if _group_number: raise exc.InvalidRequestError( "A value is required for bind parameter %r, " "in parameter group %d" % (bindparam.key, _group_number)) else: raise exc.InvalidRequestError( "A value is required for bind parameter %r" % bindparam.key) pd[self.bind_names[bindparam]] = bindparam.effective_value return pd @property def params(self): """Return the bind param dictionary embedded into this compiled object, for those values that are present.""" return self.construct_params(_check=False) def default_from(self): """Called when a SELECT statement has no froms, and no FROM clause is to be appended. Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output. """ return "" def visit_grouping(self, grouping, asfrom=False, **kwargs): return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")" def visit_label(self, label, add_to_result_map=None, within_label_clause=False, within_columns_clause=False, render_label_as_label=None, **kw): # only render labels within the columns clause # or ORDER BY clause of a select. dialect-specific compilers # can modify this behavior. render_label_with_as = within_columns_clause and not within_label_clause render_label_only = render_label_as_label is label if render_label_only or render_label_with_as: if isinstance(label.name, elements._truncated_label): labelname = self._truncated_identifier("colident", label.name) else: labelname = label.name if render_label_with_as: if add_to_result_map is not None: add_to_result_map( labelname, label.name, (label, labelname, ) + label._alt_names, label.type ) return label.element._compiler_dispatch(self, within_columns_clause=True, within_label_clause=True, **kw) + \ OPERATORS[operators.as_] + \ self.preparer.format_label(label, labelname) elif render_label_only: return labelname else: return label.element._compiler_dispatch(self, within_columns_clause=False, **kw) def visit_column(self, column, add_to_result_map=None, include_table=True, **kwargs): name = orig_name = column.name if name is None: raise exc.CompileError("Cannot compile Column object until " "its 'name' is assigned.") is_literal = column.is_literal if not is_literal and isinstance(name, elements._truncated_label): name = self._truncated_identifier("colident", name) if add_to_result_map is not None: add_to_result_map( name, orig_name, (column, name, column.key), column.type ) if is_literal: name = self.escape_literal_column(name) else: name = self.preparer.quote(name) table = column.table if table is None or not include_table or not table.named_with_column: return name else: if table.schema: schema_prefix = self.preparer.quote_schema(table.schema) + '.' else: schema_prefix = '' tablename = table.name if isinstance(tablename, elements._truncated_label): tablename = self._truncated_identifier("alias", tablename) return schema_prefix + \ self.preparer.quote(tablename) + \ "." + name def escape_literal_column(self, text): """provide escaping for the literal_column() construct.""" # TODO: some dialects might need different behavior here return text.replace('%', '%%') def visit_fromclause(self, fromclause, **kwargs): return fromclause.name def visit_index(self, index, **kwargs): return index.name def visit_typeclause(self, typeclause, **kwargs): return self.dialect.type_compiler.process(typeclause.type) def post_process_text(self, text): return text def visit_textclause(self, textclause, **kw): def do_bindparam(m): name = m.group(1) if name in textclause._bindparams: return self.process(textclause._bindparams[name], **kw) else: return self.bindparam_string(name, **kw) # un-escape any \:params return BIND_PARAMS_ESC.sub(lambda m: m.group(1), BIND_PARAMS.sub(do_bindparam, self.post_process_text(textclause.text)) ) def visit_text_as_from(self, taf, iswrapper=False, compound_index=0, force_result_map=False, asfrom=False, parens=True, **kw): toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] populate_result_map = force_result_map or ( compound_index == 0 and ( toplevel or \ entry['iswrapper'] ) ) if populate_result_map: for c in taf.column_args: self.process(c, within_columns_clause=True, add_to_result_map=self._add_to_result_map) text = self.process(taf.element, **kw) if asfrom and parens: text = "(%s)" % text return text def visit_null(self, expr, **kw): return 'NULL' def visit_true(self, expr, **kw): if self.dialect.supports_native_boolean: return 'true' else: return "1" def visit_false(self, expr, **kw): if self.dialect.supports_native_boolean: return 'false' else: return "0" def visit_clauselist(self, clauselist, order_by_select=None, **kw): if order_by_select is not None: return self._order_by_clauselist( clauselist, order_by_select, **kw) sep = clauselist.operator if sep is None: sep = " " else: sep = OPERATORS[clauselist.operator] return sep.join( s for s in ( c._compiler_dispatch(self, **kw) for c in clauselist.clauses) if s) def _order_by_clauselist(self, clauselist, order_by_select, **kw): # look through raw columns collection for labels. # note that its OK we aren't expanding tables and other selectables # here; we can only add a label in the ORDER BY for an individual # label expression in the columns clause. raw_col = set(l._order_by_label_element.name for l in order_by_select._raw_columns if l._order_by_label_element is not None) return ", ".join( s for s in ( c._compiler_dispatch(self, render_label_as_label= c._order_by_label_element if c._order_by_label_element is not None and c._order_by_label_element.name in raw_col else None, **kw) for c in clauselist.clauses) if s) def visit_case(self, clause, **kwargs): x = "CASE " if clause.value is not None: x += clause.value._compiler_dispatch(self, **kwargs) + " " for cond, result in clause.whens: x += "WHEN " + cond._compiler_dispatch( self, **kwargs ) + " THEN " + result._compiler_dispatch( self, **kwargs) + " " if clause.else_ is not None: x += "ELSE " + clause.else_._compiler_dispatch( self, **kwargs ) + " " x += "END" return x def visit_cast(self, cast, **kwargs): return "CAST(%s AS %s)" % \ (cast.clause._compiler_dispatch(self, **kwargs), cast.typeclause._compiler_dispatch(self, **kwargs)) def visit_over(self, over, **kwargs): return "%s OVER (%s)" % ( over.func._compiler_dispatch(self, **kwargs), ' '.join( '%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs)) for word, clause in ( ('PARTITION', over.partition_by), ('ORDER', over.order_by) ) if clause is not None and len(clause) ) ) def visit_extract(self, extract, **kwargs): field = self.extract_map.get(extract.field, extract.field) return "EXTRACT(%s FROM %s)" % (field, extract.expr._compiler_dispatch(self, **kwargs)) def visit_function(self, func, add_to_result_map=None, **kwargs): if add_to_result_map is not None: add_to_result_map( func.name, func.name, (), func.type ) disp = getattr(self, "visit_%s_func" % func.name.lower(), None) if disp: return disp(func, **kwargs) else: name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s") return ".".join(list(func.packagenames) + [name]) % \ {'expr': self.function_argspec(func, **kwargs)} def visit_next_value_func(self, next_value, **kw): return self.visit_sequence(next_value.sequence) def visit_sequence(self, sequence): raise NotImplementedError( "Dialect '%s' does not support sequence increments." % self.dialect.name ) def function_argspec(self, func, **kwargs): return func.clause_expr._compiler_dispatch(self, **kwargs) def visit_compound_select(self, cs, asfrom=False, parens=True, compound_index=0, **kwargs): toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] self.stack.append( { 'correlate_froms': entry['correlate_froms'], 'iswrapper': toplevel, 'asfrom_froms': entry['asfrom_froms'] }) keyword = self.compound_keywords.get(cs.keyword) text = (" " + keyword + " ").join( (c._compiler_dispatch(self, asfrom=asfrom, parens=False, compound_index=i, **kwargs) for i, c in enumerate(cs.selects)) ) group_by = cs._group_by_clause._compiler_dispatch( self, asfrom=asfrom, **kwargs) if group_by: text += " GROUP BY " + group_by text += self.order_by_clause(cs, **kwargs) text += (cs._limit is not None or cs._offset is not None) and \ self.limit_clause(cs) or "" if self.ctes and \ compound_index == 0 and toplevel: text = self._render_cte_clause() + text self.stack.pop(-1) if asfrom and parens: return "(" + text + ")" else: return text def visit_unary(self, unary, **kw): if unary.operator: if unary.modifier: raise exc.CompileError( "Unary expression does not support operator " "and modifier simultaneously") disp = getattr(self, "visit_%s_unary_operator" % unary.operator.__name__, None) if disp: return disp(unary, unary.operator, **kw) else: return self._generate_generic_unary_operator(unary, OPERATORS[unary.operator], **kw) elif unary.modifier: disp = getattr(self, "visit_%s_unary_modifier" % unary.modifier.__name__, None) if disp: return disp(unary, unary.modifier, **kw) else: return self._generate_generic_unary_modifier(unary, OPERATORS[unary.modifier], **kw) else: raise exc.CompileError( "Unary expression has no operator or modifier") def visit_istrue_unary_operator(self, element, operator, **kw): if self.dialect.supports_native_boolean: return self.process(element.element, **kw) else: return "%s = 1" % self.process(element.element, **kw) def visit_isfalse_unary_operator(self, element, operator, **kw): if self.dialect.supports_native_boolean: return "NOT %s" % self.process(element.element, **kw) else: return "%s = 0" % self.process(element.element, **kw) def visit_binary(self, binary, **kw): # don't allow "? = ?" to render if self.ansi_bind_rules and \ isinstance(binary.left, elements.BindParameter) and \ isinstance(binary.right, elements.BindParameter): kw['literal_binds'] = True operator = binary.operator disp = getattr(self, "visit_%s_binary" % operator.__name__, None) if disp: return disp(binary, operator, **kw) else: try: opstring = OPERATORS[operator] except KeyError: raise exc.UnsupportedCompilationError(self, operator) else: return self._generate_generic_binary(binary, opstring, **kw) def visit_custom_op_binary(self, element, operator, **kw): return self._generate_generic_binary(element, " " + operator.opstring + " ", **kw) def visit_custom_op_unary_operator(self, element, operator, **kw): return self._generate_generic_unary_operator(element, operator.opstring + " ", **kw) def visit_custom_op_unary_modifier(self, element, operator, **kw): return self._generate_generic_unary_modifier(element, " " + operator.opstring, **kw) def _generate_generic_binary(self, binary, opstring, **kw): return binary.left._compiler_dispatch(self, **kw) + \ opstring + \ binary.right._compiler_dispatch(self, **kw) def _generate_generic_unary_operator(self, unary, opstring, **kw): return opstring + unary.element._compiler_dispatch(self, **kw) def _generate_generic_unary_modifier(self, unary, opstring, **kw): return unary.element._compiler_dispatch(self, **kw) + opstring @util.memoized_property def _like_percent_literal(self): return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE) def visit_contains_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right).__add__(percent) return self.visit_like_op_binary(binary, operator, **kw) def visit_notcontains_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right).__add__(percent) return self.visit_notlike_op_binary(binary, operator, **kw) def visit_startswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__radd__( binary.right ) return self.visit_like_op_binary(binary, operator, **kw) def visit_notstartswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__radd__( binary.right ) return self.visit_notlike_op_binary(binary, operator, **kw) def visit_endswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right) return self.visit_like_op_binary(binary, operator, **kw) def visit_notendswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right) return self.visit_notlike_op_binary(binary, operator, **kw) def visit_like_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) # TODO: use ternary here, not "and"/ "or" return '%s LIKE %s' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else '' ) def visit_notlike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return '%s NOT LIKE %s' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else '' ) def visit_ilike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return 'lower(%s) LIKE lower(%s)' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else '' ) def visit_notilike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return 'lower(%s) NOT LIKE lower(%s)' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else '' ) def visit_bindparam(self, bindparam, within_columns_clause=False, literal_binds=False, skip_bind_expression=False, **kwargs): if not skip_bind_expression and bindparam.type._has_bind_expression: bind_expression = bindparam.type.bind_expression(bindparam) return self.process(bind_expression, skip_bind_expression=True) if literal_binds or \ (within_columns_clause and \ self.ansi_bind_rules): if bindparam.value is None and bindparam.callable is None: raise exc.CompileError("Bind parameter '%s' without a " "renderable value not allowed here." % bindparam.key) return self.render_literal_bindparam(bindparam, within_columns_clause=True, **kwargs) name = self._truncate_bindparam(bindparam) if name in self.binds: existing = self.binds[name] if existing is not bindparam: if (existing.unique or bindparam.unique) and \ not existing.proxy_set.intersection( bindparam.proxy_set): raise exc.CompileError( "Bind parameter '%s' conflicts with " "unique bind parameter of the same name" % bindparam.key ) elif existing._is_crud or bindparam._is_crud: raise exc.CompileError( "bindparam() name '%s' is reserved " "for automatic usage in the VALUES or SET " "clause of this " "insert/update statement. Please use a " "name other than column name when using bindparam() " "with insert() or update() (for example, 'b_%s')." % (bindparam.key, bindparam.key) ) self.binds[bindparam.key] = self.binds[name] = bindparam return self.bindparam_string(name, **kwargs) def render_literal_bindparam(self, bindparam, **kw): value = bindparam.effective_value return self.render_literal_value(value, bindparam.type) def render_literal_value(self, value, type_): """Render the value of a bind parameter as a quoted literal. This is used for statement sections that do not accept bind parameters on the target driver/database. This should be implemented by subclasses using the quoting services of the DBAPI. """ processor = type_._cached_literal_processor(self.dialect) if processor: return processor(value) else: raise NotImplementedError( "Don't know how to literal-quote value %r" % value) def _truncate_bindparam(self, bindparam): if bindparam in self.bind_names: return self.bind_names[bindparam] bind_name = bindparam.key if isinstance(bind_name, elements._truncated_label): bind_name = self._truncated_identifier("bindparam", bind_name) # add to bind_names for translation self.bind_names[bindparam] = bind_name return bind_name def _truncated_identifier(self, ident_class, name): if (ident_class, name) in self.truncated_names: return self.truncated_names[(ident_class, name)] anonname = name.apply_map(self.anon_map) if len(anonname) > self.label_length: counter = self.truncated_names.get(ident_class, 1) truncname = anonname[0:max(self.label_length - 6, 0)] + \ "_" + hex(counter)[2:] self.truncated_names[ident_class] = counter + 1 else: truncname = anonname self.truncated_names[(ident_class, name)] = truncname return truncname def _anonymize(self, name): return name % self.anon_map def _process_anon(self, key): (ident, derived) = key.split(' ', 1) anonymous_counter = self.anon_map.get(derived, 1) self.anon_map[derived] = anonymous_counter + 1 return derived + "_" + str(anonymous_counter) def bindparam_string(self, name, positional_names=None, **kw): if self.positional: if positional_names is not None: positional_names.append(name) else: self.positiontup.append(name) return self.bindtemplate % {'name': name} def visit_cte(self, cte, asfrom=False, ashint=False, fromhints=None, **kwargs): self._init_cte_state() if self.positional: kwargs['positional_names'] = self.cte_positional if isinstance(cte.name, elements._truncated_label): cte_name = self._truncated_identifier("alias", cte.name) else: cte_name = cte.name if cte_name in self.ctes_by_name: existing_cte = self.ctes_by_name[cte_name] # we've generated a same-named CTE that we are enclosed in, # or this is the same CTE. just return the name. if cte in existing_cte._restates or cte is existing_cte: return self.preparer.format_alias(cte, cte_name) elif existing_cte in cte._restates: # we've generated a same-named CTE that is # enclosed in us - we take precedence, so # discard the text for the "inner". del self.ctes[existing_cte] else: raise exc.CompileError( "Multiple, unrelated CTEs found with " "the same name: %r" % cte_name) self.ctes_by_name[cte_name] = cte if cte._cte_alias is not None: orig_cte = cte._cte_alias if orig_cte not in self.ctes: self.visit_cte(orig_cte) cte_alias_name = cte._cte_alias.name if isinstance(cte_alias_name, elements._truncated_label): cte_alias_name = self._truncated_identifier("alias", cte_alias_name) else: orig_cte = cte cte_alias_name = None if not cte_alias_name and cte not in self.ctes: if cte.recursive: self.ctes_recursive = True text = self.preparer.format_alias(cte, cte_name) if cte.recursive: if isinstance(cte.original, selectable.Select): col_source = cte.original elif isinstance(cte.original, selectable.CompoundSelect): col_source = cte.original.selects[0] else: assert False recur_cols = [c for c in util.unique_list(col_source.inner_columns) if c is not None] text += "(%s)" % (", ".join( self.preparer.format_column(ident) for ident in recur_cols)) text += " AS \n" + \ cte.original._compiler_dispatch( self, asfrom=True, **kwargs ) self.ctes[cte] = text if asfrom: if cte_alias_name: text = self.preparer.format_alias(cte, cte_alias_name) text += " AS " + cte_name else: return self.preparer.format_alias(cte, cte_name) return text def visit_alias(self, alias, asfrom=False, ashint=False, iscrud=False, fromhints=None, **kwargs): if asfrom or ashint: if isinstance(alias.name, elements._truncated_label): alias_name = self._truncated_identifier("alias", alias.name) else: alias_name = alias.name if ashint: return self.preparer.format_alias(alias, alias_name) elif asfrom: ret = alias.original._compiler_dispatch(self, asfrom=True, **kwargs) + \ " AS " + \ self.preparer.format_alias(alias, alias_name) if fromhints and alias in fromhints: ret = self.format_from_hint_text(ret, alias, fromhints[alias], iscrud) return ret else: return alias.original._compiler_dispatch(self, **kwargs) def _add_to_result_map(self, keyname, name, objects, type_): if not self.dialect.case_sensitive: keyname = keyname.lower() if keyname in self.result_map: # conflicting keyname, just double up the list # of objects. this will cause an "ambiguous name" # error if an attempt is made by the result set to # access. e_name, e_obj, e_type = self.result_map[keyname] self.result_map[keyname] = e_name, e_obj + objects, e_type else: self.result_map[keyname] = name, objects, type_ def _label_select_column(self, select, column, populate_result_map, asfrom, column_clause_args, name=None, within_columns_clause=True): """produce labeled columns present in a select().""" if column.type._has_column_expression and \ populate_result_map: col_expr = column.type.column_expression(column) add_to_result_map = lambda keyname, name, objects, type_: \ self._add_to_result_map( keyname, name, objects + (column,), type_) else: col_expr = column if populate_result_map: add_to_result_map = self._add_to_result_map else: add_to_result_map = None if not within_columns_clause: result_expr = col_expr elif isinstance(column, elements.Label): if col_expr is not column: result_expr = _CompileLabel( col_expr, column.name, alt_names=(column.element,) ) else: result_expr = col_expr elif select is not None and name: result_expr = _CompileLabel( col_expr, name, alt_names=(column._key_label,) ) elif \ asfrom and \ isinstance(column, elements.ColumnClause) and \ not column.is_literal and \ column.table is not None and \ not isinstance(column.table, selectable.Select): result_expr = _CompileLabel(col_expr, elements._as_truncated(column.name), alt_names=(column.key,)) elif not isinstance(column, (elements.UnaryExpression, elements.TextClause)) \ and (not hasattr(column, 'name') or \ isinstance(column, functions.Function)): result_expr = _CompileLabel(col_expr, column.anon_label) elif col_expr is not column: # TODO: are we sure "column" has a .name and .key here ? # assert isinstance(column, elements.ColumnClause) result_expr = _CompileLabel(col_expr, elements._as_truncated(column.name), alt_names=(column.key,)) else: result_expr = col_expr column_clause_args.update( within_columns_clause=within_columns_clause, add_to_result_map=add_to_result_map ) return result_expr._compiler_dispatch( self, **column_clause_args ) def format_from_hint_text(self, sqltext, table, hint, iscrud): hinttext = self.get_from_hint_text(table, hint) if hinttext: sqltext += " " + hinttext return sqltext def get_select_hint_text(self, byfroms): return None def get_from_hint_text(self, table, text): return None def get_crud_hint_text(self, table, text): return None def _transform_select_for_nested_joins(self, select): """Rewrite any "a JOIN (b JOIN c)" expression as "a JOIN (select * from b JOIN c) AS anon", to support databases that can't parse a parenthesized join correctly (i.e. sqlite the main one). """ cloned = {} column_translate = [{}] def visit(element, **kw): if element in column_translate[-1]: return column_translate[-1][element] elif element in cloned: return cloned[element] newelem = cloned[element] = element._clone() if newelem.is_selectable and newelem._is_join and \ isinstance(newelem.right, selectable.FromGrouping): newelem._reset_exported() newelem.left = visit(newelem.left, **kw) right = visit(newelem.right, **kw) selectable_ = selectable.Select( [right.element], use_labels=True).alias() for c in selectable_.c: c._key_label = c.key c._label = c.name translate_dict = dict( zip(newelem.right.element.c, selectable_.c) ) # translating from both the old and the new # because different select() structures will lead us # to traverse differently translate_dict[right.element.left] = selectable_ translate_dict[right.element.right] = selectable_ translate_dict[newelem.right.element.left] = selectable_ translate_dict[newelem.right.element.right] = selectable_ # propagate translations that we've gained # from nested visit(newelem.right) outwards # to the enclosing select here. this happens # only when we have more than one level of right # join nesting, i.e. "a JOIN (b JOIN (c JOIN d))" for k, v in list(column_translate[-1].items()): if v in translate_dict: # remarkably, no current ORM tests (May 2013) # hit this condition, only test_join_rewriting # does. column_translate[-1][k] = translate_dict[v] column_translate[-1].update(translate_dict) newelem.right = selectable_ newelem.onclause = visit(newelem.onclause, **kw) elif newelem.is_selectable and newelem._is_from_container: # if we hit an Alias or CompoundSelect, put a marker in the # stack. kw['transform_clue'] = 'select_container' newelem._copy_internals(clone=visit, **kw) elif newelem.is_selectable and newelem._is_select: barrier_select = kw.get('transform_clue', None) == 'select_container' # if we're still descended from an Alias/CompoundSelect, we're # in a FROM clause, so start with a new translate collection if barrier_select: column_translate.append({}) kw['transform_clue'] = 'inside_select' newelem._copy_internals(clone=visit, **kw) if barrier_select: del column_translate[-1] else: newelem._copy_internals(clone=visit, **kw) return newelem return visit(select) def _transform_result_map_for_nested_joins(self, select, transformed_select): inner_col = dict((c._key_label, c) for c in transformed_select.inner_columns) d = dict( (inner_col[c._key_label], c) for c in select.inner_columns ) for key, (name, objs, typ) in list(self.result_map.items()): objs = tuple([d.get(col, col) for col in objs]) self.result_map[key] = (name, objs, typ) _default_stack_entry = util.immutabledict([ ('iswrapper', False), ('correlate_froms', frozenset()), ('asfrom_froms', frozenset()) ]) def _display_froms_for_select(self, select, asfrom): # utility method to help external dialects # get the correct from list for a select. # specifically the oracle dialect needs this feature # right now. toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] correlate_froms = entry['correlate_froms'] asfrom_froms = entry['asfrom_froms'] if asfrom: froms = select._get_display_froms( explicit_correlate_froms=\ correlate_froms.difference(asfrom_froms), implicit_correlate_froms=()) else: froms = select._get_display_froms( explicit_correlate_froms=correlate_froms, implicit_correlate_froms=asfrom_froms) return froms def visit_select(self, select, asfrom=False, parens=True, iswrapper=False, fromhints=None, compound_index=0, force_result_map=False, positional_names=None, nested_join_translation=False, **kwargs): needs_nested_translation = \ select.use_labels and \ not nested_join_translation and \ not self.stack and \ not self.dialect.supports_right_nested_joins if needs_nested_translation: transformed_select = self._transform_select_for_nested_joins(select) text = self.visit_select( transformed_select, asfrom=asfrom, parens=parens, iswrapper=iswrapper, fromhints=fromhints, compound_index=compound_index, force_result_map=force_result_map, positional_names=positional_names, nested_join_translation=True, **kwargs ) toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] populate_result_map = force_result_map or ( compound_index == 0 and ( toplevel or \ entry['iswrapper'] ) ) if needs_nested_translation: if populate_result_map: self._transform_result_map_for_nested_joins( select, transformed_select) return text correlate_froms = entry['correlate_froms'] asfrom_froms = entry['asfrom_froms'] if asfrom: froms = select._get_display_froms( explicit_correlate_froms= correlate_froms.difference(asfrom_froms), implicit_correlate_froms=()) else: froms = select._get_display_froms( explicit_correlate_froms=correlate_froms, implicit_correlate_froms=asfrom_froms) new_correlate_froms = set(selectable._from_objects(*froms)) all_correlate_froms = new_correlate_froms.union(correlate_froms) new_entry = { 'asfrom_froms': new_correlate_froms, 'iswrapper': iswrapper, 'correlate_froms': all_correlate_froms } self.stack.append(new_entry) column_clause_args = kwargs.copy() column_clause_args.update({ 'positional_names': positional_names, 'within_label_clause': False, 'within_columns_clause': False }) # the actual list of columns to print in the SELECT column list. inner_columns = [ c for c in [ self._label_select_column(select, column, populate_result_map, asfrom, column_clause_args, name=name) for name, column in select._columns_plus_names ] if c is not None ] text = "SELECT " # we're off to a good start ! if select._hints: byfrom = dict([ (from_, hinttext % { 'name':from_._compiler_dispatch( self, ashint=True) }) for (from_, dialect), hinttext in select._hints.items() if dialect in ('*', self.dialect.name) ]) hint_text = self.get_select_hint_text(byfrom) if hint_text: text += hint_text + " " if select._prefixes: text += self._generate_prefixes(select, select._prefixes, **kwargs) text += self.get_select_precolumns(select) text += ', '.join(inner_columns) if froms: text += " \nFROM " if select._hints: text += ', '.join([f._compiler_dispatch(self, asfrom=True, fromhints=byfrom, **kwargs) for f in froms]) else: text += ', '.join([f._compiler_dispatch(self, asfrom=True, **kwargs) for f in froms]) else: text += self.default_from() if select._whereclause is not None: t = select._whereclause._compiler_dispatch(self, **kwargs) if t: text += " \nWHERE " + t if select._group_by_clause.clauses: group_by = select._group_by_clause._compiler_dispatch( self, **kwargs) if group_by: text += " GROUP BY " + group_by if select._having is not None: t = select._having._compiler_dispatch(self, **kwargs) if t: text += " \nHAVING " + t if select._order_by_clause.clauses: if self.dialect.supports_simple_order_by_label: order_by_select = select else: order_by_select = None text += self.order_by_clause(select, order_by_select=order_by_select, **kwargs) if select._limit is not None or select._offset is not None: text += self.limit_clause(select) if select._for_update_arg is not None: text += self.for_update_clause(select) if self.ctes and \ compound_index == 0 and toplevel: text = self._render_cte_clause() + text self.stack.pop(-1) if asfrom and parens: return "(" + text + ")" else: return text def _generate_prefixes(self, stmt, prefixes, **kw): clause = " ".join( prefix._compiler_dispatch(self, **kw) for prefix, dialect_name in prefixes if dialect_name is None or dialect_name == self.dialect.name ) if clause: clause += " " return clause def _render_cte_clause(self): if self.positional: self.positiontup = self.cte_positional + self.positiontup cte_text = self.get_cte_preamble(self.ctes_recursive) + " " cte_text += ", \n".join( [txt for txt in self.ctes.values()] ) cte_text += "\n " return cte_text def get_cte_preamble(self, recursive): if recursive: return "WITH RECURSIVE" else: return "WITH" def get_select_precolumns(self, select): """Called when building a ``SELECT`` statement, position is just before column list. """ return select._distinct and "DISTINCT " or "" def order_by_clause(self, select, **kw): order_by = select._order_by_clause._compiler_dispatch(self, **kw) if order_by: return " ORDER BY " + order_by else: return "" def for_update_clause(self, select): return " FOR UPDATE" def returning_clause(self, stmt, returning_cols): raise exc.CompileError( "RETURNING is not supported by this " "dialect's statement compiler.") def limit_clause(self, select): text = "" if select._limit is not None: text += "\n LIMIT " + self.process(elements.literal(select._limit)) if select._offset is not None: if select._limit is None: text += "\n LIMIT -1" text += " OFFSET " + self.process(elements.literal(select._offset)) return text def visit_table(self, table, asfrom=False, iscrud=False, ashint=False, fromhints=None, **kwargs): if asfrom or ashint: if getattr(table, "schema", None): ret = self.preparer.quote_schema(table.schema) + \ "." + self.preparer.quote(table.name) else: ret = self.preparer.quote(table.name) if fromhints and table in fromhints: ret = self.format_from_hint_text(ret, table, fromhints[table], iscrud) return ret else: return "" def visit_join(self, join, asfrom=False, **kwargs): return ( join.left._compiler_dispatch(self, asfrom=True, **kwargs) + (join.isouter and " LEFT OUTER JOIN " or " JOIN ") + join.right._compiler_dispatch(self, asfrom=True, **kwargs) + " ON " + join.onclause._compiler_dispatch(self, **kwargs) ) def visit_insert(self, insert_stmt, **kw): self.isinsert = True colparams = self._get_colparams(insert_stmt, **kw) if not colparams and \ not self.dialect.supports_default_values and \ not self.dialect.supports_empty_insert: raise exc.CompileError("The '%s' dialect with current database " "version settings does not support empty " "inserts." % self.dialect.name) if insert_stmt._has_multi_parameters: if not self.dialect.supports_multivalues_insert: raise exc.CompileError("The '%s' dialect with current database " "version settings does not support " "in-place multirow inserts." % self.dialect.name) colparams_single = colparams[0] else: colparams_single = colparams preparer = self.preparer supports_default_values = self.dialect.supports_default_values text = "INSERT " if insert_stmt._prefixes: text += self._generate_prefixes(insert_stmt, insert_stmt._prefixes, **kw) text += "INTO " table_text = preparer.format_table(insert_stmt.table) if insert_stmt._hints: dialect_hints = dict([ (table, hint_text) for (table, dialect), hint_text in insert_stmt._hints.items() if dialect in ('*', self.dialect.name) ]) if insert_stmt.table in dialect_hints: table_text = self.format_from_hint_text( table_text, insert_stmt.table, dialect_hints[insert_stmt.table], True ) text += table_text if colparams_single or not supports_default_values: text += " (%s)" % ', '.join([preparer.format_column(c[0]) for c in colparams_single]) if self.returning or insert_stmt._returning: self.returning = self.returning or insert_stmt._returning returning_clause = self.returning_clause( insert_stmt, self.returning) if self.returning_precedes_values: text += " " + returning_clause if insert_stmt.select is not None: text += " %s" % self.process(insert_stmt.select, **kw) elif not colparams and supports_default_values: text += " DEFAULT VALUES" elif insert_stmt._has_multi_parameters: text += " VALUES %s" % ( ", ".join( "(%s)" % ( ', '.join(c[1] for c in colparam_set) ) for colparam_set in colparams ) ) else: text += " VALUES (%s)" % \ ', '.join([c[1] for c in colparams]) if self.returning and not self.returning_precedes_values: text += " " + returning_clause return text def update_limit_clause(self, update_stmt): """Provide a hook for MySQL to add LIMIT to the UPDATE""" return None def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw): """Provide a hook to override the initial table clause in an UPDATE statement. MySQL overrides this. """ return from_table._compiler_dispatch(self, asfrom=True, iscrud=True, **kw) def update_from_clause(self, update_stmt, from_table, extra_froms, from_hints, **kw): """Provide a hook to override the generation of an UPDATE..FROM clause. MySQL and MSSQL override this. """ return "FROM " + ', '.join( t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw) for t in extra_froms) def visit_update(self, update_stmt, **kw): self.stack.append( {'correlate_froms': set([update_stmt.table]), "iswrapper": False, "asfrom_froms": set([update_stmt.table])}) self.isupdate = True extra_froms = update_stmt._extra_froms text = "UPDATE " if update_stmt._prefixes: text += self._generate_prefixes(update_stmt, update_stmt._prefixes, **kw) table_text = self.update_tables_clause(update_stmt, update_stmt.table, extra_froms, **kw) colparams = self._get_colparams(update_stmt, **kw) if update_stmt._hints: dialect_hints = dict([ (table, hint_text) for (table, dialect), hint_text in update_stmt._hints.items() if dialect in ('*', self.dialect.name) ]) if update_stmt.table in dialect_hints: table_text = self.format_from_hint_text( table_text, update_stmt.table, dialect_hints[update_stmt.table], True ) else: dialect_hints = None text += table_text text += ' SET ' include_table = extra_froms and \ self.render_table_with_column_in_update_from text += ', '.join( c[0]._compiler_dispatch(self, include_table=include_table) + '=' + c[1] for c in colparams ) if self.returning or update_stmt._returning: if not self.returning: self.returning = update_stmt._returning if self.returning_precedes_values: text += " " + self.returning_clause( update_stmt, self.returning) if extra_froms: extra_from_text = self.update_from_clause( update_stmt, update_stmt.table, extra_froms, dialect_hints, **kw) if extra_from_text: text += " " + extra_from_text if update_stmt._whereclause is not None: text += " WHERE " + self.process(update_stmt._whereclause) limit_clause = self.update_limit_clause(update_stmt) if limit_clause: text += " " + limit_clause if self.returning and not self.returning_precedes_values: text += " " + self.returning_clause( update_stmt, self.returning) self.stack.pop(-1) return text def _create_crud_bind_param(self, col, value, required=False, name=None): if name is None: name = col.key bindparam = elements.BindParameter(name, value, type_=col.type, required=required) bindparam._is_crud = True return bindparam._compiler_dispatch(self) @util.memoized_property def _key_getters_for_crud_column(self): if self.isupdate and self.statement._extra_froms: # when extra tables are present, refer to the columns # in those extra tables as table-qualified, including in # dictionaries and when rendering bind param names. # the "main" table of the statement remains unqualified, # allowing the most compatibility with a non-multi-table # statement. _et = set(self.statement._extra_froms) def _column_as_key(key): str_key = elements._column_as_key(key) if hasattr(key, 'table') and key.table in _et: return (key.table.name, str_key) else: return str_key def _getattr_col_key(col): if col.table in _et: return (col.table.name, col.key) else: return col.key def _col_bind_name(col): if col.table in _et: return "%s_%s" % (col.table.name, col.key) else: return col.key else: _column_as_key = elements._column_as_key _getattr_col_key = _col_bind_name = operator.attrgetter("key") return _column_as_key, _getattr_col_key, _col_bind_name def _get_colparams(self, stmt, **kw): """create a set of tuples representing column/string pairs for use in an INSERT or UPDATE statement. Also generates the Compiled object's postfetch, prefetch, and returning column collections, used for default handling and ultimately populating the ResultProxy's prefetch_cols() and postfetch_cols() collections. """ self.postfetch = [] self.prefetch = [] self.returning = [] # no parameters in the statement, no parameters in the # compiled params - return binds for all columns if self.column_keys is None and stmt.parameters is None: return [ (c, self._create_crud_bind_param(c, None, required=True)) for c in stmt.table.columns ] if stmt._has_multi_parameters: stmt_parameters = stmt.parameters[0] else: stmt_parameters = stmt.parameters # getters - these are normally just column.key, # but in the case of mysql multi-table update, the rules for # .key must conditionally take tablename into account _column_as_key, _getattr_col_key, _col_bind_name = \ self._key_getters_for_crud_column # if we have statement parameters - set defaults in the # compiled params if self.column_keys is None: parameters = {} else: parameters = dict((_column_as_key(key), REQUIRED) for key in self.column_keys if not stmt_parameters or key not in stmt_parameters) # create a list of column assignment clauses as tuples values = [] if stmt_parameters is not None: for k, v in stmt_parameters.items(): colkey = _column_as_key(k) if colkey is not None: parameters.setdefault(colkey, v) else: # a non-Column expression on the left side; # add it to values() in an "as-is" state, # coercing right side to bound param if elements._is_literal(v): v = self.process( elements.BindParameter(None, v, type_=k.type), **kw) else: v = self.process(v.self_group(), **kw) values.append((k, v)) need_pks = self.isinsert and \ not self.inline and \ not stmt._returning implicit_returning = need_pks and \ self.dialect.implicit_returning and \ stmt.table.implicit_returning if self.isinsert: implicit_return_defaults = implicit_returning and stmt._return_defaults elif self.isupdate: implicit_return_defaults = self.dialect.implicit_returning and \ stmt.table.implicit_returning and \ stmt._return_defaults if implicit_return_defaults: if stmt._return_defaults is True: implicit_return_defaults = set(stmt.table.c) else: implicit_return_defaults = set(stmt._return_defaults) postfetch_lastrowid = need_pks and self.dialect.postfetch_lastrowid check_columns = {} # special logic that only occurs for multi-table UPDATE # statements if self.isupdate and stmt._extra_froms and stmt_parameters: normalized_params = dict( (elements._clause_element_as_expr(c), param) for c, param in stmt_parameters.items() ) affected_tables = set() for t in stmt._extra_froms: for c in t.c: if c in normalized_params: affected_tables.add(t) check_columns[_getattr_col_key(c)] = c value = normalized_params[c] if elements._is_literal(value): value = self._create_crud_bind_param( c, value, required=value is REQUIRED, name=_col_bind_name(c)) else: self.postfetch.append(c) value = self.process(value.self_group(), **kw) values.append((c, value)) # determine tables which are actually # to be updated - process onupdate and # server_onupdate for these for t in affected_tables: for c in t.c: if c in normalized_params: continue elif c.onupdate is not None and not c.onupdate.is_sequence: if c.onupdate.is_clause_element: values.append( (c, self.process( c.onupdate.arg.self_group(), **kw) ) ) self.postfetch.append(c) else: values.append( (c, self._create_crud_bind_param( c, None, name=_col_bind_name(c) ) ) ) self.prefetch.append(c) elif c.server_onupdate is not None: self.postfetch.append(c) if self.isinsert and stmt.select_names: # for an insert from select, we can only use names that # are given, so only select for those names. cols = (stmt.table.c[_column_as_key(name)] for name in stmt.select_names) else: # iterate through all table columns to maintain # ordering, even for those cols that aren't included cols = stmt.table.columns for c in cols: col_key = _getattr_col_key(c) if col_key in parameters and col_key not in check_columns: value = parameters.pop(col_key) if elements._is_literal(value): value = self._create_crud_bind_param( c, value, required=value is REQUIRED, name=_col_bind_name(c) if not stmt._has_multi_parameters else "%s_0" % _col_bind_name(c) ) else: if isinstance(value, elements.BindParameter) and \ value.type._isnull: value = value._clone() value.type = c.type if c.primary_key and implicit_returning: self.returning.append(c) value = self.process(value.self_group(), **kw) elif implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) value = self.process(value.self_group(), **kw) else: self.postfetch.append(c) value = self.process(value.self_group(), **kw) values.append((c, value)) elif self.isinsert: if c.primary_key and \ need_pks and \ ( implicit_returning or not postfetch_lastrowid or c is not stmt.table._autoincrement_column ): if implicit_returning: if c.default is not None: if c.default.is_sequence: if self.dialect.supports_sequences and \ (not c.default.optional or \ not self.dialect.sequences_optional): proc = self.process(c.default, **kw) values.append((c, proc)) self.returning.append(c) elif c.default.is_clause_element: values.append( (c, self.process(c.default.arg.self_group(), **kw)) ) self.returning.append(c) else: values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) else: self.returning.append(c) else: if ( c.default is not None and ( not c.default.is_sequence or self.dialect.supports_sequences ) ) or \ c is stmt.table._autoincrement_column and ( self.dialect.supports_sequences or self.dialect.preexecute_autoincrement_sequences ): values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) elif c.default is not None: if c.default.is_sequence: if self.dialect.supports_sequences and \ (not c.default.optional or \ not self.dialect.sequences_optional): proc = self.process(c.default, **kw) values.append((c, proc)) if implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) elif not c.primary_key: self.postfetch.append(c) elif c.default.is_clause_element: values.append( (c, self.process(c.default.arg.self_group(), **kw)) ) if implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) elif not c.primary_key: # dont add primary key column to postfetch self.postfetch.append(c) else: values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) elif c.server_default is not None: if implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) elif not c.primary_key: self.postfetch.append(c) elif implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) elif self.isupdate: if c.onupdate is not None and not c.onupdate.is_sequence: if c.onupdate.is_clause_element: values.append( (c, self.process(c.onupdate.arg.self_group(), **kw)) ) if implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) else: self.postfetch.append(c) else: values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) elif c.server_onupdate is not None: if implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) else: self.postfetch.append(c) elif implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) if parameters and stmt_parameters: check = set(parameters).intersection( _column_as_key(k) for k in stmt.parameters ).difference(check_columns) if check: raise exc.CompileError( "Unconsumed column names: %s" % (", ".join("%s" % c for c in check)) ) if stmt._has_multi_parameters: values_0 = values values = [values] values.extend( [ ( c, self._create_crud_bind_param( c, row[c.key], name="%s_%d" % (c.key, i + 1) ) if c.key in row else param ) for (c, param) in values_0 ] for i, row in enumerate(stmt.parameters[1:]) ) return values def visit_delete(self, delete_stmt, **kw): self.stack.append({'correlate_froms': set([delete_stmt.table]), "iswrapper": False, "asfrom_froms": set([delete_stmt.table])}) self.isdelete = True text = "DELETE " if delete_stmt._prefixes: text += self._generate_prefixes(delete_stmt, delete_stmt._prefixes, **kw) text += "FROM " table_text = delete_stmt.table._compiler_dispatch(self, asfrom=True, iscrud=True) if delete_stmt._hints: dialect_hints = dict([ (table, hint_text) for (table, dialect), hint_text in delete_stmt._hints.items() if dialect in ('*', self.dialect.name) ]) if delete_stmt.table in dialect_hints: table_text = self.format_from_hint_text( table_text, delete_stmt.table, dialect_hints[delete_stmt.table], True ) else: dialect_hints = None text += table_text if delete_stmt._returning: self.returning = delete_stmt._returning if self.returning_precedes_values: text += " " + self.returning_clause( delete_stmt, delete_stmt._returning) if delete_stmt._whereclause is not None: text += " WHERE " text += delete_stmt._whereclause._compiler_dispatch(self) if self.returning and not self.returning_precedes_values: text += " " + self.returning_clause( delete_stmt, delete_stmt._returning) self.stack.pop(-1) return text def visit_savepoint(self, savepoint_stmt): return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt) def visit_rollback_to_savepoint(self, savepoint_stmt): return "ROLLBACK TO SAVEPOINT %s" % \ self.preparer.format_savepoint(savepoint_stmt) def visit_release_savepoint(self, savepoint_stmt): return "RELEASE SAVEPOINT %s" % \ self.preparer.format_savepoint(savepoint_stmt) class DDLCompiler(Compiled): @util.memoized_property def sql_compiler(self): return self.dialect.statement_compiler(self.dialect, None) @util.memoized_property def type_compiler(self): return self.dialect.type_compiler @property def preparer(self): return self.dialect.identifier_preparer def construct_params(self, params=None): return None def visit_ddl(self, ddl, **kwargs): # table events can substitute table and schema name context = ddl.context if isinstance(ddl.target, schema.Table): context = context.copy() preparer = self.dialect.identifier_preparer path = preparer.format_table_seq(ddl.target) if len(path) == 1: table, sch = path[0], '' else: table, sch = path[-1], path[0] context.setdefault('table', table) context.setdefault('schema', sch) context.setdefault('fullname', preparer.format_table(ddl.target)) return self.sql_compiler.post_process_text(ddl.statement % context) def visit_create_schema(self, create): schema = self.preparer.format_schema(create.element) return "CREATE SCHEMA " + schema def visit_drop_schema(self, drop): schema = self.preparer.format_schema(drop.element) text = "DROP SCHEMA " + schema if drop.cascade: text += " CASCADE" return text def visit_create_table(self, create): table = create.element preparer = self.dialect.identifier_preparer text = "\n" + " ".join(['CREATE'] + \ table._prefixes + \ ['TABLE', preparer.format_table(table), "("]) separator = "\n" # if only one primary key, specify it along with the column first_pk = False for create_column in create.columns: column = create_column.element try: processed = self.process(create_column, first_pk=column.primary_key and not first_pk) if processed is not None: text += separator separator = ", \n" text += "\t" + processed if column.primary_key: first_pk = True except exc.CompileError as ce: util.raise_from_cause( exc.CompileError(util.u("(in table '%s', column '%s'): %s") % ( table.description, column.name, ce.args[0] ))) const = self.create_table_constraints(table) if const: text += ", \n\t" + const text += "\n)%s\n\n" % self.post_create_table(table) return text def visit_create_column(self, create, first_pk=False): column = create.element if column.system: return None text = self.get_column_specification( column, first_pk=first_pk ) const = " ".join(self.process(constraint) \ for constraint in column.constraints) if const: text += " " + const return text def create_table_constraints(self, table): # On some DB order is significant: visit PK first, then the # other constraints (engine.ReflectionTest.testbasic failed on FB2) constraints = [] if table.primary_key: constraints.append(table.primary_key) constraints.extend([c for c in table._sorted_constraints if c is not table.primary_key]) return ", \n\t".join(p for p in (self.process(constraint) for constraint in constraints if ( constraint._create_rule is None or constraint._create_rule(self)) and ( not self.dialect.supports_alter or not getattr(constraint, 'use_alter', False) )) if p is not None ) def visit_drop_table(self, drop): return "\nDROP TABLE " + self.preparer.format_table(drop.element) def visit_drop_view(self, drop): return "\nDROP VIEW " + self.preparer.format_table(drop.element) def _verify_index_table(self, index): if index.table is None: raise exc.CompileError("Index '%s' is not associated " "with any table." % index.name) def visit_create_index(self, create, include_schema=False, include_table_schema=True): index = create.element self._verify_index_table(index) preparer = self.preparer text = "CREATE " if index.unique: text += "UNIQUE " text += "INDEX %s ON %s (%s)" \ % ( self._prepared_index_name(index, include_schema=include_schema), preparer.format_table(index.table, use_schema=include_table_schema), ', '.join( self.sql_compiler.process(expr, include_table=False, literal_binds=True) for expr in index.expressions) ) return text def visit_drop_index(self, drop): index = drop.element return "\nDROP INDEX " + self._prepared_index_name(index, include_schema=True) def _prepared_index_name(self, index, include_schema=False): if include_schema and index.table is not None and index.table.schema: schema = index.table.schema schema_name = self.preparer.quote_schema(schema) else: schema_name = None ident = index.name if isinstance(ident, elements._truncated_label): max_ = self.dialect.max_index_name_length or \ self.dialect.max_identifier_length if len(ident) > max_: ident = ident[0:max_ - 8] + \ "_" + util.md5_hex(ident)[-4:] else: self.dialect.validate_identifier(ident) index_name = self.preparer.quote(ident) if schema_name: index_name = schema_name + "." + index_name return index_name def visit_add_constraint(self, create): return "ALTER TABLE %s ADD %s" % ( self.preparer.format_table(create.element.table), self.process(create.element) ) def visit_create_sequence(self, create): text = "CREATE SEQUENCE %s" % \ self.preparer.format_sequence(create.element) if create.element.increment is not None: text += " INCREMENT BY %d" % create.element.increment if create.element.start is not None: text += " START WITH %d" % create.element.start return text def visit_drop_sequence(self, drop): return "DROP SEQUENCE %s" % \ self.preparer.format_sequence(drop.element) def visit_drop_constraint(self, drop): return "ALTER TABLE %s DROP CONSTRAINT %s%s" % ( self.preparer.format_table(drop.element.table), self.preparer.format_constraint(drop.element), drop.cascade and " CASCADE" or "" ) def get_column_specification(self, column, **kwargs): colspec = self.preparer.format_column(column) + " " + \ self.dialect.type_compiler.process(column.type) default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if not column.nullable: colspec += " NOT NULL" return colspec def post_create_table(self, table): return '' def get_column_default_string(self, column): if isinstance(column.server_default, schema.DefaultClause): if isinstance(column.server_default.arg, util.string_types): return "'%s'" % column.server_default.arg else: return self.sql_compiler.process(column.server_default.arg) else: return None def visit_check_constraint(self, constraint): text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext, include_table=False, literal_binds=True) text += self.define_constraint_deferrability(constraint) return text def visit_column_check_constraint(self, constraint): text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) text += "CHECK (%s)" % constraint.sqltext text += self.define_constraint_deferrability(constraint) return text def visit_primary_key_constraint(self, constraint): if len(constraint) == 0: return '' text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) text += "PRIMARY KEY " text += "(%s)" % ', '.join(self.preparer.quote(c.name) for c in constraint) text += self.define_constraint_deferrability(constraint) return text def visit_foreign_key_constraint(self, constraint): preparer = self.dialect.identifier_preparer text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ preparer.format_constraint(constraint) remote_table = list(constraint._elements.values())[0].column.table text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % ( ', '.join(preparer.quote(f.parent.name) for f in constraint._elements.values()), self.define_constraint_remote_table( constraint, remote_table, preparer), ', '.join(preparer.quote(f.column.name) for f in constraint._elements.values()) ) text += self.define_constraint_match(constraint) text += self.define_constraint_cascades(constraint) text += self.define_constraint_deferrability(constraint) return text def define_constraint_remote_table(self, constraint, table, preparer): """Format the remote table clause of a CREATE CONSTRAINT clause.""" return preparer.format_table(table) def visit_unique_constraint(self, constraint): if len(constraint) == 0: return '' text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) text += "UNIQUE (%s)" % ( ', '.join(self.preparer.quote(c.name) for c in constraint)) text += self.define_constraint_deferrability(constraint) return text def define_constraint_cascades(self, constraint): text = "" if constraint.ondelete is not None: text += " ON DELETE %s" % constraint.ondelete if constraint.onupdate is not None: text += " ON UPDATE %s" % constraint.onupdate return text def define_constraint_deferrability(self, constraint): text = "" if constraint.deferrable is not None: if constraint.deferrable: text += " DEFERRABLE" else: text += " NOT DEFERRABLE" if constraint.initially is not None: text += " INITIALLY %s" % constraint.initially return text def define_constraint_match(self, constraint): text = "" if constraint.match is not None: text += " MATCH %s" % constraint.match return text class GenericTypeCompiler(TypeCompiler): def visit_FLOAT(self, type_): return "FLOAT" def visit_REAL(self, type_): return "REAL" def visit_NUMERIC(self, type_): if type_.precision is None: return "NUMERIC" elif type_.scale is None: return "NUMERIC(%(precision)s)" % \ {'precision': type_.precision} else: return "NUMERIC(%(precision)s, %(scale)s)" % \ {'precision': type_.precision, 'scale': type_.scale} def visit_DECIMAL(self, type_): if type_.precision is None: return "DECIMAL" elif type_.scale is None: return "DECIMAL(%(precision)s)" % \ {'precision': type_.precision} else: return "DECIMAL(%(precision)s, %(scale)s)" % \ {'precision': type_.precision, 'scale': type_.scale} def visit_INTEGER(self, type_): return "INTEGER" def visit_SMALLINT(self, type_): return "SMALLINT" def visit_BIGINT(self, type_): return "BIGINT" def visit_TIMESTAMP(self, type_): return 'TIMESTAMP' def visit_DATETIME(self, type_): return "DATETIME" def visit_DATE(self, type_): return "DATE" def visit_TIME(self, type_): return "TIME" def visit_CLOB(self, type_): return "CLOB" def visit_NCLOB(self, type_): return "NCLOB" def _render_string_type(self, type_, name): text = name if type_.length: text += "(%d)" % type_.length if type_.collation: text += ' COLLATE "%s"' % type_.collation return text def visit_CHAR(self, type_): return self._render_string_type(type_, "CHAR") def visit_NCHAR(self, type_): return self._render_string_type(type_, "NCHAR") def visit_VARCHAR(self, type_): return self._render_string_type(type_, "VARCHAR") def visit_NVARCHAR(self, type_): return self._render_string_type(type_, "NVARCHAR") def visit_TEXT(self, type_): return self._render_string_type(type_, "TEXT") def visit_BLOB(self, type_): return "BLOB" def visit_BINARY(self, type_): return "BINARY" + (type_.length and "(%d)" % type_.length or "") def visit_VARBINARY(self, type_): return "VARBINARY" + (type_.length and "(%d)" % type_.length or "") def visit_BOOLEAN(self, type_): return "BOOLEAN" def visit_large_binary(self, type_): return self.visit_BLOB(type_) def visit_boolean(self, type_): return self.visit_BOOLEAN(type_) def visit_time(self, type_): return self.visit_TIME(type_) def visit_datetime(self, type_): return self.visit_DATETIME(type_) def visit_date(self, type_): return self.visit_DATE(type_) def visit_big_integer(self, type_): return self.visit_BIGINT(type_) def visit_small_integer(self, type_): return self.visit_SMALLINT(type_) def visit_integer(self, type_): return self.visit_INTEGER(type_) def visit_real(self, type_): return self.visit_REAL(type_) def visit_float(self, type_): return self.visit_FLOAT(type_) def visit_numeric(self, type_): return self.visit_NUMERIC(type_) def visit_string(self, type_): return self.visit_VARCHAR(type_) def visit_unicode(self, type_): return self.visit_VARCHAR(type_) def visit_text(self, type_): return self.visit_TEXT(type_) def visit_unicode_text(self, type_): return self.visit_TEXT(type_) def visit_enum(self, type_): return self.visit_VARCHAR(type_) def visit_null(self, type_): raise exc.CompileError("Can't generate DDL for %r; " "did you forget to specify a " "type on this Column?" % type_) def visit_type_decorator(self, type_): return self.process(type_.type_engine(self.dialect)) def visit_user_defined(self, type_): return type_.get_col_spec() class IdentifierPreparer(object): """Handle quoting and case-folding of identifiers based on options.""" reserved_words = RESERVED_WORDS legal_characters = LEGAL_CHARACTERS illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS def __init__(self, dialect, initial_quote='"', final_quote=None, escape_quote='"', omit_schema=False): """Construct a new ``IdentifierPreparer`` object. initial_quote Character that begins a delimited identifier. final_quote Character that ends a delimited identifier. Defaults to `initial_quote`. omit_schema Prevent prepending schema name. Useful for databases that do not support schemae. """ self.dialect = dialect self.initial_quote = initial_quote self.final_quote = final_quote or self.initial_quote self.escape_quote = escape_quote self.escape_to_quote = self.escape_quote * 2 self.omit_schema = omit_schema self._strings = {} def _escape_identifier(self, value): """Escape an identifier. Subclasses should override this to provide database-dependent escaping behavior. """ return value.replace(self.escape_quote, self.escape_to_quote) def _unescape_identifier(self, value): """Canonicalize an escaped identifier. Subclasses should override this to provide database-dependent unescaping behavior that reverses _escape_identifier. """ return value.replace(self.escape_to_quote, self.escape_quote) def quote_identifier(self, value): """Quote an identifier. Subclasses should override this to provide database-dependent quoting behavior. """ return self.initial_quote + \ self._escape_identifier(value) + \ self.final_quote def _requires_quotes(self, value): """Return True if the given identifier requires quoting.""" lc_value = value.lower() return (lc_value in self.reserved_words or value[0] in self.illegal_initial_characters or not self.legal_characters.match(util.text_type(value)) or (lc_value != value)) def quote_schema(self, schema, force=None): """Conditionally quote a schema. Subclasses can override this to provide database-dependent quoting behavior for schema names. the 'force' flag should be considered deprecated. """ return self.quote(schema, force) def quote(self, ident, force=None): """Conditionally quote an identifier. the 'force' flag should be considered deprecated. """ force = getattr(ident, "quote", None) if force is None: if ident in self._strings: return self._strings[ident] else: if self._requires_quotes(ident): self._strings[ident] = self.quote_identifier(ident) else: self._strings[ident] = ident return self._strings[ident] elif force: return self.quote_identifier(ident) else: return ident def format_sequence(self, sequence, use_schema=True): name = self.quote(sequence.name) if not self.omit_schema and use_schema and sequence.schema is not None: name = self.quote_schema(sequence.schema) + "." + name return name def format_label(self, label, name=None): return self.quote(name or label.name) def format_alias(self, alias, name=None): return self.quote(name or alias.name) def format_savepoint(self, savepoint, name=None): return self.quote(name or savepoint.ident) def format_constraint(self, constraint): return self.quote(constraint.name) def format_table(self, table, use_schema=True, name=None): """Prepare a quoted table and schema name.""" if name is None: name = table.name result = self.quote(name) if not self.omit_schema and use_schema \ and getattr(table, "schema", None): result = self.quote_schema(table.schema) + "." + result return result def format_schema(self, name, quote=None): """Prepare a quoted schema name.""" return self.quote(name, quote) def format_column(self, column, use_table=False, name=None, table_name=None): """Prepare a quoted column name.""" if name is None: name = column.name if not getattr(column, 'is_literal', False): if use_table: return self.format_table( column.table, use_schema=False, name=table_name) + "." + self.quote(name) else: return self.quote(name) else: # literal textual elements get stuck into ColumnClause a lot, # which shouldn't get quoted if use_table: return self.format_table(column.table, use_schema=False, name=table_name) + '.' + name else: return name def format_table_seq(self, table, use_schema=True): """Format table name and schema as a tuple.""" # Dialects with more levels in their fully qualified references # ('database', 'owner', etc.) could override this and return # a longer sequence. if not self.omit_schema and use_schema and \ getattr(table, 'schema', None): return (self.quote_schema(table.schema), self.format_table(table, use_schema=False)) else: return (self.format_table(table, use_schema=False), ) @util.memoized_property def _r_identifiers(self): initial, final, escaped_final = \ [re.escape(s) for s in (self.initial_quote, self.final_quote, self._escape_identifier(self.final_quote))] r = re.compile( r'(?:' r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s' r'|([^\.]+))(?=\.|$))+' % {'initial': initial, 'final': final, 'escaped': escaped_final}) return r def unformat_identifiers(self, identifiers): """Unpack 'schema.table.column'-like strings into components.""" r = self._r_identifiers return [self._unescape_identifier(i) for i in [a or b for a, b in r.findall(identifiers)]]
kingmotley/SickRage
refs/heads/master
lib/sqlalchemy/sql/compiler.py
76
# sql/compiler.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Base SQL and DDL compiler implementations. Classes provided include: :class:`.compiler.SQLCompiler` - renders SQL strings :class:`.compiler.DDLCompiler` - renders DDL (data definition language) strings :class:`.compiler.GenericTypeCompiler` - renders type specification strings. To generate user-defined SQL strings, see :doc:`/ext/compiler`. """ import re from . import schema, sqltypes, operators, functions, \ util as sql_util, visitors, elements, selectable, base from .. import util, exc import decimal import itertools import operator RESERVED_WORDS = set([ 'all', 'analyse', 'analyze', 'and', 'any', 'array', 'as', 'asc', 'asymmetric', 'authorization', 'between', 'binary', 'both', 'case', 'cast', 'check', 'collate', 'column', 'constraint', 'create', 'cross', 'current_date', 'current_role', 'current_time', 'current_timestamp', 'current_user', 'default', 'deferrable', 'desc', 'distinct', 'do', 'else', 'end', 'except', 'false', 'for', 'foreign', 'freeze', 'from', 'full', 'grant', 'group', 'having', 'ilike', 'in', 'initially', 'inner', 'intersect', 'into', 'is', 'isnull', 'join', 'leading', 'left', 'like', 'limit', 'localtime', 'localtimestamp', 'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset', 'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps', 'placing', 'primary', 'references', 'right', 'select', 'session_user', 'set', 'similar', 'some', 'symmetric', 'table', 'then', 'to', 'trailing', 'true', 'union', 'unique', 'user', 'using', 'verbose', 'when', 'where']) LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I) ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in range(0, 10)]).union(['$']) BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE) BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]+)(?![:\w\$])', re.UNICODE) BIND_TEMPLATES = { 'pyformat': "%%(%(name)s)s", 'qmark': "?", 'format': "%%s", 'numeric': ":[_POSITION]", 'named': ":%(name)s" } REQUIRED = util.symbol('REQUIRED', """ Placeholder for the value within a :class:`.BindParameter` which is required to be present when the statement is passed to :meth:`.Connection.execute`. This symbol is typically used when a :func:`.expression.insert` or :func:`.expression.update` statement is compiled without parameter values present. """) OPERATORS = { # binary operators.and_: ' AND ', operators.or_: ' OR ', operators.add: ' + ', operators.mul: ' * ', operators.sub: ' - ', operators.div: ' / ', operators.mod: ' % ', operators.truediv: ' / ', operators.neg: '-', operators.lt: ' < ', operators.le: ' <= ', operators.ne: ' != ', operators.gt: ' > ', operators.ge: ' >= ', operators.eq: ' = ', operators.concat_op: ' || ', operators.between_op: ' BETWEEN ', operators.match_op: ' MATCH ', operators.in_op: ' IN ', operators.notin_op: ' NOT IN ', operators.comma_op: ', ', operators.from_: ' FROM ', operators.as_: ' AS ', operators.is_: ' IS ', operators.isnot: ' IS NOT ', operators.collate: ' COLLATE ', # unary operators.exists: 'EXISTS ', operators.distinct_op: 'DISTINCT ', operators.inv: 'NOT ', # modifiers operators.desc_op: ' DESC', operators.asc_op: ' ASC', operators.nullsfirst_op: ' NULLS FIRST', operators.nullslast_op: ' NULLS LAST', } FUNCTIONS = { functions.coalesce: 'coalesce%(expr)s', functions.current_date: 'CURRENT_DATE', functions.current_time: 'CURRENT_TIME', functions.current_timestamp: 'CURRENT_TIMESTAMP', functions.current_user: 'CURRENT_USER', functions.localtime: 'LOCALTIME', functions.localtimestamp: 'LOCALTIMESTAMP', functions.random: 'random%(expr)s', functions.sysdate: 'sysdate', functions.session_user: 'SESSION_USER', functions.user: 'USER' } EXTRACT_MAP = { 'month': 'month', 'day': 'day', 'year': 'year', 'second': 'second', 'hour': 'hour', 'doy': 'doy', 'minute': 'minute', 'quarter': 'quarter', 'dow': 'dow', 'week': 'week', 'epoch': 'epoch', 'milliseconds': 'milliseconds', 'microseconds': 'microseconds', 'timezone_hour': 'timezone_hour', 'timezone_minute': 'timezone_minute' } COMPOUND_KEYWORDS = { selectable.CompoundSelect.UNION: 'UNION', selectable.CompoundSelect.UNION_ALL: 'UNION ALL', selectable.CompoundSelect.EXCEPT: 'EXCEPT', selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL', selectable.CompoundSelect.INTERSECT: 'INTERSECT', selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL' } class Compiled(object): """Represent a compiled SQL or DDL expression. The ``__str__`` method of the ``Compiled`` object should produce the actual text of the statement. ``Compiled`` objects are specific to their underlying database dialect, and also may or may not be specific to the columns referenced within a particular set of bind parameters. In no case should the ``Compiled`` object be dependent on the actual values of those bind parameters, even though it may reference those values as defaults. """ def __init__(self, dialect, statement, bind=None, compile_kwargs=util.immutabledict()): """Construct a new ``Compiled`` object. :param dialect: ``Dialect`` to compile against. :param statement: ``ClauseElement`` to be compiled. :param bind: Optional Engine or Connection to compile this statement against. :param compile_kwargs: additional kwargs that will be passed to the initial call to :meth:`.Compiled.process`. .. versionadded:: 0.8 """ self.dialect = dialect self.bind = bind if statement is not None: self.statement = statement self.can_execute = statement.supports_execution self.string = self.process(self.statement, **compile_kwargs) @util.deprecated("0.7", ":class:`.Compiled` objects now compile " "within the constructor.") def compile(self): """Produce the internal string representation of this element. """ pass def _execute_on_connection(self, connection, multiparams, params): return connection._execute_compiled(self, multiparams, params) @property def sql_compiler(self): """Return a Compiled that is capable of processing SQL expressions. If this compiler is one, it would likely just return 'self'. """ raise NotImplementedError() def process(self, obj, **kwargs): return obj._compiler_dispatch(self, **kwargs) def __str__(self): """Return the string text of the generated SQL or DDL.""" return self.string or '' def construct_params(self, params=None): """Return the bind params for this compiled object. :param params: a dict of string/object pairs whose values will override bind values compiled in to the statement. """ raise NotImplementedError() @property def params(self): """Return the bind params for this compiled object.""" return self.construct_params() def execute(self, *multiparams, **params): """Execute this compiled object.""" e = self.bind if e is None: raise exc.UnboundExecutionError( "This Compiled object is not bound to any Engine " "or Connection.") return e._execute_compiled(self, multiparams, params) def scalar(self, *multiparams, **params): """Execute this compiled object and return the result's scalar value.""" return self.execute(*multiparams, **params).scalar() class TypeCompiler(object): """Produces DDL specification for TypeEngine objects.""" def __init__(self, dialect): self.dialect = dialect def process(self, type_): return type_._compiler_dispatch(self) class _CompileLabel(visitors.Visitable): """lightweight label object which acts as an expression.Label.""" __visit_name__ = 'label' __slots__ = 'element', 'name' def __init__(self, col, name, alt_names=()): self.element = col self.name = name self._alt_names = (col,) + alt_names @property def proxy_set(self): return self.element.proxy_set @property def type(self): return self.element.type class SQLCompiler(Compiled): """Default implementation of Compiled. Compiles ClauseElements into SQL strings. Uses a similar visit paradigm as visitors.ClauseVisitor but implements its own traversal. """ extract_map = EXTRACT_MAP compound_keywords = COMPOUND_KEYWORDS isdelete = isinsert = isupdate = False """class-level defaults which can be set at the instance level to define if this Compiled instance represents INSERT/UPDATE/DELETE """ returning = None """holds the "returning" collection of columns if the statement is CRUD and defines returning columns either implicitly or explicitly """ returning_precedes_values = False """set to True classwide to generate RETURNING clauses before the VALUES or WHERE clause (i.e. MSSQL) """ render_table_with_column_in_update_from = False """set to True classwide to indicate the SET clause in a multi-table UPDATE statement should qualify columns with the table name (i.e. MySQL only) """ ansi_bind_rules = False """SQL 92 doesn't allow bind parameters to be used in the columns clause of a SELECT, nor does it allow ambiguous expressions like "? = ?". A compiler subclass can set this flag to False if the target driver/DB enforces this """ def __init__(self, dialect, statement, column_keys=None, inline=False, **kwargs): """Construct a new ``DefaultCompiler`` object. dialect Dialect to be used statement ClauseElement to be compiled column_keys a list of column names to be compiled into an INSERT or UPDATE statement. """ self.column_keys = column_keys # compile INSERT/UPDATE defaults/sequences inlined (no pre- # execute) self.inline = inline or getattr(statement, 'inline', False) # a dictionary of bind parameter keys to BindParameter # instances. self.binds = {} # a dictionary of BindParameter instances to "compiled" names # that are actually present in the generated SQL self.bind_names = util.column_dict() # stack which keeps track of nested SELECT statements self.stack = [] # relates label names in the final SQL to a tuple of local # column/label name, ColumnElement object (if any) and # TypeEngine. ResultProxy uses this for type processing and # column targeting self.result_map = {} # true if the paramstyle is positional self.positional = dialect.positional if self.positional: self.positiontup = [] self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle] self.ctes = None # an IdentifierPreparer that formats the quoting of identifiers self.preparer = dialect.identifier_preparer self.label_length = dialect.label_length \ or dialect.max_identifier_length # a map which tracks "anonymous" identifiers that are created on # the fly here self.anon_map = util.PopulateDict(self._process_anon) # a map which tracks "truncated" names based on # dialect.label_length or dialect.max_identifier_length self.truncated_names = {} Compiled.__init__(self, dialect, statement, **kwargs) if self.positional and dialect.paramstyle == 'numeric': self._apply_numbered_params() @util.memoized_instancemethod def _init_cte_state(self): """Initialize collections related to CTEs only if a CTE is located, to save on the overhead of these collections otherwise. """ # collect CTEs to tack on top of a SELECT self.ctes = util.OrderedDict() self.ctes_by_name = {} self.ctes_recursive = False if self.positional: self.cte_positional = [] def _apply_numbered_params(self): poscount = itertools.count(1) self.string = re.sub( r'\[_POSITION\]', lambda m: str(util.next(poscount)), self.string) @util.memoized_property def _bind_processors(self): return dict( (key, value) for key, value in ((self.bind_names[bindparam], bindparam.type._cached_bind_processor(self.dialect)) for bindparam in self.bind_names) if value is not None ) def is_subquery(self): return len(self.stack) > 1 @property def sql_compiler(self): return self def construct_params(self, params=None, _group_number=None, _check=True): """return a dictionary of bind parameter keys and values""" if params: pd = {} for bindparam, name in self.bind_names.items(): if bindparam.key in params: pd[name] = params[bindparam.key] elif name in params: pd[name] = params[name] elif _check and bindparam.required: if _group_number: raise exc.InvalidRequestError( "A value is required for bind parameter %r, " "in parameter group %d" % (bindparam.key, _group_number)) else: raise exc.InvalidRequestError( "A value is required for bind parameter %r" % bindparam.key) else: pd[name] = bindparam.effective_value return pd else: pd = {} for bindparam in self.bind_names: if _check and bindparam.required: if _group_number: raise exc.InvalidRequestError( "A value is required for bind parameter %r, " "in parameter group %d" % (bindparam.key, _group_number)) else: raise exc.InvalidRequestError( "A value is required for bind parameter %r" % bindparam.key) pd[self.bind_names[bindparam]] = bindparam.effective_value return pd @property def params(self): """Return the bind param dictionary embedded into this compiled object, for those values that are present.""" return self.construct_params(_check=False) def default_from(self): """Called when a SELECT statement has no froms, and no FROM clause is to be appended. Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output. """ return "" def visit_grouping(self, grouping, asfrom=False, **kwargs): return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")" def visit_label(self, label, add_to_result_map=None, within_label_clause=False, within_columns_clause=False, render_label_as_label=None, **kw): # only render labels within the columns clause # or ORDER BY clause of a select. dialect-specific compilers # can modify this behavior. render_label_with_as = within_columns_clause and not within_label_clause render_label_only = render_label_as_label is label if render_label_only or render_label_with_as: if isinstance(label.name, elements._truncated_label): labelname = self._truncated_identifier("colident", label.name) else: labelname = label.name if render_label_with_as: if add_to_result_map is not None: add_to_result_map( labelname, label.name, (label, labelname, ) + label._alt_names, label.type ) return label.element._compiler_dispatch(self, within_columns_clause=True, within_label_clause=True, **kw) + \ OPERATORS[operators.as_] + \ self.preparer.format_label(label, labelname) elif render_label_only: return labelname else: return label.element._compiler_dispatch(self, within_columns_clause=False, **kw) def visit_column(self, column, add_to_result_map=None, include_table=True, **kwargs): name = orig_name = column.name if name is None: raise exc.CompileError("Cannot compile Column object until " "its 'name' is assigned.") is_literal = column.is_literal if not is_literal and isinstance(name, elements._truncated_label): name = self._truncated_identifier("colident", name) if add_to_result_map is not None: add_to_result_map( name, orig_name, (column, name, column.key), column.type ) if is_literal: name = self.escape_literal_column(name) else: name = self.preparer.quote(name) table = column.table if table is None or not include_table or not table.named_with_column: return name else: if table.schema: schema_prefix = self.preparer.quote_schema(table.schema) + '.' else: schema_prefix = '' tablename = table.name if isinstance(tablename, elements._truncated_label): tablename = self._truncated_identifier("alias", tablename) return schema_prefix + \ self.preparer.quote(tablename) + \ "." + name def escape_literal_column(self, text): """provide escaping for the literal_column() construct.""" # TODO: some dialects might need different behavior here return text.replace('%', '%%') def visit_fromclause(self, fromclause, **kwargs): return fromclause.name def visit_index(self, index, **kwargs): return index.name def visit_typeclause(self, typeclause, **kwargs): return self.dialect.type_compiler.process(typeclause.type) def post_process_text(self, text): return text def visit_textclause(self, textclause, **kw): def do_bindparam(m): name = m.group(1) if name in textclause._bindparams: return self.process(textclause._bindparams[name], **kw) else: return self.bindparam_string(name, **kw) # un-escape any \:params return BIND_PARAMS_ESC.sub(lambda m: m.group(1), BIND_PARAMS.sub(do_bindparam, self.post_process_text(textclause.text)) ) def visit_text_as_from(self, taf, iswrapper=False, compound_index=0, force_result_map=False, asfrom=False, parens=True, **kw): toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] populate_result_map = force_result_map or ( compound_index == 0 and ( toplevel or \ entry['iswrapper'] ) ) if populate_result_map: for c in taf.column_args: self.process(c, within_columns_clause=True, add_to_result_map=self._add_to_result_map) text = self.process(taf.element, **kw) if asfrom and parens: text = "(%s)" % text return text def visit_null(self, expr, **kw): return 'NULL' def visit_true(self, expr, **kw): if self.dialect.supports_native_boolean: return 'true' else: return "1" def visit_false(self, expr, **kw): if self.dialect.supports_native_boolean: return 'false' else: return "0" def visit_clauselist(self, clauselist, order_by_select=None, **kw): if order_by_select is not None: return self._order_by_clauselist( clauselist, order_by_select, **kw) sep = clauselist.operator if sep is None: sep = " " else: sep = OPERATORS[clauselist.operator] return sep.join( s for s in ( c._compiler_dispatch(self, **kw) for c in clauselist.clauses) if s) def _order_by_clauselist(self, clauselist, order_by_select, **kw): # look through raw columns collection for labels. # note that its OK we aren't expanding tables and other selectables # here; we can only add a label in the ORDER BY for an individual # label expression in the columns clause. raw_col = set(l._order_by_label_element.name for l in order_by_select._raw_columns if l._order_by_label_element is not None) return ", ".join( s for s in ( c._compiler_dispatch(self, render_label_as_label= c._order_by_label_element if c._order_by_label_element is not None and c._order_by_label_element.name in raw_col else None, **kw) for c in clauselist.clauses) if s) def visit_case(self, clause, **kwargs): x = "CASE " if clause.value is not None: x += clause.value._compiler_dispatch(self, **kwargs) + " " for cond, result in clause.whens: x += "WHEN " + cond._compiler_dispatch( self, **kwargs ) + " THEN " + result._compiler_dispatch( self, **kwargs) + " " if clause.else_ is not None: x += "ELSE " + clause.else_._compiler_dispatch( self, **kwargs ) + " " x += "END" return x def visit_cast(self, cast, **kwargs): return "CAST(%s AS %s)" % \ (cast.clause._compiler_dispatch(self, **kwargs), cast.typeclause._compiler_dispatch(self, **kwargs)) def visit_over(self, over, **kwargs): return "%s OVER (%s)" % ( over.func._compiler_dispatch(self, **kwargs), ' '.join( '%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs)) for word, clause in ( ('PARTITION', over.partition_by), ('ORDER', over.order_by) ) if clause is not None and len(clause) ) ) def visit_extract(self, extract, **kwargs): field = self.extract_map.get(extract.field, extract.field) return "EXTRACT(%s FROM %s)" % (field, extract.expr._compiler_dispatch(self, **kwargs)) def visit_function(self, func, add_to_result_map=None, **kwargs): if add_to_result_map is not None: add_to_result_map( func.name, func.name, (), func.type ) disp = getattr(self, "visit_%s_func" % func.name.lower(), None) if disp: return disp(func, **kwargs) else: name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s") return ".".join(list(func.packagenames) + [name]) % \ {'expr': self.function_argspec(func, **kwargs)} def visit_next_value_func(self, next_value, **kw): return self.visit_sequence(next_value.sequence) def visit_sequence(self, sequence): raise NotImplementedError( "Dialect '%s' does not support sequence increments." % self.dialect.name ) def function_argspec(self, func, **kwargs): return func.clause_expr._compiler_dispatch(self, **kwargs) def visit_compound_select(self, cs, asfrom=False, parens=True, compound_index=0, **kwargs): toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] self.stack.append( { 'correlate_froms': entry['correlate_froms'], 'iswrapper': toplevel, 'asfrom_froms': entry['asfrom_froms'] }) keyword = self.compound_keywords.get(cs.keyword) text = (" " + keyword + " ").join( (c._compiler_dispatch(self, asfrom=asfrom, parens=False, compound_index=i, **kwargs) for i, c in enumerate(cs.selects)) ) group_by = cs._group_by_clause._compiler_dispatch( self, asfrom=asfrom, **kwargs) if group_by: text += " GROUP BY " + group_by text += self.order_by_clause(cs, **kwargs) text += (cs._limit is not None or cs._offset is not None) and \ self.limit_clause(cs) or "" if self.ctes and \ compound_index == 0 and toplevel: text = self._render_cte_clause() + text self.stack.pop(-1) if asfrom and parens: return "(" + text + ")" else: return text def visit_unary(self, unary, **kw): if unary.operator: if unary.modifier: raise exc.CompileError( "Unary expression does not support operator " "and modifier simultaneously") disp = getattr(self, "visit_%s_unary_operator" % unary.operator.__name__, None) if disp: return disp(unary, unary.operator, **kw) else: return self._generate_generic_unary_operator(unary, OPERATORS[unary.operator], **kw) elif unary.modifier: disp = getattr(self, "visit_%s_unary_modifier" % unary.modifier.__name__, None) if disp: return disp(unary, unary.modifier, **kw) else: return self._generate_generic_unary_modifier(unary, OPERATORS[unary.modifier], **kw) else: raise exc.CompileError( "Unary expression has no operator or modifier") def visit_istrue_unary_operator(self, element, operator, **kw): if self.dialect.supports_native_boolean: return self.process(element.element, **kw) else: return "%s = 1" % self.process(element.element, **kw) def visit_isfalse_unary_operator(self, element, operator, **kw): if self.dialect.supports_native_boolean: return "NOT %s" % self.process(element.element, **kw) else: return "%s = 0" % self.process(element.element, **kw) def visit_binary(self, binary, **kw): # don't allow "? = ?" to render if self.ansi_bind_rules and \ isinstance(binary.left, elements.BindParameter) and \ isinstance(binary.right, elements.BindParameter): kw['literal_binds'] = True operator = binary.operator disp = getattr(self, "visit_%s_binary" % operator.__name__, None) if disp: return disp(binary, operator, **kw) else: try: opstring = OPERATORS[operator] except KeyError: raise exc.UnsupportedCompilationError(self, operator) else: return self._generate_generic_binary(binary, opstring, **kw) def visit_custom_op_binary(self, element, operator, **kw): return self._generate_generic_binary(element, " " + operator.opstring + " ", **kw) def visit_custom_op_unary_operator(self, element, operator, **kw): return self._generate_generic_unary_operator(element, operator.opstring + " ", **kw) def visit_custom_op_unary_modifier(self, element, operator, **kw): return self._generate_generic_unary_modifier(element, " " + operator.opstring, **kw) def _generate_generic_binary(self, binary, opstring, **kw): return binary.left._compiler_dispatch(self, **kw) + \ opstring + \ binary.right._compiler_dispatch(self, **kw) def _generate_generic_unary_operator(self, unary, opstring, **kw): return opstring + unary.element._compiler_dispatch(self, **kw) def _generate_generic_unary_modifier(self, unary, opstring, **kw): return unary.element._compiler_dispatch(self, **kw) + opstring @util.memoized_property def _like_percent_literal(self): return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE) def visit_contains_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right).__add__(percent) return self.visit_like_op_binary(binary, operator, **kw) def visit_notcontains_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right).__add__(percent) return self.visit_notlike_op_binary(binary, operator, **kw) def visit_startswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__radd__( binary.right ) return self.visit_like_op_binary(binary, operator, **kw) def visit_notstartswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__radd__( binary.right ) return self.visit_notlike_op_binary(binary, operator, **kw) def visit_endswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right) return self.visit_like_op_binary(binary, operator, **kw) def visit_notendswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal binary.right = percent.__add__(binary.right) return self.visit_notlike_op_binary(binary, operator, **kw) def visit_like_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) # TODO: use ternary here, not "and"/ "or" return '%s LIKE %s' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else '' ) def visit_notlike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return '%s NOT LIKE %s' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else '' ) def visit_ilike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return 'lower(%s) LIKE lower(%s)' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else '' ) def visit_notilike_op_binary(self, binary, operator, **kw): escape = binary.modifiers.get("escape", None) return 'lower(%s) NOT LIKE lower(%s)' % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw)) \ + ( ' ESCAPE ' + self.render_literal_value(escape, sqltypes.STRINGTYPE) if escape else '' ) def visit_bindparam(self, bindparam, within_columns_clause=False, literal_binds=False, skip_bind_expression=False, **kwargs): if not skip_bind_expression and bindparam.type._has_bind_expression: bind_expression = bindparam.type.bind_expression(bindparam) return self.process(bind_expression, skip_bind_expression=True) if literal_binds or \ (within_columns_clause and \ self.ansi_bind_rules): if bindparam.value is None and bindparam.callable is None: raise exc.CompileError("Bind parameter '%s' without a " "renderable value not allowed here." % bindparam.key) return self.render_literal_bindparam(bindparam, within_columns_clause=True, **kwargs) name = self._truncate_bindparam(bindparam) if name in self.binds: existing = self.binds[name] if existing is not bindparam: if (existing.unique or bindparam.unique) and \ not existing.proxy_set.intersection( bindparam.proxy_set): raise exc.CompileError( "Bind parameter '%s' conflicts with " "unique bind parameter of the same name" % bindparam.key ) elif existing._is_crud or bindparam._is_crud: raise exc.CompileError( "bindparam() name '%s' is reserved " "for automatic usage in the VALUES or SET " "clause of this " "insert/update statement. Please use a " "name other than column name when using bindparam() " "with insert() or update() (for example, 'b_%s')." % (bindparam.key, bindparam.key) ) self.binds[bindparam.key] = self.binds[name] = bindparam return self.bindparam_string(name, **kwargs) def render_literal_bindparam(self, bindparam, **kw): value = bindparam.effective_value return self.render_literal_value(value, bindparam.type) def render_literal_value(self, value, type_): """Render the value of a bind parameter as a quoted literal. This is used for statement sections that do not accept bind parameters on the target driver/database. This should be implemented by subclasses using the quoting services of the DBAPI. """ processor = type_._cached_literal_processor(self.dialect) if processor: return processor(value) else: raise NotImplementedError( "Don't know how to literal-quote value %r" % value) def _truncate_bindparam(self, bindparam): if bindparam in self.bind_names: return self.bind_names[bindparam] bind_name = bindparam.key if isinstance(bind_name, elements._truncated_label): bind_name = self._truncated_identifier("bindparam", bind_name) # add to bind_names for translation self.bind_names[bindparam] = bind_name return bind_name def _truncated_identifier(self, ident_class, name): if (ident_class, name) in self.truncated_names: return self.truncated_names[(ident_class, name)] anonname = name.apply_map(self.anon_map) if len(anonname) > self.label_length: counter = self.truncated_names.get(ident_class, 1) truncname = anonname[0:max(self.label_length - 6, 0)] + \ "_" + hex(counter)[2:] self.truncated_names[ident_class] = counter + 1 else: truncname = anonname self.truncated_names[(ident_class, name)] = truncname return truncname def _anonymize(self, name): return name % self.anon_map def _process_anon(self, key): (ident, derived) = key.split(' ', 1) anonymous_counter = self.anon_map.get(derived, 1) self.anon_map[derived] = anonymous_counter + 1 return derived + "_" + str(anonymous_counter) def bindparam_string(self, name, positional_names=None, **kw): if self.positional: if positional_names is not None: positional_names.append(name) else: self.positiontup.append(name) return self.bindtemplate % {'name': name} def visit_cte(self, cte, asfrom=False, ashint=False, fromhints=None, **kwargs): self._init_cte_state() if self.positional: kwargs['positional_names'] = self.cte_positional if isinstance(cte.name, elements._truncated_label): cte_name = self._truncated_identifier("alias", cte.name) else: cte_name = cte.name if cte_name in self.ctes_by_name: existing_cte = self.ctes_by_name[cte_name] # we've generated a same-named CTE that we are enclosed in, # or this is the same CTE. just return the name. if cte in existing_cte._restates or cte is existing_cte: return self.preparer.format_alias(cte, cte_name) elif existing_cte in cte._restates: # we've generated a same-named CTE that is # enclosed in us - we take precedence, so # discard the text for the "inner". del self.ctes[existing_cte] else: raise exc.CompileError( "Multiple, unrelated CTEs found with " "the same name: %r" % cte_name) self.ctes_by_name[cte_name] = cte if cte._cte_alias is not None: orig_cte = cte._cte_alias if orig_cte not in self.ctes: self.visit_cte(orig_cte) cte_alias_name = cte._cte_alias.name if isinstance(cte_alias_name, elements._truncated_label): cte_alias_name = self._truncated_identifier("alias", cte_alias_name) else: orig_cte = cte cte_alias_name = None if not cte_alias_name and cte not in self.ctes: if cte.recursive: self.ctes_recursive = True text = self.preparer.format_alias(cte, cte_name) if cte.recursive: if isinstance(cte.original, selectable.Select): col_source = cte.original elif isinstance(cte.original, selectable.CompoundSelect): col_source = cte.original.selects[0] else: assert False recur_cols = [c for c in util.unique_list(col_source.inner_columns) if c is not None] text += "(%s)" % (", ".join( self.preparer.format_column(ident) for ident in recur_cols)) text += " AS \n" + \ cte.original._compiler_dispatch( self, asfrom=True, **kwargs ) self.ctes[cte] = text if asfrom: if cte_alias_name: text = self.preparer.format_alias(cte, cte_alias_name) text += " AS " + cte_name else: return self.preparer.format_alias(cte, cte_name) return text def visit_alias(self, alias, asfrom=False, ashint=False, iscrud=False, fromhints=None, **kwargs): if asfrom or ashint: if isinstance(alias.name, elements._truncated_label): alias_name = self._truncated_identifier("alias", alias.name) else: alias_name = alias.name if ashint: return self.preparer.format_alias(alias, alias_name) elif asfrom: ret = alias.original._compiler_dispatch(self, asfrom=True, **kwargs) + \ " AS " + \ self.preparer.format_alias(alias, alias_name) if fromhints and alias in fromhints: ret = self.format_from_hint_text(ret, alias, fromhints[alias], iscrud) return ret else: return alias.original._compiler_dispatch(self, **kwargs) def _add_to_result_map(self, keyname, name, objects, type_): if not self.dialect.case_sensitive: keyname = keyname.lower() if keyname in self.result_map: # conflicting keyname, just double up the list # of objects. this will cause an "ambiguous name" # error if an attempt is made by the result set to # access. e_name, e_obj, e_type = self.result_map[keyname] self.result_map[keyname] = e_name, e_obj + objects, e_type else: self.result_map[keyname] = name, objects, type_ def _label_select_column(self, select, column, populate_result_map, asfrom, column_clause_args, name=None, within_columns_clause=True): """produce labeled columns present in a select().""" if column.type._has_column_expression and \ populate_result_map: col_expr = column.type.column_expression(column) add_to_result_map = lambda keyname, name, objects, type_: \ self._add_to_result_map( keyname, name, objects + (column,), type_) else: col_expr = column if populate_result_map: add_to_result_map = self._add_to_result_map else: add_to_result_map = None if not within_columns_clause: result_expr = col_expr elif isinstance(column, elements.Label): if col_expr is not column: result_expr = _CompileLabel( col_expr, column.name, alt_names=(column.element,) ) else: result_expr = col_expr elif select is not None and name: result_expr = _CompileLabel( col_expr, name, alt_names=(column._key_label,) ) elif \ asfrom and \ isinstance(column, elements.ColumnClause) and \ not column.is_literal and \ column.table is not None and \ not isinstance(column.table, selectable.Select): result_expr = _CompileLabel(col_expr, elements._as_truncated(column.name), alt_names=(column.key,)) elif not isinstance(column, (elements.UnaryExpression, elements.TextClause)) \ and (not hasattr(column, 'name') or \ isinstance(column, functions.Function)): result_expr = _CompileLabel(col_expr, column.anon_label) elif col_expr is not column: # TODO: are we sure "column" has a .name and .key here ? # assert isinstance(column, elements.ColumnClause) result_expr = _CompileLabel(col_expr, elements._as_truncated(column.name), alt_names=(column.key,)) else: result_expr = col_expr column_clause_args.update( within_columns_clause=within_columns_clause, add_to_result_map=add_to_result_map ) return result_expr._compiler_dispatch( self, **column_clause_args ) def format_from_hint_text(self, sqltext, table, hint, iscrud): hinttext = self.get_from_hint_text(table, hint) if hinttext: sqltext += " " + hinttext return sqltext def get_select_hint_text(self, byfroms): return None def get_from_hint_text(self, table, text): return None def get_crud_hint_text(self, table, text): return None def _transform_select_for_nested_joins(self, select): """Rewrite any "a JOIN (b JOIN c)" expression as "a JOIN (select * from b JOIN c) AS anon", to support databases that can't parse a parenthesized join correctly (i.e. sqlite the main one). """ cloned = {} column_translate = [{}] def visit(element, **kw): if element in column_translate[-1]: return column_translate[-1][element] elif element in cloned: return cloned[element] newelem = cloned[element] = element._clone() if newelem.is_selectable and newelem._is_join and \ isinstance(newelem.right, selectable.FromGrouping): newelem._reset_exported() newelem.left = visit(newelem.left, **kw) right = visit(newelem.right, **kw) selectable_ = selectable.Select( [right.element], use_labels=True).alias() for c in selectable_.c: c._key_label = c.key c._label = c.name translate_dict = dict( zip(newelem.right.element.c, selectable_.c) ) # translating from both the old and the new # because different select() structures will lead us # to traverse differently translate_dict[right.element.left] = selectable_ translate_dict[right.element.right] = selectable_ translate_dict[newelem.right.element.left] = selectable_ translate_dict[newelem.right.element.right] = selectable_ # propagate translations that we've gained # from nested visit(newelem.right) outwards # to the enclosing select here. this happens # only when we have more than one level of right # join nesting, i.e. "a JOIN (b JOIN (c JOIN d))" for k, v in list(column_translate[-1].items()): if v in translate_dict: # remarkably, no current ORM tests (May 2013) # hit this condition, only test_join_rewriting # does. column_translate[-1][k] = translate_dict[v] column_translate[-1].update(translate_dict) newelem.right = selectable_ newelem.onclause = visit(newelem.onclause, **kw) elif newelem.is_selectable and newelem._is_from_container: # if we hit an Alias or CompoundSelect, put a marker in the # stack. kw['transform_clue'] = 'select_container' newelem._copy_internals(clone=visit, **kw) elif newelem.is_selectable and newelem._is_select: barrier_select = kw.get('transform_clue', None) == 'select_container' # if we're still descended from an Alias/CompoundSelect, we're # in a FROM clause, so start with a new translate collection if barrier_select: column_translate.append({}) kw['transform_clue'] = 'inside_select' newelem._copy_internals(clone=visit, **kw) if barrier_select: del column_translate[-1] else: newelem._copy_internals(clone=visit, **kw) return newelem return visit(select) def _transform_result_map_for_nested_joins(self, select, transformed_select): inner_col = dict((c._key_label, c) for c in transformed_select.inner_columns) d = dict( (inner_col[c._key_label], c) for c in select.inner_columns ) for key, (name, objs, typ) in list(self.result_map.items()): objs = tuple([d.get(col, col) for col in objs]) self.result_map[key] = (name, objs, typ) _default_stack_entry = util.immutabledict([ ('iswrapper', False), ('correlate_froms', frozenset()), ('asfrom_froms', frozenset()) ]) def _display_froms_for_select(self, select, asfrom): # utility method to help external dialects # get the correct from list for a select. # specifically the oracle dialect needs this feature # right now. toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] correlate_froms = entry['correlate_froms'] asfrom_froms = entry['asfrom_froms'] if asfrom: froms = select._get_display_froms( explicit_correlate_froms=\ correlate_froms.difference(asfrom_froms), implicit_correlate_froms=()) else: froms = select._get_display_froms( explicit_correlate_froms=correlate_froms, implicit_correlate_froms=asfrom_froms) return froms def visit_select(self, select, asfrom=False, parens=True, iswrapper=False, fromhints=None, compound_index=0, force_result_map=False, positional_names=None, nested_join_translation=False, **kwargs): needs_nested_translation = \ select.use_labels and \ not nested_join_translation and \ not self.stack and \ not self.dialect.supports_right_nested_joins if needs_nested_translation: transformed_select = self._transform_select_for_nested_joins(select) text = self.visit_select( transformed_select, asfrom=asfrom, parens=parens, iswrapper=iswrapper, fromhints=fromhints, compound_index=compound_index, force_result_map=force_result_map, positional_names=positional_names, nested_join_translation=True, **kwargs ) toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] populate_result_map = force_result_map or ( compound_index == 0 and ( toplevel or \ entry['iswrapper'] ) ) if needs_nested_translation: if populate_result_map: self._transform_result_map_for_nested_joins( select, transformed_select) return text correlate_froms = entry['correlate_froms'] asfrom_froms = entry['asfrom_froms'] if asfrom: froms = select._get_display_froms( explicit_correlate_froms= correlate_froms.difference(asfrom_froms), implicit_correlate_froms=()) else: froms = select._get_display_froms( explicit_correlate_froms=correlate_froms, implicit_correlate_froms=asfrom_froms) new_correlate_froms = set(selectable._from_objects(*froms)) all_correlate_froms = new_correlate_froms.union(correlate_froms) new_entry = { 'asfrom_froms': new_correlate_froms, 'iswrapper': iswrapper, 'correlate_froms': all_correlate_froms } self.stack.append(new_entry) column_clause_args = kwargs.copy() column_clause_args.update({ 'positional_names': positional_names, 'within_label_clause': False, 'within_columns_clause': False }) # the actual list of columns to print in the SELECT column list. inner_columns = [ c for c in [ self._label_select_column(select, column, populate_result_map, asfrom, column_clause_args, name=name) for name, column in select._columns_plus_names ] if c is not None ] text = "SELECT " # we're off to a good start ! if select._hints: byfrom = dict([ (from_, hinttext % { 'name':from_._compiler_dispatch( self, ashint=True) }) for (from_, dialect), hinttext in select._hints.items() if dialect in ('*', self.dialect.name) ]) hint_text = self.get_select_hint_text(byfrom) if hint_text: text += hint_text + " " if select._prefixes: text += self._generate_prefixes(select, select._prefixes, **kwargs) text += self.get_select_precolumns(select) text += ', '.join(inner_columns) if froms: text += " \nFROM " if select._hints: text += ', '.join([f._compiler_dispatch(self, asfrom=True, fromhints=byfrom, **kwargs) for f in froms]) else: text += ', '.join([f._compiler_dispatch(self, asfrom=True, **kwargs) for f in froms]) else: text += self.default_from() if select._whereclause is not None: t = select._whereclause._compiler_dispatch(self, **kwargs) if t: text += " \nWHERE " + t if select._group_by_clause.clauses: group_by = select._group_by_clause._compiler_dispatch( self, **kwargs) if group_by: text += " GROUP BY " + group_by if select._having is not None: t = select._having._compiler_dispatch(self, **kwargs) if t: text += " \nHAVING " + t if select._order_by_clause.clauses: if self.dialect.supports_simple_order_by_label: order_by_select = select else: order_by_select = None text += self.order_by_clause(select, order_by_select=order_by_select, **kwargs) if select._limit is not None or select._offset is not None: text += self.limit_clause(select) if select._for_update_arg is not None: text += self.for_update_clause(select) if self.ctes and \ compound_index == 0 and toplevel: text = self._render_cte_clause() + text self.stack.pop(-1) if asfrom and parens: return "(" + text + ")" else: return text def _generate_prefixes(self, stmt, prefixes, **kw): clause = " ".join( prefix._compiler_dispatch(self, **kw) for prefix, dialect_name in prefixes if dialect_name is None or dialect_name == self.dialect.name ) if clause: clause += " " return clause def _render_cte_clause(self): if self.positional: self.positiontup = self.cte_positional + self.positiontup cte_text = self.get_cte_preamble(self.ctes_recursive) + " " cte_text += ", \n".join( [txt for txt in self.ctes.values()] ) cte_text += "\n " return cte_text def get_cte_preamble(self, recursive): if recursive: return "WITH RECURSIVE" else: return "WITH" def get_select_precolumns(self, select): """Called when building a ``SELECT`` statement, position is just before column list. """ return select._distinct and "DISTINCT " or "" def order_by_clause(self, select, **kw): order_by = select._order_by_clause._compiler_dispatch(self, **kw) if order_by: return " ORDER BY " + order_by else: return "" def for_update_clause(self, select): return " FOR UPDATE" def returning_clause(self, stmt, returning_cols): raise exc.CompileError( "RETURNING is not supported by this " "dialect's statement compiler.") def limit_clause(self, select): text = "" if select._limit is not None: text += "\n LIMIT " + self.process(elements.literal(select._limit)) if select._offset is not None: if select._limit is None: text += "\n LIMIT -1" text += " OFFSET " + self.process(elements.literal(select._offset)) return text def visit_table(self, table, asfrom=False, iscrud=False, ashint=False, fromhints=None, **kwargs): if asfrom or ashint: if getattr(table, "schema", None): ret = self.preparer.quote_schema(table.schema) + \ "." + self.preparer.quote(table.name) else: ret = self.preparer.quote(table.name) if fromhints and table in fromhints: ret = self.format_from_hint_text(ret, table, fromhints[table], iscrud) return ret else: return "" def visit_join(self, join, asfrom=False, **kwargs): return ( join.left._compiler_dispatch(self, asfrom=True, **kwargs) + (join.isouter and " LEFT OUTER JOIN " or " JOIN ") + join.right._compiler_dispatch(self, asfrom=True, **kwargs) + " ON " + join.onclause._compiler_dispatch(self, **kwargs) ) def visit_insert(self, insert_stmt, **kw): self.isinsert = True colparams = self._get_colparams(insert_stmt, **kw) if not colparams and \ not self.dialect.supports_default_values and \ not self.dialect.supports_empty_insert: raise exc.CompileError("The '%s' dialect with current database " "version settings does not support empty " "inserts." % self.dialect.name) if insert_stmt._has_multi_parameters: if not self.dialect.supports_multivalues_insert: raise exc.CompileError("The '%s' dialect with current database " "version settings does not support " "in-place multirow inserts." % self.dialect.name) colparams_single = colparams[0] else: colparams_single = colparams preparer = self.preparer supports_default_values = self.dialect.supports_default_values text = "INSERT " if insert_stmt._prefixes: text += self._generate_prefixes(insert_stmt, insert_stmt._prefixes, **kw) text += "INTO " table_text = preparer.format_table(insert_stmt.table) if insert_stmt._hints: dialect_hints = dict([ (table, hint_text) for (table, dialect), hint_text in insert_stmt._hints.items() if dialect in ('*', self.dialect.name) ]) if insert_stmt.table in dialect_hints: table_text = self.format_from_hint_text( table_text, insert_stmt.table, dialect_hints[insert_stmt.table], True ) text += table_text if colparams_single or not supports_default_values: text += " (%s)" % ', '.join([preparer.format_column(c[0]) for c in colparams_single]) if self.returning or insert_stmt._returning: self.returning = self.returning or insert_stmt._returning returning_clause = self.returning_clause( insert_stmt, self.returning) if self.returning_precedes_values: text += " " + returning_clause if insert_stmt.select is not None: text += " %s" % self.process(insert_stmt.select, **kw) elif not colparams and supports_default_values: text += " DEFAULT VALUES" elif insert_stmt._has_multi_parameters: text += " VALUES %s" % ( ", ".join( "(%s)" % ( ', '.join(c[1] for c in colparam_set) ) for colparam_set in colparams ) ) else: text += " VALUES (%s)" % \ ', '.join([c[1] for c in colparams]) if self.returning and not self.returning_precedes_values: text += " " + returning_clause return text def update_limit_clause(self, update_stmt): """Provide a hook for MySQL to add LIMIT to the UPDATE""" return None def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw): """Provide a hook to override the initial table clause in an UPDATE statement. MySQL overrides this. """ return from_table._compiler_dispatch(self, asfrom=True, iscrud=True, **kw) def update_from_clause(self, update_stmt, from_table, extra_froms, from_hints, **kw): """Provide a hook to override the generation of an UPDATE..FROM clause. MySQL and MSSQL override this. """ return "FROM " + ', '.join( t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw) for t in extra_froms) def visit_update(self, update_stmt, **kw): self.stack.append( {'correlate_froms': set([update_stmt.table]), "iswrapper": False, "asfrom_froms": set([update_stmt.table])}) self.isupdate = True extra_froms = update_stmt._extra_froms text = "UPDATE " if update_stmt._prefixes: text += self._generate_prefixes(update_stmt, update_stmt._prefixes, **kw) table_text = self.update_tables_clause(update_stmt, update_stmt.table, extra_froms, **kw) colparams = self._get_colparams(update_stmt, **kw) if update_stmt._hints: dialect_hints = dict([ (table, hint_text) for (table, dialect), hint_text in update_stmt._hints.items() if dialect in ('*', self.dialect.name) ]) if update_stmt.table in dialect_hints: table_text = self.format_from_hint_text( table_text, update_stmt.table, dialect_hints[update_stmt.table], True ) else: dialect_hints = None text += table_text text += ' SET ' include_table = extra_froms and \ self.render_table_with_column_in_update_from text += ', '.join( c[0]._compiler_dispatch(self, include_table=include_table) + '=' + c[1] for c in colparams ) if self.returning or update_stmt._returning: if not self.returning: self.returning = update_stmt._returning if self.returning_precedes_values: text += " " + self.returning_clause( update_stmt, self.returning) if extra_froms: extra_from_text = self.update_from_clause( update_stmt, update_stmt.table, extra_froms, dialect_hints, **kw) if extra_from_text: text += " " + extra_from_text if update_stmt._whereclause is not None: text += " WHERE " + self.process(update_stmt._whereclause) limit_clause = self.update_limit_clause(update_stmt) if limit_clause: text += " " + limit_clause if self.returning and not self.returning_precedes_values: text += " " + self.returning_clause( update_stmt, self.returning) self.stack.pop(-1) return text def _create_crud_bind_param(self, col, value, required=False, name=None): if name is None: name = col.key bindparam = elements.BindParameter(name, value, type_=col.type, required=required) bindparam._is_crud = True return bindparam._compiler_dispatch(self) @util.memoized_property def _key_getters_for_crud_column(self): if self.isupdate and self.statement._extra_froms: # when extra tables are present, refer to the columns # in those extra tables as table-qualified, including in # dictionaries and when rendering bind param names. # the "main" table of the statement remains unqualified, # allowing the most compatibility with a non-multi-table # statement. _et = set(self.statement._extra_froms) def _column_as_key(key): str_key = elements._column_as_key(key) if hasattr(key, 'table') and key.table in _et: return (key.table.name, str_key) else: return str_key def _getattr_col_key(col): if col.table in _et: return (col.table.name, col.key) else: return col.key def _col_bind_name(col): if col.table in _et: return "%s_%s" % (col.table.name, col.key) else: return col.key else: _column_as_key = elements._column_as_key _getattr_col_key = _col_bind_name = operator.attrgetter("key") return _column_as_key, _getattr_col_key, _col_bind_name def _get_colparams(self, stmt, **kw): """create a set of tuples representing column/string pairs for use in an INSERT or UPDATE statement. Also generates the Compiled object's postfetch, prefetch, and returning column collections, used for default handling and ultimately populating the ResultProxy's prefetch_cols() and postfetch_cols() collections. """ self.postfetch = [] self.prefetch = [] self.returning = [] # no parameters in the statement, no parameters in the # compiled params - return binds for all columns if self.column_keys is None and stmt.parameters is None: return [ (c, self._create_crud_bind_param(c, None, required=True)) for c in stmt.table.columns ] if stmt._has_multi_parameters: stmt_parameters = stmt.parameters[0] else: stmt_parameters = stmt.parameters # getters - these are normally just column.key, # but in the case of mysql multi-table update, the rules for # .key must conditionally take tablename into account _column_as_key, _getattr_col_key, _col_bind_name = \ self._key_getters_for_crud_column # if we have statement parameters - set defaults in the # compiled params if self.column_keys is None: parameters = {} else: parameters = dict((_column_as_key(key), REQUIRED) for key in self.column_keys if not stmt_parameters or key not in stmt_parameters) # create a list of column assignment clauses as tuples values = [] if stmt_parameters is not None: for k, v in stmt_parameters.items(): colkey = _column_as_key(k) if colkey is not None: parameters.setdefault(colkey, v) else: # a non-Column expression on the left side; # add it to values() in an "as-is" state, # coercing right side to bound param if elements._is_literal(v): v = self.process( elements.BindParameter(None, v, type_=k.type), **kw) else: v = self.process(v.self_group(), **kw) values.append((k, v)) need_pks = self.isinsert and \ not self.inline and \ not stmt._returning implicit_returning = need_pks and \ self.dialect.implicit_returning and \ stmt.table.implicit_returning if self.isinsert: implicit_return_defaults = implicit_returning and stmt._return_defaults elif self.isupdate: implicit_return_defaults = self.dialect.implicit_returning and \ stmt.table.implicit_returning and \ stmt._return_defaults if implicit_return_defaults: if stmt._return_defaults is True: implicit_return_defaults = set(stmt.table.c) else: implicit_return_defaults = set(stmt._return_defaults) postfetch_lastrowid = need_pks and self.dialect.postfetch_lastrowid check_columns = {} # special logic that only occurs for multi-table UPDATE # statements if self.isupdate and stmt._extra_froms and stmt_parameters: normalized_params = dict( (elements._clause_element_as_expr(c), param) for c, param in stmt_parameters.items() ) affected_tables = set() for t in stmt._extra_froms: for c in t.c: if c in normalized_params: affected_tables.add(t) check_columns[_getattr_col_key(c)] = c value = normalized_params[c] if elements._is_literal(value): value = self._create_crud_bind_param( c, value, required=value is REQUIRED, name=_col_bind_name(c)) else: self.postfetch.append(c) value = self.process(value.self_group(), **kw) values.append((c, value)) # determine tables which are actually # to be updated - process onupdate and # server_onupdate for these for t in affected_tables: for c in t.c: if c in normalized_params: continue elif c.onupdate is not None and not c.onupdate.is_sequence: if c.onupdate.is_clause_element: values.append( (c, self.process( c.onupdate.arg.self_group(), **kw) ) ) self.postfetch.append(c) else: values.append( (c, self._create_crud_bind_param( c, None, name=_col_bind_name(c) ) ) ) self.prefetch.append(c) elif c.server_onupdate is not None: self.postfetch.append(c) if self.isinsert and stmt.select_names: # for an insert from select, we can only use names that # are given, so only select for those names. cols = (stmt.table.c[_column_as_key(name)] for name in stmt.select_names) else: # iterate through all table columns to maintain # ordering, even for those cols that aren't included cols = stmt.table.columns for c in cols: col_key = _getattr_col_key(c) if col_key in parameters and col_key not in check_columns: value = parameters.pop(col_key) if elements._is_literal(value): value = self._create_crud_bind_param( c, value, required=value is REQUIRED, name=_col_bind_name(c) if not stmt._has_multi_parameters else "%s_0" % _col_bind_name(c) ) else: if isinstance(value, elements.BindParameter) and \ value.type._isnull: value = value._clone() value.type = c.type if c.primary_key and implicit_returning: self.returning.append(c) value = self.process(value.self_group(), **kw) elif implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) value = self.process(value.self_group(), **kw) else: self.postfetch.append(c) value = self.process(value.self_group(), **kw) values.append((c, value)) elif self.isinsert: if c.primary_key and \ need_pks and \ ( implicit_returning or not postfetch_lastrowid or c is not stmt.table._autoincrement_column ): if implicit_returning: if c.default is not None: if c.default.is_sequence: if self.dialect.supports_sequences and \ (not c.default.optional or \ not self.dialect.sequences_optional): proc = self.process(c.default, **kw) values.append((c, proc)) self.returning.append(c) elif c.default.is_clause_element: values.append( (c, self.process(c.default.arg.self_group(), **kw)) ) self.returning.append(c) else: values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) else: self.returning.append(c) else: if ( c.default is not None and ( not c.default.is_sequence or self.dialect.supports_sequences ) ) or \ c is stmt.table._autoincrement_column and ( self.dialect.supports_sequences or self.dialect.preexecute_autoincrement_sequences ): values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) elif c.default is not None: if c.default.is_sequence: if self.dialect.supports_sequences and \ (not c.default.optional or \ not self.dialect.sequences_optional): proc = self.process(c.default, **kw) values.append((c, proc)) if implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) elif not c.primary_key: self.postfetch.append(c) elif c.default.is_clause_element: values.append( (c, self.process(c.default.arg.self_group(), **kw)) ) if implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) elif not c.primary_key: # dont add primary key column to postfetch self.postfetch.append(c) else: values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) elif c.server_default is not None: if implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) elif not c.primary_key: self.postfetch.append(c) elif implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) elif self.isupdate: if c.onupdate is not None and not c.onupdate.is_sequence: if c.onupdate.is_clause_element: values.append( (c, self.process(c.onupdate.arg.self_group(), **kw)) ) if implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) else: self.postfetch.append(c) else: values.append( (c, self._create_crud_bind_param(c, None)) ) self.prefetch.append(c) elif c.server_onupdate is not None: if implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) else: self.postfetch.append(c) elif implicit_return_defaults and \ c in implicit_return_defaults: self.returning.append(c) if parameters and stmt_parameters: check = set(parameters).intersection( _column_as_key(k) for k in stmt.parameters ).difference(check_columns) if check: raise exc.CompileError( "Unconsumed column names: %s" % (", ".join("%s" % c for c in check)) ) if stmt._has_multi_parameters: values_0 = values values = [values] values.extend( [ ( c, self._create_crud_bind_param( c, row[c.key], name="%s_%d" % (c.key, i + 1) ) if c.key in row else param ) for (c, param) in values_0 ] for i, row in enumerate(stmt.parameters[1:]) ) return values def visit_delete(self, delete_stmt, **kw): self.stack.append({'correlate_froms': set([delete_stmt.table]), "iswrapper": False, "asfrom_froms": set([delete_stmt.table])}) self.isdelete = True text = "DELETE " if delete_stmt._prefixes: text += self._generate_prefixes(delete_stmt, delete_stmt._prefixes, **kw) text += "FROM " table_text = delete_stmt.table._compiler_dispatch(self, asfrom=True, iscrud=True) if delete_stmt._hints: dialect_hints = dict([ (table, hint_text) for (table, dialect), hint_text in delete_stmt._hints.items() if dialect in ('*', self.dialect.name) ]) if delete_stmt.table in dialect_hints: table_text = self.format_from_hint_text( table_text, delete_stmt.table, dialect_hints[delete_stmt.table], True ) else: dialect_hints = None text += table_text if delete_stmt._returning: self.returning = delete_stmt._returning if self.returning_precedes_values: text += " " + self.returning_clause( delete_stmt, delete_stmt._returning) if delete_stmt._whereclause is not None: text += " WHERE " text += delete_stmt._whereclause._compiler_dispatch(self) if self.returning and not self.returning_precedes_values: text += " " + self.returning_clause( delete_stmt, delete_stmt._returning) self.stack.pop(-1) return text def visit_savepoint(self, savepoint_stmt): return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt) def visit_rollback_to_savepoint(self, savepoint_stmt): return "ROLLBACK TO SAVEPOINT %s" % \ self.preparer.format_savepoint(savepoint_stmt) def visit_release_savepoint(self, savepoint_stmt): return "RELEASE SAVEPOINT %s" % \ self.preparer.format_savepoint(savepoint_stmt) class DDLCompiler(Compiled): @util.memoized_property def sql_compiler(self): return self.dialect.statement_compiler(self.dialect, None) @util.memoized_property def type_compiler(self): return self.dialect.type_compiler @property def preparer(self): return self.dialect.identifier_preparer def construct_params(self, params=None): return None def visit_ddl(self, ddl, **kwargs): # table events can substitute table and schema name context = ddl.context if isinstance(ddl.target, schema.Table): context = context.copy() preparer = self.dialect.identifier_preparer path = preparer.format_table_seq(ddl.target) if len(path) == 1: table, sch = path[0], '' else: table, sch = path[-1], path[0] context.setdefault('table', table) context.setdefault('schema', sch) context.setdefault('fullname', preparer.format_table(ddl.target)) return self.sql_compiler.post_process_text(ddl.statement % context) def visit_create_schema(self, create): schema = self.preparer.format_schema(create.element) return "CREATE SCHEMA " + schema def visit_drop_schema(self, drop): schema = self.preparer.format_schema(drop.element) text = "DROP SCHEMA " + schema if drop.cascade: text += " CASCADE" return text def visit_create_table(self, create): table = create.element preparer = self.dialect.identifier_preparer text = "\n" + " ".join(['CREATE'] + \ table._prefixes + \ ['TABLE', preparer.format_table(table), "("]) separator = "\n" # if only one primary key, specify it along with the column first_pk = False for create_column in create.columns: column = create_column.element try: processed = self.process(create_column, first_pk=column.primary_key and not first_pk) if processed is not None: text += separator separator = ", \n" text += "\t" + processed if column.primary_key: first_pk = True except exc.CompileError as ce: util.raise_from_cause( exc.CompileError(util.u("(in table '%s', column '%s'): %s") % ( table.description, column.name, ce.args[0] ))) const = self.create_table_constraints(table) if const: text += ", \n\t" + const text += "\n)%s\n\n" % self.post_create_table(table) return text def visit_create_column(self, create, first_pk=False): column = create.element if column.system: return None text = self.get_column_specification( column, first_pk=first_pk ) const = " ".join(self.process(constraint) \ for constraint in column.constraints) if const: text += " " + const return text def create_table_constraints(self, table): # On some DB order is significant: visit PK first, then the # other constraints (engine.ReflectionTest.testbasic failed on FB2) constraints = [] if table.primary_key: constraints.append(table.primary_key) constraints.extend([c for c in table._sorted_constraints if c is not table.primary_key]) return ", \n\t".join(p for p in (self.process(constraint) for constraint in constraints if ( constraint._create_rule is None or constraint._create_rule(self)) and ( not self.dialect.supports_alter or not getattr(constraint, 'use_alter', False) )) if p is not None ) def visit_drop_table(self, drop): return "\nDROP TABLE " + self.preparer.format_table(drop.element) def visit_drop_view(self, drop): return "\nDROP VIEW " + self.preparer.format_table(drop.element) def _verify_index_table(self, index): if index.table is None: raise exc.CompileError("Index '%s' is not associated " "with any table." % index.name) def visit_create_index(self, create, include_schema=False, include_table_schema=True): index = create.element self._verify_index_table(index) preparer = self.preparer text = "CREATE " if index.unique: text += "UNIQUE " text += "INDEX %s ON %s (%s)" \ % ( self._prepared_index_name(index, include_schema=include_schema), preparer.format_table(index.table, use_schema=include_table_schema), ', '.join( self.sql_compiler.process(expr, include_table=False, literal_binds=True) for expr in index.expressions) ) return text def visit_drop_index(self, drop): index = drop.element return "\nDROP INDEX " + self._prepared_index_name(index, include_schema=True) def _prepared_index_name(self, index, include_schema=False): if include_schema and index.table is not None and index.table.schema: schema = index.table.schema schema_name = self.preparer.quote_schema(schema) else: schema_name = None ident = index.name if isinstance(ident, elements._truncated_label): max_ = self.dialect.max_index_name_length or \ self.dialect.max_identifier_length if len(ident) > max_: ident = ident[0:max_ - 8] + \ "_" + util.md5_hex(ident)[-4:] else: self.dialect.validate_identifier(ident) index_name = self.preparer.quote(ident) if schema_name: index_name = schema_name + "." + index_name return index_name def visit_add_constraint(self, create): return "ALTER TABLE %s ADD %s" % ( self.preparer.format_table(create.element.table), self.process(create.element) ) def visit_create_sequence(self, create): text = "CREATE SEQUENCE %s" % \ self.preparer.format_sequence(create.element) if create.element.increment is not None: text += " INCREMENT BY %d" % create.element.increment if create.element.start is not None: text += " START WITH %d" % create.element.start return text def visit_drop_sequence(self, drop): return "DROP SEQUENCE %s" % \ self.preparer.format_sequence(drop.element) def visit_drop_constraint(self, drop): return "ALTER TABLE %s DROP CONSTRAINT %s%s" % ( self.preparer.format_table(drop.element.table), self.preparer.format_constraint(drop.element), drop.cascade and " CASCADE" or "" ) def get_column_specification(self, column, **kwargs): colspec = self.preparer.format_column(column) + " " + \ self.dialect.type_compiler.process(column.type) default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default if not column.nullable: colspec += " NOT NULL" return colspec def post_create_table(self, table): return '' def get_column_default_string(self, column): if isinstance(column.server_default, schema.DefaultClause): if isinstance(column.server_default.arg, util.string_types): return "'%s'" % column.server_default.arg else: return self.sql_compiler.process(column.server_default.arg) else: return None def visit_check_constraint(self, constraint): text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext, include_table=False, literal_binds=True) text += self.define_constraint_deferrability(constraint) return text def visit_column_check_constraint(self, constraint): text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) text += "CHECK (%s)" % constraint.sqltext text += self.define_constraint_deferrability(constraint) return text def visit_primary_key_constraint(self, constraint): if len(constraint) == 0: return '' text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) text += "PRIMARY KEY " text += "(%s)" % ', '.join(self.preparer.quote(c.name) for c in constraint) text += self.define_constraint_deferrability(constraint) return text def visit_foreign_key_constraint(self, constraint): preparer = self.dialect.identifier_preparer text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ preparer.format_constraint(constraint) remote_table = list(constraint._elements.values())[0].column.table text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % ( ', '.join(preparer.quote(f.parent.name) for f in constraint._elements.values()), self.define_constraint_remote_table( constraint, remote_table, preparer), ', '.join(preparer.quote(f.column.name) for f in constraint._elements.values()) ) text += self.define_constraint_match(constraint) text += self.define_constraint_cascades(constraint) text += self.define_constraint_deferrability(constraint) return text def define_constraint_remote_table(self, constraint, table, preparer): """Format the remote table clause of a CREATE CONSTRAINT clause.""" return preparer.format_table(table) def visit_unique_constraint(self, constraint): if len(constraint) == 0: return '' text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) text += "UNIQUE (%s)" % ( ', '.join(self.preparer.quote(c.name) for c in constraint)) text += self.define_constraint_deferrability(constraint) return text def define_constraint_cascades(self, constraint): text = "" if constraint.ondelete is not None: text += " ON DELETE %s" % constraint.ondelete if constraint.onupdate is not None: text += " ON UPDATE %s" % constraint.onupdate return text def define_constraint_deferrability(self, constraint): text = "" if constraint.deferrable is not None: if constraint.deferrable: text += " DEFERRABLE" else: text += " NOT DEFERRABLE" if constraint.initially is not None: text += " INITIALLY %s" % constraint.initially return text def define_constraint_match(self, constraint): text = "" if constraint.match is not None: text += " MATCH %s" % constraint.match return text class GenericTypeCompiler(TypeCompiler): def visit_FLOAT(self, type_): return "FLOAT" def visit_REAL(self, type_): return "REAL" def visit_NUMERIC(self, type_): if type_.precision is None: return "NUMERIC" elif type_.scale is None: return "NUMERIC(%(precision)s)" % \ {'precision': type_.precision} else: return "NUMERIC(%(precision)s, %(scale)s)" % \ {'precision': type_.precision, 'scale': type_.scale} def visit_DECIMAL(self, type_): if type_.precision is None: return "DECIMAL" elif type_.scale is None: return "DECIMAL(%(precision)s)" % \ {'precision': type_.precision} else: return "DECIMAL(%(precision)s, %(scale)s)" % \ {'precision': type_.precision, 'scale': type_.scale} def visit_INTEGER(self, type_): return "INTEGER" def visit_SMALLINT(self, type_): return "SMALLINT" def visit_BIGINT(self, type_): return "BIGINT" def visit_TIMESTAMP(self, type_): return 'TIMESTAMP' def visit_DATETIME(self, type_): return "DATETIME" def visit_DATE(self, type_): return "DATE" def visit_TIME(self, type_): return "TIME" def visit_CLOB(self, type_): return "CLOB" def visit_NCLOB(self, type_): return "NCLOB" def _render_string_type(self, type_, name): text = name if type_.length: text += "(%d)" % type_.length if type_.collation: text += ' COLLATE "%s"' % type_.collation return text def visit_CHAR(self, type_): return self._render_string_type(type_, "CHAR") def visit_NCHAR(self, type_): return self._render_string_type(type_, "NCHAR") def visit_VARCHAR(self, type_): return self._render_string_type(type_, "VARCHAR") def visit_NVARCHAR(self, type_): return self._render_string_type(type_, "NVARCHAR") def visit_TEXT(self, type_): return self._render_string_type(type_, "TEXT") def visit_BLOB(self, type_): return "BLOB" def visit_BINARY(self, type_): return "BINARY" + (type_.length and "(%d)" % type_.length or "") def visit_VARBINARY(self, type_): return "VARBINARY" + (type_.length and "(%d)" % type_.length or "") def visit_BOOLEAN(self, type_): return "BOOLEAN" def visit_large_binary(self, type_): return self.visit_BLOB(type_) def visit_boolean(self, type_): return self.visit_BOOLEAN(type_) def visit_time(self, type_): return self.visit_TIME(type_) def visit_datetime(self, type_): return self.visit_DATETIME(type_) def visit_date(self, type_): return self.visit_DATE(type_) def visit_big_integer(self, type_): return self.visit_BIGINT(type_) def visit_small_integer(self, type_): return self.visit_SMALLINT(type_) def visit_integer(self, type_): return self.visit_INTEGER(type_) def visit_real(self, type_): return self.visit_REAL(type_) def visit_float(self, type_): return self.visit_FLOAT(type_) def visit_numeric(self, type_): return self.visit_NUMERIC(type_) def visit_string(self, type_): return self.visit_VARCHAR(type_) def visit_unicode(self, type_): return self.visit_VARCHAR(type_) def visit_text(self, type_): return self.visit_TEXT(type_) def visit_unicode_text(self, type_): return self.visit_TEXT(type_) def visit_enum(self, type_): return self.visit_VARCHAR(type_) def visit_null(self, type_): raise exc.CompileError("Can't generate DDL for %r; " "did you forget to specify a " "type on this Column?" % type_) def visit_type_decorator(self, type_): return self.process(type_.type_engine(self.dialect)) def visit_user_defined(self, type_): return type_.get_col_spec() class IdentifierPreparer(object): """Handle quoting and case-folding of identifiers based on options.""" reserved_words = RESERVED_WORDS legal_characters = LEGAL_CHARACTERS illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS def __init__(self, dialect, initial_quote='"', final_quote=None, escape_quote='"', omit_schema=False): """Construct a new ``IdentifierPreparer`` object. initial_quote Character that begins a delimited identifier. final_quote Character that ends a delimited identifier. Defaults to `initial_quote`. omit_schema Prevent prepending schema name. Useful for databases that do not support schemae. """ self.dialect = dialect self.initial_quote = initial_quote self.final_quote = final_quote or self.initial_quote self.escape_quote = escape_quote self.escape_to_quote = self.escape_quote * 2 self.omit_schema = omit_schema self._strings = {} def _escape_identifier(self, value): """Escape an identifier. Subclasses should override this to provide database-dependent escaping behavior. """ return value.replace(self.escape_quote, self.escape_to_quote) def _unescape_identifier(self, value): """Canonicalize an escaped identifier. Subclasses should override this to provide database-dependent unescaping behavior that reverses _escape_identifier. """ return value.replace(self.escape_to_quote, self.escape_quote) def quote_identifier(self, value): """Quote an identifier. Subclasses should override this to provide database-dependent quoting behavior. """ return self.initial_quote + \ self._escape_identifier(value) + \ self.final_quote def _requires_quotes(self, value): """Return True if the given identifier requires quoting.""" lc_value = value.lower() return (lc_value in self.reserved_words or value[0] in self.illegal_initial_characters or not self.legal_characters.match(util.text_type(value)) or (lc_value != value)) def quote_schema(self, schema, force=None): """Conditionally quote a schema. Subclasses can override this to provide database-dependent quoting behavior for schema names. the 'force' flag should be considered deprecated. """ return self.quote(schema, force) def quote(self, ident, force=None): """Conditionally quote an identifier. the 'force' flag should be considered deprecated. """ force = getattr(ident, "quote", None) if force is None: if ident in self._strings: return self._strings[ident] else: if self._requires_quotes(ident): self._strings[ident] = self.quote_identifier(ident) else: self._strings[ident] = ident return self._strings[ident] elif force: return self.quote_identifier(ident) else: return ident def format_sequence(self, sequence, use_schema=True): name = self.quote(sequence.name) if not self.omit_schema and use_schema and sequence.schema is not None: name = self.quote_schema(sequence.schema) + "." + name return name def format_label(self, label, name=None): return self.quote(name or label.name) def format_alias(self, alias, name=None): return self.quote(name or alias.name) def format_savepoint(self, savepoint, name=None): return self.quote(name or savepoint.ident) def format_constraint(self, constraint): return self.quote(constraint.name) def format_table(self, table, use_schema=True, name=None): """Prepare a quoted table and schema name.""" if name is None: name = table.name result = self.quote(name) if not self.omit_schema and use_schema \ and getattr(table, "schema", None): result = self.quote_schema(table.schema) + "." + result return result def format_schema(self, name, quote=None): """Prepare a quoted schema name.""" return self.quote(name, quote) def format_column(self, column, use_table=False, name=None, table_name=None): """Prepare a quoted column name.""" if name is None: name = column.name if not getattr(column, 'is_literal', False): if use_table: return self.format_table( column.table, use_schema=False, name=table_name) + "." + self.quote(name) else: return self.quote(name) else: # literal textual elements get stuck into ColumnClause a lot, # which shouldn't get quoted if use_table: return self.format_table(column.table, use_schema=False, name=table_name) + '.' + name else: return name def format_table_seq(self, table, use_schema=True): """Format table name and schema as a tuple.""" # Dialects with more levels in their fully qualified references # ('database', 'owner', etc.) could override this and return # a longer sequence. if not self.omit_schema and use_schema and \ getattr(table, 'schema', None): return (self.quote_schema(table.schema), self.format_table(table, use_schema=False)) else: return (self.format_table(table, use_schema=False), ) @util.memoized_property def _r_identifiers(self): initial, final, escaped_final = \ [re.escape(s) for s in (self.initial_quote, self.final_quote, self._escape_identifier(self.final_quote))] r = re.compile( r'(?:' r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s' r'|([^\.]+))(?=\.|$))+' % {'initial': initial, 'final': final, 'escaped': escaped_final}) return r def unformat_identifiers(self, identifiers): """Unpack 'schema.table.column'-like strings into components.""" r = self._r_identifiers return [self._unescape_identifier(i) for i in [a or b for a, b in r.findall(identifiers)]]
camayak/django-apiserver
refs/heads/master
tests/complex/api/urls.py
1
from django.conf.urls.defaults import * from apiserver.api import Api from complex.api.resources import PostResource, ProfileResource, CommentResource, UserResource, GroupResource api = API(api_name='v1') api.register(PostResource(), canonical=True) api.register(ProfileResource(), canonical=True) api.register(CommentResource(), canonical=True) api.register(UserResource(), canonical=True) api.register(GroupResource(), canonical=True) urlpatterns = api.urls
ProjetPP/PPP-Logger
refs/heads/master
ppp_logger/http.py
1
"""Handles the HTTP frontend (ie. answers to requests from a UI).""" import cgi import json import logging from ppp_libmodule import HttpRequestHandler as HttpRequestHandler from ppp_libmodule.exceptions import ClientError, InvalidConfig from .api import Api from .logger import Logger DOC_URL = 'https://github.com/ProjetPP/PPP-Logger#readme' class RequestHandler(HttpRequestHandler): """Handles one request.""" def on_bad_method(self): """Returns a basic response to GET requests (probably sent by humans trying to open the link in a web browser.""" text = 'Bad method, only POST is supported. See: ' + DOC_URL return self.make_response('405 Method Not Allowed', 'text/plain; charset=utf-8', text ) def on_unknown_uri(self): """Returns a basic response to GET requests (probably sent by humans trying to open the link in a web browser.""" text = 'URI not found, only / is supported. See: ' + DOC_URL return self.make_response('404 Not Found', 'text/plain; charset=utf-8', text ) def process_request(self, request): """Processes a request.""" try: request = json.loads(request.read().decode()) except ValueError: raise ClientError('Data is not valid JSON.') answer = self.router_class(request).answer() return self.make_response('200 OK', 'application/json', json.dumps(answer) ) def on_get(self): form = cgi.FieldStorage(fp=self.environ['wsgi.input'], environ=self.environ.copy()) try: answer = Api(form).answer() except ClientError as e: return self.make_response('405 Client Error', 'text/plain; charset=utf-8', e.args[0]) return self.make_response('200 OK', 'application/json', json.dumps(answer)) def app(environ, start_response): """Function called by the WSGI server.""" return RequestHandler(environ, start_response, Logger).dispatch()
doismellburning/django
refs/heads/master
tests/null_fk_ordering/tests.py
24
from __future__ import unicode_literals from django.test import TestCase from .models import Author, Article, SystemInfo, Forum, Post, Comment class NullFkOrderingTests(TestCase): def test_ordering_across_null_fk(self): """ Regression test for #7512 ordering across nullable Foreign Keys shouldn't exclude results """ author_1 = Author.objects.create(name='Tom Jones') author_2 = Author.objects.create(name='Bob Smith') Article.objects.create(title='No author on this article') Article.objects.create(author=author_1, title='This article written by Tom Jones') Article.objects.create(author=author_2, title='This article written by Bob Smith') # We can't compare results directly (since different databases sort NULLs to # different ends of the ordering), but we can check that all results are # returned. self.assertEqual(len(list(Article.objects.all())), 3) s = SystemInfo.objects.create(system_name='System Info') f = Forum.objects.create(system_info=s, forum_name='First forum') p = Post.objects.create(forum=f, title='First Post') Comment.objects.create(post=p, comment_text='My first comment') Comment.objects.create(comment_text='My second comment') s2 = SystemInfo.objects.create(system_name='More System Info') f2 = Forum.objects.create(system_info=s2, forum_name='Second forum') p2 = Post.objects.create(forum=f2, title='Second Post') Comment.objects.create(comment_text='Another first comment') Comment.objects.create(post=p2, comment_text='Another second comment') # We have to test this carefully. Some databases sort NULL values before # everything else, some sort them afterwards. So we extract the ordered list # and check the length. Before the fix, this list was too short (some values # were omitted). self.assertEqual(len(list(Comment.objects.all())), 4)
drayanaindra/shoop
refs/heads/master
shoop/core/models/contacts.py
2
# -*- coding: utf-8 -*- # This file is part of Shoop. # # Copyright (c) 2012-2015, Shoop Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. from __future__ import unicode_literals from django.conf import settings from django.db import models from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ from enumfields import Enum, EnumField from parler.models import TranslatableModel, TranslatedFields from polymorphic.polymorphic_model import PolymorphicModel from timezone_field.fields import TimeZoneField from shoop.core.fields import InternalIdentifierField, LanguageField from shoop.core.utils.name_mixin import NameMixin @python_2_unicode_compatible class ContactGroup(TranslatableModel): identifier = InternalIdentifierField() members = models.ManyToManyField("Contact", related_name="groups", verbose_name=_('members'), blank=True) show_pricing = models.BooleanField(verbose_name=_('show as pricing option'), default=True) translations = TranslatedFields( name=models.CharField(max_length=64, verbose_name=_('name')), ) class Meta: verbose_name = _('contact group') verbose_name_plural = _('contact groups') def __str__(self): return self.safe_translation_getter("name", default="Group<%s>" % (self.identifier or self.id)) @python_2_unicode_compatible class Contact(NameMixin, PolymorphicModel): is_anonymous = False is_all_seeing = False created_on = models.DateTimeField(auto_now_add=True, editable=False) identifier = InternalIdentifierField(null=True, blank=True) is_active = models.BooleanField(default=True, db_index=True) # TODO: parent contact? default_shipping_address = models.ForeignKey( "Address", null=True, blank=True, related_name="+", verbose_name=_('shipping address'), on_delete=models.PROTECT ) default_billing_address = models.ForeignKey( "Address", null=True, blank=True, related_name="+", verbose_name=_('billing address'), on_delete=models.PROTECT ) default_shipping_method = models.ForeignKey( "ShippingMethod", verbose_name=_('default shipping method'), blank=True, null=True ) default_payment_method = models.ForeignKey( "PaymentMethod", verbose_name=_('default payment method'), blank=True, null=True ) language = LanguageField(verbose_name=_('language'), blank=True) marketing_permission = models.BooleanField(default=True, verbose_name=_('marketing permission')) phone = models.CharField(max_length=64, blank=True, verbose_name=_('phone')) www = models.URLField(max_length=128, blank=True, verbose_name=_('web address')) timezone = TimeZoneField(blank=True, null=True) prefix = models.CharField(verbose_name=_('name prefix'), max_length=64, blank=True) name = models.CharField(max_length=256, verbose_name=_('name')) suffix = models.CharField(verbose_name=_('name suffix'), max_length=64, blank=True) name_ext = models.CharField(max_length=256, blank=True, verbose_name=_('name extension')) email = models.EmailField(max_length=256, blank=True, verbose_name=_('email')) tax_group = models.ForeignKey("CustomerTaxGroup", null=True) def __str__(self): return self.full_name class Meta: verbose_name = _('contact') verbose_name_plural = _('contacts') class CompanyContact(Contact): members = models.ManyToManyField("Contact", related_name="company_memberships", blank=True) vat_code = models.CharField(max_length=32, blank=True, verbose_name=_('VAT code')) class Meta: verbose_name = _('company') verbose_name_plural = _('companies') class Gender(Enum): UNDISCLOSED = "u" MALE = "m" FEMALE = "f" OTHER = "o" class PersonContact(Contact): user = models.OneToOneField(settings.AUTH_USER_MODEL, blank=True, null=True, related_name="contact") gender = EnumField(Gender, default=Gender.UNDISCLOSED, max_length=4) birth_date = models.DateField(blank=True, null=True) # TODO: Figure out how/when/if the name and email fields are updated from users class Meta: verbose_name = _('person') verbose_name_plural = _('persons') def save(self, *args, **kwargs): if self.user_id and not self.pk: # Copy things user = self.user if not self.name: self.name = user.get_full_name() if not self.email: self.email = user.email return super(PersonContact, self).save(*args, **kwargs) @property def is_all_seeing(self): if self.user_id: return self.user.is_superuser class AnonymousContact(Contact): pk = id = None is_anonymous = True class Meta: managed = False # This isn't something that should actually exist in the database def __nonzero__(self): return False __bool__ = __nonzero__ def __eq__(self, other): return type(self) == type(other) def save(self, *args, **kwargs): raise NotImplementedError("Not implemented: AnonymousContacts aren't saveable, silly") def delete(self, *args, **kwargs): raise NotImplementedError("Not implemented: AnonymousContacts don't exist in the database, silly") @property def groups(self): return ContactGroup.objects.none() def get_person_contact(user): """ Get PersonContact of given user. If given user is non-zero (evaluates true as bool) and not anonymous, return the PersonContact of the user. If there is no PersonContact for the user yet, create it first. When this creation happens, details (name, email, is_active) are copied from the user. If given user is None (or otherwise evaluates as false) or anonymous, return the AnonymousContact. :param user: User object (or None) to get contact for :type user: django.contrib.auth.models.User|None :return: PersonContact of the user or AnonymousContact :rtype PersonContact|AnonymousContact """ if not user or user.is_anonymous(): return AnonymousContact() defaults = { 'is_active': user.is_active, 'name': user.get_full_name(), 'email': user.email, } return PersonContact.objects.get_or_create(user=user, defaults=defaults)[0]
kuralesache/kuralesache.github.io
refs/heads/master
tools/process_images.py
2
from PIL import Image import glob import os.path import re import json def get_frame(path): return int(path.rpartition('.')[0].rpartition('\\')[2]) for character in glob.glob('.\\raw\\*'): character_name = character.rpartition('\\')[2] for move in glob.glob(character + '\\*'): move_name = move.rpartition('\\')[2] if not os.path.exists(character_name): os.makedirs(character_name) hitbox_file = open(character_name + '\\' + move_name + '.json', 'w') # {left_bound, bottom_bound, frame_data} hitbox_file.write('characters.' + character_name + '.animations.' + move_name + '=') # set the number of frames in the animation based on the .txt filename data = [0] * get_frame(glob.glob(move + '\\*.txt')[0]) # process each image in the animation folder for image_path in glob.glob(move + '\\*.png'): frame = get_frame(image_path) img = Image.open(image_path).convert('RGB') pixels = img.load() # calculate bounds of valuable data left_bound = img.size[0] right_bound = 0 top_bound = img.size[1] bottom_bound = 0 for i in range(img.size[0]): for j in range(img.size[1]): if pixels[i,j][0] != pixels[i,j][1]: if i < left_bound: left_bound = i if i > right_bound: right_bound = i + 1 if j < top_bound: top_bound = j if j > bottom_bound: bottom_bound = j + 1 data[frame] = {'left_bound':left_bound, 'top_bound':top_bound, 'frame_data':[None] * (right_bound - left_bound) } for i in range(left_bound, right_bound): data[frame]['frame_data'][i - left_bound] = [None] * (bottom_bound - top_bound) for j in range(top_bound, bottom_bound): if pixels[i,j][0] == pixels[i,j][1]: data[frame]['frame_data'][i - left_bound][j - top_bound] = 0 elif pixels[i,j][2] > pixels[i,j][0] and pixels[i,j][1] > pixels[i,j][0]: data[frame]['frame_data'][i - left_bound][j - top_bound] = 0 else: data[frame]['frame_data'][i - left_bound][j - top_bound] = 1 hitbox_file.write(json.dumps(data, separators=(',',':'))) hitbox_file.write(';') hitbox_file.close()
bitmazk/django-logger
refs/heads/master
logger/utils.py
1
"""Utilities for the ``logger`` app.""" from logger.models import ( Action, ActionParameter, ActionParameterType, Log, ) class Logger(object): """The class that holds all the methods needed for logging.""" def create_log(self, action_name, param_dict): """ Creates a ``Log`` object based on the above values. :action_name: String representing the action type. E.g. "payment" :param_dict: A dictionary in the format of {'parameter_type1': 'value1', ... } defining the values of the action to log. """ # retrieve the action type action = self.get_action(action_name) # create a new log log = Log(action=action) log.save() # assign the paremeters to the log for (param, value) in param_dict.iteritems(): log.action_parameter.add(self.get_action_parameter(param, value)) return log def get_action(self, action_name): """Returns the ``Action`` object matching the action_name argument.""" try: action = Action.objects.get(name=action_name) except Action.DoesNotExist: action = Action(name=action_name) action.save() return action def get_action_parameter(self, param, value): """Returns an ``ActionParameter`` object.""" # create action parameter action_parameter = ActionParameter( parameter_type=self.get_parameter_type(param)) action_parameter.set_value(value) action_parameter.save() return action_parameter def get_parameter_type(self, param): """Retrieves an ``ActionParameterType`` object.""" # retrieve ActionParameterType. If it doesn't exist, create it try: parameter_type = ActionParameterType.objects.get(name=param) except ActionParameterType.DoesNotExist: parameter_type = ActionParameterType(name=param) parameter_type.save() return parameter_type
dudepare/django
refs/heads/master
tests/sites_framework/tests.py
115
from django.apps import apps from django.conf import settings from django.contrib.sites.managers import CurrentSiteManager from django.contrib.sites.models import Site from django.core import checks from django.db import models from django.test import TestCase from .models import ( AbstractArticle, CustomArticle, ExclusiveArticle, SyndicatedArticle, ) class SitesFrameworkTestCase(TestCase): def setUp(self): Site.objects.get_or_create(id=settings.SITE_ID, domain="example.com", name="example.com") Site.objects.create(id=settings.SITE_ID + 1, domain="example2.com", name="example2.com") self._old_models = apps.app_configs['sites_framework'].models.copy() def tearDown(self): apps.app_configs['sites_framework'].models = self._old_models apps.all_models['sites_framework'] = self._old_models apps.clear_cache() def test_site_fk(self): article = ExclusiveArticle.objects.create(title="Breaking News!", site_id=settings.SITE_ID) self.assertEqual(ExclusiveArticle.on_site.all().get(), article) def test_sites_m2m(self): article = SyndicatedArticle.objects.create(title="Fresh News!") article.sites.add(Site.objects.get(id=settings.SITE_ID)) article.sites.add(Site.objects.get(id=settings.SITE_ID + 1)) article2 = SyndicatedArticle.objects.create(title="More News!") article2.sites.add(Site.objects.get(id=settings.SITE_ID + 1)) self.assertEqual(SyndicatedArticle.on_site.all().get(), article) def test_custom_named_field(self): article = CustomArticle.objects.create( title="Tantalizing News!", places_this_article_should_appear_id=settings.SITE_ID, ) self.assertEqual(CustomArticle.on_site.all().get(), article) def test_invalid_name(self): class InvalidArticle(AbstractArticle): site = models.ForeignKey(Site, models.CASCADE) objects = models.Manager() on_site = CurrentSiteManager("places_this_article_should_appear") errors = InvalidArticle.check() expected = [ checks.Error( ("CurrentSiteManager could not find a field named " "'places_this_article_should_appear'."), hint=None, obj=InvalidArticle.on_site, id='sites.E001', ) ] self.assertEqual(errors, expected) def test_invalid_field_type(self): class ConfusedArticle(AbstractArticle): site = models.IntegerField() errors = ConfusedArticle.check() expected = [ checks.Error( "CurrentSiteManager cannot use 'ConfusedArticle.site' as it is not a ForeignKey or ManyToManyField.", hint=None, obj=ConfusedArticle.on_site, id='sites.E002', ) ] self.assertEqual(errors, expected)
snnn/tensorflow
refs/heads/master
tensorflow/contrib/nn/python/ops/fwd_gradients.py
50
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Forward-mode derivatives.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops.gradients_impl import gradients def fwd_gradients(ys, xs, grad_xs=None, assert_unused=False): """Computes forward-mode derivatives. This is accomplished in pure-python using tensorflow's existing (reverse-mode) gradients. There is additional overhead on graph construction, but runtime performance should be equal to a manual implementation [citation needed]. See https://j-towns.github.io/2017/06/12/A-new-trick.html and https://github.com/HIPS/autograd/pull/175 for the original discussion of this method, and https://github.com/renmengye/tensorflow-forward-ad for a "direct" implementation. Args: ys: A list of tensors. xs: A list of tensors. grad_xs: An optional list of tensors. If provided, must have the same length and shapes compatible with xs. assert_unused: Add assertions that intermediate values are not computed. Returns: A list of tensors of the same shapes as ys. The directional derivatives of ys with respect to xs in the direction grad_xs. Leaving grad_xs unspecified is equivalent to passing in 1s for each x in xs. """ # This version of forward-mode autodiff is based on code by Tim Cooijmans # and handles list arguments and certain special cases such as when the # ys doesn't depend on one or more of the xs, and when tf.IndexedSlices are # generated by the first tf.gradients call. us = [array_ops.zeros_like(y) + float('nan') for y in ys] dydxs = gradients(ys, xs, grad_ys=us) # deal with strange types that tf.gradients returns but can't deal with dydxs = [ops.convert_to_tensor(dydx) if isinstance(dydx, ops.IndexedSlices) else dydx for dydx in dydxs] if assert_unused: with ops.control_dependencies(dydxs): assert_unused = control_flow_ops.Assert(False, [1], name='fwd_gradients') with ops.control_dependencies([assert_unused]): dydxs = array_ops.identity_n(dydxs) dydxs = [array_ops.zeros_like(x) if dydx is None else dydx for x, dydx in zip(xs, dydxs)] for x, dydx in zip(xs, dydxs): dydx.set_shape(x.shape) dysdx = gradients(dydxs, us, grad_ys=grad_xs) return dysdx
lesserwhirls/scipy-cwt
refs/heads/cwt
scipy/weave/tests/test_numpy_scalar_spec.py
10
import os import sys # Note: test_dir is global to this file. # It is made by setup_test_location() #globals global test_dir test_dir = '' import numpy from numpy.testing import TestCase, dec, assert_ from scipy.weave import inline_tools,ext_tools from scipy.weave.build_tools import msvc_exists, gcc_exists from scipy.weave.catalog import unique_file from scipy.weave.numpy_scalar_spec import numpy_complex_scalar_converter def unique_mod(d,file_name): f = os.path.basename(unique_file(d,file_name)) m = os.path.splitext(f)[0] return m def remove_whitespace(in_str): out = in_str.replace(" ","") out = out.replace("\t","") out = out.replace("\n","") return out #---------------------------------------------------------------------------- # Scalar conversion test classes # int, float, complex #---------------------------------------------------------------------------- class NumpyComplexScalarConverter(TestCase): compiler = '' def setUp(self): self.converter = numpy_complex_scalar_converter() @dec.slow def test_type_match_string(self): assert_( not self.converter.type_match('string') ) @dec.slow def test_type_match_int(self): assert_( not self.converter.type_match(5)) @dec.slow def test_type_match_float(self): assert_( not self.converter.type_match(5.)) @dec.slow def test_type_match_complex128(self): assert_(self.converter.type_match(numpy.complex128(5.+1j))) @dec.slow def test_complex_var_in(self): mod_name = sys._getframe().f_code.co_name + self.compiler mod_name = unique_mod(test_dir,mod_name) mod = ext_tools.ext_module(mod_name) a = numpy.complex(1.+1j) code = "a=std::complex<double>(2.,2.);" test = ext_tools.ext_function('test',code,['a']) mod.add_function(test) mod.compile(location = test_dir, compiler = self.compiler) exec 'from ' + mod_name + ' import test' b=numpy.complex128(1.+1j) test(b) try: b = 1. test(b) except TypeError: pass try: b = 'abc' test(b) except TypeError: pass @dec.slow def test_complex_return(self): mod_name = sys._getframe().f_code.co_name + self.compiler mod_name = unique_mod(test_dir,mod_name) mod = ext_tools.ext_module(mod_name) a = 1.+1j code = """ a= a + std::complex<double>(2.,2.); return_val = PyComplex_FromDoubles(a.real(),a.imag()); """ test = ext_tools.ext_function('test',code,['a']) mod.add_function(test) mod.compile(location = test_dir, compiler = self.compiler) exec 'from ' + mod_name + ' import test' b=1.+1j c = test(b) assert_( c == 3.+3j) @dec.slow def test_inline(self): a = numpy.complex128(1+1j) result = inline_tools.inline("return_val=1.0/a;",['a']) assert_( result==.5-.5j) # class TestMsvcNumpyComplexScalarConverter( # TestNumpyComplexScalarConverter): # compiler = 'msvc' # class TestUnixNumpyComplexScalarConverter( # TestNumpyComplexScalarConverter): # compiler = '' # class TestGccNumpyComplexScalarConverter( # TestNumpyComplexScalarConverter): # compiler = 'gcc' for _n in dir(): if _n[-9:]=='Converter': if msvc_exists(): exec "class Test%sMsvc(%s):\n compiler = 'msvc'"%(_n,_n) else: exec "class Test%sUnix(%s):\n compiler = ''"%(_n,_n) if gcc_exists(): exec "class Test%sGcc(%s):\n compiler = 'gcc'"%(_n,_n) def setup_test_location(): import tempfile #test_dir = os.path.join(tempfile.gettempdir(),'test_files') test_dir = tempfile.mktemp() if not os.path.exists(test_dir): os.mkdir(test_dir) sys.path.insert(0,test_dir) return test_dir test_dir = setup_test_location() def teardown_test_location(): import tempfile test_dir = os.path.join(tempfile.gettempdir(),'test_files') if sys.path[0] == test_dir: sys.path = sys.path[1:] return test_dir def remove_file(name): test_dir = os.path.abspath(name) if not msvc_exists(): for _n in dir(): if _n[:8]=='TestMsvc': exec 'del '+_n else: for _n in dir(): if _n[:8]=='TestUnix': exec 'del '+_n if not (gcc_exists() and msvc_exists() and sys.platform == 'win32'): for _n in dir(): if _n[:7]=='TestGcc': exec 'del '+_n if __name__ == "__main__": import nose nose.run(argv=['', __file__])
Archassault/archassaultweb
refs/heads/master
mirrors/migrations/0024_auto__add_field_mirrorlog_location.py
2
# -*- coding: utf-8 -*- from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): db.add_column(u'mirrors_mirrorlog', 'location', self.gf('django.db.models.fields.related.ForeignKey')(related_name='logs', null=True, to=orm['mirrors.CheckLocation']), keep_default=False) def backwards(self, orm): db.delete_column(u'mirrors_mirrorlog', 'location_id') models = { u'mirrors.checklocation': { 'Meta': {'ordering': "('hostname', 'source_ip')", 'object_name': 'CheckLocation'}, 'country': ('django_countries.fields.CountryField', [], {'max_length': '2'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'source_ip': ('django.db.models.fields.GenericIPAddressField', [], {'unique': 'True', 'max_length': '39'}) }, u'mirrors.mirror': { 'Meta': {'ordering': "('name',)", 'object_name': 'Mirror'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'admin_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'blank': 'True'}), 'alternate_email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'isos': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'rsync_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}), 'rsync_user': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}), 'tier': ('django.db.models.fields.SmallIntegerField', [], {'default': '2'}), 'upstream': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mirrors.Mirror']", 'null': 'True', 'on_delete': 'models.SET_NULL'}) }, u'mirrors.mirrorlog': { 'Meta': {'object_name': 'MirrorLog'}, 'check_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'duration': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'error': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_success': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'null': 'True', 'to': u"orm['mirrors.CheckLocation']"}), 'url': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['mirrors.MirrorUrl']"}) }, u'mirrors.mirrorprotocol': { 'Meta': {'ordering': "('protocol',)", 'object_name': 'MirrorProtocol'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), 'default': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_download': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'protocol': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}) }, u'mirrors.mirrorrsync': { 'Meta': {'object_name': 'MirrorRsync'}, 'created': ('django.db.models.fields.DateTimeField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip': ('django.db.models.fields.CharField', [], {'max_length': '44'}), 'mirror': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rsync_ips'", 'to': u"orm['mirrors.Mirror']"}) }, u'mirrors.mirrorurl': { 'Meta': {'object_name': 'MirrorUrl'}, 'country': ('django_countries.fields.CountryField', [], {'db_index': 'True', 'max_length': '2', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {}), 'has_ipv4': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'has_ipv6': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mirror': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'urls'", 'to': u"orm['mirrors.Mirror']"}), 'protocol': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'urls'", 'on_delete': 'models.PROTECT', 'to': u"orm['mirrors.MirrorProtocol']"}), 'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) } } complete_apps = ['mirrors']
ftomassetti/intellij-community
refs/heads/master
python/testData/quickFixes/PyRemoveParameterQuickFixTest/param_after.py
80
def foo(): def a(): pass x = 1 x = 2
suncycheng/intellij-community
refs/heads/master
python/testData/copyPaste/Selection3.dst.py
83
if True: a = 1 b = 2 def f(): <selection> c = 3 <caret></selection>
kleskjr/scipy
refs/heads/master
tools/win32/detect_cpu_extensions_wine.py
79
#!/usr/bin/python """ Detect which x86 CPU extension instructions the given scipy install uses. This file can be used in the release process to check that the nosse installer does not contain SSE instructions. This has happened before, see for example ticket #1170. Is meant to be run on OS X with Wine. Make sure objdump.exe is installed. See also tools/win32build/misc/x86analysis.py in numpy for a similar script that checks a single file. """ import subprocess import sys import os from optparse import OptionParser OBJDUMP = os.environ['HOME'] + '/.wine/drive_c/MinGW/bin/objdump.exe' SCIPY_PY25 = os.environ['HOME'] + '/.wine/drive_c/Python25/Lib/site-packages/scipy/' SCIPY_PY26 = os.environ['HOME'] + '/.wine/drive_c/Python26/Lib/site-packages/scipy/' SCIPY_PY27 = os.environ['HOME'] + '/.wine/drive_c/Python27/Lib/site-packages/scipy/' SCIPY_PY31 = os.environ['HOME'] + '/.wine/drive_c/Python31/Lib/site-packages/scipy/' NUMPY_PY25 = os.environ['HOME'] + '/.wine/drive_c/Python25/Lib/site-packages/numpy/' NUMPY_PY26 = os.environ['HOME'] + '/.wine/drive_c/Python26/Lib/site-packages/numpy/' NUMPY_PY27 = os.environ['HOME'] + '/.wine/drive_c/Python27/Lib/site-packages/numpy/' NUMPY_PY31 = os.environ['HOME'] + '/.wine/drive_c/Python31/Lib/site-packages/numpy/' SSE3_LIBS = os.environ['HOME'] + '/.wine/drive_c/local/lib/yop/sse3' SSE2_LIBS = os.environ['HOME'] + '/.wine/drive_c/local/lib/yop/sse2' NOSSE_LIBS = os.environ['HOME'] + '/.wine/drive_c/local/lib/yop/nosse' # The install to check basepath = SCIPY_PY25 def main(): # a set of all unique CPU extension codes found allcodes = set() # walk the SciPy tree and check all binary files for root, dirs, files in os.walk(basepath): for fl in files: if os.path.splitext(fl)[1] in ['.a', '.pyd', '.so']: full_fpath = os.path.join(root, fl) codes = single_file_checkext(full_fpath) for code in codes: allcodes.add(code) write_summary(allcodes) def single_file_checkext(fname, striproot=True): if striproot: sys.stdout.write('%s: ' % fname.replace(basepath, '')) else: sys.stdout.write('%s: ' % fname) sys.stdout.flush() codes = process(path_as_windows(fname)) sys.stdout.write(" ".join(codes)) sys.stdout.write("\n") return codes def path_as_windows(fpath): """Return the file path as Wine expects.""" winepath = 'C:\\' + fpath.split('drive_c')[1] return winepath def write_summary(allcodes): """Write a summary of all found codes to stdout.""" print """\n ---------------------------------------------------------------------------- Checked all binary files for CPU extension codes. Found the following codes:""" for code in allcodes: print code print """ ---------------------------------------------------------------------------- """ def process(fn): p = subprocess.Popen(['wine', OBJDUMP, '-d', fn], stdout=subprocess.PIPE) codes = {} for line in p.stdout: r = line.split("\t") if len(r) != 3: continue instr = r[2].split()[0].lower() if instr in INSTRS: codes[INSTRS[instr]] = True print instr codes = codes.keys() codes.sort() return codes #------------------------------------------------------------------------------ # Instruction lists #------------------------------------------------------------------------------ # x86 EXTS_x86 = dict( _486='bswap cmpxch cpuid invd invlpg wbinvd xadd', pentium='cmpxchg8b rdmsr rdtsc wrmsr', pentium_mmx='rdpmc', pentium_pro='cmova cmovae cmovb cmovbe cmovc cmove cmovg cmovge cmovl cmovle cmovna cmovnae cmovnb cmovnbe cmovnc cmovne cmovng cmovnge cmovnl cmovnle cmovno cmovnp cmovns cmovnz cmovo cmovp cmovpe cmovpo cmovs cmovz sysenter sysexit rdpmc ud2', amd_k6_2='syscall sysret', sse='maskmovq movntps movntq prefetch0 prefetch1 prefetch2 prefetchnta sfence', sse2='clflush lfence maskmovdqu mfence movntdq movnti movntpd pause', sse3='lddqu', sse3_intel='monitor mwait', intel_vt='vmptrld vmptrst vmclear vmread vmwrite vmcall vmlaunch vmresume vmxoff vmxon', amd_v='clgi skinit stgi vmload vmmcall vmrun vmsave', x86_64='cmpxchg16b rdtscp', sse4a='lzcnt popcnt', ) # x87 EXTS_x87 = dict( pentium_pro='fcmovb, fcmovbe, fcmove, fcmovnb, fcmovnbe, fcmovne, fcmovnu, fcmovu fcomi fcomip fucomi fucomip', sse='fxrstor fxsave', sse3='fisttp', undocumented='ffreep', ) # SIMD EXTS_simd = dict( mmx='emms movd movq packssdw packsswb packuswb paddb paddd paddsb paddsw paddusb paddusw paddw pand pandn pcmpeqb pcmpeqd pcmpeqw pcmpgtb pcmpgtd pcmpgtw pmaddwd pmulhw pmullw por pslld psllq psllw psrad psraw psrld psrlq psrlw psubb psubd psubsb psubsw psubusb psubusw psubw punpckhbw punpckhdq punpckhwd punpcklbw punpckldq punpcklwd pxor', emmx='paveb paddsiw pmagw pdistib psubsiw pmvzb pmulhrw pmvnzb pmvlzb pmvgezb pmulhriw pmachriw', _3dnow='femms pavgusb pf2id pfacc pfadd pfcmpeq pfcmpge pfcmpgt pfmax pfmin pfmul pfrcp pfrcpit1 pfrcpit2 pfrsqit1 pfrsqrt pfsub pfsubr pi2fd pmulhrw prefetch prefetchw', _3dnowplus='pf2iw pfnacc pfpnacc pi2fw pswapd', _3dnowplus_geodegx='pfrsqrtv pfrcpv', sse='addps addss cmpps cmpss comiss cvtpi2ps cvtps2pi cvtsi2ss cvtss2si cvttps2pi cvttss2si divps divss ldmxcsr maxps maxss minps minss movaps movhlps movhps movlhps movlps movmskps movntps movss movups mulps mulss rcpps rcpss rsqrtps rsqrtss shufps sqrtps sqrtss stmxcsr subps subss ucomiss unpckhps unpcklps andnps andps orps pavgb pavgw pextrw pinsrw pmaxsw pmaxub pminsw pminub pmovmskb pmulhuw psadbw pshufw xorps', sse2='addpd addsd andnpd andpd cmppd cmpsd comisd cvtdq2pd cvtdq2ps cvtpd2dq cvtpd2pi cvtpd2ps cvtpi2pd cvtps2dq cvtps2pd cvtsd2si cvtsd2ss cvtsi2sd cvtss2sd cvttpd2dq cvttpd2pi cvtps2dq cvttsd2si divpd divsd maxpd maxsd minpd minsd movapd movhpd movlpd movmskpd movsd movupd mulpd mulsd orpd shufpd sqrtpd sqrtsd subpd subsd ucomisd unpckhpd unpcklpd xorpd movdq2q movdqa movdqu movq2dq paddq psubq pmuludq pshufhw pshuflw pshufd pslldq psrldq punpckhqdq punpcklqdq', sse3='addsubpd addsubps haddpd haddps hsubpd hsubps movddup movshdup movsldup', ssse3='psignw psignd psignb pshufb pmulhrsw pmaddubsw phsubw phsubsw phsubd phaddw phaddsw phaddd palignr pabsw pabsd pabsb', sse4_1='mpsadbw phminposuw pmulld pmuldq dpps dppd blendps blendpd blendvps blendvpd pblendvb pblendw pminsb pmaxsb pminuw pmaxuw pminud pmaxud pminsd pmaxsd roundps roundss roundpd roundsd insertps pinsrb pinsrd/pinsrq extractps pextrb pextrw pextrd/pextrq pmovsxbw pmovzxbw pmovsxbd pmovzxbd pmovsxbq pmovzxbq pmovsxwd pmovzxwd pmovsxwq pmovzxwq pmovsxdq pmovzxdq ptest pcmpeqq packusdw movntdqa', sse4a='extrq insertq movntsd movntss', sse4_2='crc32 pcmpestri pcmpestrm pcmpistri pcmpistrm pcmpgtq', fma='vfmaddpd vfmaddps vfmaddsd vfmaddss vfmaddsubpd vfmaddsubps vfmsubaddpd vfmsubaddps vfmsubpd vfmsubps vfmsubsd vfmsubss vfnmaddpd vfnmaddps vfnmaddsd vfnmadss vfnmsubpd vfnmsubps vfnmsubsd vfnmsubss', ) INSTRS = dict() for ext in [EXTS_x86, EXTS_x87, EXTS_simd]: for key, value in ext.items(): if key.startswith('_'): key = key[1:] for v in value.split(): INSTRS[v] = key #------------------------------------------------------------------------------ if __name__ == "__main__": main()
asadoughi/quark
refs/heads/master
quark/db/migration/alembic/versions/374c1bdb4480_add_port_network_plugin.py
6
"""add_port_network_plugin Revision ID: 374c1bdb4480 Revises: 4da4444d7706 Create Date: 2015-10-20 12:08:24.780056 """ # revision identifiers, used by Alembic. revision = '374c1bdb4480' down_revision = '4da4444d7706' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('quark_ports', sa.Column('network_plugin', sa.String(length=36), nullable=True)) def downgrade(): op.drop_column('quark_ports', 'network_plugin')
hominlinx/linux-bananapi
refs/heads/lemaker-3.4
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
12527
# Util.py - Python extension for perf script, miscellaneous utility code # # Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com> # # This software may be distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. import errno, os FUTEX_WAIT = 0 FUTEX_WAKE = 1 FUTEX_PRIVATE_FLAG = 128 FUTEX_CLOCK_REALTIME = 256 FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME) NSECS_PER_SEC = 1000000000 def avg(total, n): return total / n def nsecs(secs, nsecs): return secs * NSECS_PER_SEC + nsecs def nsecs_secs(nsecs): return nsecs / NSECS_PER_SEC def nsecs_nsecs(nsecs): return nsecs % NSECS_PER_SEC def nsecs_str(nsecs): str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)), return str def add_stats(dict, key, value): if not dict.has_key(key): dict[key] = (value, value, value, 1) else: min, max, avg, count = dict[key] if value < min: min = value if value > max: max = value avg = (avg + value) / 2 dict[key] = (min, max, avg, count + 1) def clear_term(): print("\x1b[H\x1b[2J") audit_package_warned = False try: import audit machine_to_id = { 'x86_64': audit.MACH_86_64, 'alpha' : audit.MACH_ALPHA, 'ia64' : audit.MACH_IA64, 'ppc' : audit.MACH_PPC, 'ppc64' : audit.MACH_PPC64, 's390' : audit.MACH_S390, 's390x' : audit.MACH_S390X, 'i386' : audit.MACH_X86, 'i586' : audit.MACH_X86, 'i686' : audit.MACH_X86, } try: machine_to_id['armeb'] = audit.MACH_ARMEB except: pass machine_id = machine_to_id[os.uname()[4]] except: if not audit_package_warned: audit_package_warned = True print "Install the audit-libs-python package to get syscall names" def syscall_name(id): try: return audit.audit_syscall_to_name(id, machine_id) except: return str(id) def strerror(nr): try: return errno.errorcode[abs(nr)] except: return "Unknown %d errno" % nr
jswartwood/node-streams
refs/heads/gh-pages
node_modules/socket.io/node_modules/redis/node_modules/hiredis/build/c4che/build.config.py
59
version = 0x105016 tools = [{'tool': 'ar', 'tooldir': None, 'funs': None}, {'tool': 'cxx', 'tooldir': None, 'funs': None}, {'tool': 'gxx', 'tooldir': None, 'funs': None}, {'tool': 'compiler_cxx', 'tooldir': None, 'funs': None}, {'tool': 'osx', 'tooldir': None, 'funs': None}, {'tool': 'node_addon', 'tooldir': None, 'funs': None}]
ahnjungho/fabtools
refs/heads/master
fabtools/require/python.py
6
""" Python environments and packages ================================ This module provides high-level tools for using Python `virtual environments`_ and installing Python packages using the `pip`_ installer. .. _virtual environments: http://www.virtualenv.org/ .. _pip: http://www.pip-installer.org/ """ from fabtools.python import ( create_virtualenv, install, install_pip, install_requirements, is_installed, is_pip_installed, virtualenv_exists, ) from fabtools.python_setuptools import ( install_setuptools, is_setuptools_installed, ) from fabtools.system import UnsupportedFamily, distrib_family MIN_SETUPTOOLS_VERSION = '0.7' MIN_PIP_VERSION = '1.5' def setuptools(version=MIN_SETUPTOOLS_VERSION, python_cmd='python'): """ Require `setuptools`_ to be installed. If setuptools is not installed, or if a version older than *version* is installed, the latest version will be installed. .. _setuptools: http://pythonhosted.org/setuptools/ """ from fabtools.require.deb import package as require_deb_package from fabtools.require.rpm import package as require_rpm_package if not is_setuptools_installed(python_cmd=python_cmd): family = distrib_family() if family == 'debian': require_deb_package('python-dev') elif family == 'redhat': require_rpm_package('python-devel') elif family == 'arch': pass # ArchLinux installs header with base package else: raise UnsupportedFamily(supported=['debian', 'redhat', 'arch']) install_setuptools(python_cmd=python_cmd) def pip(version=MIN_PIP_VERSION, pip_cmd='pip', python_cmd='python'): """ Require `pip`_ to be installed. If pip is not installed, or if a version older than *version* is installed, the latest version will be installed. .. _pip: http://www.pip-installer.org/ """ setuptools(python_cmd=python_cmd) if not is_pip_installed(version, pip_cmd=pip_cmd): install_pip(python_cmd=python_cmd) def package(pkg_name, url=None, pip_cmd='pip', python_cmd='python', allow_external=False, allow_unverified=False, **kwargs): """ Require a Python package. If the package is not installed, it will be installed using the `pip installer`_. Package names are case insensitive. Starting with version 1.5, pip no longer scrapes insecure external urls by default and no longer installs externally hosted files by default. Use ``allow_external=True`` or ``allow_unverified=True`` to change these behaviours. :: from fabtools.python import virtualenv from fabtools import require # Install package system-wide (not recommended) require.python.package('foo', use_sudo=True) # Install package in an existing virtual environment with virtualenv('/path/to/venv'): require.python.package('bar') .. _pip installer: http://www.pip-installer.org/ """ pip(MIN_PIP_VERSION, python_cmd=python_cmd) if not is_installed(pkg_name, pip_cmd=pip_cmd): install(url or pkg_name, pip_cmd=pip_cmd, allow_external=[url or pkg_name] if allow_external else [], allow_unverified=[url or pkg_name] if allow_unverified else [], **kwargs) def packages(pkg_list, pip_cmd='pip', python_cmd='python', allow_external=None, allow_unverified=None, **kwargs): """ Require several Python packages. Package names are case insensitive. Starting with version 1.5, pip no longer scrapes insecure external urls by default and no longer installs externally hosted files by default. Use ``allow_external=['foo', 'bar']`` or ``allow_unverified=['bar', 'baz']`` to change these behaviours for specific packages. """ if allow_external is None: allow_external = [] if allow_unverified is None: allow_unverified = [] pip(MIN_PIP_VERSION, python_cmd=python_cmd) pkg_list = [pkg for pkg in pkg_list if not is_installed(pkg, pip_cmd=pip_cmd)] if pkg_list: install(pkg_list, pip_cmd=pip_cmd, allow_external=allow_external, allow_unverified=allow_unverified, **kwargs) def requirements(filename, pip_cmd='pip', python_cmd='python', allow_external=None, allow_unverified=None, **kwargs): """ Require Python packages from a pip `requirements file`_. Starting with version 1.5, pip no longer scrapes insecure external urls by default and no longer installs externally hosted files by default. Use ``allow_external=['foo', 'bar']`` or ``allow_unverified=['bar', 'baz']`` to change these behaviours for specific packages. :: from fabtools.python import virtualenv from fabtools import require # Install requirements in an existing virtual environment with virtualenv('/path/to/venv'): require.python.requirements('requirements.txt') .. _requirements file: http://www.pip-installer.org/en/latest/requirements.html """ pip(MIN_PIP_VERSION, python_cmd=python_cmd) install_requirements(filename, pip_cmd=pip_cmd, allow_external=allow_external, allow_unverified=allow_unverified, **kwargs) def virtualenv(directory, system_site_packages=False, venv_python=None, use_sudo=False, user=None, clear=False, prompt=None, virtualenv_cmd='virtualenv', pip_cmd='pip', python_cmd='python'): """ Require a Python `virtual environment`_. :: from fabtools import require require.python.virtualenv('/path/to/venv') .. _virtual environment: http://www.virtualenv.org/ """ package('virtualenv', use_sudo=True, pip_cmd=pip_cmd, python_cmd=python_cmd) if not virtualenv_exists(directory): create_virtualenv( directory, system_site_packages=system_site_packages, venv_python=venv_python, use_sudo=use_sudo, user=user, clear=clear, prompt=prompt, virtualenv_cmd=virtualenv_cmd, )
liang42hao/django-chartit
refs/heads/master
chartit/exceptions.py
6
"""Global ChartIt exception and warning classes.""" class APIInputError(Exception): """Some kind of problem when validating the user input.""" pass
XXMrHyde/android_external_chromium_org
refs/heads/darkkat-4.4
tools/checkdeps/checkdeps.py
24
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Makes sure that files include headers from allowed directories. Checks DEPS files in the source tree for rules, and applies those rules to "#include" commands in source files. Any source file including something not permitted by the DEPS files will fail. The format of the deps file: First you have the normal module-level deps. These are the ones used by gclient. An example would be: deps = { "base":"http://foo.bar/trunk/base" } DEPS files not in the top-level of a module won't need this. Then you have any additional include rules. You can add (using "+") or subtract (using "-") from the previously specified rules (including module-level deps). You can also specify a path that is allowed for now but that we intend to remove, using "!"; this is treated the same as "+" when check_deps is run by our bots, but a presubmit step will show a warning if you add a new include of a file that is only allowed by "!". Note that for .java files, there is currently no difference between "+" and "!", even in the presubmit step. include_rules = { # Code should be able to use base (it's specified in the module-level # deps above), but nothing in "base/evil" because it's evil. "-base/evil", # But this one subdirectory of evil is OK. "+base/evil/not", # And it can include files from this other directory even though there is # no deps rule for it. "+tools/crime_fighter", # This dependency is allowed for now but work is ongoing to remove it, # so you shouldn't add further dependencies on it. "!base/evil/ok_for_now.h", } If you have certain include rules that should only be applied for some files within this directory and subdirectories, you can write a section named specific_include_rules that is a hash map of regular expressions to the list of rules that should apply to files matching them. Note that such rules will always be applied before the rules from 'include_rules' have been applied, but the order in which rules associated with different regular expressions is applied is arbitrary. specific_include_rules = { ".*_(unit|browser|api)test\.cc": [ "+libraries/testsupport", ], } DEPS files may be placed anywhere in the tree. Each one applies to all subdirectories, where there may be more DEPS files that provide additions or subtractions for their own sub-trees. There is an implicit rule for the current directory (where the DEPS file lives) and all of its subdirectories. This prevents you from having to explicitly allow the current directory everywhere. This implicit rule is applied first, so you can modify or remove it using the normal include rules. The rules are processed in order. This means you can explicitly allow a higher directory and then take away permissions from sub-parts, or the reverse. Note that all directory separators must be slashes (Unix-style) and not backslashes. All directories should be relative to the source root and use only lowercase. """ import os import optparse import re import subprocess import sys import copy import cpp_checker import java_checker import results from rules import Rule, Rules # Variable name used in the DEPS file to add or subtract include files from # the module-level deps. INCLUDE_RULES_VAR_NAME = 'include_rules' # Variable name used in the DEPS file to add or subtract include files # from module-level deps specific to files whose basename (last # component of path) matches a given regular expression. SPECIFIC_INCLUDE_RULES_VAR_NAME = 'specific_include_rules' # Optionally present in the DEPS file to list subdirectories which should not # be checked. This allows us to skip third party code, for example. SKIP_SUBDIRS_VAR_NAME = 'skip_child_includes' def NormalizePath(path): """Returns a path normalized to how we write DEPS rules and compare paths. """ return path.lower().replace('\\', '/') def _IsTestFile(filename): """Does a rudimentary check to try to skip test files; this could be improved but is good enough for now. """ return re.match('(test|mock|dummy)_.*|.*_[a-z]*test\.(cc|mm|java)', filename) class DepsChecker(object): """Parses include_rules from DEPS files and can verify files in the source tree against them. """ def __init__(self, base_directory=None, verbose=False, being_tested=False, ignore_temp_rules=False, skip_tests=False): """Creates a new DepsChecker. Args: base_directory: OS-compatible path to root of checkout, e.g. C:\chr\src. verbose: Set to true for debug output. being_tested: Set to true to ignore the DEPS file at tools/checkdeps/DEPS. """ self.base_directory = base_directory self.verbose = verbose self._under_test = being_tested self._ignore_temp_rules = ignore_temp_rules self._skip_tests = skip_tests if not base_directory: self.base_directory = os.path.abspath( os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..')) self.results_formatter = results.NormalResultsFormatter(verbose) self.git_source_directories = set() self._AddGitSourceDirectories() # Map of normalized directory paths to rules to use for those # directories, or None for directories that should be skipped. self.directory_rules = {} self._ApplyDirectoryRulesAndSkipSubdirs(Rules(), self.base_directory) def Report(self): """Prints a report of results, and returns an exit code for the process.""" if self.results_formatter.GetResults(): self.results_formatter.PrintResults() return 1 print '\nSUCCESS\n' return 0 def _ApplyRules(self, existing_rules, includes, specific_includes, cur_dir): """Applies the given include rules, returning the new rules. Args: existing_rules: A set of existing rules that will be combined. include: The list of rules from the "include_rules" section of DEPS. specific_includes: E.g. {'.*_unittest\.cc': ['+foo', '-blat']} rules from the "specific_include_rules" section of DEPS. cur_dir: The current directory, normalized path. We will create an implicit rule that allows inclusion from this directory. Returns: A new set of rules combining the existing_rules with the other arguments. """ rules = copy.deepcopy(existing_rules) # First apply the implicit "allow" rule for the current directory. if cur_dir.startswith( NormalizePath(os.path.normpath(self.base_directory))): relative_dir = cur_dir[len(self.base_directory) + 1:] source = relative_dir if len(source) == 0: source = 'top level' # Make the help string a little more meaningful. rules.AddRule('+' + relative_dir, 'Default rule for ' + source) else: raise Exception('Internal error: base directory is not at the beginning' + ' for\n %s and base dir\n %s' % (cur_dir, self.base_directory)) def ApplyOneRule(rule_str, dependee_regexp=None): """Deduces a sensible description for the rule being added, and adds the rule with its description to |rules|. If we are ignoring temporary rules, this function does nothing for rules beginning with the Rule.TEMP_ALLOW character. """ if self._ignore_temp_rules and rule_str.startswith(Rule.TEMP_ALLOW): return rule_block_name = 'include_rules' if dependee_regexp: rule_block_name = 'specific_include_rules' if not relative_dir: rule_description = 'the top level %s' % rule_block_name else: rule_description = relative_dir + "'s %s" % rule_block_name rules.AddRule(rule_str, rule_description, dependee_regexp) # Apply the additional explicit rules. for (_, rule_str) in enumerate(includes): ApplyOneRule(rule_str) # Finally, apply the specific rules. for regexp, specific_rules in specific_includes.iteritems(): for rule_str in specific_rules: ApplyOneRule(rule_str, regexp) return rules def _ApplyDirectoryRules(self, existing_rules, dir_name): """Combines rules from the existing rules and the new directory. Any directory can contain a DEPS file. Toplevel DEPS files can contain module dependencies which are used by gclient. We use these, along with additional include rules and implicit rules for the given directory, to come up with a combined set of rules to apply for the directory. Args: existing_rules: The rules for the parent directory. We'll add-on to these. dir_name: The directory name that the deps file may live in (if it exists). This will also be used to generate the implicit rules. This is a non-normalized path. Returns: A tuple containing: (1) the combined set of rules to apply to the sub-tree, and (2) a list of all subdirectories that should NOT be checked, as specified in the DEPS file (if any). """ norm_dir_name = NormalizePath(dir_name) # Check for a .svn directory in this directory or check this directory is # contained in git source direcotries. This will tell us if it's a source # directory and should be checked. if not (os.path.exists(os.path.join(dir_name, ".svn")) or (norm_dir_name in self.git_source_directories)): return (None, []) # Check the DEPS file in this directory. if self.verbose: print 'Applying rules from', dir_name def FromImpl(_unused, _unused2): pass # NOP function so "From" doesn't fail. def FileImpl(_unused): pass # NOP function so "File" doesn't fail. class _VarImpl: def __init__(self, local_scope): self._local_scope = local_scope def Lookup(self, var_name): """Implements the Var syntax.""" if var_name in self._local_scope.get('vars', {}): return self._local_scope['vars'][var_name] raise Exception('Var is not defined: %s' % var_name) local_scope = {} global_scope = { 'File': FileImpl, 'From': FromImpl, 'Var': _VarImpl(local_scope).Lookup, } deps_file = os.path.join(dir_name, 'DEPS') # The second conditional here is to disregard the # tools/checkdeps/DEPS file while running tests. This DEPS file # has a skip_child_includes for 'testdata' which is necessary for # running production tests, since there are intentional DEPS # violations under the testdata directory. On the other hand when # running tests, we absolutely need to verify the contents of that # directory to trigger those intended violations and see that they # are handled correctly. if os.path.isfile(deps_file) and ( not self._under_test or not os.path.split(dir_name)[1] == 'checkdeps'): execfile(deps_file, global_scope, local_scope) elif self.verbose: print ' No deps file found in', dir_name # Even if a DEPS file does not exist we still invoke ApplyRules # to apply the implicit "allow" rule for the current directory include_rules = local_scope.get(INCLUDE_RULES_VAR_NAME, []) specific_include_rules = local_scope.get(SPECIFIC_INCLUDE_RULES_VAR_NAME, {}) skip_subdirs = local_scope.get(SKIP_SUBDIRS_VAR_NAME, []) return (self._ApplyRules(existing_rules, include_rules, specific_include_rules, norm_dir_name), skip_subdirs) def _ApplyDirectoryRulesAndSkipSubdirs(self, parent_rules, dir_path): """Given |parent_rules| and a subdirectory |dir_path| from the directory that owns the |parent_rules|, add |dir_path|'s rules to |self.directory_rules|, and add None entries for any of its subdirectories that should be skipped. """ directory_rules, excluded_subdirs = self._ApplyDirectoryRules(parent_rules, dir_path) self.directory_rules[NormalizePath(dir_path)] = directory_rules for subdir in excluded_subdirs: self.directory_rules[NormalizePath( os.path.normpath(os.path.join(dir_path, subdir)))] = None def GetDirectoryRules(self, dir_path): """Returns a Rules object to use for the given directory, or None if the given directory should be skipped. This takes care of first building rules for parent directories (up to self.base_directory) if needed. Args: dir_path: A real (non-normalized) path to the directory you want rules for. """ norm_dir_path = NormalizePath(dir_path) if not norm_dir_path.startswith( NormalizePath(os.path.normpath(self.base_directory))): dir_path = os.path.join(self.base_directory, dir_path) norm_dir_path = NormalizePath(dir_path) parent_dir = os.path.dirname(dir_path) parent_rules = None if not norm_dir_path in self.directory_rules: parent_rules = self.GetDirectoryRules(parent_dir) # We need to check for an entry for our dir_path again, in case we # are at a path e.g. A/B/C where A/B/DEPS specifies the C # subdirectory to be skipped; in this case, the invocation to # GetDirectoryRules(parent_dir) has already filled in an entry for # A/B/C. if not norm_dir_path in self.directory_rules: if not parent_rules: # If the parent directory should be skipped, then the current # directory should also be skipped. self.directory_rules[norm_dir_path] = None else: self._ApplyDirectoryRulesAndSkipSubdirs(parent_rules, dir_path) return self.directory_rules[norm_dir_path] def CheckDirectory(self, start_dir): """Checks all relevant source files in the specified directory and its subdirectories for compliance with DEPS rules throughout the tree (starting at |self.base_directory|). |start_dir| must be a subdirectory of |self.base_directory|. On completion, self.results_formatter has the results of processing, and calling Report() will print a report of results. """ java = java_checker.JavaChecker(self.base_directory, self.verbose) cpp = cpp_checker.CppChecker(self.verbose) checkers = dict( (extension, checker) for checker in [java, cpp] for extension in checker.EXTENSIONS) self._CheckDirectoryImpl(checkers, start_dir) def _CheckDirectoryImpl(self, checkers, dir_name): rules = self.GetDirectoryRules(dir_name) if rules == None: return # Collect a list of all files and directories to check. files_to_check = [] dirs_to_check = [] contents = os.listdir(dir_name) for cur in contents: full_name = os.path.join(dir_name, cur) if os.path.isdir(full_name): dirs_to_check.append(full_name) elif os.path.splitext(full_name)[1] in checkers: if not self._skip_tests or not _IsTestFile(cur): files_to_check.append(full_name) # First check all files in this directory. for cur in files_to_check: checker = checkers[os.path.splitext(cur)[1]] file_status = checker.CheckFile(rules, cur) if file_status.HasViolations(): self.results_formatter.AddError(file_status) # Next recurse into the subdirectories. for cur in dirs_to_check: self._CheckDirectoryImpl(checkers, cur) def CheckAddedCppIncludes(self, added_includes): """This is used from PRESUBMIT.py to check new #include statements added in the change being presubmit checked. Args: added_includes: ((file_path, (include_line, include_line, ...), ...) Return: A list of tuples, (bad_file_path, rule_type, rule_description) where rule_type is one of Rule.DISALLOW or Rule.TEMP_ALLOW and rule_description is human-readable. Empty if no problems. """ cpp = cpp_checker.CppChecker(self.verbose) problems = [] for file_path, include_lines in added_includes: if not cpp.IsCppFile(file_path): pass rules_for_file = self.GetDirectoryRules(os.path.dirname(file_path)) if rules_for_file: for line in include_lines: is_include, violation = cpp.CheckLine( rules_for_file, line, file_path, True) if violation: rule_type = violation.violated_rule.allow if rule_type != Rule.ALLOW: violation_text = results.NormalResultsFormatter.FormatViolation( violation, self.verbose) problems.append((file_path, rule_type, violation_text)) return problems def _AddGitSourceDirectories(self): """Adds any directories containing sources managed by git to self.git_source_directories. """ if not os.path.exists(os.path.join(self.base_directory, '.git')): return popen_out = os.popen('cd %s && git ls-files --full-name .' % subprocess.list2cmdline([self.base_directory])) for line in popen_out.readlines(): dir_name = os.path.join(self.base_directory, os.path.dirname(line)) # Add the directory as well as all the parent directories. Use # forward slashes and lower case to normalize paths. while dir_name != self.base_directory: self.git_source_directories.add(NormalizePath(dir_name)) dir_name = os.path.dirname(dir_name) self.git_source_directories.add(NormalizePath(self.base_directory)) def PrintUsage(): print """Usage: python checkdeps.py [--root <root>] [tocheck] --root ROOT Specifies the repository root. This defaults to "../../.." relative to the script file. This will be correct given the normal location of the script in "<root>/tools/checkdeps". --(others) There are a few lesser-used options; run with --help to show them. tocheck Specifies the directory, relative to root, to check. This defaults to "." so it checks everything. Examples: python checkdeps.py python checkdeps.py --root c:\\source chrome""" def main(): option_parser = optparse.OptionParser() option_parser.add_option( '', '--root', default='', dest='base_directory', help='Specifies the repository root. This defaults ' 'to "../../.." relative to the script file, which ' 'will normally be the repository root.') option_parser.add_option( '', '--ignore-temp-rules', action='store_true', dest='ignore_temp_rules', default=False, help='Ignore !-prefixed (temporary) rules.') option_parser.add_option( '', '--generate-temp-rules', action='store_true', dest='generate_temp_rules', default=False, help='Print rules to temporarily allow files that fail ' 'dependency checking.') option_parser.add_option( '', '--count-violations', action='store_true', dest='count_violations', default=False, help='Count #includes in violation of intended rules.') option_parser.add_option( '', '--skip-tests', action='store_true', dest='skip_tests', default=False, help='Skip checking test files (best effort).') option_parser.add_option( '-v', '--verbose', action='store_true', default=False, help='Print debug logging') options, args = option_parser.parse_args() deps_checker = DepsChecker(options.base_directory, verbose=options.verbose, ignore_temp_rules=options.ignore_temp_rules, skip_tests=options.skip_tests) # Figure out which directory we have to check. start_dir = deps_checker.base_directory if len(args) == 1: # Directory specified. Start here. It's supposed to be relative to the # base directory. start_dir = os.path.abspath( os.path.join(deps_checker.base_directory, args[0])) elif len(args) >= 2 or (options.generate_temp_rules and options.count_violations): # More than one argument, or incompatible flags, we don't handle this. PrintUsage() return 1 print 'Using base directory:', deps_checker.base_directory print 'Checking:', start_dir if options.generate_temp_rules: deps_checker.results_formatter = results.TemporaryRulesFormatter() elif options.count_violations: deps_checker.results_formatter = results.CountViolationsFormatter() deps_checker.CheckDirectory(start_dir) return deps_checker.Report() if '__main__' == __name__: sys.exit(main())
supriyantomaftuh/django
refs/heads/master
django/utils/xmlutils.py
491
""" Utilities for XML generation/parsing. """ import re from xml.sax.saxutils import XMLGenerator class UnserializableContentError(ValueError): pass class SimplerXMLGenerator(XMLGenerator): def addQuickElement(self, name, contents=None, attrs=None): "Convenience method for adding an element with no children" if attrs is None: attrs = {} self.startElement(name, attrs) if contents is not None: self.characters(contents) self.endElement(name) def characters(self, content): if content and re.search(r'[\x00-\x08\x0B-\x0C\x0E-\x1F]', content): # Fail loudly when content has control chars (unsupported in XML 1.0) # See http://www.w3.org/International/questions/qa-controls raise UnserializableContentError("Control characters are not supported in XML 1.0") XMLGenerator.characters(self, content)
allotria/intellij-community
refs/heads/master
python/testData/intentions/PyConvertTypeCommentToVariableAnnotationIntentionTest/chainedAssignment.py
31
x = y = 42 # ty<caret>pe: int
autosportlabs/kivy
refs/heads/master
setup.py
4
# # Kivy - Cross-platform UI framework # https://kivy.org/ # from __future__ import print_function import sys build_examples = False if "--build_examples" in sys.argv: build_examples = True sys.argv.remove("--build_examples") from copy import deepcopy import os from os.path import join, dirname, sep, exists, basename, isdir from os import walk, environ from distutils.version import LooseVersion from distutils.sysconfig import get_python_inc from collections import OrderedDict from time import sleep from subprocess import check_output, CalledProcessError from datetime import datetime if environ.get('KIVY_USE_SETUPTOOLS'): from setuptools import setup, Extension print('Using setuptools') else: from distutils.core import setup from distutils.extension import Extension print('Using distutils') PY3 = sys.version > '3' if PY3: # fix error with py3's LooseVersion comparisons def ver_equal(self, other): return self.version == other LooseVersion.__eq__ = ver_equal def get_version(filename='kivy/version.py'): VERSION = kivy.__version__ DATE = datetime.utcnow().strftime('%Y%m%d') try: GIT_REVISION = check_output( ['git', 'rev-parse', 'HEAD'] ).strip().decode('ascii') except CalledProcessError: GIT_REVISION = "Unknown" cnt = ( "# THIS FILE IS GENERATED FROM KIVY SETUP.PY\n" "__version__ = '%(version)s'\n" "__hash__ = '%(hash)s'\n" "__date__ = '%(date)s'\n" ) with open(filename, 'w') as f: f.write(cnt % { 'version': VERSION, 'hash': GIT_REVISION, 'date': DATE }) return VERSION MIN_CYTHON_STRING = '0.23' MIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING) MAX_CYTHON_STRING = '0.23' MAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING) CYTHON_UNSUPPORTED = () def getoutput(cmd, env=None): import subprocess p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) p.wait() if p.returncode: # if not returncode == 0 print('WARNING: A problem occurred while running {0} (code {1})\n' .format(cmd, p.returncode)) stderr_content = p.stderr.read() if stderr_content: print('{0}\n'.format(stderr_content)) return "" return p.stdout.read() def pkgconfig(*packages, **kw): flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'} lenviron = None pconfig = join(sys.prefix, 'libs', 'pkgconfig') if isdir(pconfig): lenviron = environ.copy() lenviron['PKG_CONFIG_PATH'] = '{};{}'.format( environ.get('PKG_CONFIG_PATH', ''), pconfig) cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages)) results = getoutput(cmd, lenviron).split() for token in results: ext = token[:2].decode('utf-8') flag = flag_map.get(ext) if not flag: continue kw.setdefault(flag, []).append(token[2:].decode('utf-8')) return kw # ----------------------------------------------------------------------------- # Determine on which platform we are platform = sys.platform # Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575) if sys.platform == 'darwin': if sys.maxsize > 2 ** 32: osx_arch = 'x86_64' else: osx_arch = 'i386' # Detect Python for android project (http://github.com/kivy/python-for-android) ndkplatform = environ.get('NDKPLATFORM') if ndkplatform is not None and environ.get('LIBLINK'): platform = 'android' kivy_ios_root = environ.get('KIVYIOSROOT', None) if kivy_ios_root is not None: platform = 'ios' if exists('/opt/vc/include/bcm_host.h'): platform = 'rpi' if exists('/usr/lib/arm-linux-gnueabihf/libMali.so'): platform = 'mali' # ----------------------------------------------------------------------------- # Detect options # c_options = OrderedDict() c_options['use_rpi'] = platform == 'rpi' c_options['use_mali'] = platform == 'mali' c_options['use_egl'] = False c_options['use_opengl_es2'] = None c_options['use_opengl_mock'] = environ.get('READTHEDOCS', None) == 'True' c_options['use_sdl2'] = None c_options['use_ios'] = False c_options['use_mesagl'] = False c_options['use_x11'] = False c_options['use_gstreamer'] = None c_options['use_avfoundation'] = platform == 'darwin' c_options['use_osx_frameworks'] = platform == 'darwin' c_options['debug_gl'] = False # now check if environ is changing the default values for key in list(c_options.keys()): ukey = key.upper() if ukey in environ: value = bool(int(environ[ukey])) print('Environ change {0} -> {1}'.format(key, value)) c_options[key] = value # ----------------------------------------------------------------------------- # Cython check # on python-for-android and kivy-ios, cython usage is external cython_unsupported_append = ''' Please note that the following versions of Cython are not supported at all: {} '''.format(', '.join(map(str, CYTHON_UNSUPPORTED))) cython_min = '''\ This version of Cython is not compatible with Kivy. Please upgrade to at least version {0}, preferably the newest supported version {1}. If your platform provides a Cython package, make sure you have upgraded to the newest version. If the newest version available is still too low, please remove it and install the newest supported Cython via pip: pip install -I Cython=={1}{2}\ '''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING, cython_unsupported_append if CYTHON_UNSUPPORTED else '') cython_max = '''\ This version of Cython is untested with Kivy. While this version may work perfectly fine, it is possible that you may experience issues. If you do have issues, please downgrade to a supported version. It is best to use the newest supported version, {1}, but the minimum supported version is {0}. If your platform provides a Cython package, check if you can downgrade to a supported version. Otherwise, uninstall the platform package and install Cython via pip: pip install -I Cython=={1}{2}\ '''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING, cython_unsupported_append if CYTHON_UNSUPPORTED else '') cython_unsupported = '''\ This version of Cython suffers from known bugs and is unsupported. Please install the newest supported version, {1}, if possible, but the minimum supported version is {0}. If your platform provides a Cython package, check if you can install a supported version. Otherwise, uninstall the platform package and install Cython via pip: pip install -I Cython=={1}{2}\ '''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING, cython_unsupported_append) have_cython = False skip_cython = False if platform in ('ios', 'android'): print('\nCython check avoided.') skip_cython = True else: try: # check for cython from Cython.Distutils import build_ext have_cython = True import Cython cy_version_str = Cython.__version__ cy_ver = LooseVersion(cy_version_str) print('\nDetected Cython version {}'.format(cy_version_str)) if cy_ver < MIN_CYTHON_VERSION: print(cython_min) raise ImportError('Incompatible Cython Version') if cy_ver in CYTHON_UNSUPPORTED: print(cython_unsupported) raise ImportError('Incompatible Cython Version') if cy_ver > MAX_CYTHON_VERSION: print(cython_max) sleep(1) except ImportError: print("\nCython is missing, it's required for compiling kivy !\n\n") raise if not have_cython: from distutils.command.build_ext import build_ext # ----------------------------------------------------------------------------- # Setup classes # the build path where kivy is being compiled src_path = build_path = dirname(__file__) class KivyBuildExt(build_ext): def finalize_options(self): retval = build_ext.finalize_options(self) global build_path if (self.build_lib is not None and exists(self.build_lib) and not self.inplace): build_path = self.build_lib return retval def build_extensions(self): # build files config_h_fn = ('include', 'config.h') config_pxi_fn = ('include', 'config.pxi') config_py_fn = ('setupconfig.py', ) # generate headers config_h = '// Autogenerated file for Kivy C configuration\n' config_h += '#define __PY3 {0}\n'.format(int(PY3)) config_pxi = '# Autogenerated file for Kivy Cython configuration\n' config_pxi += 'DEF PY3 = {0}\n'.format(int(PY3)) config_py = '# Autogenerated file for Kivy configuration\n' config_py += 'PY3 = {0}\n'.format(int(PY3)) config_py += 'CYTHON_MIN = {0}\nCYTHON_MAX = {1}\n'.format( repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING)) config_py += 'CYTHON_BAD = {0}\n'.format(repr(', '.join(map( str, CYTHON_UNSUPPORTED)))) # generate content print('Build configuration is:') for opt, value in c_options.items(): value = int(bool(value)) print(' * {0} = {1}'.format(opt, value)) opt = opt.upper() config_h += '#define __{0} {1}\n'.format(opt, value) config_pxi += 'DEF {0} = {1}\n'.format(opt, value) config_py += '{0} = {1}\n'.format(opt, value) debug = bool(self.debug) print(' * debug = {0}'.format(debug)) config_pxi += 'DEF DEBUG = {0}\n'.format(debug) config_py += 'DEBUG = {0}\n'.format(debug) config_pxi += 'DEF PLATFORM = "{0}"\n'.format(platform) config_py += 'PLATFORM = "{0}"\n'.format(platform) for fn, content in ( (config_h_fn, config_h), (config_pxi_fn, config_pxi), (config_py_fn, config_py)): build_fn = expand(build_path, *fn) if self.update_if_changed(build_fn, content): print('Updated {}'.format(build_fn)) src_fn = expand(src_path, *fn) if src_fn != build_fn and self.update_if_changed(src_fn, content): print('Updated {}'.format(src_fn)) c = self.compiler.compiler_type print('Detected compiler is {}'.format(c)) if c != 'msvc': for e in self.extensions: e.extra_link_args += ['-lm'] build_ext.build_extensions(self) def update_if_changed(self, fn, content): need_update = True if exists(fn): with open(fn) as fd: need_update = fd.read() != content if need_update: with open(fn, 'w') as fd: fd.write(content) return need_update def _check_and_fix_sdl2_mixer(f_path): print("Check if SDL2_mixer smpeg2 have an @executable_path") rpath_from = ("@executable_path/../Frameworks/SDL2.framework" "/Versions/A/SDL2") rpath_to = "@rpath/../../../../SDL2.framework/Versions/A/SDL2" smpeg2_path = ("{}/Versions/A/Frameworks/smpeg2.framework" "/Versions/A/smpeg2").format(f_path) output = getoutput(("otool -L '{}'").format(smpeg2_path)).decode('utf-8') if "@executable_path" not in output: return print("WARNING: Your SDL2_mixer version is invalid") print("WARNING: The smpeg2 framework embedded in SDL2_mixer contains a") print("WARNING: reference to @executable_path that will fail the") print("WARNING: execution of your application.") print("WARNING: We are going to change:") print("WARNING: from: {}".format(rpath_from)) print("WARNING: to: {}".format(rpath_to)) getoutput("install_name_tool -change {} {} {}".format( rpath_from, rpath_to, smpeg2_path)) output = getoutput(("otool -L '{}'").format(smpeg2_path)) if b"@executable_path" not in output: print("WARNING: Change successfully applied!") print("WARNING: You'll never see this message again.") else: print("WARNING: Unable to apply the changes, sorry.") # ----------------------------------------------------------------------------- # extract version (simulate doc generation, kivy will be not imported) environ['KIVY_DOC_INCLUDE'] = '1' import kivy # extra build commands go in the cmdclass dict {'command-name': CommandClass} # see tools.packaging.{platform}.build.py for custom build commands for # portable packages. Also e.g. we use build_ext command from cython if its # installed for c extensions. from kivy.tools.packaging.factory import FactoryBuild cmdclass = { 'build_factory': FactoryBuild, 'build_ext': KivyBuildExt} try: # add build rules for portable packages to cmdclass if platform == 'win32': from kivy.tools.packaging.win32.build import WindowsPortableBuild cmdclass['build_portable'] = WindowsPortableBuild elif platform == 'darwin': from kivy.tools.packaging.osx.build import OSXPortableBuild cmdclass['build_portable'] = OSXPortableBuild except ImportError: print('User distribution detected, avoid portable command.') # Detect which opengl version headers to use if platform in ('android', 'darwin', 'ios', 'rpi', 'mali'): c_options['use_opengl_es2'] = True elif c_options['use_opengl_es2'] is None: c_options['use_opengl_es2'] = \ environ.get('KIVY_GRAPHICS', '').lower() == 'gles' print('Using this graphics system: {}'.format( ['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)])) # check if we are in a kivy-ios build if platform == 'ios': print('Kivy-IOS project environment detect, use it.') print('Kivy-IOS project located at {0}'.format(kivy_ios_root)) c_options['use_ios'] = True c_options['use_sdl2'] = True elif platform == 'darwin': if c_options['use_osx_frameworks']: if osx_arch == "i386": print("Warning: building with frameworks fail on i386") else: print("OSX framework used, force to x86_64 only") environ["ARCHFLAGS"] = environ.get("ARCHFLAGS", "-arch x86_64") print("OSX ARCHFLAGS are: {}".format(environ["ARCHFLAGS"])) # detect gstreamer, only on desktop # works if we forced the options or in autodetection if platform not in ('ios', 'android') and (c_options['use_gstreamer'] in (None, True)): if c_options['use_osx_frameworks'] and platform == 'darwin': # check the existence of frameworks f_path = '/Library/Frameworks/GStreamer.framework' if not exists(f_path): c_options['use_gstreamer'] = False print('Missing GStreamer framework {}'.format(f_path)) else: c_options['use_gstreamer'] = True gst_flags = { 'extra_link_args': [ '-F/Library/Frameworks', '-Xlinker', '-rpath', '-Xlinker', '/Library/Frameworks', '-Xlinker', '-headerpad', '-Xlinker', '190', '-framework', 'GStreamer'], 'include_dirs': [join(f_path, 'Headers')]} else: # use pkg-config approach instead gst_flags = pkgconfig('gstreamer-1.0') if 'libraries' in gst_flags: c_options['use_gstreamer'] = True # detect SDL2, only on desktop and iOS, or android if explicitly enabled # works if we forced the options or in autodetection sdl2_flags = {} if c_options['use_sdl2'] or ( platform not in ('android',) and c_options['use_sdl2'] is None): if c_options['use_osx_frameworks'] and platform == 'darwin': # check the existence of frameworks sdl2_valid = True sdl2_flags = { 'extra_link_args': [ '-F/Library/Frameworks', '-Xlinker', '-rpath', '-Xlinker', '/Library/Frameworks', '-Xlinker', '-headerpad', '-Xlinker', '190'], 'include_dirs': [], 'extra_compile_args': ['-F/Library/Frameworks'] } for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'): f_path = '/Library/Frameworks/{}.framework'.format(name) if not exists(f_path): print('Missing framework {}'.format(f_path)) sdl2_valid = False continue sdl2_flags['extra_link_args'] += ['-framework', name] sdl2_flags['include_dirs'] += [join(f_path, 'Headers')] print('Found sdl2 frameworks: {}'.format(f_path)) if name == 'SDL2_mixer': _check_and_fix_sdl2_mixer(f_path) if not sdl2_valid: c_options['use_sdl2'] = False print('Deactivate SDL2 compilation due to missing frameworks') else: c_options['use_sdl2'] = True print('Activate SDL2 compilation') elif platform != "ios": # use pkg-config approach instead sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer') if 'libraries' in sdl2_flags: c_options['use_sdl2'] = True # ----------------------------------------------------------------------------- # declare flags def get_modulename_from_file(filename): filename = filename.replace(sep, '/') pyx = '.'.join(filename.split('.')[:-1]) pyxl = pyx.split('/') while pyxl[0] != 'kivy': pyxl.pop(0) if pyxl[1] == 'kivy': pyxl.pop(0) return '.'.join(pyxl) def expand(root, *args): return join(root, 'kivy', *args) class CythonExtension(Extension): def __init__(self, *args, **kwargs): Extension.__init__(self, *args, **kwargs) self.cython_directives = { 'c_string_encoding': 'utf-8', 'profile': 'USE_PROFILE' in environ, 'embedsignature': 'USE_EMBEDSIGNATURE' in environ} # XXX with pip, setuptools is imported before distutils, and change # our pyx to c, then, cythonize doesn't happen. So force again our # sources self.sources = args[1] def merge(d1, *args): d1 = deepcopy(d1) for d2 in args: for key, value in d2.items(): value = deepcopy(value) if key in d1: d1[key].extend(value) else: d1[key] = value return d1 def determine_base_flags(): flags = { 'libraries': [], 'include_dirs': [join(src_path, 'kivy', 'include')], 'library_dirs': [], 'extra_link_args': [], 'extra_compile_args': []} if c_options['use_ios']: sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT')) if not sysroot: raise Exception('IOSSDKROOT is not set') flags['include_dirs'] += [sysroot] flags['extra_compile_args'] += ['-isysroot', sysroot] flags['extra_link_args'] += ['-isysroot', sysroot] elif platform.startswith('freebsd'): flags['include_dirs'] += [join( environ.get('LOCALBASE', '/usr/local'), 'include')] flags['library_dirs'] += [join( environ.get('LOCALBASE', '/usr/local'), 'lib')] elif platform == 'darwin': v = os.uname() if v[2] >= '13.0.0': # use xcode-select to search on the right Xcode path # XXX use the best SDK available instead of a specific one import platform as _platform xcode_dev = getoutput('xcode-select -p').splitlines()[0] sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2]) print('Xcode detected at {}, and using OS X{} sdk'.format( xcode_dev, sdk_mac_ver)) sysroot = join( xcode_dev.decode('utf-8'), 'Platforms/MacOSX.platform/Developer/SDKs', 'MacOSX{}.sdk'.format(sdk_mac_ver), 'System/Library/Frameworks') else: sysroot = ('/System/Library/Frameworks/' 'ApplicationServices.framework/Frameworks') flags['extra_compile_args'] += ['-F%s' % sysroot] flags['extra_link_args'] += ['-F%s' % sysroot] elif platform == 'win32': flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)] flags['library_dirs'] += [join(sys.prefix, "libs")] return flags def determine_gl_flags(): kivy_graphics_include = join(src_path, 'kivy', 'include') flags = {'include_dirs': [kivy_graphics_include], 'libraries': []} base_flags = {'include_dirs': [kivy_graphics_include], 'libraries': []} if c_options['use_opengl_mock']: return flags, base_flags if platform == 'win32': flags['libraries'] = ['opengl32', 'glew32'] elif platform == 'ios': flags['libraries'] = ['GLESv2'] flags['extra_link_args'] = ['-framework', 'OpenGLES'] elif platform == 'darwin': flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch] flags['extra_compile_args'] = ['-arch', osx_arch] elif platform.startswith('freebsd'): flags['libraries'] = ['GL'] elif platform.startswith('openbsd'): flags['include_dirs'] = ['/usr/X11R6/include'] flags['library_dirs'] = ['/usr/X11R6/lib'] flags['libraries'] = ['GL'] elif platform == 'android': flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')] flags['library_dirs'] = [join(ndkplatform, 'usr', 'lib')] flags['libraries'] = ['GLESv2'] elif platform == 'rpi': flags['include_dirs'] = [ '/opt/vc/include', '/opt/vc/include/interface/vcos/pthreads', '/opt/vc/include/interface/vmcs_host/linux'] flags['library_dirs'] = ['/opt/vc/lib'] flags['libraries'] = ['bcm_host', 'EGL', 'GLESv2'] elif platform == 'mali': flags['include_dirs'] = ['/usr/include/'] flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf'] flags['libraries'] = ['GLESv2'] c_options['use_x11'] = True c_options['use_egl'] = True else: flags['libraries'] = ['GL'] return flags, base_flags def determine_sdl2(): flags = {} if not c_options['use_sdl2']: return flags sdl2_path = environ.get('KIVY_SDL2_PATH', None) if sdl2_flags and not sdl2_path and platform == 'darwin': return sdl2_flags # no pkgconfig info, or we want to use a specific sdl2 path, so perform # manual configuration flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'] split_chr = ';' if platform == 'win32' else ':' sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else [] if not sdl2_paths: sdl_inc = join(sys.prefix, 'include', 'SDL2') if isdir(sdl_inc): sdl2_paths = [sdl_inc] sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2']) flags['include_dirs'] = sdl2_paths flags['extra_link_args'] = [] flags['extra_compile_args'] = [] flags['library_dirs'] = ( sdl2_paths if sdl2_paths else ['/usr/local/lib/']) if sdl2_flags: flags = merge(flags, sdl2_flags) # ensure headers for all the SDL2 and sub libraries are available libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image'] can_compile = True for lib in libs_to_check: found = False for d in flags['include_dirs']: fn = join(d, '{}.h'.format(lib)) if exists(fn): found = True print('SDL2: found {} header at {}'.format(lib, fn)) break if not found: print('SDL2: missing sub library {}'.format(lib)) can_compile = False if not can_compile: c_options['use_sdl2'] = False return {} return flags base_flags = determine_base_flags() gl_flags, gl_flags_base = determine_gl_flags() # ----------------------------------------------------------------------------- # sources to compile # all the dependencies have been found manually with: # grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx} graphics_dependencies = { 'gl_redirect.h': ['common_subset.h', 'gl_mock.h'], 'buffer.pyx': ['common.pxi'], 'context.pxd': ['instructions.pxd', 'texture.pxd', 'vbo.pxd', 'cgl.pxd'], 'cgl.pxd': ['common.pxi', 'config.pxi', 'gl_redirect.h'], 'compiler.pxd': ['instructions.pxd'], 'compiler.pyx': ['context_instructions.pxd'], 'cgl.pyx': ['cgl.pxd'], 'cgl_mock.pyx': ['cgl.pxd'], 'cgl_sdl2.pyx': ['cgl.pxd'], 'cgl_gl.pyx': ['cgl.pxd'], 'cgl_glew.pyx': ['cgl.pxd'], 'context_instructions.pxd': [ 'transformation.pxd', 'instructions.pxd', 'texture.pxd'], 'fbo.pxd': ['cgl.pxd', 'instructions.pxd', 'texture.pxd'], 'fbo.pyx': [ 'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd'], 'gl_instructions.pyx': [ 'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'instructions.pxd'], 'instructions.pxd': [ 'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd', 'texture.pxd', '../_event.pxd'], 'instructions.pyx': [ 'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'], 'opengl.pyx': [ 'config.pxi', 'common.pxi', 'cgl.pxd', 'gl_redirect.h'], 'opengl_utils.pyx': [ 'opengl_utils_def.pxi', 'cgl.pxd', ], 'shader.pxd': ['cgl.pxd', 'transformation.pxd', 'vertex.pxd'], 'shader.pyx': [ 'config.pxi', 'common.pxi', 'cgl.pxd', 'vertex.pxd', 'transformation.pxd', 'context.pxd', 'gl_debug_logger.pxi'], 'stencil_instructions.pxd': ['instructions.pxd'], 'stencil_instructions.pyx': [ 'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'gl_debug_logger.pxi'], 'scissor_instructions.pyx': [ 'config.pxi', 'opcodes.pxi', 'cgl.pxd'], 'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd', 'vertex_instructions.pxd', 'tesselator.pxd'], 'texture.pxd': ['cgl.pxd'], 'texture.pyx': [ 'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd', 'cgl.pxd', 'opengl_utils.pxd', 'img_tools.pxi', 'gl_debug_logger.pxi'], 'vbo.pxd': ['buffer.pxd', 'cgl.pxd', 'vertex.pxd'], 'vbo.pyx': [ 'config.pxi', 'common.pxi', 'context.pxd', 'instructions.pxd', 'shader.pxd', 'gl_debug_logger.pxi'], 'vertex.pxd': ['cgl.pxd'], 'vertex.pyx': ['config.pxi', 'common.pxi'], 'vertex_instructions.pyx': [ 'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd', 'instructions.pxd', 'vertex_instructions.pxd', 'cgl.pxd', 'texture.pxd', 'vertex_instructions_line.pxi'], 'vertex_instructions_line.pxi': ['stencil_instructions.pxd']} sources = { '_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}), '_clock.pyx': {}, 'weakproxy.pyx': {}, 'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}), 'graphics/buffer.pyx': merge(base_flags, gl_flags_base), 'graphics/context.pyx': merge(base_flags, gl_flags_base), 'graphics/compiler.pyx': merge(base_flags, gl_flags_base), 'graphics/context_instructions.pyx': merge(base_flags, gl_flags_base), 'graphics/fbo.pyx': merge(base_flags, gl_flags_base), 'graphics/gl_instructions.pyx': merge(base_flags, gl_flags_base), 'graphics/instructions.pyx': merge(base_flags, gl_flags_base), 'graphics/opengl.pyx': merge(base_flags, gl_flags_base), 'graphics/opengl_utils.pyx': merge(base_flags, gl_flags_base), 'graphics/shader.pyx': merge(base_flags, gl_flags_base), 'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags_base), 'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags_base), 'graphics/texture.pyx': merge(base_flags, gl_flags_base), 'graphics/transformation.pyx': merge(base_flags, gl_flags_base), 'graphics/vbo.pyx': merge(base_flags, gl_flags_base), 'graphics/vertex.pyx': merge(base_flags, gl_flags_base), 'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags_base), 'graphics/cgl.pyx': merge(base_flags, gl_flags_base), 'graphics/cgl_backend/cgl_mock.pyx': merge(base_flags, gl_flags_base), 'graphics/cgl_backend/cgl_gl.pyx': merge(base_flags, gl_flags), 'graphics/cgl_backend/cgl_glew.pyx': merge(base_flags, gl_flags), 'graphics/cgl_backend/cgl_sdl2.pyx': merge(base_flags, gl_flags_base), 'graphics/cgl_backend/cgl_debug.pyx': merge(base_flags, gl_flags_base), 'core/text/text_layout.pyx': base_flags, 'graphics/tesselator.pyx': merge(base_flags, { 'include_dirs': ['kivy/lib/libtess2/Include'], 'c_depends': [ 'lib/libtess2/Source/bucketalloc.c', 'lib/libtess2/Source/dict.c', 'lib/libtess2/Source/geom.c', 'lib/libtess2/Source/mesh.c', 'lib/libtess2/Source/priorityq.c', 'lib/libtess2/Source/sweep.c', 'lib/libtess2/Source/tess.c' ] }), 'graphics/svg.pyx': merge(base_flags, gl_flags_base) } if c_options["use_sdl2"]: sdl2_flags = determine_sdl2() if c_options['use_sdl2'] and sdl2_flags: sources['graphics/cgl_backend/cgl_sdl2.pyx'] = merge( sources['graphics/cgl_backend/cgl_sdl2.pyx'], sdl2_flags) sdl2_depends = {'depends': ['lib/sdl2.pxi']} for source_file in ('core/window/_window_sdl2.pyx', 'core/image/_img_sdl2.pyx', 'core/text/_text_sdl2.pyx', 'core/audio/audio_sdl2.pyx', 'core/clipboard/_clipboard_sdl2.pyx'): sources[source_file] = merge( base_flags, sdl2_flags, sdl2_depends) if platform in ('darwin', 'ios'): # activate ImageIO provider for our core image if platform == 'ios': osx_flags = {'extra_link_args': [ '-framework', 'Foundation', '-framework', 'UIKit', '-framework', 'AudioToolbox', '-framework', 'CoreGraphics', '-framework', 'QuartzCore', '-framework', 'ImageIO', '-framework', 'Accelerate']} else: osx_flags = {'extra_link_args': [ '-framework', 'ApplicationServices']} sources['core/image/img_imageio.pyx'] = merge( base_flags, osx_flags) if c_options['use_avfoundation']: import platform as _platform mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]] if mac_ver >= [10, 7]: osx_flags = { 'extra_link_args': ['-framework', 'AVFoundation'], 'extra_compile_args': ['-ObjC++'], 'depends': ['core/camera/camera_avfoundation_implem.m']} sources['core/camera/camera_avfoundation.pyx'] = merge( base_flags, osx_flags) else: print('AVFoundation cannot be used, OSX >= 10.7 is required') if c_options['use_rpi']: sources['lib/vidcore_lite/egl.pyx'] = merge( base_flags, gl_flags) sources['lib/vidcore_lite/bcm.pyx'] = merge( base_flags, gl_flags) if c_options['use_x11']: libs = ['Xrender', 'X11'] if c_options['use_egl']: libs += ['EGL'] else: libs += ['GL'] sources['core/window/window_x11.pyx'] = merge( base_flags, gl_flags, { # FIXME add an option to depend on them but not compile them # cause keytab is included in core, and core is included in # window_x11 # # 'depends': [ # 'core/window/window_x11_keytab.c', # 'core/window/window_x11_core.c'], 'libraries': libs}) if c_options['use_gstreamer']: sources['lib/gstplayer/_gstplayer.pyx'] = merge( base_flags, gst_flags, { 'depends': ['lib/gstplayer/_gstplayer.h']}) # ----------------------------------------------------------------------------- # extension modules def get_dependencies(name, deps=None): if deps is None: deps = [] for dep in graphics_dependencies.get(name, []): if dep not in deps: deps.append(dep) get_dependencies(dep, deps) return deps def resolve_dependencies(fn, depends): fn = basename(fn) deps = [] get_dependencies(fn, deps) get_dependencies(fn.replace('.pyx', '.pxd'), deps) return [expand(src_path, 'graphics', x) for x in deps] def get_extensions_from_sources(sources): ext_modules = [] if environ.get('KIVY_FAKE_BUILDEXT'): print('Fake build_ext asked, will generate only .h/.c') return ext_modules for pyx, flags in sources.items(): is_graphics = pyx.startswith('graphics') pyx = expand(src_path, pyx) depends = [expand(src_path, x) for x in flags.pop('depends', [])] c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])] if not have_cython: pyx = '%s.c' % pyx[:-4] if is_graphics: depends = resolve_dependencies(pyx, depends) f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in ( 'c', 'cpp', 'm')] module_name = get_modulename_from_file(pyx) flags_clean = {'depends': depends} for key, value in flags.items(): if len(value): flags_clean[key] = value ext_modules.append(CythonExtension( module_name, [pyx] + f_depends + c_depends, **flags_clean)) return ext_modules ext_modules = get_extensions_from_sources(sources) # ----------------------------------------------------------------------------- # automatically detect data files split_examples = int(environ.get('KIVY_SPLIT_EXAMPLES', '0')) data_file_prefix = 'share/kivy-' examples = {} examples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json', 'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg', 'glsl', 'zip') for root, subFolders, files in walk('examples'): for fn in files: ext = fn.split('.')[-1].lower() if ext not in examples_allowed_ext: continue filename = join(root, fn) directory = '%s%s' % (data_file_prefix, dirname(filename)) if directory not in examples: examples[directory] = [] examples[directory].append(filename) binary_deps = [] binary_deps_path = join(src_path, 'kivy', 'binary_deps') if isdir(binary_deps_path): for root, dirnames, filenames in walk(binary_deps_path): for fname in filenames: binary_deps.append( join(root.replace(binary_deps_path, 'binary_deps'), fname)) # ----------------------------------------------------------------------------- # setup ! if not build_examples: setup( name='Kivy', version=get_version(), author='Kivy Team and other contributors', author_email='kivy-dev@googlegroups.com', url='http://kivy.org', license='MIT', description=( 'A software library for rapid development of ' 'hardware-accelerated multitouch applications.'), ext_modules=ext_modules, cmdclass=cmdclass, packages=[ 'kivy', 'kivy.adapters', 'kivy.core', 'kivy.core.audio', 'kivy.core.camera', 'kivy.core.clipboard', 'kivy.core.image', 'kivy.core.gl', 'kivy.core.spelling', 'kivy.core.text', 'kivy.core.video', 'kivy.core.window', 'kivy.deps', 'kivy.effects', 'kivy.graphics', 'kivy.graphics.cgl_backend', 'kivy.garden', 'kivy.input', 'kivy.input.postproc', 'kivy.input.providers', 'kivy.lang', 'kivy.lib', 'kivy.lib.osc', 'kivy.lib.gstplayer', 'kivy.lib.vidcore_lite', 'kivy.modules', 'kivy.network', 'kivy.storage', 'kivy.tests', 'kivy.tools', 'kivy.tools.packaging', 'kivy.tools.packaging.pyinstaller_hooks', 'kivy.tools.highlight', 'kivy.extras', 'kivy.uix', 'kivy.uix.behaviors', 'kivy.uix.recycleview', ], package_dir={'kivy': 'kivy'}, package_data={'kivy': [ '*.pxd', '*.pxi', 'core/text/*.pxd', 'core/text/*.pxi', 'graphics/*.pxd', 'graphics/*.pxi', 'graphics/*.h', 'include/*', 'lib/vidcore_lite/*.pxd', 'lib/vidcore_lite/*.pxi', 'data/*.kv', 'data/*.json', 'data/fonts/*.ttf', 'data/images/*.png', 'data/images/*.jpg', 'data/images/*.gif', 'data/images/*.atlas', 'data/keyboards/*.json', 'data/logo/*.png', 'data/glsl/*.png', 'data/glsl/*.vs', 'data/glsl/*.fs', 'tests/*.zip', 'tests/*.kv', 'tests/*.png', 'tests/*.ttf', 'tests/*.ogg', 'tools/highlight/*.vim', 'tools/highlight/*.el', 'tools/packaging/README.txt', 'tools/packaging/win32/kivy.bat', 'tools/packaging/win32/kivyenv.sh', 'tools/packaging/win32/README.txt', 'tools/packaging/osx/Info.plist', 'tools/packaging/osx/InfoPlist.strings', 'tools/gles_compat/*.h', 'tools/packaging/osx/kivy.sh'] + binary_deps}, data_files=[] if split_examples else list(examples.items()), classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: MacOS X', 'Environment :: Win32 (MS Windows)', 'Environment :: X11 Applications', 'Intended Audience :: Developers', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Information Technology', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX :: BSD :: FreeBSD', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: Artistic Software', 'Topic :: Games/Entertainment', 'Topic :: Multimedia :: Graphics :: 3D Rendering', 'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera', 'Topic :: Multimedia :: Graphics :: Presentation', 'Topic :: Multimedia :: Graphics :: Viewers', 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3', 'Topic :: Multimedia :: Video :: Display', 'Topic :: Scientific/Engineering :: Human Machine Interfaces', 'Topic :: Scientific/Engineering :: Visualization', ('Topic :: Software Development :: Libraries :: ' 'Application Frameworks'), 'Topic :: Software Development :: User Interfaces'], dependency_links=[ 'https://github.com/kivy-garden/garden/archive/master.zip'], install_requires=['Kivy-Garden>=0.1.4', 'docutils', 'pygments'], setup_requires=[ 'cython>=' + MIN_CYTHON_STRING ] if not skip_cython else []) else: setup( name='Kivy-examples', version=get_version(), author='Kivy Team and other contributors', author_email='kivy-dev@googlegroups.com', url='http://kivy.org', license='MIT', description=('Kivy examples.'), data_files=list(examples.items()))
kendazheng/wizcloud2
refs/heads/develop
wizcloud/rctrl/api.py
14224
# -*- coding: utf-8 -*-
indevgr/django
refs/heads/master
django/core/management/commands/makemessages.py
6
from __future__ import unicode_literals import fnmatch import glob import io import os import re import sys from functools import total_ordering from itertools import dropwhile import django from django.conf import settings from django.core.files.temp import NamedTemporaryFile from django.core.management.base import BaseCommand, CommandError from django.core.management.utils import ( find_command, handle_extensions, popen_wrapper, ) from django.utils._os import upath from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_str from django.utils.functional import cached_property from django.utils.jslex import prepare_js_for_gettext from django.utils.text import get_text_list plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL) STATUS_OK = 0 NO_LOCALE_DIR = object() def check_programs(*programs): for program in programs: if find_command(program) is None: raise CommandError( "Can't find %s. Make sure you have GNU gettext tools 0.15 or " "newer installed." % program ) @total_ordering class TranslatableFile(object): def __init__(self, dirpath, file_name, locale_dir): self.file = file_name self.dirpath = dirpath self.locale_dir = locale_dir def __repr__(self): return "<TranslatableFile: %s>" % os.sep.join([self.dirpath, self.file]) def __eq__(self, other): return self.path == other.path def __lt__(self, other): return self.path < other.path @property def path(self): return os.path.join(self.dirpath, self.file) class BuildFile(object): """ Represents the state of a translatable file during the build process. """ def __init__(self, command, domain, translatable): self.command = command self.domain = domain self.translatable = translatable @cached_property def is_templatized(self): if self.domain == 'djangojs': return self.command.gettext_version < (0, 18, 3) elif self.domain == 'django': file_ext = os.path.splitext(self.translatable.file)[1] return file_ext != '.py' return False @cached_property def path(self): return self.translatable.path @cached_property def work_path(self): """ Path to a file which is being fed into GNU gettext pipeline. This may be either a translatable or its preprocessed version. """ if not self.is_templatized: return self.path extension = { 'djangojs': 'c', 'django': 'py', }.get(self.domain) filename = '%s.%s' % (self.translatable.file, extension) return os.path.join(self.translatable.dirpath, filename) def preprocess(self): """ Preprocess (if necessary) a translatable file before passing it to xgettext GNU gettext utility. """ from django.utils.translation import templatize if not self.is_templatized: return with io.open(self.path, 'r', encoding=settings.FILE_CHARSET) as fp: src_data = fp.read() if self.domain == 'djangojs': content = prepare_js_for_gettext(src_data) elif self.domain == 'django': content = templatize(src_data, self.path[2:]) with io.open(self.work_path, 'w', encoding='utf-8') as fp: fp.write(content) def postprocess_messages(self, msgs): """ Postprocess messages generated by xgettext GNU gettext utility. Transform paths as if these messages were generated from original translatable files rather than from preprocessed versions. """ if not self.is_templatized: return msgs # Remove '.py' suffix if os.name == 'nt': # Preserve '.\' prefix on Windows to respect gettext behavior old = '#: ' + self.work_path new = '#: ' + self.path else: old = '#: ' + self.work_path[2:] new = '#: ' + self.path[2:] return msgs.replace(old, new) def cleanup(self): """ Remove a preprocessed copy of a translatable file (if any). """ if self.is_templatized: # This check is needed for the case of a symlinked file and its # source being processed inside a single group (locale dir); # removing either of those two removes both. if os.path.exists(self.work_path): os.unlink(self.work_path) def write_pot_file(potfile, msgs): """ Write the :param potfile: POT file with the :param msgs: contents, previously making sure its format is valid. """ if os.path.exists(potfile): # Strip the header msgs = '\n'.join(dropwhile(len, msgs.split('\n'))) else: msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8') with io.open(potfile, 'a', encoding='utf-8') as fp: fp.write(msgs) class Command(BaseCommand): help = ( "Runs over the entire source tree of the current directory and " "pulls out all strings marked for translation. It creates (or updates) a message " "file in the conf/locale (in the django tree) or locale (for projects and " "applications) directory.\n\nYou must run this command with one of either the " "--locale, --exclude, or --all options." ) translatable_file_class = TranslatableFile build_file_class = BuildFile requires_system_checks = False leave_locale_alone = True msgmerge_options = ['-q', '--previous'] msguniq_options = ['--to-code=utf-8'] msgattrib_options = ['--no-obsolete'] xgettext_options = ['--from-code=UTF-8', '--add-comments=Translators'] def add_arguments(self, parser): parser.add_argument( '--locale', '-l', default=[], dest='locale', action='append', help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). ' 'Can be used multiple times.', ) parser.add_argument( '--exclude', '-x', default=[], dest='exclude', action='append', help='Locales to exclude. Default is none. Can be used multiple times.', ) parser.add_argument( '--domain', '-d', default='django', dest='domain', help='The domain of the message files (default: "django").', ) parser.add_argument( '--all', '-a', action='store_true', dest='all', default=False, help='Updates the message files for all existing locales.', ) parser.add_argument( '--extension', '-e', dest='extensions', action='append', help='The file extension(s) to examine (default: "html,txt,py", or "js" ' 'if the domain is "djangojs"). Separate multiple extensions with ' 'commas, or use -e multiple times.', ) parser.add_argument( '--symlinks', '-s', action='store_true', dest='symlinks', default=False, help='Follows symlinks to directories when examining source code ' 'and templates for translation strings.', ) parser.add_argument( '--ignore', '-i', action='append', dest='ignore_patterns', default=[], metavar='PATTERN', help='Ignore files or directories matching this glob-style pattern. ' 'Use multiple times to ignore more.', ) parser.add_argument( '--no-default-ignore', action='store_false', dest='use_default_ignore_patterns', default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'.", ) parser.add_argument( '--no-wrap', action='store_true', dest='no_wrap', default=False, help="Don't break long message lines into several lines.", ) parser.add_argument( '--no-location', action='store_true', dest='no_location', default=False, help="Don't write '#: filename:line' lines.", ) parser.add_argument( '--no-obsolete', action='store_true', dest='no_obsolete', default=False, help="Remove obsolete message strings.", ) parser.add_argument( '--keep-pot', action='store_true', dest='keep_pot', default=False, help="Keep .pot file after making messages. Useful when debugging.", ) def handle(self, *args, **options): locale = options['locale'] exclude = options['exclude'] self.domain = options['domain'] self.verbosity = options['verbosity'] process_all = options['all'] extensions = options['extensions'] self.symlinks = options['symlinks'] # Need to ensure that the i18n framework is enabled if settings.configured: settings.USE_I18N = True else: settings.configure(USE_I18N=True) ignore_patterns = options['ignore_patterns'] if options['use_default_ignore_patterns']: ignore_patterns += ['CVS', '.*', '*~', '*.pyc'] self.ignore_patterns = list(set(ignore_patterns)) # Avoid messing with mutable class variables if options['no_wrap']: self.msgmerge_options = self.msgmerge_options[:] + ['--no-wrap'] self.msguniq_options = self.msguniq_options[:] + ['--no-wrap'] self.msgattrib_options = self.msgattrib_options[:] + ['--no-wrap'] self.xgettext_options = self.xgettext_options[:] + ['--no-wrap'] if options['no_location']: self.msgmerge_options = self.msgmerge_options[:] + ['--no-location'] self.msguniq_options = self.msguniq_options[:] + ['--no-location'] self.msgattrib_options = self.msgattrib_options[:] + ['--no-location'] self.xgettext_options = self.xgettext_options[:] + ['--no-location'] self.no_obsolete = options['no_obsolete'] self.keep_pot = options['keep_pot'] if self.domain not in ('django', 'djangojs'): raise CommandError("currently makemessages only supports domains " "'django' and 'djangojs'") if self.domain == 'djangojs': exts = extensions if extensions else ['js'] else: exts = extensions if extensions else ['html', 'txt', 'py'] self.extensions = handle_extensions(exts) if (locale is None and not exclude and not process_all) or self.domain is None: raise CommandError( "Type '%s help %s' for usage information." % (os.path.basename(sys.argv[0]), sys.argv[1]) ) if self.verbosity > 1: self.stdout.write( 'examining files with the extensions: %s\n' % get_text_list(list(self.extensions), 'and') ) self.invoked_for_django = False self.locale_paths = [] self.default_locale_path = None if os.path.isdir(os.path.join('conf', 'locale')): self.locale_paths = [os.path.abspath(os.path.join('conf', 'locale'))] self.default_locale_path = self.locale_paths[0] self.invoked_for_django = True else: self.locale_paths.extend(settings.LOCALE_PATHS) # Allow to run makemessages inside an app dir if os.path.isdir('locale'): self.locale_paths.append(os.path.abspath('locale')) if self.locale_paths: self.default_locale_path = self.locale_paths[0] if not os.path.exists(self.default_locale_path): os.makedirs(self.default_locale_path) # Build locale list locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % self.default_locale_path)) all_locales = map(os.path.basename, locale_dirs) # Account for excluded locales if process_all: locales = all_locales else: locales = locale or all_locales locales = set(locales) - set(exclude) if locales: check_programs('msguniq', 'msgmerge', 'msgattrib') check_programs('xgettext') try: potfiles = self.build_potfiles() # Build po files for each selected locale for locale in locales: if self.verbosity > 0: self.stdout.write("processing locale %s\n" % locale) for potfile in potfiles: self.write_po_file(potfile, locale) finally: if not self.keep_pot: self.remove_potfiles() @cached_property def gettext_version(self): # Gettext tools will output system-encoded bytestrings instead of UTF-8, # when looking up the version. It's especially a problem on Windows. out, err, status = popen_wrapper( ['xgettext', '--version'], stdout_encoding=DEFAULT_LOCALE_ENCODING, ) m = re.search(r'(\d+)\.(\d+)\.?(\d+)?', out) if m: return tuple(int(d) for d in m.groups() if d is not None) else: raise CommandError("Unable to get gettext version. Is it installed?") def build_potfiles(self): """ Build pot files and apply msguniq to them. """ file_list = self.find_files(".") self.remove_potfiles() self.process_files(file_list) potfiles = [] for path in self.locale_paths: potfile = os.path.join(path, '%s.pot' % str(self.domain)) if not os.path.exists(potfile): continue args = ['msguniq'] + self.msguniq_options + [potfile] msgs, errors, status = popen_wrapper(args) if errors: if status != STATUS_OK: raise CommandError( "errors happened while running msguniq\n%s" % errors) elif self.verbosity > 0: self.stdout.write(errors) with io.open(potfile, 'w', encoding='utf-8') as fp: fp.write(msgs) potfiles.append(potfile) return potfiles def remove_potfiles(self): for path in self.locale_paths: pot_path = os.path.join(path, '%s.pot' % str(self.domain)) if os.path.exists(pot_path): os.unlink(pot_path) def find_files(self, root): """ Helper method to get all files in the given root. Also check that there is a matching locale dir for each file. """ def is_ignored(path, ignore_patterns): """ Check if the given path should be ignored or not. """ filename = os.path.basename(path) def ignore(pattern): return fnmatch.fnmatchcase(filename, pattern) or fnmatch.fnmatchcase(path, pattern) return any(ignore(pattern) for pattern in ignore_patterns) ignore_patterns = [os.path.normcase(p) for p in self.ignore_patterns] dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}} norm_patterns = [] for p in ignore_patterns: for dir_suffix in dir_suffixes: if p.endswith(dir_suffix): norm_patterns.append(p[:-len(dir_suffix)]) break else: norm_patterns.append(p) all_files = [] ignored_roots = [os.path.normpath(p) for p in (settings.MEDIA_ROOT, settings.STATIC_ROOT) if p] for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=self.symlinks): for dirname in dirnames[:]: if (is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns) or os.path.join(os.path.abspath(dirpath), dirname) in ignored_roots): dirnames.remove(dirname) if self.verbosity > 1: self.stdout.write('ignoring directory %s\n' % dirname) elif dirname == 'locale': dirnames.remove(dirname) self.locale_paths.insert(0, os.path.join(os.path.abspath(dirpath), dirname)) for filename in filenames: file_path = os.path.normpath(os.path.join(dirpath, filename)) file_ext = os.path.splitext(filename)[1] if file_ext not in self.extensions or is_ignored(file_path, self.ignore_patterns): if self.verbosity > 1: self.stdout.write('ignoring file %s in %s\n' % (filename, dirpath)) else: locale_dir = None for path in self.locale_paths: if os.path.abspath(dirpath).startswith(os.path.dirname(path)): locale_dir = path break if not locale_dir: locale_dir = self.default_locale_path if not locale_dir: locale_dir = NO_LOCALE_DIR all_files.append(self.translatable_file_class(dirpath, filename, locale_dir)) return sorted(all_files) def process_files(self, file_list): """ Group translatable files by locale directory and run pot file build process for each group. """ file_groups = {} for translatable in file_list: file_group = file_groups.setdefault(translatable.locale_dir, []) file_group.append(translatable) for locale_dir, files in file_groups.items(): self.process_locale_dir(locale_dir, files) def process_locale_dir(self, locale_dir, files): """ Extract translatable literals from the specified files, creating or updating the POT file for a given locale directory. Uses the xgettext GNU gettext utility. """ build_files = [] for translatable in files: if self.verbosity > 1: self.stdout.write('processing file %s in %s\n' % ( translatable.file, translatable.dirpath )) if self.domain not in ('djangojs', 'django'): continue build_file = self.build_file_class(self, self.domain, translatable) try: build_file.preprocess() except UnicodeDecodeError as e: self.stdout.write( 'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % ( translatable.file, translatable.dirpath, e, ) ) continue build_files.append(build_file) if self.domain == 'djangojs': is_templatized = build_file.is_templatized args = [ 'xgettext', '-d', self.domain, '--language=%s' % ('C' if is_templatized else 'JavaScript',), '--keyword=gettext_noop', '--keyword=gettext_lazy', '--keyword=ngettext_lazy:1,2', '--keyword=pgettext:1c,2', '--keyword=npgettext:1c,2,3', '--output=-', ] elif self.domain == 'django': args = [ 'xgettext', '-d', self.domain, '--language=Python', '--keyword=gettext_noop', '--keyword=gettext_lazy', '--keyword=ngettext_lazy:1,2', '--keyword=ugettext_noop', '--keyword=ugettext_lazy', '--keyword=ungettext_lazy:1,2', '--keyword=pgettext:1c,2', '--keyword=npgettext:1c,2,3', '--keyword=pgettext_lazy:1c,2', '--keyword=npgettext_lazy:1c,2,3', '--output=-', ] else: return input_files = [bf.work_path for bf in build_files] with NamedTemporaryFile(mode='w+') as input_files_list: input_files_list.write('\n'.join(input_files)) input_files_list.flush() args.extend(['--files-from', input_files_list.name]) args.extend(self.xgettext_options) msgs, errors, status = popen_wrapper(args) if errors: if status != STATUS_OK: for build_file in build_files: build_file.cleanup() raise CommandError( 'errors happened while running xgettext on %s\n%s' % ('\n'.join(input_files), errors) ) elif self.verbosity > 0: # Print warnings self.stdout.write(errors) if msgs: if locale_dir is NO_LOCALE_DIR: file_path = os.path.normpath(build_files[0].path) raise CommandError( 'Unable to find a locale path to store translations for ' 'file %s' % file_path ) for build_file in build_files: msgs = build_file.postprocess_messages(msgs) potfile = os.path.join(locale_dir, '%s.pot' % str(self.domain)) write_pot_file(potfile, msgs) for build_file in build_files: build_file.cleanup() def write_po_file(self, potfile, locale): """ Creates or updates the PO file for self.domain and :param locale:. Uses contents of the existing :param potfile:. Uses msgmerge, and msgattrib GNU gettext utilities. """ basedir = os.path.join(os.path.dirname(potfile), locale, 'LC_MESSAGES') if not os.path.isdir(basedir): os.makedirs(basedir) pofile = os.path.join(basedir, '%s.po' % str(self.domain)) if os.path.exists(pofile): args = ['msgmerge'] + self.msgmerge_options + [pofile, potfile] msgs, errors, status = popen_wrapper(args) if errors: if status != STATUS_OK: raise CommandError( "errors happened while running msgmerge\n%s" % errors) elif self.verbosity > 0: self.stdout.write(errors) else: with io.open(potfile, 'r', encoding='utf-8') as fp: msgs = fp.read() if not self.invoked_for_django: msgs = self.copy_plural_forms(msgs, locale) msgs = msgs.replace( "#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % self.domain, "") with io.open(pofile, 'w', encoding='utf-8') as fp: fp.write(msgs) if self.no_obsolete: args = ['msgattrib'] + self.msgattrib_options + ['-o', pofile, pofile] msgs, errors, status = popen_wrapper(args) if errors: if status != STATUS_OK: raise CommandError( "errors happened while running msgattrib\n%s" % errors) elif self.verbosity > 0: self.stdout.write(errors) def copy_plural_forms(self, msgs, locale): """ Copies plural forms header contents from a Django catalog of locale to the msgs string, inserting it at the right place. msgs should be the contents of a newly created .po file. """ django_dir = os.path.normpath(os.path.join(os.path.dirname(upath(django.__file__)))) if self.domain == 'djangojs': domains = ('djangojs', 'django') else: domains = ('django',) for domain in domains: django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain) if os.path.exists(django_po): with io.open(django_po, 'r', encoding='utf-8') as fp: m = plural_forms_re.search(fp.read()) if m: plural_form_line = force_str(m.group('value')) if self.verbosity > 1: self.stdout.write("copying plural forms: %s\n" % plural_form_line) lines = [] found = False for line in msgs.split('\n'): if not found and (not line or plural_forms_re.search(line)): line = '%s\n' % plural_form_line found = True lines.append(line) msgs = '\n'.join(lines) break return msgs
chouseknecht/ansible
refs/heads/devel
lib/ansible/modules/network/f5/bigip_device_connectivity.py
38
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2017, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_device_connectivity short_description: Manages device IP configuration settings for HA on a BIG-IP description: - Manages device IP configuration settings for HA on a BIG-IP. Each BIG-IP device has synchronization and failover connectivity information (IP addresses) that you define as part of HA pairing or clustering. This module allows you to configure that information. version_added: 2.5 options: config_sync_ip: description: - Local IP address that the system uses for ConfigSync operations. type: str mirror_primary_address: description: - Specifies the primary IP address for the system to use to mirror connections. type: str mirror_secondary_address: description: - Specifies the secondary IP address for the system to use to mirror connections. type: str unicast_failover: description: - Desired addresses to use for failover operations. Options C(address) and C(port) are supported with dictionary structure where C(address) is the local IP address that the system uses for failover operations. Port specifies the port that the system uses for failover operations. If C(port) is not specified, the default value C(1026) will be used. If you are specifying the (recommended) management IP address, use 'management-ip' in the address field. type: list failover_multicast: description: - When C(yes), ensures that the Failover Multicast configuration is enabled and if no further multicast configuration is provided, ensures that C(multicast_interface), C(multicast_address) and C(multicast_port) are the defaults specified in each option's description. When C(no), ensures that Failover Multicast configuration is disabled. type: bool multicast_interface: description: - Interface over which the system sends multicast messages associated with failover. When C(failover_multicast) is C(yes) and this option is not provided, a default of C(eth0) will be used. type: str multicast_address: description: - IP address for the system to send multicast messages associated with failover. When C(failover_multicast) is C(yes) and this option is not provided, a default of C(224.0.0.245) will be used. type: str multicast_port: description: - Port for the system to send multicast messages associated with failover. When C(failover_multicast) is C(yes) and this option is not provided, a default of C(62960) will be used. This value must be between 0 and 65535. type: int cluster_mirroring: description: - Specifies whether mirroring occurs within the same cluster or between different clusters on a multi-bladed system. - This parameter is only supported on platforms that have multiple blades, such as Viprion hardware. It is not supported on VE. type: str choices: - between-clusters - within-cluster version_added: 2.7 notes: - This module is primarily used as a component of configuring HA pairs of BIG-IP devices. - Requires BIG-IP >= 12.0.0 extends_documentation_fragment: f5 author: - Tim Rupp (@caphrim007) - Wojciech Wypior (@wojtek0806) ''' EXAMPLES = r''' - name: Configure device connectivity for standard HA pair bigip_device_connectivity: config_sync_ip: 10.1.30.1 mirror_primary_address: 10.1.30.1 unicast_failover: - address: management-ip - address: 10.1.30.1 provider: server: lb.mydomain.com user: admin password: secret delegate_to: localhost ''' RETURN = r''' changed: description: Denotes if the F5 configuration was updated. returned: always type: bool config_sync_ip: description: The new value of the C(config_sync_ip) setting. returned: changed type: str sample: 10.1.1.1 mirror_primary_address: description: The new value of the C(mirror_primary_address) setting. returned: changed type: str sample: 10.1.1.2 mirror_secondary_address: description: The new value of the C(mirror_secondary_address) setting. returned: changed type: str sample: 10.1.1.3 unicast_failover: description: The new value of the C(unicast_failover) setting. returned: changed type: list sample: [{'address': '10.1.1.2', 'port': 1026}] failover_multicast: description: Whether a failover multicast attribute has been changed or not. returned: changed type: bool multicast_interface: description: The new value of the C(multicast_interface) setting. returned: changed type: str sample: eth0 multicast_address: description: The new value of the C(multicast_address) setting. returned: changed type: str sample: 224.0.0.245 multicast_port: description: The new value of the C(multicast_port) setting. returned: changed type: int sample: 1026 cluster_mirroring: description: The current cluster-mirroring setting. returned: changed type: str sample: between-clusters ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import iteritems try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import transform_name from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.ipaddress import is_valid_ip except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import transform_name from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.ipaddress import is_valid_ip class Parameters(AnsibleF5Parameters): api_map = { 'unicastAddress': 'unicast_failover', 'configsyncIp': 'config_sync_ip', 'multicastInterface': 'multicast_interface', 'multicastIp': 'multicast_address', 'multicastPort': 'multicast_port', 'mirrorIp': 'mirror_primary_address', 'mirrorSecondaryIp': 'mirror_secondary_address', 'managementIp': 'management_ip', } api_attributes = [ 'configsyncIp', 'multicastInterface', 'multicastIp', 'multicastPort', 'mirrorIp', 'mirrorSecondaryIp', 'unicastAddress', ] returnables = [ 'config_sync_ip', 'multicast_interface', 'multicast_address', 'multicast_port', 'mirror_primary_address', 'mirror_secondary_address', 'failover_multicast', 'unicast_failover', 'cluster_mirroring', ] updatables = [ 'config_sync_ip', 'multicast_interface', 'multicast_address', 'multicast_port', 'mirror_primary_address', 'mirror_secondary_address', 'failover_multicast', 'unicast_failover', 'cluster_mirroring', ] @property def multicast_port(self): if self._values['multicast_port'] is None: return None result = int(self._values['multicast_port']) if result < 0 or result > 65535: raise F5ModuleError( "The specified 'multicast_port' must be between 0 and 65535." ) return result @property def multicast_address(self): if self._values['multicast_address'] is None: return None elif self._values['multicast_address'] in ["none", "any6", '']: return "any6" elif self._values['multicast_address'] == 'any': return 'any' result = self._get_validated_ip_address('multicast_address') return result @property def mirror_primary_address(self): if self._values['mirror_primary_address'] is None: return None elif self._values['mirror_primary_address'] in ["none", "any6", '']: return "any6" result = self._get_validated_ip_address('mirror_primary_address') return result @property def mirror_secondary_address(self): if self._values['mirror_secondary_address'] is None: return None elif self._values['mirror_secondary_address'] in ["none", "any6", '']: return "any6" result = self._get_validated_ip_address('mirror_secondary_address') return result @property def config_sync_ip(self): if self._values['config_sync_ip'] is None: return None elif self._values['config_sync_ip'] in ["none", '']: return "none" result = self._get_validated_ip_address('config_sync_ip') return result def _validate_unicast_failover_port(self, port): try: result = int(port) except ValueError: raise F5ModuleError( "The provided 'port' for unicast failover is not a valid number" ) except TypeError: result = 1026 return result def _validate_unicast_failover_address(self, address): if address != 'management-ip': if is_valid_ip(address): return address else: raise F5ModuleError( "'address' field in unicast failover is not a valid IP address" ) else: return address def _get_validated_ip_address(self, address): if is_valid_ip(self._values[address]): return self._values[address] raise F5ModuleError( "The specified '{0}' is not a valid IP address".format(address) ) class ApiParameters(Parameters): @property def cluster_mirroring(self): if self._values['cluster_mirroring'] is None: return None if self._values['cluster_mirroring'] == 'between': return 'between-clusters' return 'within-cluster' class ModuleParameters(Parameters): @property def unicast_failover(self): if self._values['unicast_failover'] is None: return None if self._values['unicast_failover'] == ['none']: return [] result = [] for item in self._values['unicast_failover']: address = item.get('address', None) port = item.get('port', None) address = self._validate_unicast_failover_address(address) port = self._validate_unicast_failover_port(port) result.append( dict( effectiveIp=address, effectivePort=port, ip=address, port=port ) ) if result: return result else: return None class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: change = getattr(self, returnable) if isinstance(change, dict): result.update(change) else: result[returnable] = change result = self._filter_params(result) except Exception: pass return result class ReportableChanges(Changes): returnables = [ 'config_sync_ip', 'multicast_interface', 'multicast_address', 'multicast_port', 'mirror_primary_address', 'mirror_secondary_address', 'failover_multicast', 'unicast_failover' ] @property def mirror_secondary_address(self): if self._values['mirror_secondary_address'] in ['none', 'any6']: return 'none' return self._values['mirror_secondary_address'] @property def mirror_primary_address(self): if self._values['mirror_primary_address'] in ['none', 'any6']: return 'none' return self._values['mirror_primary_address'] @property def multicast_address(self): if self._values['multicast_address'] in ['none', 'any6']: return 'none' return self._values['multicast_address'] class UsableChanges(Changes): @property def mirror_primary_address(self): if self._values['mirror_primary_address'] == ['any6', 'none', 'any']: return "any6" else: return self._values['mirror_primary_address'] @property def mirror_secondary_address(self): if self._values['mirror_secondary_address'] == ['any6', 'none', 'any']: return "any6" else: return self._values['mirror_secondary_address'] @property def multicast_address(self): if self._values['multicast_address'] == ['any6', 'none', 'any']: return "any" else: return self._values['multicast_address'] @property def unicast_failover(self): if self._values['unicast_failover'] is None: return None elif self._values['unicast_failover']: return self._values['unicast_failover'] return "none" @property def cluster_mirroring(self): if self._values['cluster_mirroring'] is None: return None elif self._values['cluster_mirroring'] == 'between-clusters': return 'between' return 'within' class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return self.__default(param) def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 def to_tuple(self, failovers): result = [] for x in failovers: for k, v in iteritems(x): # Have to do this in cases where the BIG-IP stores the word # "management-ip" when you specify the management IP address. # # Otherwise, a difference would be registered. if v == self.have.management_ip: v = 'management-ip' result += [(str(k), str(v))] return result @property def unicast_failover(self): if self.want.unicast_failover == [] and self.have.unicast_failover is None: return None if self.want.unicast_failover is None: return None if self.have.unicast_failover is None: return self.want.unicast_failover want = self.to_tuple(self.want.unicast_failover) have = self.to_tuple(self.have.unicast_failover) if set(want) == set(have): return None else: return self.want.unicast_failover @property def failover_multicast(self): values = ['multicast_address', 'multicast_interface', 'multicast_port'] if self.want.failover_multicast is False: if self.have.multicast_interface == 'eth0' and self.have.multicast_address == 'any' and self.have.multicast_port == 0: return None else: result = dict( failover_multicast=True, multicast_port=0, multicast_interface='eth0', multicast_address='any' ) return result else: if all(self.have._values[x] in [None, 'any6', 'any'] for x in values): return True class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = F5RestClient(**self.module.params) self.want = ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def should_update(self): result = self._update_changed_options() if result: return True return False def exec_module(self): result = dict() changed = self.update() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) return result def update(self): self.have = self.read_current_from_device() if not self.should_update(): return False if self.module.check_mode: return True self.update_on_device() if self.changes.cluster_mirroring: self.update_cluster_mirroring_on_device() return True def update_on_device(self): params = self.changes.api_params() if not params: return uri = "https://{0}:{1}/mgmt/tm/cm/device/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) for item in response['items']: if item['selfDevice'] == 'true': uri = "https://{0}:{1}/mgmt/tm/cm/device/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(item['partition'], item['name']) ) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return raise F5ModuleError( "The host device was not found." ) def update_cluster_mirroring_on_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/db/{2}".format( self.client.provider['server'], self.client.provider['server_port'], 'statemirror.clustermirroring' ) payload = {"value": self.changes.cluster_mirroring} resp = self.client.api.patch(uri, json=payload) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def read_current_from_device(self): db = self.read_cluster_mirroring_from_device() uri = "https://{0}:{1}/mgmt/tm/cm/device/".format( self.client.provider['server'], self.client.provider['server_port'], ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) for item in response['items']: if item['selfDevice'] == 'true': uri = "https://{0}:{1}/mgmt/tm/cm/device/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(item['partition'], item['name']) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) if db: response['cluster_mirroring'] = db['value'] return ApiParameters(params=response) raise F5ModuleError( "The host device was not found." ) def read_cluster_mirroring_from_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/db/{2}".format( self.client.provider['server'], self.client.provider['server_port'], 'statemirror.clustermirroring' ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return response class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( multicast_port=dict( type='int' ), multicast_address=dict(), multicast_interface=dict(), failover_multicast=dict( type='bool' ), unicast_failover=dict( type='list' ), mirror_primary_address=dict(), mirror_secondary_address=dict(), config_sync_ip=dict(), cluster_mirroring=dict( choices=['within-cluster', 'between-clusters'] ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) self.required_together = [ ['multicast_address', 'multicast_interface', 'multicast_port'] ] def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, required_together=spec.required_together ) try: mm = ModuleManager(module=module) results = mm.exec_module() module.exit_json(**results) except F5ModuleError as ex: module.fail_json(msg=str(ex)) if __name__ == '__main__': main()
jejimenez/django
refs/heads/master
tests/requests/tests.py
73
# -*- encoding: utf-8 -*- from __future__ import unicode_literals import time from datetime import datetime, timedelta from io import BytesIO from itertools import chain from django.core.exceptions import SuspiciousOperation from django.core.handlers.wsgi import LimitedStream, WSGIRequest from django.http import ( HttpRequest, HttpResponse, RawPostDataException, UnreadablePostError, parse_cookie, ) from django.test import RequestFactory, SimpleTestCase, override_settings from django.test.client import FakePayload from django.test.utils import str_prefix from django.utils import six from django.utils.encoding import force_str from django.utils.http import cookie_date, urlencode from django.utils.six.moves import http_cookies from django.utils.six.moves.urllib.parse import urlencode as original_urlencode from django.utils.timezone import utc class RequestsTests(SimpleTestCase): def test_httprequest(self): request = HttpRequest() self.assertEqual(list(request.GET.keys()), []) self.assertEqual(list(request.POST.keys()), []) self.assertEqual(list(request.COOKIES.keys()), []) self.assertEqual(list(request.META.keys()), []) # .GET and .POST should be QueryDicts self.assertEqual(request.GET.urlencode(), '') self.assertEqual(request.POST.urlencode(), '') # and FILES should be MultiValueDict self.assertEqual(request.FILES.getlist('foo'), []) def test_httprequest_full_path(self): request = HttpRequest() request.path = request.path_info = '/;some/?awful/=path/foo:bar/' request.META['QUERY_STRING'] = ';some=query&+query=string' expected = '/%3Bsome/%3Fawful/%3Dpath/foo:bar/?;some=query&+query=string' self.assertEqual(request.get_full_path(), expected) def test_httprequest_full_path_with_query_string_and_fragment(self): request = HttpRequest() request.path = request.path_info = '/foo#bar' request.META['QUERY_STRING'] = 'baz#quux' self.assertEqual(request.get_full_path(), '/foo%23bar?baz#quux') def test_httprequest_repr(self): request = HttpRequest() request.path = '/somepath/' request.method = 'GET' request.GET = {'get-key': 'get-value'} request.POST = {'post-key': 'post-value'} request.COOKIES = {'post-key': 'post-value'} request.META = {'post-key': 'post-value'} self.assertEqual(repr(request), str_prefix("<HttpRequest: GET '/somepath/'>")) def test_httprequest_repr_invalid_method_and_path(self): request = HttpRequest() self.assertEqual(repr(request), str_prefix("<HttpRequest>")) request = HttpRequest() request.method = "GET" self.assertEqual(repr(request), str_prefix("<HttpRequest>")) request = HttpRequest() request.path = "" self.assertEqual(repr(request), str_prefix("<HttpRequest>")) def test_wsgirequest(self): request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus', 'wsgi.input': BytesIO(b'')}) self.assertEqual(list(request.GET.keys()), []) self.assertEqual(list(request.POST.keys()), []) self.assertEqual(list(request.COOKIES.keys()), []) self.assertEqual(set(request.META.keys()), {'PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'wsgi.input'}) self.assertEqual(request.META['PATH_INFO'], 'bogus') self.assertEqual(request.META['REQUEST_METHOD'], 'bogus') self.assertEqual(request.META['SCRIPT_NAME'], '') def test_wsgirequest_with_script_name(self): """ Ensure that the request's path is correctly assembled, regardless of whether or not the SCRIPT_NAME has a trailing slash. Refs #20169. """ # With trailing slash request = WSGIRequest({'PATH_INFO': '/somepath/', 'SCRIPT_NAME': '/PREFIX/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(request.path, '/PREFIX/somepath/') # Without trailing slash request = WSGIRequest({'PATH_INFO': '/somepath/', 'SCRIPT_NAME': '/PREFIX', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(request.path, '/PREFIX/somepath/') def test_wsgirequest_with_force_script_name(self): """ Ensure that the FORCE_SCRIPT_NAME setting takes precedence over the request's SCRIPT_NAME environment parameter. Refs #20169. """ with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'): request = WSGIRequest({'PATH_INFO': '/somepath/', 'SCRIPT_NAME': '/PREFIX/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(request.path, '/FORCED_PREFIX/somepath/') def test_wsgirequest_path_with_force_script_name_trailing_slash(self): """ Ensure that the request's path is correctly assembled, regardless of whether or not the FORCE_SCRIPT_NAME setting has a trailing slash. Refs #20169. """ # With trailing slash with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'): request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(request.path, '/FORCED_PREFIX/somepath/') # Without trailing slash with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX'): request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(request.path, '/FORCED_PREFIX/somepath/') def test_wsgirequest_repr(self): request = WSGIRequest({'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(repr(request), str_prefix("<WSGIRequest: GET '/'>")) request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) request.GET = {'get-key': 'get-value'} request.POST = {'post-key': 'post-value'} request.COOKIES = {'post-key': 'post-value'} request.META = {'post-key': 'post-value'} self.assertEqual(repr(request), str_prefix("<WSGIRequest: GET '/somepath/'>")) def test_wsgirequest_path_info(self): def wsgi_str(path_info): path_info = path_info.encode('utf-8') # Actual URL sent by the browser (bytestring) if six.PY3: path_info = path_info.decode('iso-8859-1') # Value in the WSGI environ dict (native string) return path_info # Regression for #19468 request = WSGIRequest({'PATH_INFO': wsgi_str("/سلام/"), 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')}) self.assertEqual(request.path, "/سلام/") def test_parse_cookie(self): self.assertEqual(parse_cookie('invalid@key=true'), {}) def test_httprequest_location(self): request = HttpRequest() self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"), 'https://www.example.com/asdf') request.get_host = lambda: 'www.example.com' request.path = '' self.assertEqual(request.build_absolute_uri(location="/path/with:colons"), 'http://www.example.com/path/with:colons') def test_near_expiration(self): "Cookie will expire when an near expiration time is provided" response = HttpResponse() # There is a timing weakness in this test; The # expected result for max-age requires that there be # a very slight difference between the evaluated expiration # time, and the time evaluated in set_cookie(). If this # difference doesn't exist, the cookie time will be # 1 second larger. To avoid the problem, put in a quick sleep, # which guarantees that there will be a time difference. expires = datetime.utcnow() + timedelta(seconds=10) time.sleep(0.001) response.set_cookie('datetime', expires=expires) datetime_cookie = response.cookies['datetime'] self.assertEqual(datetime_cookie['max-age'], 10) def test_aware_expiration(self): "Cookie accepts an aware datetime as expiration time" response = HttpResponse() expires = (datetime.utcnow() + timedelta(seconds=10)).replace(tzinfo=utc) time.sleep(0.001) response.set_cookie('datetime', expires=expires) datetime_cookie = response.cookies['datetime'] self.assertEqual(datetime_cookie['max-age'], 10) def test_far_expiration(self): "Cookie will expire when an distant expiration time is provided" response = HttpResponse() response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6)) datetime_cookie = response.cookies['datetime'] self.assertIn( datetime_cookie['expires'], # Slight time dependency; refs #23450 ('Sat, 01-Jan-2028 04:05:06 GMT', 'Sat, 01-Jan-2028 04:05:07 GMT') ) def test_max_age_expiration(self): "Cookie will expire if max_age is provided" response = HttpResponse() response.set_cookie('max_age', max_age=10) max_age_cookie = response.cookies['max_age'] self.assertEqual(max_age_cookie['max-age'], 10) self.assertEqual(max_age_cookie['expires'], cookie_date(time.time() + 10)) def test_httponly_cookie(self): response = HttpResponse() response.set_cookie('example', httponly=True) example_cookie = response.cookies['example'] # A compat cookie may be in use -- check that it has worked # both as an output string, and using the cookie attributes self.assertIn('; %s' % http_cookies.Morsel._reserved['httponly'], str(example_cookie)) self.assertTrue(example_cookie['httponly']) def test_unicode_cookie(self): "Verify HttpResponse.set_cookie() works with unicode data." response = HttpResponse() cookie_value = '清風' response.set_cookie('test', cookie_value) self.assertEqual(force_str(cookie_value), response.cookies['test'].value) def test_limited_stream(self): # Read all of a limited stream stream = LimitedStream(BytesIO(b'test'), 2) self.assertEqual(stream.read(), b'te') # Reading again returns nothing. self.assertEqual(stream.read(), b'') # Read a number of characters greater than the stream has to offer stream = LimitedStream(BytesIO(b'test'), 2) self.assertEqual(stream.read(5), b'te') # Reading again returns nothing. self.assertEqual(stream.readline(5), b'') # Read sequentially from a stream stream = LimitedStream(BytesIO(b'12345678'), 8) self.assertEqual(stream.read(5), b'12345') self.assertEqual(stream.read(5), b'678') # Reading again returns nothing. self.assertEqual(stream.readline(5), b'') # Read lines from a stream stream = LimitedStream(BytesIO(b'1234\n5678\nabcd\nefgh\nijkl'), 24) # Read a full line, unconditionally self.assertEqual(stream.readline(), b'1234\n') # Read a number of characters less than a line self.assertEqual(stream.readline(2), b'56') # Read the rest of the partial line self.assertEqual(stream.readline(), b'78\n') # Read a full line, with a character limit greater than the line length self.assertEqual(stream.readline(6), b'abcd\n') # Read the next line, deliberately terminated at the line end self.assertEqual(stream.readline(4), b'efgh') # Read the next line... just the line end self.assertEqual(stream.readline(), b'\n') # Read everything else. self.assertEqual(stream.readline(), b'ijkl') # Regression for #15018 # If a stream contains a newline, but the provided length # is less than the number of provided characters, the newline # doesn't reset the available character count stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9) self.assertEqual(stream.readline(10), b'1234\n') self.assertEqual(stream.readline(3), b'abc') # Now expire the available characters self.assertEqual(stream.readline(3), b'd') # Reading again returns nothing. self.assertEqual(stream.readline(2), b'') # Same test, but with read, not readline. stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9) self.assertEqual(stream.read(6), b'1234\na') self.assertEqual(stream.read(2), b'bc') self.assertEqual(stream.read(2), b'd') self.assertEqual(stream.read(2), b'') self.assertEqual(stream.read(), b'') def test_stream(self): payload = FakePayload('name=value') request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) self.assertEqual(request.read(), b'name=value') def test_read_after_value(self): """ Reading from request is allowed after accessing request contents as POST or body. """ payload = FakePayload('name=value') request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) self.assertEqual(request.POST, {'name': ['value']}) self.assertEqual(request.body, b'name=value') self.assertEqual(request.read(), b'name=value') def test_value_after_read(self): """ Construction of POST or body is not allowed after reading from request. """ payload = FakePayload('name=value') request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) self.assertEqual(request.read(2), b'na') self.assertRaises(RawPostDataException, lambda: request.body) self.assertEqual(request.POST, {}) def test_non_ascii_POST(self): payload = FakePayload(urlencode({'key': 'España'})) request = WSGIRequest({ 'REQUEST_METHOD': 'POST', 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'wsgi.input': payload, }) self.assertEqual(request.POST, {'key': ['España']}) def test_alternate_charset_POST(self): """ Test a POST with non-utf-8 payload encoding. """ payload = FakePayload(original_urlencode({'key': 'España'.encode('latin-1')})) request = WSGIRequest({ 'REQUEST_METHOD': 'POST', 'CONTENT_LENGTH': len(payload), 'CONTENT_TYPE': 'application/x-www-form-urlencoded; charset=iso-8859-1', 'wsgi.input': payload, }) self.assertEqual(request.POST, {'key': ['España']}) def test_body_after_POST_multipart_form_data(self): """ Reading body after parsing multipart/form-data is not allowed """ # Because multipart is used for large amounts of data i.e. file uploads, # we don't want the data held in memory twice, and we don't want to # silence the error by setting body = '' either. payload = FakePayload("\r\n".join([ '--boundary', 'Content-Disposition: form-data; name="name"', '', 'value', '--boundary--' ''])) request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/form-data; boundary=boundary', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) self.assertEqual(request.POST, {'name': ['value']}) self.assertRaises(RawPostDataException, lambda: request.body) def test_body_after_POST_multipart_related(self): """ Reading body after parsing multipart that isn't form-data is allowed """ # Ticket #9054 # There are cases in which the multipart data is related instead of # being a binary upload, in which case it should still be accessible # via body. payload_data = b"\r\n".join([ b'--boundary', b'Content-ID: id; name="name"', b'', b'value', b'--boundary--' b'']) payload = FakePayload(payload_data) request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/related; boundary=boundary', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) self.assertEqual(request.POST, {}) self.assertEqual(request.body, payload_data) def test_POST_multipart_with_content_length_zero(self): """ Multipart POST requests with Content-Length >= 0 are valid and need to be handled. """ # According to: # http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 # Every request.POST with Content-Length >= 0 is a valid request, # this test ensures that we handle Content-Length == 0. payload = FakePayload("\r\n".join([ '--boundary', 'Content-Disposition: form-data; name="name"', '', 'value', '--boundary--' ''])) request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/form-data; boundary=boundary', 'CONTENT_LENGTH': 0, 'wsgi.input': payload}) self.assertEqual(request.POST, {}) def test_POST_binary_only(self): payload = b'\r\n\x01\x00\x00\x00ab\x00\x00\xcd\xcc,@' environ = {'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/octet-stream', 'CONTENT_LENGTH': len(payload), 'wsgi.input': BytesIO(payload)} request = WSGIRequest(environ) self.assertEqual(request.POST, {}) self.assertEqual(request.FILES, {}) self.assertEqual(request.body, payload) # Same test without specifying content-type environ.update({'CONTENT_TYPE': '', 'wsgi.input': BytesIO(payload)}) request = WSGIRequest(environ) self.assertEqual(request.POST, {}) self.assertEqual(request.FILES, {}) self.assertEqual(request.body, payload) def test_read_by_lines(self): payload = FakePayload('name=value') request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) self.assertEqual(list(request), [b'name=value']) def test_POST_after_body_read(self): """ POST should be populated even if body is read first """ payload = FakePayload('name=value') request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) request.body # evaluate self.assertEqual(request.POST, {'name': ['value']}) def test_POST_after_body_read_and_stream_read(self): """ POST should be populated even if body is read first, and then the stream is read second. """ payload = FakePayload('name=value') request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) request.body # evaluate self.assertEqual(request.read(1), b'n') self.assertEqual(request.POST, {'name': ['value']}) def test_POST_after_body_read_and_stream_read_multipart(self): """ POST should be populated even if body is read first, and then the stream is read second. Using multipart/form-data instead of urlencoded. """ payload = FakePayload("\r\n".join([ '--boundary', 'Content-Disposition: form-data; name="name"', '', 'value', '--boundary--' ''])) request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/form-data; boundary=boundary', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload}) request.body # evaluate # Consume enough data to mess up the parsing: self.assertEqual(request.read(13), b'--boundary\r\nC') self.assertEqual(request.POST, {'name': ['value']}) def test_POST_connection_error(self): """ If wsgi.input.read() raises an exception while trying to read() the POST, the exception should be identifiable (not a generic IOError). """ class ExplodingBytesIO(BytesIO): def read(self, len=0): raise IOError("kaboom!") payload = b'name=value' request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'application/x-www-form-urlencoded', 'CONTENT_LENGTH': len(payload), 'wsgi.input': ExplodingBytesIO(payload)}) with self.assertRaises(UnreadablePostError): request.body def test_FILES_connection_error(self): """ If wsgi.input.read() raises an exception while trying to read() the FILES, the exception should be identifiable (not a generic IOError). """ class ExplodingBytesIO(BytesIO): def read(self, len=0): raise IOError("kaboom!") payload = b'x' request = WSGIRequest({'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/form-data; boundary=foo_', 'CONTENT_LENGTH': len(payload), 'wsgi.input': ExplodingBytesIO(payload)}) with self.assertRaises(UnreadablePostError): request.FILES class HostValidationTests(SimpleTestCase): poisoned_hosts = [ 'example.com@evil.tld', 'example.com:dr.frankenstein@evil.tld', 'example.com:dr.frankenstein@evil.tld:80', 'example.com:80/badpath', 'example.com: recovermypassword.com', ] @override_settings( USE_X_FORWARDED_HOST=False, ALLOWED_HOSTS=[ 'forward.com', 'example.com', 'internal.com', '12.34.56.78', '[2001:19f0:feee::dead:beef:cafe]', 'xn--4ca9at.com', '.multitenant.com', 'INSENSITIVE.com', ]) def test_http_get_host(self): # Check if X_FORWARDED_HOST is provided. request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_HOST': 'forward.com', 'HTTP_HOST': 'example.com', 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 80, } # X_FORWARDED_HOST is ignored. self.assertEqual(request.get_host(), 'example.com') # Check if X_FORWARDED_HOST isn't provided. request = HttpRequest() request.META = { 'HTTP_HOST': 'example.com', 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 80, } self.assertEqual(request.get_host(), 'example.com') # Check if HTTP_HOST isn't provided. request = HttpRequest() request.META = { 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 80, } self.assertEqual(request.get_host(), 'internal.com') # Check if HTTP_HOST isn't provided, and we're on a nonstandard port request = HttpRequest() request.META = { 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 8042, } self.assertEqual(request.get_host(), 'internal.com:8042') legit_hosts = [ 'example.com', 'example.com:80', '12.34.56.78', '12.34.56.78:443', '[2001:19f0:feee::dead:beef:cafe]', '[2001:19f0:feee::dead:beef:cafe]:8080', 'xn--4ca9at.com', # Punnycode for öäü.com 'anything.multitenant.com', 'multitenant.com', 'insensitive.com', 'example.com.', 'example.com.:80', ] for host in legit_hosts: request = HttpRequest() request.META = { 'HTTP_HOST': host, } request.get_host() # Poisoned host headers are rejected as suspicious for host in chain(self.poisoned_hosts, ['other.com', 'example.com..']): with self.assertRaises(SuspiciousOperation): request = HttpRequest() request.META = { 'HTTP_HOST': host, } request.get_host() @override_settings(USE_X_FORWARDED_HOST=True, ALLOWED_HOSTS=['*']) def test_http_get_host_with_x_forwarded_host(self): # Check if X_FORWARDED_HOST is provided. request = HttpRequest() request.META = { 'HTTP_X_FORWARDED_HOST': 'forward.com', 'HTTP_HOST': 'example.com', 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 80, } # X_FORWARDED_HOST is obeyed. self.assertEqual(request.get_host(), 'forward.com') # Check if X_FORWARDED_HOST isn't provided. request = HttpRequest() request.META = { 'HTTP_HOST': 'example.com', 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 80, } self.assertEqual(request.get_host(), 'example.com') # Check if HTTP_HOST isn't provided. request = HttpRequest() request.META = { 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 80, } self.assertEqual(request.get_host(), 'internal.com') # Check if HTTP_HOST isn't provided, and we're on a nonstandard port request = HttpRequest() request.META = { 'SERVER_NAME': 'internal.com', 'SERVER_PORT': 8042, } self.assertEqual(request.get_host(), 'internal.com:8042') # Poisoned host headers are rejected as suspicious legit_hosts = [ 'example.com', 'example.com:80', '12.34.56.78', '12.34.56.78:443', '[2001:19f0:feee::dead:beef:cafe]', '[2001:19f0:feee::dead:beef:cafe]:8080', 'xn--4ca9at.com', # Punnycode for öäü.com ] for host in legit_hosts: request = HttpRequest() request.META = { 'HTTP_HOST': host, } request.get_host() for host in self.poisoned_hosts: with self.assertRaises(SuspiciousOperation): request = HttpRequest() request.META = { 'HTTP_HOST': host, } request.get_host() @override_settings(USE_X_FORWARDED_PORT=False) def test_get_port(self): request = HttpRequest() request.META = { 'SERVER_PORT': '8080', 'HTTP_X_FORWARDED_PORT': '80', } # Shouldn't use the X-Forwarded-Port header self.assertEqual(request.get_port(), '8080') request = HttpRequest() request.META = { 'SERVER_PORT': '8080', } self.assertEqual(request.get_port(), '8080') @override_settings(USE_X_FORWARDED_PORT=True) def test_get_port_with_x_forwarded_port(self): request = HttpRequest() request.META = { 'SERVER_PORT': '8080', 'HTTP_X_FORWARDED_PORT': '80', } # Should use the X-Forwarded-Port header self.assertEqual(request.get_port(), '80') request = HttpRequest() request.META = { 'SERVER_PORT': '8080', } self.assertEqual(request.get_port(), '8080') @override_settings(DEBUG=True, ALLOWED_HOSTS=[]) def test_host_validation_disabled_in_debug_mode(self): """If ALLOWED_HOSTS is empty and DEBUG is True, all hosts pass.""" request = HttpRequest() request.META = { 'HTTP_HOST': 'example.com', } self.assertEqual(request.get_host(), 'example.com') # Invalid hostnames would normally raise a SuspiciousOperation, # but we have DEBUG=True, so this check is disabled. request = HttpRequest() request.META = { 'HTTP_HOST': "invalid_hostname.com", } self.assertEqual(request.get_host(), "invalid_hostname.com") @override_settings(ALLOWED_HOSTS=[]) def test_get_host_suggestion_of_allowed_host(self): """get_host() makes helpful suggestions if a valid-looking host is not in ALLOWED_HOSTS.""" msg_invalid_host = "Invalid HTTP_HOST header: %r." msg_suggestion = msg_invalid_host + " You may need to add %r to ALLOWED_HOSTS." msg_suggestion2 = msg_invalid_host + " The domain name provided is not valid according to RFC 1034/1035" for host in [ # Valid-looking hosts 'example.com', '12.34.56.78', '[2001:19f0:feee::dead:beef:cafe]', 'xn--4ca9at.com', # Punnycode for öäü.com ]: request = HttpRequest() request.META = {'HTTP_HOST': host} self.assertRaisesMessage( SuspiciousOperation, msg_suggestion % (host, host), request.get_host ) for domain, port in [ # Valid-looking hosts with a port number ('example.com', 80), ('12.34.56.78', 443), ('[2001:19f0:feee::dead:beef:cafe]', 8080), ]: host = '%s:%s' % (domain, port) request = HttpRequest() request.META = {'HTTP_HOST': host} self.assertRaisesMessage( SuspiciousOperation, msg_suggestion % (host, domain), request.get_host ) for host in self.poisoned_hosts: request = HttpRequest() request.META = {'HTTP_HOST': host} self.assertRaisesMessage( SuspiciousOperation, msg_invalid_host % host, request.get_host ) request = HttpRequest() request.META = {'HTTP_HOST': "invalid_hostname.com"} self.assertRaisesMessage( SuspiciousOperation, msg_suggestion2 % "invalid_hostname.com", request.get_host ) class BuildAbsoluteURITestCase(SimpleTestCase): """ Regression tests for ticket #18314. """ def setUp(self): self.factory = RequestFactory() def test_build_absolute_uri_no_location(self): """ Ensures that ``request.build_absolute_uri()`` returns the proper value when the ``location`` argument is not provided, and ``request.path`` begins with //. """ # //// is needed to create a request with a path beginning with // request = self.factory.get('////absolute-uri') self.assertEqual( request.build_absolute_uri(), 'http://testserver//absolute-uri' ) def test_build_absolute_uri_absolute_location(self): """ Ensures that ``request.build_absolute_uri()`` returns the proper value when an absolute URL ``location`` argument is provided, and ``request.path`` begins with //. """ # //// is needed to create a request with a path beginning with // request = self.factory.get('////absolute-uri') self.assertEqual( request.build_absolute_uri(location='http://example.com/?foo=bar'), 'http://example.com/?foo=bar' ) def test_build_absolute_uri_schema_relative_location(self): """ Ensures that ``request.build_absolute_uri()`` returns the proper value when a schema-relative URL ``location`` argument is provided, and ``request.path`` begins with //. """ # //// is needed to create a request with a path beginning with // request = self.factory.get('////absolute-uri') self.assertEqual( request.build_absolute_uri(location='//example.com/?foo=bar'), 'http://example.com/?foo=bar' ) def test_build_absolute_uri_relative_location(self): """ Ensures that ``request.build_absolute_uri()`` returns the proper value when a relative URL ``location`` argument is provided, and ``request.path`` begins with //. """ # //// is needed to create a request with a path beginning with // request = self.factory.get('////absolute-uri') self.assertEqual( request.build_absolute_uri(location='/foo/bar/'), 'http://testserver/foo/bar/' )
vollib/py_vollib
refs/heads/master
py_vollib/helpers/constants.py
1
# -*- coding: utf-8 -*- """ py_vollib.helpers.constants ~~~~~~~~~~~~~~~~~~~~~~~~~~~ A library for option pricing, implied volatility, and greek calculation. py_vollib is based on lets_be_rational, a Python wrapper for LetsBeRational by Peter Jaeckel as described below. :copyright: © 2017 Gammon Capital LLC :license: MIT, see LICENSE for more details. About LetsBeRational: ~~~~~~~~~~~~~~~~~~~~~ The source code of LetsBeRational resides at www.jaeckel.org/LetsBeRational.7z . :: ======================================================================================== Copyright © 2013-2014 Peter Jäckel. Permission to use, copy, modify, and distribute this software is freely granted, provided that this notice is preserved. WARRANTY DISCLAIMER The Software is provided "as is" without warranty of any kind, either express or implied, including without limitation any implied warranties of condition, uninterrupted use, merchantability, fitness for a particular purpose, or non-infringement. ======================================================================================== """ # ----------------------------------------------------------------------------- # IMPORTS # Standard library imports import sys # Related third party imports # Local application/library specific imports FLOAT_MAX = sys.float_info.max MINUS_FLOAT_MAX = - FLOAT_MAX if __name__ == "__main__": from py_vollib.helpers.doctest_helper import run_doctest run_doctest()
darkforestzero/buck
refs/heads/master
third-party/py/unittest2/setup.py
72
#! /usr/bin/env python # setup.py # Install script for unittest2 # Copyright (C) 2010 Michael Foord # E-mail: fuzzyman AT voidspace DOT org DOT uk # This software is licensed under the terms of the BSD license. # http://www.voidspace.org.uk/python/license.shtml import os import sys from unittest2 import __version__ as VERSION NAME = 'unittest2' PACKAGES = ['unittest2', 'unittest2.test'] SCRIPTS = ['unit2.py', 'unit2'] DESCRIPTION = ('The new features in unittest for Python 2.7 backported to ' 'Python 2.3+.') URL = 'http://pypi.python.org/pypi/unittest2' readme = os.path.join(os.path.dirname(__file__), 'README.txt') LONG_DESCRIPTION = open(readme).read() CLASSIFIERS = [ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python', 'Programming Language :: Python :: 2.3', 'Programming Language :: Python :: 2.4', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Operating System :: OS Independent', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Software Development :: Testing', ] AUTHOR = 'Michael Foord' AUTHOR_EMAIL = 'michael@voidspace.org.uk' KEYWORDS = "unittest testing tests".split(' ') params = dict( name=NAME, version=VERSION, description=DESCRIPTION, long_description=LONG_DESCRIPTION, packages=PACKAGES, scripts=SCRIPTS, author=AUTHOR, author_email=AUTHOR_EMAIL, url=URL, classifiers=CLASSIFIERS, keywords=KEYWORDS ) py_version = sys.version[:3] SCRIPT1 = 'unit2' SCRIPT2 = 'unit2-%s' % (py_version,) try: from setuptools import setup except ImportError: from distutils.core import setup else: params['entry_points'] = { 'console_scripts': [ '%s = unittest2:main_' % SCRIPT1, '%s = unittest2:main_' % SCRIPT2, ], } params['test_suite'] = 'unittest2.collector' setup(**params)
Mlieou/oj_solutions
refs/heads/master
leetcode/python/ex_656.py
3
class Solution(object): def cheapestJump(self, A, B): """ :type A: List[int] :type B: int :rtype: List[int] """ n = len(A) next = [-1] * n dp = [0] * n for i in range(n-2, -1, -1): min_cost = sys.maxsize for j in range(i+1, n): if j > i+B: break if A[j] >= 0: cost = A[i] + dp[j] if cost < min_cost: min_cost = cost next[i] = j dp[i] = min_cost i = 0 res = [] while i < n and next[i] > 0: res.append(i+1) i = next[i] if i == n - 1 and A[i] >= 0: res.append(n) else: res = [] return res
willprice/weboob
refs/heads/master
modules/dailymotion/__init__.py
9
from .module import DailymotionModule __all__ = ['DailymotionModule']
StephenWeber/ansible
refs/heads/devel
lib/ansible/modules/cloud/docker/docker_image.py
13
#!/usr/bin/python # # Copyright 2016 Red Hat | Ansible # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'committer', 'version': '1.0'} DOCUMENTATION = ''' --- module: docker_image short_description: Manage docker images. version_added: "1.5" description: - Build, load or pull an image, making the image available for creating containers. Also supports tagging an image into a repository and archiving an image to a .tar file. options: archive_path: description: - Use with state C(present) to archive an image to a .tar file. required: false version_added: "2.1" load_path: description: - Use with state C(present) to load an image from a .tar file. required: false version_added: "2.2" dockerfile: description: - Use with state C(present) to provide an alternate name for the Dockerfile to use when building an image. default: Dockerfile required: false version_added: "2.0" force: description: - Use with state I(absent) to un-tag and remove all images matching the specified name. Use with state C(present) to build, load or pull an image when the image already exists. default: false required: false version_added: "2.1" http_timeout: description: - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of seconds. required: false version_added: "2.1" name: description: - "Image name. Name format will be one of: name, repository/name, registry_server:port/name. When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'." required: true path: description: - Use with state 'present' to build an image. Will be the path to a directory containing the context and Dockerfile for building an image. aliases: - build_path required: false pull: description: - When building an image downloads any updates to the FROM image in Dockerfile. default: true required: false version_added: "2.1" push: description: - Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter. default: false required: false version_added: "2.2" rm: description: - Remove intermediate containers after build. default: true required: false version_added: "2.1" nocache: description: - Do not use cache when building an image. default: false required: false repository: description: - Full path to a repository. Use with state C(present) to tag the image into the repository. Expects format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest). required: false version_added: "2.1" state: description: - Make assertions about the state of an image. - When C(absent) an image will be removed. Use the force option to un-tag and remove all images matching the provided name. - When C(present) check if an image exists using the provided name and tag. If the image is not found or the force option is used, the image will either be pulled, built or loaded. By default the image will be pulled from Docker Hub. To build the image, provide a path value set to a directory containing a context and Dockerfile. To load an image, specify load_path to provide a path to an archive file. To tag an image to a repository, provide a repository path. If the name contains a repository path, it will be pushed. - "NOTE: C(build) is DEPRECATED and will be removed in release 2.3. Specifying C(build) will behave the same as C(present)." required: false default: present choices: - absent - present - build tag: description: - Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to I(latest). - If C(name) parameter format is I(name:tag), then tag value from C(name) will take precedence. default: latest required: false buildargs: description: - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive. - Docker expects the value to be a string. For convenience any non-string values will be converted to strings. - Requires Docker API >= 1.21 and docker-py >= 1.7.0. type: complex required: false version_added: "2.2" container_limits: description: - A dictionary of limits applied to each container created by the build process. required: false version_added: "2.1" type: complex contains: memory: description: Set memory limit for build type: int memswap: description: Total memory (memory + swap), -1 to disable swap type: int cpushares: description: CPU shares (relative weight) type: int cpusetcpus: description: CPUs in which to allow execution, e.g., "0-3", "0,1" type: str use_tls: description: - "DEPRECATED. Whether to use tls to connect to the docker server. Set to C(no) when TLS will not be used. Set to C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that the server's certificate is valid for the server. NOTE: If you specify this option, it will set the value of the tls or tls_verify parameters." choices: - no - encrypt - verify default: no required: false version_added: "2.0" extends_documentation_fragment: - docker requirements: - "python >= 2.6" - "docker-py >= 1.7.0" - "Docker API >= 1.20" authors: - Pavel Antonov (@softzilla) - Chris Houseknecht (@chouseknecht) - James Tanner (@jctanner) ''' EXAMPLES = ''' - name: pull an image docker_image: name: pacur/centos-7 - name: Tag and push to docker hub docker_image: name: pacur/centos-7 repository: dcoppenhagan/myimage tag: 7.0 push: yes - name: Tag and push to local registry docker_image: name: centos repository: localhost:5000/centos tag: 7 push: yes - name: Remove image docker_image: state: absent name: registry.ansible.com/chouseknecht/sinatra tag: v1 - name: Build an image and push it to a private repo docker_image: path: ./sinatra name: registry.ansible.com/chouseknecht/sinatra tag: v1 push: yes - name: Archive image docker_image: name: registry.ansible.com/chouseknecht/sinatra tag: v1 archive_path: my_sinatra.tar - name: Load image from archive and push to a private registry docker_image: name: localhost:5000/myimages/sinatra tag: v1 push: yes load_path: my_sinatra.tar - name: Build image and with buildargs docker_image: path: /path/to/build/dir name: myimage buildargs: log_volume: /var/log/myapp listen_port: 8080 ''' RETURN = ''' image: description: Image inspection results for the affected image. returned: success type: complex sample: {} ''' from ansible.module_utils.docker_common import * try: if HAS_DOCKER_PY_2: from docker.auth import resolve_repository_name else: from docker.auth.auth import resolve_repository_name from docker.utils.utils import parse_repository_tag except ImportError: # missing docker-py handled in docker_common pass class ImageManager(DockerBaseClass): def __init__(self, client, results): super(ImageManager, self).__init__() self.client = client self.results = results parameters = self.client.module.params self.check_mode = self.client.check_mode self.archive_path = parameters.get('archive_path') self.container_limits = parameters.get('container_limits') self.dockerfile = parameters.get('dockerfile') self.force = parameters.get('force') self.load_path = parameters.get('load_path') self.name = parameters.get('name') self.nocache = parameters.get('nocache') self.path = parameters.get('path') self.pull = parameters.get('pull') self.repository = parameters.get('repository') self.rm = parameters.get('rm') self.state = parameters.get('state') self.tag = parameters.get('tag') self.http_timeout = parameters.get('http_timeout') self.push = parameters.get('push') self.buildargs = parameters.get('buildargs') # If name contains a tag, it takes precedence over tag parameter. repo, repo_tag = parse_repository_tag(self.name) if repo_tag: self.name = repo self.tag = repo_tag if self.state in ['present', 'build']: self.present() elif self.state == 'absent': self.absent() def fail(self, msg): self.client.fail(msg) def present(self): ''' Handles state = 'present', which includes building, loading or pulling an image, depending on user provided parameters. :returns None ''' image = self.client.find_image(name=self.name, tag=self.tag) if not image or self.force: if self.path: # Build the image if not os.path.isdir(self.path): self.fail("Requested build path %s could not be found or you do not have access." % self.path) image_name = self.name if self.tag: image_name = "%s:%s" % (self.name, self.tag) self.log("Building image %s" % image_name) self.results['actions'].append("Built image %s from %s" % (image_name, self.path)) self.results['changed'] = True if not self.check_mode: self.results['image'] = self.build_image() elif self.load_path: # Load the image from an archive if not os.path.isfile(self.load_path): self.fail("Error loading image %s. Specified path %s does not exist." % (self.name, self.load_path)) image_name = self.name if self.tag: image_name = "%s:%s" % (self.name, self.tag) self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path)) self.results['changed'] = True if not self.check_mode: self.results['image'] = self.load_image() else: # pull the image self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag)) self.results['changed'] = True if not self.check_mode: self.results['image'] = self.client.pull_image(self.name, tag=self.tag) if self.archive_path: self.archive_image(self.name, self.tag) if self.push and not self.repository: self.push_image(self.name, self.tag) elif self.repository: self.tag_image(self.name, self.tag, self.repository, force=self.force, push=self.push) def absent(self): ''' Handles state = 'absent', which removes an image. :return None ''' image = self.client.find_image(self.name, self.tag) if image: name = self.name if self.tag: name = "%s:%s" % (self.name, self.tag) if not self.check_mode: try: self.client.remove_image(name, force=self.force) except Exception as exc: self.fail("Error removing image %s - %s" % (name, str(exc))) self.results['changed'] = True self.results['actions'].append("Removed image %s" % (name)) self.results['image']['state'] = 'Deleted' def archive_image(self, name, tag): ''' Archive an image to a .tar file. Called when archive_path is passed. :param name - name of the image. Type: str :return None ''' if not tag: tag = "latest" image = self.client.find_image(name=name, tag=tag) if not image: self.log("archive image: image %s:%s not found" % (name, tag)) return image_name = "%s:%s" % (name, tag) self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path)) self.results['changed'] = True if not self.check_mode: self.log("Getting archive of image %s" % image_name) try: image = self.client.get_image(image_name) except Exception as exc: self.fail("Error getting image %s - %s" % (image_name, str(exc))) try: image_tar = open(self.archive_path, 'w') image_tar.write(image.data) image_tar.close() except Exception as exc: self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc))) image = self.client.find_image(name=name, tag=tag) if image: self.results['image'] = image def push_image(self, name, tag=None): ''' If the name of the image contains a repository path, then push the image. :param name Name of the image to push. :param tag Use a specific tag. :return: None ''' repository = name if not tag: repository, tag = parse_repository_tag(name) registry, repo_name = resolve_repository_name(repository) self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag)) if registry: self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag)) self.results['changed'] = True if not self.check_mode: status = None try: for line in self.client.push(repository, tag=tag, stream=True, decode=True): self.log(line, pretty_print=True) if line.get('errorDetail'): raise Exception(line['errorDetail']['message']) status = line.get('status') except Exception as exc: if re.search('unauthorized', str(exc)): if re.search('authentication required', str(exc)): self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." % (registry, repo_name, tag, str(exc), registry)) else: self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" % (registry, repo_name, tag, str(exc))) self.fail("Error pushing image %s: %s" % (repository, str(exc))) self.results['image'] = self.client.find_image(name=repository, tag=tag) if not self.results['image']: self.results['image'] = dict() self.results['image']['push_status'] = status def tag_image(self, name, tag, repository, force=False, push=False): ''' Tag an image into a repository. :param name: name of the image. required. :param tag: image tag. :param repository: path to the repository. required. :param force: bool. force tagging, even it image already exists with the repository path. :param push: bool. push the image once it's tagged. :return: None ''' repo, repo_tag = parse_repository_tag(repository) if not repo_tag: repo_tag = "latest" if tag: repo_tag = tag image = self.client.find_image(name=repo, tag=repo_tag) found = 'found' if image else 'not found' self.log("image %s was %s" % (repo, found)) if not image or force: self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag)) self.results['changed'] = True self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag)) if not self.check_mode: try: # Finding the image does not always work, especially running a localhost registry. In those # cases, if we don't set force=True, it errors. image_name = name if tag and not re.search(tag, name): image_name = "%s:%s" % (name, tag) tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True) if not tag_status: raise Exception("Tag operation failed.") except Exception as exc: self.fail("Error: failed to tag image - %s" % str(exc)) self.results['image'] = self.client.find_image(name=repo, tag=repo_tag) if push: self.push_image(repo, repo_tag) def build_image(self): ''' Build an image :return: image dict ''' params = dict( path=self.path, tag=self.name, rm=self.rm, nocache=self.nocache, stream=True, timeout=self.http_timeout, pull=self.pull, forcerm=self.rm, dockerfile=self.dockerfile, decode=True ) build_output = [] if self.tag: params['tag'] = "%s:%s" % (self.name, self.tag) if self.container_limits: params['container_limits'] = self.container_limits if self.buildargs: for key, value in self.buildargs.items(): if not isinstance(value, basestring): self.buildargs[key] = str(value) params['buildargs'] = self.buildargs for line in self.client.build(**params): # line = json.loads(line) self.log(line, pretty_print=True) if "stream" in line: build_output.append(line["stream"]) if line.get('error'): if line.get('errorDetail'): errorDetail = line.get('errorDetail') self.fail( "Error building %s - code: %s, message: %s, logs: %s" % ( self.name, errorDetail.get('code'), errorDetail.get('message'), build_output)) else: self.fail("Error building %s - message: %s, logs: %s" % ( self.name, line.get('error'), build_output)) return self.client.find_image(name=self.name, tag=self.tag) def load_image(self): ''' Load an image from a .tar archive :return: image dict ''' try: self.log("Opening image %s" % self.load_path) image_tar = open(self.load_path, 'r') except Exception as exc: self.fail("Error opening image %s - %s" % (self.load_path, str(exc))) try: self.log("Loading image from %s" % self.load_path) self.client.load_image(image_tar) except Exception as exc: self.fail("Error loading image %s - %s" % (self.name, str(exc))) try: image_tar.close() except Exception as exc: self.fail("Error closing image %s - %s" % (self.name, str(exc))) return self.client.find_image(self.name, self.tag) def main(): argument_spec = dict( archive_path=dict(type='path'), container_limits=dict(type='dict'), dockerfile=dict(type='str'), force=dict(type='bool', default=False), http_timeout=dict(type='int'), load_path=dict(type='path'), name=dict(type='str', required=True), nocache=dict(type='str', default=False), path=dict(type='path', aliases=['build_path']), pull=dict(type='bool', default=True), push=dict(type='bool', default=False), repository=dict(type='str'), rm=dict(type='bool', default=True), state=dict(type='str', choices=['absent', 'present', 'build'], default='present'), tag=dict(type='str', default='latest'), use_tls=dict(type='str', default='no', choices=['no', 'encrypt', 'verify']), buildargs=dict(type='dict', default=None), ) client = AnsibleDockerClient( argument_spec=argument_spec, supports_check_mode=True, ) results = dict( changed=False, actions=[], image={} ) ImageManager(client, results) client.module.exit_json(**results) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
shenyy/lily2-gem5
refs/heads/master
src/mem/slicc/ast/PairAST.py
92
# Copyright (c) 2009 The Hewlett-Packard Development Company # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from slicc.ast.AST import AST class PairAST(AST): def __init__(self, slicc, key, value): super(PairAST, self).__init__(slicc) self.key = key self.value = value def __repr__(self): return '[%s=%s]' % (self.key, self.value)
tehamalab/dgs
refs/heads/master
goals/migrations/0027_modify_translated_fields.py
1
# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-06-29 06:04 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('goals', '0026_move_agency_source_and_stats_available_fields_from_indicator_to_component'), ] operations = [ migrations.AddField( model_name='component', name='name_en', field=models.CharField(max_length=255, null=True, verbose_name='Component name'), ), migrations.AddField( model_name='component', name='name_sw', field=models.CharField(max_length=255, null=True, verbose_name='Component name'), ), migrations.AddField( model_name='indicator', name='name_en', field=models.CharField(max_length=255, null=True, verbose_name='Indicator'), ), migrations.AddField( model_name='indicator', name='name_sw', field=models.CharField(max_length=255, null=True, verbose_name='Indicator'), ), migrations.AddField( model_name='target', name='name_en', field=models.CharField(max_length=255, null=True, verbose_name='Target'), ), migrations.AddField( model_name='target', name='name_sw', field=models.CharField(max_length=255, null=True, verbose_name='Target'), ), ]
myint/pyfuzz
refs/heads/master
pygen/qndispatch/qndispatch.py
1
import inspect class InvalidArgException(Exception): pass class DispatchTargetException(Exception): pass class Dispatcher(object): """General dispatcher object.""" def __init__(self, name, argnum, args): self.name = name self.argnum = argnum self.args = args self.functions = [] def get(self, args, kw): """Get function to dispatch to based on args and kw.""" if self.name in kw: arg = kw[self.name] else: arg = args[self.argnum] for reg_arg, reg_func in self.functions: if isinstance(arg, reg_arg): return reg_func raise InvalidArgException( "No function found for type %s." % str(type(arg))) def register(self, func, arg): """Register a new function with the dispatcher.""" args = inspect.getargspec(func) if args != self.args: raise DispatchTargetException( "Target arguments to not match with source.") self.functions.append((arg, func)) def on(name): """Dispatch on argument "name".""" def on_decorate(func): args = inspect.getargspec(func) argnum = args.args.index(name) dispatcher = Dispatcher(name, argnum, args) def decorator(*args, **kw): return dispatcher.get(args, kw)(*args, **kw) def when(arg): def when_decorate(func): dispatcher.register(func, arg) return decorator return when_decorate decorator.when = when return decorator return on_decorate
blazek/lrs
refs/heads/master
lrs/lrs/lrslayerroute.py
1
# -*- coding: utf-8 -*- """ /*************************************************************************** LrsPlugin A QGIS plugin Linear reference system builder and editor ------------------- begin : 2017-5-29 copyright : (C) 2017 by Radim Blažek email : radim.blazek@gmail.com ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ from .lrsroutebase import LrsRouteBase # Route loaded from LRS layer class LrsLayerRoute(LrsRouteBase): def __init__(self, routeId,**kwargs): super(LrsLayerRoute, self).__init__(routeId,**kwargs)
darkleons/BE
refs/heads/master
addons/website_certification/__init__.py
385
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import certification import controllers
fangxingli/hue
refs/heads/master
desktop/core/ext-py/django-openid-auth-0.5/django_openid_auth/tests/__init__.py
44
# django-openid-auth - OpenID integration for django.contrib.auth # # Copyright (C) 2009-2013 Canonical Ltd. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import unittest from test_views import * from test_store import * from test_auth import * from test_admin import * def suite(): suite = unittest.TestSuite() for name in ['test_auth', 'test_store', 'test_views', 'test_admin']: mod = __import__('%s.%s' % (__name__, name), {}, {}, ['suite']) suite.addTest(mod.suite()) return suite
markflyhigh/incubator-beam
refs/heads/master
sdks/python/apache_beam/io/gcp/datastore/v1/util_test.py
2
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Tests for util.py.""" from __future__ import absolute_import import unittest from apache_beam.io.gcp.datastore.v1 import util class MovingSumTest(unittest.TestCase): TIMESTAMP = 1500000000 def test_bad_bucket_size(self): with self.assertRaises(ValueError): _ = util.MovingSum(1, 0) def test_bad_window_size(self): with self.assertRaises(ValueError): _ = util.MovingSum(1, 2) def test_no_data(self): ms = util.MovingSum(10, 1) self.assertEqual(0, ms.sum(MovingSumTest.TIMESTAMP)) self.assertEqual(0, ms.count(MovingSumTest.TIMESTAMP)) self.assertFalse(ms.has_data(MovingSumTest.TIMESTAMP)) def test_one_data_point(self): ms = util.MovingSum(10, 1) ms.add(MovingSumTest.TIMESTAMP, 5) self.assertEqual(5, ms.sum(MovingSumTest.TIMESTAMP)) self.assertEqual(1, ms.count(MovingSumTest.TIMESTAMP)) self.assertTrue(ms.has_data(MovingSumTest.TIMESTAMP)) def test_aggregates_within_window(self): ms = util.MovingSum(10, 1) ms.add(MovingSumTest.TIMESTAMP, 5) ms.add(MovingSumTest.TIMESTAMP+1, 3) ms.add(MovingSumTest.TIMESTAMP+2, 7) self.assertEqual(15, ms.sum(MovingSumTest.TIMESTAMP+3)) self.assertEqual(3, ms.count(MovingSumTest.TIMESTAMP+3)) def test_data_expires_from_moving_window(self): ms = util.MovingSum(5, 1) ms.add(MovingSumTest.TIMESTAMP, 5) ms.add(MovingSumTest.TIMESTAMP+3, 3) ms.add(MovingSumTest.TIMESTAMP+6, 7) self.assertEqual(10, ms.sum(MovingSumTest.TIMESTAMP+7)) self.assertEqual(2, ms.count(MovingSumTest.TIMESTAMP+7)) class DynamicWriteBatcherTest(unittest.TestCase): def setUp(self): self._batcher = util.DynamicBatchSizer() # If possible, keep these test cases aligned with the Java test cases in # DatastoreV1Test.java def test_no_data(self): self.assertEqual(util.WRITE_BATCH_INITIAL_SIZE, self._batcher.get_batch_size(0)) def test_fast_queries(self): self._batcher.report_latency(0, 1000, 200) self._batcher.report_latency(0, 1000, 200) self.assertEqual(util.WRITE_BATCH_MAX_SIZE, self._batcher.get_batch_size(0)) def test_slow_queries(self): self._batcher.report_latency(0, 10000, 200) self._batcher.report_latency(0, 10000, 200) self.assertEqual(100, self._batcher.get_batch_size(0)) def test_size_not_below_minimum(self): self._batcher.report_latency(0, 30000, 50) self._batcher.report_latency(0, 30000, 50) self.assertEqual(util.WRITE_BATCH_MIN_SIZE, self._batcher.get_batch_size(0)) def test_sliding_window(self): self._batcher.report_latency(0, 30000, 50) self._batcher.report_latency(50000, 5000, 200) self._batcher.report_latency(100000, 5000, 200) self.assertEqual(200, self._batcher.get_batch_size(150000)) if __name__ == '__main__': unittest.main()
dslutz/qemu
refs/heads/master
tests/qapi-schema/test-qapi.py
11
#!/usr/bin/env python3 # # QAPI parser test harness # # Copyright (c) 2013 Red Hat Inc. # # Authors: # Markus Armbruster <armbru@redhat.com> # # This work is licensed under the terms of the GNU GPL, version 2 or later. # See the COPYING file in the top-level directory. # import argparse import difflib import os import sys from io import StringIO from qapi.error import QAPIError from qapi.schema import QAPISchema, QAPISchemaVisitor class QAPISchemaTestVisitor(QAPISchemaVisitor): def visit_module(self, name): print('module %s' % name) def visit_include(self, name, info): print('include %s' % name) def visit_enum_type(self, name, info, ifcond, features, members, prefix): print('enum %s' % name) if prefix: print(' prefix %s' % prefix) for m in members: print(' member %s' % m.name) self._print_if(m.ifcond, indent=8) self._print_if(ifcond) self._print_features(features) def visit_array_type(self, name, info, ifcond, element_type): if not info: return # suppress built-in arrays print('array %s %s' % (name, element_type.name)) self._print_if(ifcond) def visit_object_type(self, name, info, ifcond, features, base, members, variants): print('object %s' % name) if base: print(' base %s' % base.name) for m in members: print(' member %s: %s optional=%s' % (m.name, m.type.name, m.optional)) self._print_if(m.ifcond, 8) self._print_features(m.features, indent=8) self._print_variants(variants) self._print_if(ifcond) self._print_features(features) def visit_alternate_type(self, name, info, ifcond, features, variants): print('alternate %s' % name) self._print_variants(variants) self._print_if(ifcond) self._print_features(features) def visit_command(self, name, info, ifcond, features, arg_type, ret_type, gen, success_response, boxed, allow_oob, allow_preconfig): print('command %s %s -> %s' % (name, arg_type and arg_type.name, ret_type and ret_type.name)) print(' gen=%s success_response=%s boxed=%s oob=%s preconfig=%s' % (gen, success_response, boxed, allow_oob, allow_preconfig)) self._print_if(ifcond) self._print_features(features) def visit_event(self, name, info, ifcond, features, arg_type, boxed): print('event %s %s' % (name, arg_type and arg_type.name)) print(' boxed=%s' % boxed) self._print_if(ifcond) self._print_features(features) @staticmethod def _print_variants(variants): if variants: print(' tag %s' % variants.tag_member.name) for v in variants.variants: print(' case %s: %s' % (v.name, v.type.name)) QAPISchemaTestVisitor._print_if(v.ifcond, indent=8) @staticmethod def _print_if(ifcond, indent=4): if ifcond: print('%sif %s' % (' ' * indent, ifcond)) @classmethod def _print_features(cls, features, indent=4): if features: for f in features: print('%sfeature %s' % (' ' * indent, f.name)) cls._print_if(f.ifcond, indent + 4) def test_frontend(fname): schema = QAPISchema(fname) schema.visit(QAPISchemaTestVisitor()) for doc in schema.docs: if doc.symbol: print('doc symbol=%s' % doc.symbol) else: print('doc freeform') print(' body=\n%s' % doc.body.text) for arg, section in doc.args.items(): print(' arg=%s\n%s' % (arg, section.text)) for feat, section in doc.features.items(): print(' feature=%s\n%s' % (feat, section.text)) for section in doc.sections: print(' section=%s\n%s' % (section.name, section.text)) def test_and_diff(test_name, dir_name, update): sys.stdout = StringIO() try: test_frontend(os.path.join(dir_name, test_name + '.json')) except QAPIError as err: if err.info.fname is None: print("%s" % err, file=sys.stderr) return 2 errstr = str(err) + '\n' if dir_name: errstr = errstr.replace(dir_name + '/', '') actual_err = errstr.splitlines(True) else: actual_err = [] finally: actual_out = sys.stdout.getvalue().splitlines(True) sys.stdout.close() sys.stdout = sys.__stdout__ mode = 'r+' if update else 'r' try: outfp = open(os.path.join(dir_name, test_name + '.out'), mode) errfp = open(os.path.join(dir_name, test_name + '.err'), mode) expected_out = outfp.readlines() expected_err = errfp.readlines() except IOError as err: print("%s: can't open '%s': %s" % (sys.argv[0], err.filename, err.strerror), file=sys.stderr) return 2 if actual_out == expected_out and actual_err == expected_err: return 0 print("%s %s" % (test_name, 'UPDATE' if update else 'FAIL'), file=sys.stderr) out_diff = difflib.unified_diff(expected_out, actual_out, outfp.name) err_diff = difflib.unified_diff(expected_err, actual_err, errfp.name) sys.stdout.writelines(out_diff) sys.stdout.writelines(err_diff) if not update: return 1 try: outfp.truncate(0) outfp.seek(0) outfp.writelines(actual_out) errfp.truncate(0) errfp.seek(0) errfp.writelines(actual_err) except IOError as err: print("%s: can't write '%s': %s" % (sys.argv[0], err.filename, err.strerror), file=sys.stderr) return 2 return 0 def main(argv): parser = argparse.ArgumentParser( description='QAPI schema tester') parser.add_argument('-d', '--dir', action='store', default='', help="directory containing tests") parser.add_argument('-u', '--update', action='store_true', help="update expected test results") parser.add_argument('tests', nargs='*', metavar='TEST', action='store') args = parser.parse_args() status = 0 for t in args.tests: (dir_name, base_name) = os.path.split(t) dir_name = dir_name or args.dir test_name = os.path.splitext(base_name)[0] status |= test_and_diff(test_name, dir_name, args.update) exit(status) if __name__ == '__main__': main(sys.argv) exit(0)
dagnir/servo
refs/heads/master
tests/wpt/web-platform-tests/encoding/resources/single-byte-raw.py
228
def main(request, response): response.headers.set("Content-Type", "text/plain;charset=" + request.GET.first("label")) response.content = "".join(chr(byte) for byte in xrange(255))
ozgurozmen/OptimizedPKCSuite
refs/heads/master
ATmega2560/micro-ecc/emk_rules.py
49
c, link = emk.module("c", "link") emk.subdir("test")
enkore/i3pystatus
refs/heads/master
i3pystatus/mail/imap.py
5
from i3pystatus.core.util import require, internet try: from imaplib2.imaplib2 import IMAP4, IMAP4_SSL use_idle = True except ImportError: from imaplib import IMAP4, IMAP4_SSL use_idle = False import contextlib import time import socket from threading import Thread from i3pystatus.mail import Backend IMAP_EXCEPTIONS = (socket.error, socket.gaierror, IMAP4.abort, IMAP4.error) class IMAP(Backend): """ Checks for mail on a IMAP server """ settings = ( "host", "port", "username", "password", ('keyring_backend', 'alternative keyring backend for retrieving credentials'), "ssl", "mailbox", ) required = ("host", "username", "password") keyring_backend = None port = 993 ssl = True mailbox = "INBOX" imap_class = IMAP4 connection = None last = 0 def init(self): if self.ssl: self.imap_class = IMAP4_SSL if use_idle: self.thread = Thread(target=self._idle_thread) self.daemon = True self.thread.start() @contextlib.contextmanager def ensure_connection(self): try: if self.connection: self.connection.select(self.mailbox) if not self.connection: self.connection = self.imap_class(self.host, self.port) self.connection.login(self.username, self.password) self.connection.select(self.mailbox) yield except IMAP_EXCEPTIONS: # NOTE(sileht): retry just once if the connection have been # broken to ensure this is not a sporadic connection lost. # Like wifi reconnect, sleep wake up try: self.connection.close() except IMAP_EXCEPTIONS: pass try: self.connection.logout() except IMAP_EXCEPTIONS: pass # Wait a bit when disconnection occurs to not hog the cpu time.sleep(1) self.connection = None def _idle_thread(self): # update mail count on startup with self.ensure_connection(): self.count_new_mail() while True: with self.ensure_connection(): # Block until new mails self.connection.idle() # Read how many self.count_new_mail() def count_new_mail(self): self.last = len(self.connection.search(None, "UnSeen")[1][0].split()) @property @require(internet) def unread(self): if not use_idle: with self.ensure_connection(): self.count_new_mail() return self.last Backend = IMAP
avikurapati/elasticsearch
refs/heads/master
dev-tools/create_bwc_index.py
7
# Licensed to Elasticsearch under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on # an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. import argparse import glob import logging import os import random import shutil import subprocess import sys import tempfile import time DEFAULT_TRANSPORT_TCP_PORT = 9300 DEFAULT_HTTP_TCP_PORT = 9200 if sys.version_info[0] < 3: print('%s must use python 3.x (for the ES python client)' % sys.argv[0]) from datetime import datetime try: from elasticsearch import Elasticsearch from elasticsearch.exceptions import ConnectionError from elasticsearch.exceptions import TransportError except ImportError as e: print('Can\'t import elasticsearch please install `sudo pip3 install elasticsearch`') sys.exit(1) # sometimes returns True def rarely(): return random.randint(0, 10) == 0 # usually returns True def frequently(): return not rarely() # asserts the correctness of the given hits given they are sorted asc def assert_sort(hits): values = [hit['sort'] for hit in hits['hits']['hits']] assert len(values) > 0, 'expected non emtpy result' val = min(values) for x in values: assert x >= val, '%s >= %s' % (x, val) val = x # Indexes the given number of document into the given index # and randomly runs refresh, optimize and flush commands def index_documents(es, index_name, type, num_docs): logging.info('Indexing %s docs' % num_docs) for id in range(0, num_docs): es.index(index=index_name, doc_type=type, id=id, body={'string': str(random.randint(0, 100)), 'long_sort': random.randint(0, 100), 'double_sort' : float(random.randint(0, 100)), 'bool' : random.choice([True, False])}) if rarely(): es.indices.refresh(index=index_name) if rarely(): es.indices.flush(index=index_name, force=frequently()) logging.info('Flushing index') es.indices.flush(index=index_name) def delete_by_query(es, version, index_name, doc_type): logging.info('Deleting long_sort:[10..20] docs') query = {'query': {'range': {'long_sort': {'gte': 10, 'lte': 20}}}} if version.startswith('0.') or version in ('1.0.0.Beta1', '1.0.0.Beta2'): # TODO #10262: we can't write DBQ into the translog for these old versions until we fix this back-compat bug: # #4074: these versions don't expect to see the top-level 'query' to count/delete_by_query: query = query['query'] return deleted_count = es.count(index=index_name, doc_type=doc_type, body=query)['count'] result = es.delete_by_query(index=index_name, doc_type=doc_type, body=query) # make sure no shards failed: assert result['_indices'][index_name]['_shards']['failed'] == 0, 'delete by query failed: %s' % result logging.info('Deleted %d docs' % deleted_count) def run_basic_asserts(es, index_name, type, num_docs): count = es.count(index=index_name)['count'] assert count == num_docs, 'Expected %r but got %r documents' % (num_docs, count) for _ in range(0, num_docs): random_doc_id = random.randint(0, num_docs-1) doc = es.get(index=index_name, doc_type=type, id=random_doc_id) assert doc, 'Expected document for id %s but got %s' % (random_doc_id, doc) assert_sort(es.search(index=index_name, body={ 'sort': [ {'double_sort': {'order': 'asc'}} ] })) assert_sort(es.search(index=index_name, body={ 'sort': [ {'long_sort': {'order': 'asc'}} ] })) def build_version(version_tuple): return '.'.join([str(x) for x in version_tuple]) def build_tuple(version_string): return [int(x) for x in version_string.split('.')] def start_node(version, release_dir, data_dir, repo_dir, tcp_port=DEFAULT_TRANSPORT_TCP_PORT, http_port=DEFAULT_HTTP_TCP_PORT, cluster_name=None): logging.info('Starting node from %s on port %s/%s, data_dir %s' % (release_dir, tcp_port, http_port, data_dir)) if cluster_name is None: cluster_name = 'bwc_index_' + version cmd = [ os.path.join(release_dir, 'bin/elasticsearch'), '-Epath.data=%s' % data_dir, '-Epath.logs=logs', '-Ecluster.name=%s' % cluster_name, '-Enetwork.host=localhost', '-Etransport.tcp.port=%s' % tcp_port, '-Ehttp.port=%s' % http_port, '-Epath.repo=%s' % repo_dir ] if version.startswith('0.') or version.startswith('1.0.0.Beta') : cmd.append('-f') # version before 1.0 start in background automatically return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def install_plugin(version, release_dir, plugin_name): run_plugin(version, release_dir, 'install', [plugin_name]) def remove_plugin(version, release_dir, plugin_name): run_plugin(version, release_dir, 'remove', [plugin_name]) def run_plugin(version, release_dir, plugin_cmd, args): cmd = [os.path.join(release_dir, 'bin/elasticsearch-plugin'), plugin_cmd] + args subprocess.check_call(cmd) def create_client(http_port=DEFAULT_HTTP_TCP_PORT, timeout=30): logging.info('Waiting for node to startup') for _ in range(0, timeout): # TODO: ask Honza if there is a better way to do this? try: client = Elasticsearch([{'host': 'localhost', 'port': http_port}]) client.cluster.health(wait_for_nodes=1) client.count() # can we actually search or do we get a 503? -- anyway retry return client except (ConnectionError, TransportError): pass time.sleep(1) assert False, 'Timed out waiting for node for %s seconds' % timeout def generate_index(client, version, index_name): client.indices.delete(index=index_name, ignore=404) logging.info('Create single shard test index') mappings = {} if not version.startswith('2.'): # TODO: we need better "before/onOr/after" logic in python # backcompat test for legacy type level analyzer settings, see #8874 mappings['analyzer_type1'] = { 'analyzer': 'standard', 'properties': { 'string_with_index_analyzer': { 'type': 'string', 'index_analyzer': 'standard' }, } } # completion type was added in 0.90.3 if not version.startswith('0.20') and version not in ['0.90.0.Beta1', '0.90.0.RC1', '0.90.0.RC2', '0.90.0', '0.90.1', '0.90.2']: mappings['analyzer_type1']['properties']['completion_with_index_analyzer'] = { 'type': 'completion', 'index_analyzer': 'standard' } mappings['analyzer_type2'] = { 'index_analyzer': 'standard', 'search_analyzer': 'keyword', 'search_quote_analyzer': 'english', } mappings['index_name_and_path'] = { 'properties': { 'parent_multi_field': { 'type': 'string', 'path': 'just_name', 'fields': { 'raw': {'type': 'string', 'index': 'not_analyzed', 'index_name': 'raw_multi_field'} } }, 'field_with_index_name': { 'type': 'string', 'index_name': 'custom_index_name_for_field' } } } mappings['meta_fields'] = { '_id': { 'path': 'myid' }, '_routing': { 'path': 'myrouting' }, '_boost': { 'null_value': 2.0 } } mappings['custom_formats'] = { 'properties': { 'string_with_custom_postings': { 'type': 'string', 'postings_format': 'Lucene41' }, 'long_with_custom_doc_values': { 'type': 'long', 'doc_values_format': 'Lucene42' } } } mappings['auto_boost'] = { '_all': { 'auto_boost': True } } mappings['norms'] = { 'properties': { 'string_with_norms_disabled': { 'type': 'string', 'norms': { 'enabled': False } }, 'string_with_norms_enabled': { 'type': 'string', 'index': 'not_analyzed', 'norms': { 'enabled': True, 'loading': 'eager' } } } } mappings['doc'] = { 'properties': { 'string': { 'type': 'string', 'boost': 4 } } } settings = { 'number_of_shards': 1, 'number_of_replicas': 0, } if version.startswith('0.') or version.startswith('1.'): # Same as ES default (60 seconds), but missing the units to make sure they are inserted on upgrade: settings['gc_deletes'] = '60000', # Same as ES default (5 GB), but missing the units to make sure they are inserted on upgrade: settings['merge.policy.max_merged_segment'] = '5368709120' warmers = {} warmers['warmer1'] = { 'source': { 'query': { 'match_all': {} } } } client.indices.create(index=index_name, body={ 'settings': settings, 'mappings': mappings, 'warmers': warmers }) health = client.cluster.health(wait_for_status='green', wait_for_relocating_shards=0) assert health['timed_out'] == False, 'cluster health timed out %s' % health num_docs = random.randint(2000, 3000) if version == "1.1.0": # 1.1.0 is buggy and creates lots and lots of segments, so we create a # lighter index for it to keep bw tests reasonable # see https://github.com/elastic/elasticsearch/issues/5817 num_docs = int(num_docs / 10) index_documents(client, index_name, 'doc', num_docs) logging.info('Running basic asserts on the data added') run_basic_asserts(client, index_name, 'doc', num_docs) def snapshot_index(client, version, repo_dir): # Add bogus persistent settings to make sure they can be restored client.cluster.put_settings(body={ 'persistent': { 'cluster.routing.allocation.exclude.version_attr': version, # Same as ES default (30 seconds), but missing the units to make sure they are inserted on upgrade: 'discovery.zen.publish_timeout': '30000', # Same as ES default (512 KB), but missing the units to make sure they are inserted on upgrade: 'indices.recovery.file_chunk_size': '524288', } }) client.indices.put_template(name='template_' + version.lower(), order=0, body={ "template": "te*", "settings": { "number_of_shards" : 1 }, "mappings": { "type1": { "_source": { "enabled" : False } } }, "aliases": { "alias1": {}, "alias2": { "filter": { "term": {"version" : version } }, "routing": "kimchy" }, "{index}-alias": {} } }) client.snapshot.create_repository(repository='test_repo', body={ 'type': 'fs', 'settings': { 'location': repo_dir } }) client.snapshot.create(repository='test_repo', snapshot='test_1', wait_for_completion=True) client.snapshot.delete_repository(repository='test_repo') def compress_index(version, tmp_dir, output_dir): compress(tmp_dir, output_dir, 'index-%s.zip' % version, 'data') def compress_repo(version, tmp_dir, output_dir): compress(tmp_dir, output_dir, 'repo-%s.zip' % version, 'repo') def compress(tmp_dir, output_dir, zipfile, directory): abs_output_dir = os.path.abspath(output_dir) zipfile = os.path.join(abs_output_dir, zipfile) if os.path.exists(zipfile): os.remove(zipfile) logging.info('Compressing index into %s, tmpDir %s', zipfile, tmp_dir) olddir = os.getcwd() os.chdir(tmp_dir) subprocess.check_call('zip -r %s %s' % (zipfile, directory), shell=True) os.chdir(olddir) def parse_config(): parser = argparse.ArgumentParser(description='Builds an elasticsearch index for backwards compatibility tests') required = parser.add_mutually_exclusive_group(required=True) required.add_argument('versions', metavar='X.Y.Z', nargs='*', default=[], help='The elasticsearch version to build an index for') required.add_argument('--all', action='store_true', default=False, help='Recreate all existing backwards compatibility indexes') parser.add_argument('--releases-dir', '-d', default='backwards', metavar='DIR', help='The directory containing elasticsearch releases') parser.add_argument('--output-dir', '-o', default='core/src/test/resources/indices/bwc', help='The directory to write the zipped index into') parser.add_argument('--tcp-port', default=DEFAULT_TRANSPORT_TCP_PORT, type=int, help='The port to use as the minimum port for TCP communication') parser.add_argument('--http-port', default=DEFAULT_HTTP_TCP_PORT, type=int, help='The port to use as the minimum port for HTTP communication') cfg = parser.parse_args() if not os.path.exists(cfg.output_dir): parser.error('Output directory does not exist: %s' % cfg.output_dir) if not cfg.versions: # --all for bwc_index in glob.glob(os.path.join(cfg.output_dir, 'index-*.zip')): version = os.path.basename(bwc_index)[len('index-'):-len('.zip')] cfg.versions.append(version) return cfg def create_bwc_index(cfg, version): logging.info('--> Creating bwc index for %s' % version) release_dir = os.path.join(cfg.releases_dir, 'elasticsearch-%s' % version) if not os.path.exists(release_dir): raise RuntimeError('ES version %s does not exist in %s' % (version, cfg.releases_dir)) snapshot_supported = not (version.startswith('0.') or version == '1.0.0.Beta1') tmp_dir = tempfile.mkdtemp() data_dir = os.path.join(tmp_dir, 'data') repo_dir = os.path.join(tmp_dir, 'repo') logging.info('Temp data dir: %s' % data_dir) logging.info('Temp repo dir: %s' % repo_dir) node = None try: node = start_node(version, release_dir, data_dir, repo_dir, cfg.tcp_port, cfg.http_port) client = create_client(cfg.http_port) index_name = 'index-%s' % version.lower() generate_index(client, version, index_name) if snapshot_supported: snapshot_index(client, version, repo_dir) # 10067: get a delete-by-query into the translog on upgrade. We must do # this after the snapshot, because it calls flush. Otherwise the index # will already have the deletions applied on upgrade. if version.startswith('0.') or version.startswith('1.'): delete_by_query(client, version, index_name, 'doc') shutdown_node(node) node = None compress_index(version, tmp_dir, cfg.output_dir) if snapshot_supported: compress_repo(version, tmp_dir, cfg.output_dir) finally: if node is not None: # This only happens if we've hit an exception: shutdown_node(node) shutil.rmtree(tmp_dir) def shutdown_node(node): logging.info('Shutting down node with pid %d', node.pid) node.terminate() node.wait() def main(): logging.basicConfig(format='[%(levelname)s] [%(asctime)s] %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %I:%M:%S %p') logging.getLogger('elasticsearch').setLevel(logging.ERROR) logging.getLogger('urllib3').setLevel(logging.WARN) cfg = parse_config() for version in cfg.versions: create_bwc_index(cfg, version) if __name__ == '__main__': try: main() except KeyboardInterrupt: print('Caught keyboard interrupt, exiting...')
StefanRijnhart/server-tools
refs/heads/7.0
auth_admin_passkey/__openerp__.py
2
# -*- encoding: utf-8 -*- ############################################################################## # # Admin Passkey module for OpenERP # Copyright (C) 2013-2014 GRAP (http://www.grap.coop) # @author Sylvain LE GAL (https://twitter.com/legalsylvain) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Authentification - Admin Passkey', 'version': '2.1.1', 'category': 'base', 'description': """ Admin password become a passkey for all active logins ===================================================== Functionality : --------------- * Administrator has now the possibility to login in with any login; * By default, OpenERP will send a mail to user and admin to indicate them; * If a user and the admin have the same password, admin will be informed; Technical information : ----------------------- * Create two ir_config_parameter to enable / disable mail sending; Copyright, Author and Licence : ------------------------------- * Copyright : 2014, Groupement Régional Alimentaire de Proximité; * Author : Sylvain LE GAL (https://twitter.com/legalsylvain); * Licence : AGPL-3 (http://www.gnu.org/licenses/) """, 'author': "GRAP,Odoo Community Association (OCA)", 'website': 'http://www.grap.coop', 'license': 'AGPL-3', 'depends': [ 'mail', ], 'data': [ 'data/ir_config_parameter.xml', 'view/res_config_view.xml', ], 'demo': [], 'js': [], 'css': [], 'qweb': [], 'images': [], 'post_load': '', 'application': False, 'installable': True, 'auto_install': False, }
edevil/django
refs/heads/master
django/conf/locale/cy/formats.py
160
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j F Y' # '25 Hydref 2006' TIME_FORMAT = 'P' # '2:30 y.b.' DATETIME_FORMAT = 'j F Y, P' # '25 Hydref 2006, 2:30 y.b.' YEAR_MONTH_FORMAT = 'F Y' # 'Hydref 2006' MONTH_DAY_FORMAT = 'j F' # '25 Hydref' SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006' SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 y.b.' FIRST_DAY_OF_WEEK = 1 # 'Dydd Llun' # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06' ) DATETIME_INPUT_FORMATS = ( '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59' '%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200' '%d/%m/%Y %H:%M', # '25/10/2006 14:30' '%d/%m/%Y', # '25/10/2006' '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59' '%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200' '%d/%m/%y %H:%M', # '25/10/06 14:30' '%d/%m/%y', # '25/10/06' ) DECIMAL_SEPARATOR = '.' THOUSAND_SEPARATOR = ',' NUMBER_GROUPING = 3
EzyInsights/Diamond
refs/heads/master
src/collectors/entropy/entropy.py
48
# coding=utf-8 """ Uses /proc to collect available entropy #### Dependencies * /proc/sys/kernel/random/entropy_avail """ import diamond.collector import os class EntropyStatCollector(diamond.collector.Collector): PROC = '/proc/sys/kernel/random/entropy_avail' def get_default_config(self): """ Returns the default collector settings """ config = super(EntropyStatCollector, self).get_default_config() config.update({ 'path': 'entropy' }) return config def collect(self): if not os.access(self.PROC, os.R_OK): return None # open file entropy_file = open(self.PROC) # read value entropy = entropy_file.read().strip() # Close file entropy_file.close() # Publish value self.publish_gauge("available", entropy)
js0701/chromium-crosswalk
refs/heads/master
third_party/tlslite/tlslite/errors.py
111
# Authors: # Trevor Perrin # Dave Baggett (Arcode Corporation) - Added TLSUnsupportedError. # # See the LICENSE file for legal information regarding use of this file. """Exception classes. @sort: TLSError, TLSAbruptCloseError, TLSAlert, TLSLocalAlert, TLSRemoteAlert, TLSAuthenticationError, TLSNoAuthenticationError, TLSAuthenticationTypeError, TLSFingerprintError, TLSAuthorizationError, TLSValidationError, TLSFaultError, TLSUnsupportedError """ import socket from .constants import AlertDescription, AlertLevel class TLSError(Exception): """Base class for all TLS Lite exceptions.""" def __str__(self): """"At least print out the Exception time for str(...).""" return repr(self) class TLSClosedConnectionError(TLSError, socket.error): """An attempt was made to use the connection after it was closed.""" pass class TLSAbruptCloseError(TLSError): """The socket was closed without a proper TLS shutdown. The TLS specification mandates that an alert of some sort must be sent before the underlying socket is closed. If the socket is closed without this, it could signify that an attacker is trying to truncate the connection. It could also signify a misbehaving TLS implementation, or a random network failure. """ pass class TLSAlert(TLSError): """A TLS alert has been signalled.""" pass _descriptionStr = {\ AlertDescription.close_notify: "close_notify",\ AlertDescription.unexpected_message: "unexpected_message",\ AlertDescription.bad_record_mac: "bad_record_mac",\ AlertDescription.decryption_failed: "decryption_failed",\ AlertDescription.record_overflow: "record_overflow",\ AlertDescription.decompression_failure: "decompression_failure",\ AlertDescription.handshake_failure: "handshake_failure",\ AlertDescription.no_certificate: "no certificate",\ AlertDescription.bad_certificate: "bad_certificate",\ AlertDescription.unsupported_certificate: "unsupported_certificate",\ AlertDescription.certificate_revoked: "certificate_revoked",\ AlertDescription.certificate_expired: "certificate_expired",\ AlertDescription.certificate_unknown: "certificate_unknown",\ AlertDescription.illegal_parameter: "illegal_parameter",\ AlertDescription.unknown_ca: "unknown_ca",\ AlertDescription.access_denied: "access_denied",\ AlertDescription.decode_error: "decode_error",\ AlertDescription.decrypt_error: "decrypt_error",\ AlertDescription.export_restriction: "export_restriction",\ AlertDescription.protocol_version: "protocol_version",\ AlertDescription.insufficient_security: "insufficient_security",\ AlertDescription.internal_error: "internal_error",\ AlertDescription.inappropriate_fallback: "inappropriate_fallback",\ AlertDescription.user_canceled: "user_canceled",\ AlertDescription.no_renegotiation: "no_renegotiation",\ AlertDescription.unknown_psk_identity: "unknown_psk_identity"} class TLSLocalAlert(TLSAlert): """A TLS alert has been signalled by the local implementation. @type description: int @ivar description: Set to one of the constants in L{tlslite.constants.AlertDescription} @type level: int @ivar level: Set to one of the constants in L{tlslite.constants.AlertLevel} @type message: str @ivar message: Description of what went wrong. """ def __init__(self, alert, message=None): self.description = alert.description self.level = alert.level self.message = message def __str__(self): alertStr = TLSAlert._descriptionStr.get(self.description) if alertStr == None: alertStr = str(self.description) if self.message: return alertStr + ": " + self.message else: return alertStr class TLSRemoteAlert(TLSAlert): """A TLS alert has been signalled by the remote implementation. @type description: int @ivar description: Set to one of the constants in L{tlslite.constants.AlertDescription} @type level: int @ivar level: Set to one of the constants in L{tlslite.constants.AlertLevel} """ def __init__(self, alert): self.description = alert.description self.level = alert.level def __str__(self): alertStr = TLSAlert._descriptionStr.get(self.description) if alertStr == None: alertStr = str(self.description) return alertStr class TLSAuthenticationError(TLSError): """The handshake succeeded, but the other party's authentication was inadequate. This exception will only be raised when a L{tlslite.Checker.Checker} has been passed to a handshake function. The Checker will be invoked once the handshake completes, and if the Checker objects to how the other party authenticated, a subclass of this exception will be raised. """ pass class TLSNoAuthenticationError(TLSAuthenticationError): """The Checker was expecting the other party to authenticate with a certificate chain, but this did not occur.""" pass class TLSAuthenticationTypeError(TLSAuthenticationError): """The Checker was expecting the other party to authenticate with a different type of certificate chain.""" pass class TLSFingerprintError(TLSAuthenticationError): """The Checker was expecting the other party to authenticate with a certificate chain that matches a different fingerprint.""" pass class TLSAuthorizationError(TLSAuthenticationError): """The Checker was expecting the other party to authenticate with a certificate chain that has a different authorization.""" pass class TLSValidationError(TLSAuthenticationError): """The Checker has determined that the other party's certificate chain is invalid.""" def __init__(self, msg, info=None): # Include a dict containing info about this validation failure TLSAuthenticationError.__init__(self, msg) self.info = info class TLSFaultError(TLSError): """The other party responded incorrectly to an induced fault. This exception will only occur during fault testing, when a TLSConnection's fault variable is set to induce some sort of faulty behavior, and the other party doesn't respond appropriately. """ pass class TLSUnsupportedError(TLSError): """The implementation doesn't support the requested (or required) capabilities.""" pass
gsmartway/odoo
refs/heads/8.0
addons/base_import_module/controllers/main.py
354
# -*- coding: utf-8 -*- import functools import openerp from openerp.http import Controller, route, request, Response def webservice(f): @functools.wraps(f) def wrap(*args, **kw): try: return f(*args, **kw) except Exception, e: return Response(response=str(e), status=500) return wrap class ImportModule(Controller): def check_user(self, uid=None): if uid is None: uid = request.uid is_admin = request.registry['res.users'].has_group(request.cr, uid, 'base.group_erp_manager') if not is_admin: raise openerp.exceptions.AccessError("Only administrators can upload a module") @route('/base_import_module/login', type='http', auth='none', methods=['POST']) @webservice def login(self, login, password, db=None): if db and db != request.db: raise Exception("Could not select database '%s'" % db) uid = request.session.authenticate(request.db, login, password) if not uid: return Response(response="Wrong login/password", status=401) self.check_user(uid) return "ok" @route('/base_import_module/upload', type='http', auth='user', methods=['POST']) @webservice def upload(self, mod_file=None, force='', **kw): self.check_user() force = True if force == '1' else False return request.registry['ir.module.module'].import_zipfile(request.cr, request.uid, mod_file, force=force, context=request.context)[0]
bruderstein/PythonScript
refs/heads/master
PythonLib/min/encodings/iso8859_3.py
272
""" Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-3', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' # 0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' # 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\x80' # 0x80 -> <control> '\x81' # 0x81 -> <control> '\x82' # 0x82 -> <control> '\x83' # 0x83 -> <control> '\x84' # 0x84 -> <control> '\x85' # 0x85 -> <control> '\x86' # 0x86 -> <control> '\x87' # 0x87 -> <control> '\x88' # 0x88 -> <control> '\x89' # 0x89 -> <control> '\x8a' # 0x8A -> <control> '\x8b' # 0x8B -> <control> '\x8c' # 0x8C -> <control> '\x8d' # 0x8D -> <control> '\x8e' # 0x8E -> <control> '\x8f' # 0x8F -> <control> '\x90' # 0x90 -> <control> '\x91' # 0x91 -> <control> '\x92' # 0x92 -> <control> '\x93' # 0x93 -> <control> '\x94' # 0x94 -> <control> '\x95' # 0x95 -> <control> '\x96' # 0x96 -> <control> '\x97' # 0x97 -> <control> '\x98' # 0x98 -> <control> '\x99' # 0x99 -> <control> '\x9a' # 0x9A -> <control> '\x9b' # 0x9B -> <control> '\x9c' # 0x9C -> <control> '\x9d' # 0x9D -> <control> '\x9e' # 0x9E -> <control> '\x9f' # 0x9F -> <control> '\xa0' # 0xA0 -> NO-BREAK SPACE '\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE '\u02d8' # 0xA2 -> BREVE '\xa3' # 0xA3 -> POUND SIGN '\xa4' # 0xA4 -> CURRENCY SIGN '\ufffe' '\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX '\xa7' # 0xA7 -> SECTION SIGN '\xa8' # 0xA8 -> DIAERESIS '\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE '\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA '\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE '\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX '\xad' # 0xAD -> SOFT HYPHEN '\ufffe' '\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE '\xb0' # 0xB0 -> DEGREE SIGN '\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE '\xb2' # 0xB2 -> SUPERSCRIPT TWO '\xb3' # 0xB3 -> SUPERSCRIPT THREE '\xb4' # 0xB4 -> ACUTE ACCENT '\xb5' # 0xB5 -> MICRO SIGN '\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX '\xb7' # 0xB7 -> MIDDLE DOT '\xb8' # 0xB8 -> CEDILLA '\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I '\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA '\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE '\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF '\ufffe' '\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\ufffe' '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE '\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS '\ufffe' '\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xd7' # 0xD7 -> MULTIPLICATION SIGN '\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS '\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE '\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\ufffe' '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS '\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE '\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS '\ufffe' '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS '\xf7' # 0xF7 -> DIVISION SIGN '\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS '\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE '\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX '\u02d9' # 0xFF -> DOT ABOVE ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
tianyi33/simple_blog
refs/heads/master
django/conf/locale/ar/formats.py
234
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j F، Y' TIME_FORMAT = 'g:i:s A' # DATETIME_FORMAT = YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'd‏/m‏/Y' # SHORT_DATETIME_FORMAT = # FIRST_DAY_OF_WEEK = # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior # DATE_INPUT_FORMATS = # TIME_INPUT_FORMATS = # DATETIME_INPUT_FORMATS = DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' # NUMBER_GROUPING =
mano3m/CouchPotatoServer
refs/heads/develop_mano3m
couchpotato/core/helpers/variable.py
2
from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss from couchpotato.core.logger import CPLog import hashlib import os.path import platform import random import re import string import sys log = CPLog(__name__) def link(src, dst): if os.name == 'nt': import ctypes if ctypes.windll.kernel32.CreateHardLinkW(unicode(dst), unicode(src), 0) == 0: raise ctypes.WinError() else: os.link(src, dst) def symlink(src, dst): if os.name == 'nt': import ctypes if ctypes.windll.kernel32.CreateSymbolicLinkW(unicode(dst), unicode(src), 1 if os.path.isdir(src) else 0) in [0, 1280]: raise ctypes.WinError() else: os.symlink(src, dst) def getUserDir(): try: import pwd os.environ['HOME'] = pwd.getpwuid(os.geteuid()).pw_dir except: pass return os.path.expanduser('~') def getDownloadDir(): user_dir = getUserDir() # OSX if 'darwin' in platform.platform().lower(): return os.path.join(user_dir, 'Downloads') if os.name == 'nt': return os.path.join(user_dir, 'Downloads') return user_dir def getDataDir(): # Windows if os.name == 'nt': return os.path.join(os.environ['APPDATA'], 'CouchPotato') user_dir = getUserDir() # OSX if 'darwin' in platform.platform().lower(): return os.path.join(user_dir, 'Library', 'Application Support', 'CouchPotato') # FreeBSD if 'freebsd' in sys.platform: return os.path.join('/usr/local/', 'couchpotato', 'data') # Linux return os.path.join(user_dir, '.couchpotato') def isDict(object): return isinstance(object, dict) def mergeDicts(a, b, prepend_list = False): assert isDict(a), isDict(b) dst = a.copy() stack = [(dst, b)] while stack: current_dst, current_src = stack.pop() for key in current_src: if key not in current_dst: current_dst[key] = current_src[key] else: if isDict(current_src[key]) and isDict(current_dst[key]): stack.append((current_dst[key], current_src[key])) elif isinstance(current_src[key], list) and isinstance(current_dst[key], list): current_dst[key] = current_src[key] + current_dst[key] if prepend_list else current_dst[key] + current_src[key] current_dst[key] = removeListDuplicates(current_dst[key]) else: current_dst[key] = current_src[key] return dst def removeListDuplicates(seq): checked = [] for e in seq: if e not in checked: checked.append(e) return checked def flattenList(l): if isinstance(l, list): return sum(map(flattenList, l)) else: return l def md5(text): return hashlib.md5(ss(text)).hexdigest() def sha1(text): return hashlib.sha1(text).hexdigest() def isLocalIP(ip): ip = ip.lstrip('htps:/') regex = '/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1)$/' return re.search(regex, ip) is not None or 'localhost' in ip or ip[:4] == '127.' def getExt(filename): return os.path.splitext(filename)[1][1:] def cleanHost(host): if not host.startswith(('http://', 'https://')): host = 'http://' + host host = host.rstrip('/') host += '/' return host def getImdb(txt, check_inside = False, multiple = False): if not check_inside: txt = simplifyString(txt) else: txt = ss(txt) if check_inside and os.path.isfile(txt): output = open(txt, 'r') txt = output.read() output.close() try: ids = re.findall('(tt\d{7})', txt) if multiple: return list(set(ids)) if len(ids) > 0 else [] return ids[0] except IndexError: pass return False def tryInt(s): try: return int(s) except: return 0 def tryFloat(s): try: if isinstance(s, str): return float(s) if '.' in s else tryInt(s) else: return float(s) except: return 0 def natsortKey(s): return map(tryInt, re.findall(r'(\d+|\D+)', s)) def natcmp(a, b): return cmp(natsortKey(a), natsortKey(b)) def getTitle(library_dict): try: try: return library_dict['titles'][0]['title'] except: try: for title in library_dict.titles: if title.default: return title.title except: try: return library_dict['info']['titles'][0] except: log.error('Could not get title for %s', library_dict.identifier) return None log.error('Could not get title for %s', library_dict['identifier']) return None except: log.error('Could not get title for library item: %s', library_dict) return None def possibleTitles(raw_title): titles = [ toSafeString(raw_title).lower(), raw_title.lower(), simplifyString(raw_title) ] # replace some chars new_title = raw_title.replace('&', 'and') titles.append(simplifyString(new_title)) return list(set(titles)) def randomString(size = 8, chars = string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for x in range(size)) def splitString(str, split_on = ',', clean = True): list = [x.strip() for x in str.split(split_on)] if str else [] return filter(None, list) if clean else list