repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
mysociety/fixmytransport
refs/heads/master
script/domain-analysis/analyse-domains.py
1
#!/usr/bin/python # This script finds the first problem report from each unique email # address, and classifies it according to whether it was confirmed or # unconfirmed. It outputs a CSV file that groups these results by # domain, tell you how many confirmed emails, bounced emails and # neither-confirmed-nor-bounced emails there were per domain. import psycopg2 import datetime from collections import namedtuple, defaultdict import operator import re import csv connection = psycopg2.connect("dbname=fmt user=fmt") c = connection.cursor() start_date = datetime.date(2012, 4, 10) end_date = datetime.date(2012, 7, 1) c.execute('''SELECT email, status_code FROM users, problems WHERE users.id = problems.reporter_id AND problems.created_at >= %s AND problems.created_at < %s ORDER BY problems.created_at''', (start_date, end_date)) reports_domains = defaultdict(int) unconfirmed_domains = defaultdict(int) bounced_domains = defaultdict(int) already_seen_addresses = set() group_domains = {'hotmail.com': 'hotmail', 'hotmail.co.uk': 'hotmail', 'live.co.uk': 'hotmail', 'live.com': 'hotmail', 'live.com.au': 'hotmail', 'googlemail.com': 'gmail', 'gmail.com': 'gmail', 'yahoo.com': 'yahoo', 'yahoo.co.uk': 'yahoo', 'btinternet.com': 'yahoo'} def get_domain(email_address): domain = re.sub(r'^.*@', '', email_address) return group_domains.get(domain, domain) # This file was was generated with find-bounces.py > bounced-addresses bounce_filename = ('/home/fixmytransport/' + 'bounced-addresses-%s-to-%s') % (start_date, end_date) with open(bounce_filename) as fp: for line in fp: email_address = line.strip().lower() if email_address in already_seen_addresses: continue else: already_seen_addresses.add(email_address) domain = get_domain(email_address) bounced_domains[domain] += 1 already_seen_addresses = set() for row in c: email_address = row[0].lower() if email_address in already_seen_addresses: continue else: already_seen_addresses.add(email_address) status_code = row[1] domain = 
get_domain(email_address) reports_domains[domain] += 1 if status_code == 0: unconfirmed_domains[domain] += 1 all_domains = set(reports_domains.keys() + unconfirmed_domains.keys() + bounced_domains.keys()) with open('domain-distribution.csv', 'wb') as fp: writer = csv.writer(fp) writer.writerow(['Domain', 'Fine', 'UnconfirmedNotBounced', 'Bounced']) for domain in all_domains: problems = reports_domains.get(domain, 0) unconfirmed = unconfirmed_domains.get(domain, 0) bounced = bounced_domains.get(domain, 0) fine = problems - unconfirmed unconfirmed_not_bounced = unconfirmed - bounced writer.writerow([domain, fine, unconfirmed_not_bounced, bounced])
fzheng/codejam
refs/heads/master
lib/python2.7/site-packages/ipyparallel/controller/dictdb.py
1
"""A Task logger that presents our DB interface, but exists entirely in memory and implemented with dicts. TaskRecords are dicts of the form:: { 'msg_id' : str(uuid), 'client_uuid' : str(uuid), 'engine_uuid' : str(uuid) or None, 'header' : dict(header), 'content': dict(content), 'buffers': list(buffers), 'submitted': datetime or None, 'started': datetime or None, 'completed': datetime or None, 'received': datetime or None, 'resubmitted': str(uuid) or None, 'result_header' : dict(header) or None, 'result_content' : dict(content) or None, 'result_buffers' : list(buffers) or None, } With this info, many of the special categories of tasks can be defined by query, e.g.: * pending: completed is None * client's outstanding: client_uuid = uuid && completed is None * MIA: arrived is None (and completed is None) DictDB supports a subset of mongodb operators:: $lt,$gt,$lte,$gte,$ne,$in,$nin,$all,$mod,$exists """ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
import copy from copy import deepcopy # Python can't copy memoryviews, but creating another memoryview works for us copy._deepcopy_dispatch[memoryview] = lambda x, memo: memoryview(x) from datetime import datetime from traitlets.config.configurable import LoggingConfigurable from ipython_genutils.py3compat import iteritems, itervalues from traitlets import Dict, Unicode, Integer, Float filters = { '$lt' : lambda a,b: a < b, '$gt' : lambda a,b: b > a, '$eq' : lambda a,b: a == b, '$ne' : lambda a,b: a != b, '$lte': lambda a,b: a <= b, '$gte': lambda a,b: a >= b, '$in' : lambda a,b: a in b, '$nin': lambda a,b: a not in b, '$all': lambda a,b: all([ a in bb for bb in b ]), '$mod': lambda a,b: a%b[0] == b[1], '$exists' : lambda a,b: (b and a is not None) or (a is None and not b) } class CompositeFilter(object): """Composite filter for matching multiple properties.""" def __init__(self, dikt): self.tests = [] self.values = [] for key, value in iteritems(dikt): self.tests.append(filters[key]) self.values.append(value) def __call__(self, value): for test,check in zip(self.tests, self.values): if not test(value, check): return False return True class BaseDB(LoggingConfigurable): """Empty Parent class so traitlets work on DB.""" # base configurable traits: session = Unicode("") class DictDB(BaseDB): """Basic in-memory dict-based object for saving Task Records. This is the first object to present the DB interface for logging tasks out of memory. The interface is based on MongoDB, so adding a MongoDB backend should be straightforward. """ _records = Dict() _culled_ids = set() # set of ids which have been culled _buffer_bytes = Integer(0) # running total of the bytes in the DB size_limit = Integer(1024**3, config=True, help="""The maximum total size (in bytes) of the buffers stored in the db When the db exceeds this size, the oldest records will be culled until the total size is under size_limit * (1-cull_fraction). 
default: 1 GB """ ) record_limit = Integer(1024, config=True, help="""The maximum number of records in the db When the history exceeds this size, the first record_limit * cull_fraction records will be culled. """ ) cull_fraction = Float(0.1, config=True, help="""The fraction by which the db should culled when one of the limits is exceeded In general, the db size will spend most of its time with a size in the range: [limit * (1-cull_fraction), limit] for each of size_limit and record_limit. """ ) def _match_one(self, rec, tests): """Check if a specific record matches tests.""" for key,test in iteritems(tests): if not test(rec.get(key, None)): return False return True def _match(self, check): """Find all the matches for a check dict.""" matches = [] tests = {} for k,v in iteritems(check): if isinstance(v, dict): tests[k] = CompositeFilter(v) else: tests[k] = lambda o: o==v for rec in itervalues(self._records): if self._match_one(rec, tests): matches.append(deepcopy(rec)) return matches def _extract_subdict(self, rec, keys): """extract subdict of keys""" d = {} d['msg_id'] = rec['msg_id'] for key in keys: d[key] = rec[key] return deepcopy(d) # methods for monitoring size / culling history def _add_bytes(self, rec): for key in ('buffers', 'result_buffers'): for buf in rec.get(key) or []: self._buffer_bytes += len(buf) self._maybe_cull() def _drop_bytes(self, rec): for key in ('buffers', 'result_buffers'): for buf in rec.get(key) or []: self._buffer_bytes -= len(buf) def _cull_oldest(self, n=1): """cull the oldest N records""" for msg_id in self.get_history()[:n]: self.log.debug("Culling record: %r", msg_id) self._culled_ids.add(msg_id) self.drop_record(msg_id) def _maybe_cull(self): # cull by count: if len(self._records) > self.record_limit: to_cull = int(self.cull_fraction * self.record_limit) self.log.info("%i records exceeds limit of %i, culling oldest %i", len(self._records), self.record_limit, to_cull ) self._cull_oldest(to_cull) # cull by size: if 
self._buffer_bytes > self.size_limit: limit = self.size_limit * (1 - self.cull_fraction) before = self._buffer_bytes before_count = len(self._records) culled = 0 while self._buffer_bytes > limit: self._cull_oldest(1) culled += 1 self.log.info("%i records with total buffer size %i exceeds limit: %i. Culled oldest %i records.", before_count, before, self.size_limit, culled ) def _check_dates(self, rec): for key in ('submitted', 'started', 'completed'): value = rec.get(key, None) if value is not None and not isinstance(value, datetime): raise ValueError("%s must be None or datetime, not %r" % (key, value)) # public API methods: def add_record(self, msg_id, rec): """Add a new Task Record, by msg_id.""" if msg_id in self._records: raise KeyError("Already have msg_id %r"%(msg_id)) self._check_dates(rec) self._records[msg_id] = rec self._add_bytes(rec) self._maybe_cull() def get_record(self, msg_id): """Get a specific Task Record, by msg_id.""" if msg_id in self._culled_ids: raise KeyError("Record %r has been culled for size" % msg_id) if not msg_id in self._records: raise KeyError("No such msg_id %r"%(msg_id)) return deepcopy(self._records[msg_id]) def update_record(self, msg_id, rec): """Update the data in an existing record.""" if msg_id in self._culled_ids: raise KeyError("Record %r has been culled for size" % msg_id) self._check_dates(rec) _rec = self._records[msg_id] self._drop_bytes(_rec) _rec.update(rec) self._add_bytes(_rec) def drop_matching_records(self, check): """Remove a record from the DB.""" matches = self._match(check) for rec in matches: self._drop_bytes(rec) del self._records[rec['msg_id']] def drop_record(self, msg_id): """Remove a record from the DB.""" rec = self._records[msg_id] self._drop_bytes(rec) del self._records[msg_id] def find_records(self, check, keys=None): """Find records matching a query dict, optionally extracting subset of keys. Returns dict keyed by msg_id of matching records. 
Parameters ---------- check: dict mongodb-style query argument keys: list of strs [optional] if specified, the subset of keys to extract. msg_id will *always* be included. """ matches = self._match(check) if keys: return [ self._extract_subdict(rec, keys) for rec in matches ] else: return matches def get_history(self): """get all msg_ids, ordered by time submitted.""" msg_ids = self._records.keys() # Remove any that do not have a submitted timestamp. # This is extremely unlikely to happen, # but it seems to come up in some tests on VMs. msg_ids = [ m for m in msg_ids if self._records[m]['submitted'] is not None ] return sorted(msg_ids, key=lambda m: self._records[m]['submitted']) class NoData(KeyError): """Special KeyError to raise when requesting data from NoDB""" def __str__(self): return "NoDB backend doesn't store any data. " "Start the Controller with a DB backend to enable resubmission / result persistence." class NoDB(BaseDB): """A blackhole db backend that actually stores no information. Provides the full DB interface, but raises KeyErrors on any method that tries to access the records. This can be used to minimize the memory footprint of the Hub when its record-keeping functionality is not required. """ def add_record(self, msg_id, record): pass def get_record(self, msg_id): raise NoData() def update_record(self, msg_id, record): pass def drop_matching_records(self, check): pass def drop_record(self, msg_id): pass def find_records(self, check, keys=None): raise NoData() def get_history(self): raise NoData()
anandsimmy/ecommerce
refs/heads/master
src/oscar/apps/dashboard/users/app.py
10
from django.conf.urls import url from oscar.core.application import DashboardApplication from oscar.core.loading import get_class class UserManagementApplication(DashboardApplication): name = None default_permissions = ['is_staff', ] index_view = get_class('dashboard.users.views', 'IndexView') user_detail_view = get_class('dashboard.users.views', 'UserDetailView') password_reset_view = get_class('dashboard.users.views', 'PasswordResetView') alert_list_view = get_class('dashboard.users.views', 'ProductAlertListView') alert_update_view = get_class('dashboard.users.views', 'ProductAlertUpdateView') alert_delete_view = get_class('dashboard.users.views', 'ProductAlertDeleteView') def get_urls(self): urls = [ url(r'^$', self.index_view.as_view(), name='users-index'), url(r'^(?P<pk>-?\d+)/$', self.user_detail_view.as_view(), name='user-detail'), url(r'^(?P<pk>-?\d+)/password-reset/$', self.password_reset_view.as_view(), name='user-password-reset'), # Alerts url(r'^alerts/$', self.alert_list_view.as_view(), name='user-alert-list'), url(r'^alerts/(?P<pk>-?\d+)/delete/$', self.alert_delete_view.as_view(), name='user-alert-delete'), url(r'^alerts/(?P<pk>-?\d+)/update/$', self.alert_update_view.as_view(), name='user-alert-update'), ] return self.post_process_urls(urls) application = UserManagementApplication()
justathoughtor2/atomicApe
refs/heads/encaged
cygwin/lib/python2.7/site-packages/docutils/readers/pep.py
136
# $Id: pep.py 7320 2012-01-19 22:33:02Z milde $ # Author: David Goodger <goodger@python.org> # Copyright: This module has been placed in the public domain. """ Python Enhancement Proposal (PEP) Reader. """ __docformat__ = 'reStructuredText' from docutils.readers import standalone from docutils.transforms import peps, references, misc, frontmatter from docutils.parsers import rst class Reader(standalone.Reader): supported = ('pep',) """Contexts this reader supports.""" settings_spec = ( 'PEP Reader Option Defaults', 'The --pep-references and --rfc-references options (for the ' 'reStructuredText parser) are on by default.', ()) config_section = 'pep reader' config_section_dependencies = ('readers', 'standalone reader') def get_transforms(self): transforms = standalone.Reader.get_transforms(self) # We have PEP-specific frontmatter handling. transforms.remove(frontmatter.DocTitle) transforms.remove(frontmatter.SectionSubTitle) transforms.remove(frontmatter.DocInfo) transforms.extend([peps.Headers, peps.Contents, peps.TargetNotes]) return transforms settings_default_overrides = {'pep_references': 1, 'rfc_references': 1} inliner_class = rst.states.Inliner def __init__(self, parser=None, parser_name=None): """`parser` should be ``None``.""" if parser is None: parser = rst.Parser(rfc2822=True, inliner=self.inliner_class()) standalone.Reader.__init__(self, parser, '')
xuxiaoxin/micropython
refs/heads/master
tests/basics/seq_unpack.py
111
# Basics a, b = 1, 2 print(a, b) a, b = (1, 2) print(a, b) (a, b) = 1, 2 print(a, b) (a, b) = (1, 2) print(a, b) # Tuples/lists are optimized a, b = [1, 2] print(a, b) [a, b] = 100, 200 print(a, b) # optimised 3-way swap a = 1 b = 2 c = 3 a, b, c = b, c, a print(a, b, c) try: a, b, c = (1, 2) except ValueError: print("ValueError") try: a, b, c = [1, 2, 3, 4] except ValueError: print("ValueError") # Generic iterable object a, b, c = range(3) print(a, b, c) try: a, b, c = range(2) except ValueError: print("ValueError") try: a, b, c = range(4) except ValueError: print("ValueError")
gauravbose/digital-menu
refs/heads/master
digimenu2/django/utils/functional.py
36
import copy import operator import sys import warnings from functools import wraps from django.utils import six from django.utils.deprecation import RemovedInDjango19Warning from django.utils.six.moves import copyreg # You can't trivially replace this with `functools.partial` because this binds # to classes and returns bound instances, whereas functools.partial (on # CPython) is a type and its instances don't bind. def curry(_curried_func, *args, **kwargs): def _curried(*moreargs, **morekwargs): return _curried_func(*(args + moreargs), **dict(kwargs, **morekwargs)) return _curried def memoize(func, cache, num_args): """ Wrap a function so that results for any argument tuple are stored in 'cache'. Note that the args to the function must be usable as dictionary keys. Only the first num_args are considered when creating the key. """ warnings.warn("memoize wrapper is deprecated and will be removed in " "Django 1.9. Use django.utils.lru_cache instead.", RemovedInDjango19Warning, stacklevel=2) @wraps(func) def wrapper(*args): mem_args = args[:num_args] if mem_args in cache: return cache[mem_args] result = func(*args) cache[mem_args] = result return result return wrapper class cached_property(object): """ Decorator that converts a method with a single self argument into a property cached on the instance. Optional ``name`` argument allows you to make cached properties of other methods. (e.g. url = cached_property(get_absolute_url, name='url') ) """ def __init__(self, func, name=None): self.func = func self.__doc__ = getattr(func, '__doc__') self.name = name or func.__name__ def __get__(self, instance, type=None): if instance is None: return self res = instance.__dict__[self.name] = self.func(instance) return res class Promise(object): """ This is just a base class for the proxy class created in the closure of the lazy function. It can be used to recognize promises in code. """ pass def lazy(func, *resultclasses): """ Turns any callable into a lazy evaluated callable. 
You need to give result classes or types -- at least one is needed so that the automatic forcing of the lazy evaluation code is triggered. Results are not memoized; the function is evaluated on every access. """ @total_ordering class __proxy__(Promise): """ Encapsulate a function call and act as a proxy for methods that are called on the result of that function. The function is not evaluated until one of the methods on the result is called. """ __prepared = False def __init__(self, args, kw): self.__args = args self.__kw = kw if not self.__prepared: self.__prepare_class__() self.__prepared = True def __reduce__(self): return ( _lazy_proxy_unpickle, (func, self.__args, self.__kw) + resultclasses ) @classmethod def __prepare_class__(cls): for resultclass in resultclasses: for type_ in resultclass.mro(): for method_name in type_.__dict__.keys(): # All __promise__ return the same wrapper method, they # look up the correct implementation when called. if hasattr(cls, method_name): continue meth = cls.__promise__(method_name) setattr(cls, method_name, meth) cls._delegate_bytes = bytes in resultclasses cls._delegate_text = six.text_type in resultclasses assert not (cls._delegate_bytes and cls._delegate_text), ( "Cannot call lazy() with both bytes and text return types.") if cls._delegate_text: if six.PY3: cls.__str__ = cls.__text_cast else: cls.__unicode__ = cls.__text_cast cls.__str__ = cls.__bytes_cast_encoded elif cls._delegate_bytes: if six.PY3: cls.__bytes__ = cls.__bytes_cast else: cls.__str__ = cls.__bytes_cast @classmethod def __promise__(cls, method_name): # Builds a wrapper around some magic method def __wrapper__(self, *args, **kw): # Automatically triggers the evaluation of a lazy value and # applies the given magic method of the result type. 
res = func(*self.__args, **self.__kw) return getattr(res, method_name)(*args, **kw) return __wrapper__ def __text_cast(self): return func(*self.__args, **self.__kw) def __bytes_cast(self): return bytes(func(*self.__args, **self.__kw)) def __bytes_cast_encoded(self): return func(*self.__args, **self.__kw).encode('utf-8') def __cast(self): if self._delegate_bytes: return self.__bytes_cast() elif self._delegate_text: return self.__text_cast() else: return func(*self.__args, **self.__kw) def __ne__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() != other def __eq__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() == other def __lt__(self, other): if isinstance(other, Promise): other = other.__cast() return self.__cast() < other def __hash__(self): return hash(self.__cast()) def __mod__(self, rhs): if self._delegate_bytes and six.PY2: return bytes(self) % rhs elif self._delegate_text: return six.text_type(self) % rhs return self.__cast() % rhs def __deepcopy__(self, memo): # Instances of this class are effectively immutable. It's just a # collection of functions. So we don't need to do anything # complicated for copying. memo[id(self)] = self return self @wraps(func) def __wrapper__(*args, **kw): # Creates the proxy object, instead of the actual value. return __proxy__(args, kw) return __wrapper__ def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses): return lazy(func, *resultclasses)(*args, **kwargs) def allow_lazy(func, *resultclasses): """ A decorator that allows a function to be called with one or more lazy arguments. If none of the args are lazy, the function is evaluated immediately, otherwise a __proxy__ is returned that will evaluate the function when needed. 
""" lazy_func = lazy(func, *resultclasses) @wraps(func) def wrapper(*args, **kwargs): for arg in list(args) + list(six.itervalues(kwargs)): if isinstance(arg, Promise): break else: return func(*args, **kwargs) return lazy_func(*args, **kwargs) return wrapper empty = object() def new_method_proxy(func): def inner(self, *args): if self._wrapped is empty: self._setup() return func(self._wrapped, *args) return inner class LazyObject(object): """ A wrapper for another class that can be used to delay instantiation of the wrapped class. By subclassing, you have the opportunity to intercept and alter the instantiation. If you don't need to do that, use SimpleLazyObject. """ # Avoid infinite recursion when tracing __init__ (#19456). _wrapped = None def __init__(self): self._wrapped = empty __getattr__ = new_method_proxy(getattr) def __setattr__(self, name, value): if name == "_wrapped": # Assign to __dict__ to avoid infinite __setattr__ loops. self.__dict__["_wrapped"] = value else: if self._wrapped is empty: self._setup() setattr(self._wrapped, name, value) def __delattr__(self, name): if name == "_wrapped": raise TypeError("can't delete _wrapped.") if self._wrapped is empty: self._setup() delattr(self._wrapped, name) def _setup(self): """ Must be implemented by subclasses to initialize the wrapped object. """ raise NotImplementedError('subclasses of LazyObject must provide a _setup() method') # Because we have messed with __class__ below, we confuse pickle as to what # class we are pickling. It also appears to stop __reduce__ from being # called. So, we define __getstate__ in a way that cooperates with the way # that pickle interprets this class. This fails when the wrapped class is # a builtin, but it is better than nothing. def __getstate__(self): if self._wrapped is empty: self._setup() return self._wrapped.__dict__ # Python 3.3 will call __reduce__ when pickling; this method is needed # to serialize and deserialize correctly. 
@classmethod def __newobj__(cls, *args): return cls.__new__(cls, *args) def __reduce_ex__(self, proto): if proto >= 2: # On Py3, since the default protocol is 3, pickle uses the # ``__newobj__`` method (& more efficient opcodes) for writing. return (self.__newobj__, (self.__class__,), self.__getstate__()) else: # On Py2, the default protocol is 0 (for back-compat) & the above # code fails miserably (see regression test). Instead, we return # exactly what's returned if there's no ``__reduce__`` method at # all. return (copyreg._reconstructor, (self.__class__, object, None), self.__getstate__()) def __deepcopy__(self, memo): if self._wrapped is empty: # We have to use type(self), not self.__class__, because the # latter is proxied. result = type(self)() memo[id(self)] = result return result return copy.deepcopy(self._wrapped, memo) if six.PY3: __bytes__ = new_method_proxy(bytes) __str__ = new_method_proxy(str) __bool__ = new_method_proxy(bool) else: __str__ = new_method_proxy(str) __unicode__ = new_method_proxy(unicode) __nonzero__ = new_method_proxy(bool) # Introspection support __dir__ = new_method_proxy(dir) # Need to pretend to be the wrapped class, for the sake of objects that # care about this (especially in equality tests) __class__ = property(new_method_proxy(operator.attrgetter("__class__"))) __eq__ = new_method_proxy(operator.eq) __ne__ = new_method_proxy(operator.ne) __hash__ = new_method_proxy(hash) # Dictionary methods support __getitem__ = new_method_proxy(operator.getitem) __setitem__ = new_method_proxy(operator.setitem) __delitem__ = new_method_proxy(operator.delitem) __len__ = new_method_proxy(len) __contains__ = new_method_proxy(operator.contains) # Workaround for http://bugs.python.org/issue12370 _super = super class SimpleLazyObject(LazyObject): """ A lazy object initialized from any function. Designed for compound objects of unknown type. For builtins or objects of known type, use django.utils.functional.lazy. 
""" def __init__(self, func): """ Pass in a callable that returns the object to be wrapped. If copies are made of the resulting SimpleLazyObject, which can happen in various circumstances within Django, then you must ensure that the callable can be safely run more than once and will return the same value. """ self.__dict__['_setupfunc'] = func _super(SimpleLazyObject, self).__init__() def _setup(self): self._wrapped = self._setupfunc() # Return a meaningful representation of the lazy object for debugging # without evaluating the wrapped object. def __repr__(self): if self._wrapped is empty: repr_attr = self._setupfunc else: repr_attr = self._wrapped return '<%s: %r>' % (type(self).__name__, repr_attr) def __deepcopy__(self, memo): if self._wrapped is empty: # We have to use SimpleLazyObject, not self.__class__, because the # latter is proxied. result = SimpleLazyObject(self._setupfunc) memo[id(self)] = result return result return copy.deepcopy(self._wrapped, memo) class lazy_property(property): """ A property that works with subclasses by wrapping the decorated functions of the base class. """ def __new__(cls, fget=None, fset=None, fdel=None, doc=None): if fget is not None: @wraps(fget) def fget(instance, instance_type=None, name=fget.__name__): return getattr(instance, name)() if fset is not None: @wraps(fset) def fset(instance, value, name=fset.__name__): return getattr(instance, name)(value) if fdel is not None: @wraps(fdel) def fdel(instance, name=fdel.__name__): return getattr(instance, name)() return property(fget, fset, fdel, doc) def partition(predicate, values): """ Splits the values into two sets, based on the return value of the function (True/False). e.g.: >>> partition(lambda x: x > 3, range(5)) [0, 1, 2, 3], [4] """ results = ([], []) for item in values: results[predicate(item)].append(item) return results if sys.version_info >= (2, 7, 2): from functools import total_ordering else: # For Python < 2.7.2. 
total_ordering in versions prior to 2.7.2 is buggy. # See http://bugs.python.org/issue10042 for details. For these versions use # code borrowed from Python 2.7.3. def total_ordering(cls): """Class decorator that fills in missing ordering methods""" convert = { '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)), ('__le__', lambda self, other: self < other or self == other), ('__ge__', lambda self, other: not self < other)], '__le__': [('__ge__', lambda self, other: not self <= other or self == other), ('__lt__', lambda self, other: self <= other and not self == other), ('__gt__', lambda self, other: not self <= other)], '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)), ('__ge__', lambda self, other: self > other or self == other), ('__le__', lambda self, other: not self > other)], '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other), ('__gt__', lambda self, other: self >= other and not self == other), ('__lt__', lambda self, other: not self >= other)] } roots = set(dir(cls)) & set(convert) if not roots: raise ValueError('must define at least one ordering operation: < > <= >=') root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ for opname, opfunc in convert[root]: if opname not in roots: opfunc.__name__ = opname opfunc.__doc__ = getattr(int, opname).__doc__ setattr(cls, opname, opfunc) return cls
Bysmyyr/chromium-crosswalk
refs/heads/master
tools/perf/metrics/cpu_unittest.py
69
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import unittest from metrics import cpu # Testing private method. # pylint: disable=W0212 class CpuMetricTest(unittest.TestCase): def testSubtractCpuStats(self): # The result computed is a ratio of cpu time used to time elapsed. start = {'Browser': {'CpuProcessTime': 0, 'TotalTime': 0}} end = {'Browser': {'CpuProcessTime': 5, 'TotalTime': 20}} self.assertEqual({'Browser': 0.25}, cpu._SubtractCpuStats(end, start)) # An error is thrown if the args are called in the wrong order. self.assertRaises(AssertionError, cpu._SubtractCpuStats, start, end) # An error is thrown if there's a process type in end that's not in start. end['Renderer'] = {'CpuProcessTime': 2, 'TotalTime': 20} self.assertRaises(AssertionError, cpu._SubtractCpuStats, end, start) # A process type will be ignored if there's an empty dict for start or end. start['Renderer'] = {} self.assertEqual({'Browser': 0.25}, cpu._SubtractCpuStats(end, start)) # Results for multiple process types can be computed. start['Renderer'] = {'CpuProcessTime': 0, 'TotalTime': 0} self.assertEqual({'Browser': 0.25, 'Renderer': 0.1}, cpu._SubtractCpuStats(end, start)) # Test 32-bit overflow. start = {'Browser': {'CpuProcessTime': 0, 'TotalTime': 2**32 - 20}} end = {'Browser': {'CpuProcessTime': 5, 'TotalTime': 20}} self.assertEqual({'Browser': 0.125}, cpu._SubtractCpuStats(end, start)) self.assertRaises(AssertionError, cpu._SubtractCpuStats, start, end)
mgaleae/android_kernel_samsung_smdk4412_utouch
refs/heads/master
tools/perf/scripts/python/sctop.py
11180
# system call top # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Periodically displays system-wide system call totals, broken down by # syscall. If a [comm] arg is specified, only syscalls called by # [comm] are displayed. If an [interval] arg is specified, the display # will be refreshed every [interval] seconds. The default interval is # 3 seconds. import os, sys, thread, time sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s sctop.py [comm] [interval]\n"; for_comm = None default_interval = 3 interval = default_interval if len(sys.argv) > 3: sys.exit(usage) if len(sys.argv) > 2: for_comm = sys.argv[1] interval = int(sys.argv[2]) elif len(sys.argv) > 1: try: interval = int(sys.argv[1]) except ValueError: for_comm = sys.argv[1] interval = default_interval syscalls = autodict() def trace_begin(): thread.start_new_thread(print_syscall_totals, (interval,)) pass def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(interval): while 1: clear_term() if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): try: print "%-40s %10d\n" % (syscall_name(id), val), except TypeError: pass syscalls.clear() time.sleep(interval)
disperse/ripping-yarns
refs/heads/master
module_system-1.166/process_dialogs.py
1
# Mount&Blade module-system compiler stage: exports triggers and dialogs
# from the module_* definition files into the game's text formats.
# Python 2 code (`xrange`, `has_key`, print statements). `export_dir`,
# `triggers`, `dialogs`, the *_pos index constants for triggers, and the
# save/load helpers all come from the star imports below.
import string
import types

from module_info import *
from module_triggers import *
from module_dialogs import *

from process_common import *
from process_operations import *

# Index constants for the fields of a dialog "sentence" tuple.
speaker_pos = 0
ipt_token_pos = 1
sentence_conditions_pos = 2
text_pos = 3
opt_token_pos = 4
sentence_consequences_pos = 5
sentence_voice_over_pos = 6

#-------------------------------------------------------

def save_dialog_states(dialog_states):
    """Write the collected dialog-state names, one per line, to
    dialog_states.txt in the export directory."""
    file = open(export_dir + "dialog_states.txt","w")
    for dialog_state in dialog_states:
        file.write("%s\n"%dialog_state)
    file.close()

#def compile_variables(cookies_list):
#  for trigger in triggers:
#    for consequence in trigger[trigger_consequences_pos]:
#      compile_statement(consequence,cookies_list)
#  for sentence in sentences:
#    for consequence in sentence[sentence_consequences_pos]:
#      compile_statement(consequence,cookies_list)
#  for trigger in triggers:
#    for condition in trigger[trigger_conditions_pos]:
#      compile_statement(condition,cookies_list)
#  for sentence in sentences:
#    for condition in sentence[sentence_conditions_pos]:
#      compile_statement(condition,cookies_list)
#  return cookies_list

def save_triggers(variable_list,variable_uses,triggers,tag_uses,quick_strings):
    """Export `triggers` to triggers.txt: a version header, the trigger
    count, then per trigger the check/delay/rearm intervals followed by
    its condition and consequence operation blocks."""
    file = open(export_dir + "triggers.txt","w")
    file.write("triggersfile version 1\n")
    file.write("%d\n"%len(triggers))
    for i in xrange(len(triggers)):
        trigger = triggers[i]
        file.write("%f %f %f "%(trigger[trigger_check_pos],trigger[trigger_delay_pos],trigger[trigger_rearm_pos]))
        save_statement_block(file,0,1,trigger[trigger_conditions_pos] , variable_list, variable_uses,tag_uses,quick_strings)
        save_statement_block(file,0,1,trigger[trigger_consequences_pos], variable_list, variable_uses,tag_uses,quick_strings)
#    for condition in trigger[trigger_conditions_pos]:
#      save_operation(file,condition,variable_list)
#    file.write(" %d "%(len(trigger[trigger_consequences_pos])))
#    for consequence in trigger[trigger_consequences_pos]:
#      save_operation(file,consequence,variable_list)
        file.write("\n")
    file.close()

#=================================================================

def compile_sentence_tokens(sentences):
    """Map each sentence's input/output state tokens to integer ids.

    Builds the dialog-state table (seeded with the engine's built-in
    states), assigns every output token an id (appending new states),
    then resolves every input token against that table. Reports tokens
    that are never produced/consumed. Returns (input_tokens,
    output_tokens) as parallel lists of state ids."""
    input_tokens = []
    output_tokens = []
    # Built-in states known to the engine; user-defined states are
    # appended after these.
    dialog_states = ["start","party_encounter","prisoner_liberated","enemy_defeated","party_relieved","event_triggered","close_window","trade","exchange_members", "trade_prisoners","buy_mercenaries","view_char","training","member_chat","prisoner_chat"]
    # Usage counters; built-ins start at 1 so they never trigger the
    # "output token not found" warning below.
    dialog_state_usages = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    # Pass 1: register every output token, creating new states on demand.
    for sentence in sentences:
        output_token_id = -1
        output_token = sentence[opt_token_pos]
        found = 0
        for i_t in xrange(len(dialog_states)):
            if output_token == dialog_states[i_t]:
                output_token_id = i_t
                found = 1
                break
        if not found:
            dialog_states.append(output_token)
            dialog_state_usages.append(0)
            output_token_id = len(dialog_states) - 1
        output_tokens.append(output_token_id)
    # Pass 2: resolve input tokens; an input token with no matching
    # state means no sentence (or built-in) can ever lead there.
    for sentence in sentences:
        input_token_id = -1
        input_token = sentence[ipt_token_pos]
        found = 0
        for i_t in xrange(len(dialog_states)):
            if input_token == dialog_states[i_t]:
                input_token_id = i_t
                dialog_state_usages[i_t] = dialog_state_usages[i_t] + 1
                found = 1
                break
        if not found:
            print sentence[ipt_token_pos]
            print sentence[text_pos]
            print sentence[opt_token_pos]
            print "**********************************************************************************"
            print "ERROR: INPUT TOKEN NOT FOUND:" + input_token
            print "**********************************************************************************"
            print "**********************************************************************************"
        input_tokens.append(input_token_id)
    save_dialog_states(dialog_states)
    # Any state never used as an input token is unreachable output.
    for i_t in xrange(len(dialog_states)):
        if dialog_state_usages[i_t] == 0:
            print "ERROR: Output token not found: " + dialog_states[i_t]
    return (input_tokens, output_tokens)

def create_auto_id(sentence,auto_ids):
    """Derive a unique "dlga_" identifier from the sentence text,
    lengthening the text prefix (then appending a number) until the id
    is either free or already mapped to this same text. `auto_ids`
    accumulates id -> text across calls."""
    text = convert_to_identifier(sentence[text_pos])
    done = 0
    # Start from a prefix of up to 20 identifier characters.
    i = 20
    lt = len(text)
    if (i > lt):
        i = lt
    auto_id = "dlga_" + text[0:i]
    done = 0
    if auto_ids.has_key(auto_id) and (auto_ids[auto_id] == text):
        done = 1
    # Grow the prefix until it is unique or the full text is consumed.
    while (i <= lt) and not done:
        auto_id = "dlga_" + text[0:i]
        if auto_ids.has_key(auto_id):
            if auto_ids[auto_id] == text:
                done = 1
            else:
                i += 1
        else:
            done = 1
            auto_ids[auto_id] = text
    if not done:
        # Full text still collides with different text: disambiguate
        # with a numeric suffix.
        number = 1
        new_auto_id = auto_id + str(number)
        while auto_ids.has_key(new_auto_id):
            number += 1
            new_auto_id = auto_id + str(number)
        auto_id = new_auto_id
        auto_ids[auto_id] = text
    return auto_id

def create_auto_id2(sentence,auto_ids):
    """Derive a unique "dlga_<input>:<output>" identifier from the
    sentence's state tokens, appending ".<n>" on collision with a
    different text. This is the variant actually used by
    save_sentences below."""
    text = sentence[text_pos]
    token_ipt = convert_to_identifier(sentence[ipt_token_pos])
    token_opt = convert_to_identifier(sentence[opt_token_pos])
    done = 0
    auto_id = "dlga_" + token_ipt + ":" + token_opt
    done = 0
    if not auto_ids.has_key(auto_id):
        done = 1
    else:
        if auto_ids.has_key(auto_id) and (auto_ids[auto_id] == text):
            done = 1
    if not done:
        number = 1
        new_auto_id = auto_id + "." + str(number)
        while auto_ids.has_key(new_auto_id):
            number += 1
            new_auto_id = auto_id + "." + str(number)
        auto_id = new_auto_id
    auto_ids[auto_id] = text
    return auto_id

def save_sentences(variable_list,variable_uses,sentences,tag_uses,quick_strings,input_states,output_states):
    """Export `sentences` to conversation.txt: per sentence an auto id,
    speaker, input state id, conditions block, underscore-escaped text,
    output state id, consequences block, and optional voice-over.
    A sentence that raises during export is reported and skipped."""
    file = open(export_dir + "conversation.txt","w")
    file.write("dialogsfile version 2\n")
    file.write("%d\n"%len(sentences))
    # Create an empty dictionary
    auto_ids = {}
    for i in xrange(len(sentences)):
        sentence = sentences[i]
        try:
            dialog_id = create_auto_id2(sentence,auto_ids)
            file.write("%s %d %d "%(dialog_id,sentence[speaker_pos],input_states[i]))
            save_statement_block(file, 0, 1, sentence[sentence_conditions_pos], variable_list,variable_uses,tag_uses,quick_strings)
            # Spaces become underscores since the format is space-delimited.
            file.write("%s "%(string.replace(sentence[text_pos]," ","_")))
            if (len(sentence[text_pos]) == 0):
                file.write("NO_TEXT ")
            file.write(" %d "%(output_states[i]))
            save_statement_block(file, 0, 1, sentence[sentence_consequences_pos], variable_list,variable_uses,tag_uses,quick_strings)
            if (len(sentence) > sentence_voice_over_pos):
                file.write("%s "%sentence[sentence_voice_over_pos])
            else:
                file.write("NO_VOICEOVER ")
            file.write("\n")
        except:
            # Deliberate best-effort: report the bad sentence, keep going.
            print "Error in dialog line:"
            print sentence
    file.close()

# Registered cookies is a list which enables the order of cookies to remain fixed across changes.
# In order to remove cookies not used anymore, edit the cookies_registery.py and remove all entries.

# Module-level driver: runs at import time, exporting triggers then
# dialogs, and writing back the updated variable/tag/quick-string tables.
print "exporting triggers..."
variable_uses = []
variables = load_variables(export_dir,variable_uses)
tag_uses = load_tag_uses(export_dir)
quick_strings = load_quick_strings(export_dir)
#compile_variables(variables)
save_triggers(variables,variable_uses,triggers,tag_uses,quick_strings)
print "exporting dialogs..."
(input_states,output_states) = compile_sentence_tokens(dialogs)
save_sentences(variables,variable_uses,dialogs,tag_uses,quick_strings,input_states,output_states)
save_variables(export_dir,variables,variable_uses)
save_tag_uses(export_dir, tag_uses)
save_quick_strings(export_dir,quick_strings)
#print "finished."
GheRivero/ansible
refs/heads/devel
lib/ansible/plugins/action/ce.py
24
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import sys import copy from ansible import constants as C from ansible.module_utils._text import to_text from ansible.module_utils.connection import Connection from ansible.plugins.action.normal import ActionModule as _ActionModule from ansible.module_utils.network.cloudengine.ce import ce_provider_spec from ansible.module_utils.network.common.utils import load_provider try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class ActionModule(_ActionModule): def run(self, tmp=None, task_vars=None): del tmp # tmp no longer has any effect if self._play_context.connection != 'local': return dict( failed=True, msg='invalid connection specified, expected connection=local, ' 'got %s' % self._play_context.connection ) provider = load_provider(ce_provider_spec, self._task.args) transport = provider['transport'] or 'cli' display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr) if transport == 'cli': pc = copy.deepcopy(self._play_context) pc.connection = 'network_cli' pc.network_os = 'ce' pc.remote_addr = provider['host'] or self._play_context.remote_addr pc.port = int(provider['port'] or self._play_context.port or 22) 
pc.remote_user = provider['username'] or self._play_context.connection_user pc.password = provider['password'] or self._play_context.password pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT) self._task.args['provider'] = provider.update( host=pc.remote_addr, port=pc.port, username=pc.remote_user, password=pc.password ) display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr) connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin) socket_path = connection.run() display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) if not socket_path: return {'failed': True, 'msg': 'unable to open shell. Please see: ' + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} # make sure we are in the right cli context which should be # enable mode and not config module conn = Connection(socket_path) out = conn.get_prompt() while to_text(out, errors='surrogate_then_replace').strip().endswith(']'): display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr) conn.send_command('exit') out = conn.get_prompt() task_vars['ansible_socket'] = socket_path # make sure a transport value is set in args self._task.args['transport'] = transport self._task.args['provider'] = provider result = super(ActionModule, self).run(task_vars=task_vars) return result
maxdeliso/elevatorSim
refs/heads/master
Lib/test/json_tests/test_indent.py
60
import textwrap
from io import StringIO
from test.json_tests import PyTest, CTest

class TestIndent:
    """Tests for the `indent` parameter of json.dumps/json.dump.

    Mixed into PyTest/CTest below so the same assertions run against
    both the pure-Python and the C json implementations.
    """
    def test_indent(self):
        # Nested structure exercising lists, dicts, strings and booleans.
        h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
             {'nifty': 87}, {'field': 'yes', 'morefield': False} ]

        # Expected output for indent='\t' (the \t are escape sequences,
        # i.e. real tab characters in the string).
        expect = textwrap.dedent("""\
        [
        \t[
        \t\t"blorpie"
        \t],
        \t[
        \t\t"whoops"
        \t],
        \t[],
        \t"d-shtaeou",
        \t"d-nthiouh",
        \t"i-vhbjkhnth",
        \t{
        \t\t"nifty": 87
        \t},
        \t{
        \t\t"field": "yes",
        \t\t"morefield": false
        \t}
        ]""")

        d1 = self.dumps(h)
        d2 = self.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
        d3 = self.dumps(h, indent='\t', sort_keys=True, separators=(',', ': '))

        # All three serializations must round-trip to the original value.
        h1 = self.loads(d1)
        h2 = self.loads(d2)
        h3 = self.loads(d3)

        self.assertEqual(h1, h)
        self.assertEqual(h2, h)
        self.assertEqual(h3, h)
        # indent=2 is the tab-indented form with each tab expanded to 2 spaces.
        self.assertEqual(d2, expect.expandtabs(2))
        self.assertEqual(d3, expect)

    def test_indent0(self):
        h = {3: 1}
        def check(indent, expected):
            # Check both dumps() and dump()-to-a-stream produce `expected`.
            d1 = self.dumps(h, indent=indent)
            self.assertEqual(d1, expected)

            sio = StringIO()
            self.json.dump(h, sio, indent=indent)
            self.assertEqual(sio.getvalue(), expected)

        # indent=0 should emit newlines
        check(0, '{\n"3": 1\n}')
        # indent=None is more compact
        check(None, '{"3": 1}')


class TestPyIndent(TestIndent, PyTest): pass
class TestCIndent(TestIndent, CTest): pass
hell-sing/hacker-rank
refs/heads/master
Python/Strings/String_Split_and_Join.py
2
def split_and_join(line):
    """Replace every single-space separator in *line* with a hyphen.

    Splits on individual spaces (so runs of spaces yield empty fields,
    i.e. consecutive hyphens) and re-joins with "-".
    """
    return "-".join(line.split(" "))
pombreda/sulley
refs/heads/master
unit_tests/primitives.py
6
# Unit tests for sulley fuzzing primitives (signed rendering, sized
# strings, fuzz extension libraries). Python 2 code; s_initialize,
# s_byte/s_word/s_dword/s_qword, s_string, s_get, s_mutate and `blocks`
# all come from the sulley star import.
from sulley import *

def run ():
    """Entry point: run all primitive tests, then reset sulley's
    module-global request registry so later tests start clean."""
    signed_tests()
    string_tests()
    fuzz_extension_tests()

    # clear out the requests.
    blocks.REQUESTS = {}
    blocks.CURRENT = None


########################################################################################################################
def signed_tests ():
    """Signed ascii rendering of byte/word/dword/qword at 0, max/2,
    max/2+1 and max — i.e. 0, INT_MAX, INT_MIN and -1 for each width."""
    s_initialize("UNIT TEST 1")
    s_byte(0, format="ascii", signed=True, name="byte_1")
    s_byte(0xff/2, format="ascii", signed=True, name="byte_2")
    s_byte(0xff/2+1, format="ascii", signed=True, name="byte_3")
    s_byte(0xff, format="ascii", signed=True, name="byte_4")
    s_word(0, format="ascii", signed=True, name="word_1")
    s_word(0xffff/2, format="ascii", signed=True, name="word_2")
    s_word(0xffff/2+1, format="ascii", signed=True, name="word_3")
    s_word(0xffff, format="ascii", signed=True, name="word_4")
    s_dword(0, format="ascii", signed=True, name="dword_1")
    s_dword(0xffffffff/2, format="ascii", signed=True, name="dword_2")
    s_dword(0xffffffff/2+1, format="ascii", signed=True, name="dword_3")
    s_dword(0xffffffff, format="ascii", signed=True, name="dword_4")
    s_qword(0, format="ascii", signed=True, name="qword_1")
    s_qword(0xffffffffffffffff/2, format="ascii", signed=True, name="qword_2")
    s_qword(0xffffffffffffffff/2+1, format="ascii", signed=True, name="qword_3")
    s_qword(0xffffffffffffffff, format="ascii", signed=True, name="qword_4")

    req = s_get("UNIT TEST 1")

    assert(req.names["byte_1"].render() == "0")
    assert(req.names["byte_2"].render() == "127")
    assert(req.names["byte_3"].render() == "-128")
    assert(req.names["byte_4"].render() == "-1")
    assert(req.names["word_1"].render() == "0")
    assert(req.names["word_2"].render() == "32767")
    assert(req.names["word_3"].render() == "-32768")
    assert(req.names["word_4"].render() == "-1")
    assert(req.names["dword_1"].render() == "0")
    assert(req.names["dword_2"].render() == "2147483647")
    assert(req.names["dword_3"].render() == "-2147483648")
    assert(req.names["dword_4"].render() == "-1")
    assert(req.names["qword_1"].render() == "0")
    assert(req.names["qword_2"].render() == "9223372036854775807")
    assert(req.names["qword_3"].render() == "-9223372036854775808")
    assert(req.names["qword_4"].render() == "-1")


########################################################################################################################
def string_tests ():
    """A size-constrained string renders at its natural length initially
    and at exactly `size` once mutation padding/truncation kicks in."""
    s_initialize("STRING UNIT TEST 1")
    s_string("foo", size=200, name="sized_string")

    req = s_get("STRING UNIT TEST 1")

    assert(len(req.names["sized_string"].render()) == 3)

    # check that string padding and truncation are working correctly.
    for i in xrange(0, 50):
        s_mutate()
        assert(len(req.names["sized_string"].render()) == 200)


########################################################################################################################
def fuzz_extension_tests ():
    """.fuzz_strings / .fuzz_ints extension files are merged into the
    fuzz libraries of string and int primitives (but not into a char,
    which is too small to hold the int values)."""
    import shutil

    # backup existing fuzz extension libraries.
    try:
        shutil.move(".fuzz_strings", ".fuzz_strings_backup")
        shutil.move(".fuzz_ints", ".fuzz_ints_backup")
    except:
        # Best-effort: the files may simply not exist.
        pass

    # create extension libraries for unit test.
    fh = open(".fuzz_strings", "w+")
    fh.write("pedram\n")
    fh.write("amini\n")
    fh.close()

    fh = open(".fuzz_ints", "w+")
    fh.write("deadbeef\n")
    fh.write("0xc0cac01a\n")
    fh.close()

    s_initialize("EXTENSION TEST")
    s_string("foo", name="string")
    s_int(200, name="int")
    s_char("A", name="char")

    req = s_get("EXTENSION TEST")

    # these should be here now.
    assert(0xdeadbeef in req.names["int"].fuzz_library)
    assert(0xc0cac01a in req.names["int"].fuzz_library)

    # these should not as a char is too small to store them.
    assert(0xdeadbeef not in req.names["char"].fuzz_library)
    assert(0xc0cac01a not in req.names["char"].fuzz_library)

    # these should be here now.
    assert("pedram" in req.names["string"].fuzz_library)
    assert("amini" in req.names["string"].fuzz_library)

    # restore existing fuzz extension libraries.
    try:
        shutil.move(".fuzz_strings_backup", ".fuzz_strings")
        shutil.move(".fuzz_ints_backup", ".fuzz_ints")
    except:
        pass
danmcp/origin
refs/heads/master
vendor/k8s.io/kubernetes/hack/lookup_pull.py
194
#!/usr/bin/env python # Copyright 2015 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Script to print out PR info in release note format. import json import sys import urllib2 PULLQUERY=("https://api.github.com/repos/" "kubernetes/kubernetes/pulls/{pull}") LOGIN="login" TITLE="title" USER="user" def print_pulls(pulls): for pull in pulls: d = json.loads(urllib2.urlopen(PULLQUERY.format(pull=pull)).read()) print "* {title} #{pull} ({author})".format( title=d[TITLE], pull=pull, author=d[USER][LOGIN]) if __name__ == "__main__": if len(sys.argv) < 2: print ("Usage: {cmd} <pulls>...: Prints out short " + "markdown description for PRs appropriate for release notes.") sys.exit(1) print_pulls(sys.argv[1:])
benpatterson/edx-platform
refs/heads/master
cms/urls.py
2
"""URLconf for the edX Studio (cms) Django project.

Flat configuration: builds `urlpatterns` incrementally, with several
sections gated on `settings.FEATURES` flags. View targets are given as
dotted strings (legacy Django `patterns()` style).
"""
from django.conf import settings
from django.conf.urls import patterns, include, url

# There is a course creators admin table.
from ratelimitbackend import admin
admin.autodiscover()

# pylint: disable=bad-continuation

# Pattern to match a course key or a library key
COURSELIKE_KEY_PATTERN = r'(?P<course_key_string>({}|{}))'.format(
    r'[^/]+/[^/]+/[^/]+', r'[^/:]+:[^/+]+\+[^/+]+(\+[^/]+)?'
)

# Pattern to match a library key only
LIBRARY_KEY_PATTERN = r'(?P<library_key_string>library-v1:[^/+]+\+[^/+]+)'

# Transcript management, xblock handlers/resources, and misc pages.
urlpatterns = patterns(
    '',
    url(r'^transcripts/upload$', 'contentstore.views.upload_transcripts', name='upload_transcripts'),
    url(r'^transcripts/download$', 'contentstore.views.download_transcripts', name='download_transcripts'),
    url(r'^transcripts/check$', 'contentstore.views.check_transcripts', name='check_transcripts'),
    url(r'^transcripts/choose$', 'contentstore.views.choose_transcripts', name='choose_transcripts'),
    url(r'^transcripts/replace$', 'contentstore.views.replace_transcripts', name='replace_transcripts'),
    url(r'^transcripts/rename$', 'contentstore.views.rename_transcripts', name='rename_transcripts'),
    url(r'^transcripts/save$', 'contentstore.views.save_transcripts', name='save_transcripts'),
    url(r'^preview/xblock/(?P<usage_key_string>.*?)/handler/(?P<handler>[^/]*)(?:/(?P<suffix>.*))?$',
        'contentstore.views.preview_handler', name='preview_handler'),
    url(r'^xblock/(?P<usage_key_string>.*?)/handler/(?P<handler>[^/]*)(?:/(?P<suffix>.*))?$',
        'contentstore.views.component_handler', name='component_handler'),
    url(r'^xblock/resource/(?P<block_type>[^/]*)/(?P<uri>.*)$',
        'contentstore.views.xblock.xblock_resource', name='xblock_resource_url'),
    # temporary landing page for a course
    url(r'^edge/(?P<org>[^/]+)/(?P<course>[^/]+)/course/(?P<coursename>[^/]+)$',
        'contentstore.views.landing', name='landing'),
    url(r'^not_found$', 'contentstore.views.not_found', name='not_found'),
    url(r'^server_error$', 'contentstore.views.server_error', name='server_error'),
    # temporary landing page for edge
    url(r'^edge$', 'contentstore.views.edge', name='edge'),
    # noop to squelch ajax errors
    url(r'^event$', 'contentstore.views.event', name='event'),
    url(r'^xmodule/', include('pipeline_js.urls')),
    url(r'^heartbeat$', include('heartbeat.urls')),
    url(r'^user_api/', include('openedx.core.djangoapps.user_api.legacy_urls')),
)

# User creation and updating views
urlpatterns += patterns(
    '',
    url(r'^create_account$', 'student.views.create_account', name='create_account'),
    url(r'^activate/(?P<key>[^/]*)$', 'student.views.activate_account', name='activate'),

    # ajax view that actually does the work
    url(r'^login_post$', 'student.views.login_user', name='login_post'),
    url(r'^logout$', 'student.views.logout_user', name='logout'),
)

# restful api
urlpatterns += patterns(
    'contentstore.views',

    url(r'^$', 'howitworks', name='homepage'),
    url(r'^howitworks$', 'howitworks'),
    url(r'^signup$', 'signup', name='signup'),
    url(r'^signin$', 'login_page', name='login'),
    url(r'^request_course_creator$', 'request_course_creator'),

    url(r'^course_team/{}(?:/(?P<email>.+))?$'.format(COURSELIKE_KEY_PATTERN), 'course_team_handler'),
    url(r'^course_info/{}$'.format(settings.COURSE_KEY_PATTERN), 'course_info_handler'),
    url(
        r'^course_info_update/{}/(?P<provided_id>\d+)?$'.format(settings.COURSE_KEY_PATTERN),
        'course_info_update_handler'
    ),
    url(r'^home/?$', 'course_listing', name='home'),
    url(
        r'^course/{}/search_reindex?$'.format(settings.COURSE_KEY_PATTERN),
        'course_search_index_handler',
        name='course_search_index_handler'
    ),
    url(r'^course/{}?$'.format(settings.COURSE_KEY_PATTERN), 'course_handler', name='course_handler'),
    url(r'^course_notifications/{}/(?P<action_state_id>\d+)?$'.format(settings.COURSE_KEY_PATTERN),
        'course_notifications_handler'),
    url(r'^course_rerun/{}$'.format(settings.COURSE_KEY_PATTERN), 'course_rerun_handler', name='course_rerun_handler'),
    url(r'^container/{}$'.format(settings.USAGE_KEY_PATTERN), 'container_handler'),
    url(r'^checklists/{}/(?P<checklist_index>\d+)?$'.format(settings.COURSE_KEY_PATTERN), 'checklists_handler'),
    url(r'^orphan/{}$'.format(settings.COURSE_KEY_PATTERN), 'orphan_handler'),
    url(r'^assets/{}/{}?$'.format(settings.COURSE_KEY_PATTERN, settings.ASSET_KEY_PATTERN), 'assets_handler'),
    url(r'^import/{}$'.format(COURSELIKE_KEY_PATTERN), 'import_handler'),
    url(r'^import_status/{}/(?P<filename>.+)$'.format(COURSELIKE_KEY_PATTERN), 'import_status_handler'),
    url(r'^export/{}$'.format(COURSELIKE_KEY_PATTERN), 'export_handler'),
    url(r'^xblock/outline/{}$'.format(settings.USAGE_KEY_PATTERN), 'xblock_outline_handler'),
    url(r'^xblock/container/{}$'.format(settings.USAGE_KEY_PATTERN), 'xblock_container_handler'),
    url(r'^xblock/{}/(?P<view_name>[^/]+)$'.format(settings.USAGE_KEY_PATTERN), 'xblock_view_handler'),
    url(r'^xblock/{}?$'.format(settings.USAGE_KEY_PATTERN), 'xblock_handler'),
    url(r'^tabs/{}$'.format(settings.COURSE_KEY_PATTERN), 'tabs_handler'),
    url(r'^settings/details/{}$'.format(settings.COURSE_KEY_PATTERN), 'settings_handler'),
    url(r'^settings/grading/{}(/)?(?P<grader_index>\d+)?$'.format(settings.COURSE_KEY_PATTERN), 'grading_handler'),
    url(r'^settings/advanced/{}$'.format(settings.COURSE_KEY_PATTERN), 'advanced_settings_handler'),
    url(r'^textbooks/{}$'.format(settings.COURSE_KEY_PATTERN), 'textbooks_list_handler'),
    url(r'^textbooks/{}/(?P<textbook_id>\d[^/]*)$'.format(settings.COURSE_KEY_PATTERN), 'textbooks_detail_handler'),
    url(r'^videos/{}$'.format(settings.COURSE_KEY_PATTERN), 'videos_handler'),
    url(r'^video_encodings_download/{}$'.format(settings.COURSE_KEY_PATTERN), 'video_encodings_download'),
    url(r'^group_configurations/{}$'.format(settings.COURSE_KEY_PATTERN), 'group_configurations_list_handler'),
    url(r'^group_configurations/{}/(?P<group_configuration_id>\d+)(/)?(?P<group_id>\d+)?$'.format(
        settings.COURSE_KEY_PATTERN), 'group_configurations_detail_handler'),
    url(r'^api/val/v0/', include('edxval.urls')),
)

JS_INFO_DICT = {
    'domain': 'djangojs',
    # We need to explicitly include external Django apps that are not in LOCALE_PATHS.
    'packages': ('openassessment',),
}

urlpatterns += patterns(
    '',
    # Serve catalog of localized strings to be rendered by Javascript
    url(r'^i18n.js$', 'django.views.i18n.javascript_catalog', JS_INFO_DICT),
)

# Feature-flagged sections below: each block only registers its routes
# when the corresponding FEATURES flag is enabled in settings.
if settings.FEATURES.get('ENABLE_CONTENT_LIBRARIES'):
    urlpatterns += (
        url(r'^library/{}?$'.format(LIBRARY_KEY_PATTERN),
            'contentstore.views.library_handler', name='library_handler'),
        url(r'^library/{}/team/$'.format(LIBRARY_KEY_PATTERN),
            'contentstore.views.manage_library_users', name='manage_library_users'),
    )

if settings.FEATURES.get('ENABLE_EXPORT_GIT'):
    urlpatterns += (url(
        r'^export_git/{}$'.format(
            settings.COURSE_KEY_PATTERN,
        ),
        'contentstore.views.export_git',
        name='export_git',
    ),)

if settings.FEATURES.get('ENABLE_SERVICE_STATUS'):
    urlpatterns += patterns(
        '',
        url(r'^status/', include('service_status.urls')),
    )

if settings.FEATURES.get('AUTH_USE_CAS'):
    urlpatterns += (
        url(r'^cas-auth/login/$', 'external_auth.views.cas_login', name="cas-login"),
        url(r'^cas-auth/logout/$', 'django_cas.views.logout', {'next_page': '/'}, name="cas-logout"),
    )

urlpatterns += patterns('', url(r'^admin/', include(admin.site.urls)),)

# enable automatic login
if settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING'):
    urlpatterns += (
        url(r'^auto_auth$', 'student.views.auto_auth'),
    )

# enable entrance exams
if settings.FEATURES.get('ENTRANCE_EXAMS'):
    urlpatterns += (
        url(r'^course/{}/entrance_exam/?$'.format(settings.COURSE_KEY_PATTERN),
            'contentstore.views.entrance_exam'),
    )

# Enable Web/HTML Certificates
if settings.FEATURES.get('CERTIFICATES_HTML_VIEW'):
    urlpatterns += (
        url(r'^certificates/activation/{}/'.format(settings.COURSE_KEY_PATTERN),
            'contentstore.views.certificates.certificate_activation_handler'),
        url(r'^certificates/{}/(?P<certificate_id>\d+)/signatories/(?P<signatory_id>\d+)?$'.format(
            settings.COURSE_KEY_PATTERN), 'contentstore.views.certificates.signatory_detail_handler'),
        url(r'^certificates/{}/(?P<certificate_id>\d+)?$'.format(settings.COURSE_KEY_PATTERN),
            'contentstore.views.certificates.certificates_detail_handler'),
        url(r'^certificates/{}$'.format(settings.COURSE_KEY_PATTERN),
            'contentstore.views.certificates.certificates_list_handler')
    )

if settings.DEBUG:
    try:
        from .urls_dev import urlpatterns as dev_urlpatterns
        urlpatterns += dev_urlpatterns
    except ImportError:
        # Dev-only URLconf is optional.
        pass

    import debug_toolbar
    urlpatterns += (
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )

# Custom error pages
# pylint: disable=invalid-name
handler404 = 'contentstore.views.render_404'
handler500 = 'contentstore.views.render_500'

# display error page templates, for testing purposes
urlpatterns += (
    url(r'404', handler404),
    url(r'500', handler500),
)
tranx/pyffmpeg
refs/heads/master
examples/playvideo_qt_alsa.py
1
# -*- coding: utf-8 -*-

## Simple demo for pyffmpegb
##
## Copyright -- Bertrand Nouvel 2009
##
## Python 2 demo script: plays the audio/video file given as argv[1],
## rendering video frames in a PyQt4 window and pushing audio to ALSA.

## import your modules
from pyffmpeg import *
from PyQt4 import QtCore
from PyQt4 import QtGui
import sys, numpy, time
import alsaaudio

# Map channel count -> QImage format. The try/except tolerates PyQt4
# builds that do not expose Format_RGB888 (falls back to the 1- and
# 4-channel formats only).
try:
    LazyDisplayQt__imgconvarray={
        1:QtGui.QImage.Format_Indexed8,
        3:QtGui.QImage.Format_RGB888,
        4:QtGui.QImage.Format_RGB32
        }
except:
    LazyDisplayQt__imgconvarray={
        1:QtGui.QImage.Format_Indexed8,
        4:QtGui.QImage.Format_RGB32
        }

qapp = QtGui.QApplication(sys.argv)
qapp.processEvents()


class AlsaSoundLazyPlayer:
    """Pushes decoded audio buffers straight to an ALSA PCM device."""
    def __init__(self,rate=44100,channels=2,fps=25):
        self.fps=fps
        self._rate=rate
        self._channels=channels
        self._d = alsaaudio.PCM()
        self._d.setchannels(channels)
        self._d.setformat(alsaaudio.PCM_FORMAT_S16_LE)
        # One period per video frame worth of samples.
        self._d.setperiodsize((rate*channels)//fps)
        self._d.setrate(rate)
    def push_nowait(self,stamped_buffer):
        # stamped_buffer is (samples, timestamp)-like; only the data is used.
        self._d.write(stamped_buffer[0].data)
    def push_wait(self,stamped_buffer):
        self._d.write(stamped_buffer[0].data)
        # Crude pacing: sleep just under one frame interval per buffer.
        time.sleep(0.96/self.fps)


class LazyDisplayQt(QtGui.QMainWindow):
    """Main window that repaints itself with each decoded video frame."""
    imgconvarray=LazyDisplayQt__imgconvarray
    def __init__(self, *args):
        QtGui.QMainWindow.__init__(self, *args)
        # Start with a 1x1 RGB32 placeholder frame.
        self._i=numpy.zeros((1,1,4),dtype=numpy.uint8)
        self.i=QtGui.QImage(self._i.data,self._i.shape[1],self._i.shape[0],self.imgconvarray[self._i.shape[2]])
        self.show()
    def __del__(self):
        self.hide()
    def f(self,thearray):
        # Frame observer: keep a C-contiguous copy alive (QImage does not
        # own the buffer), wrap it, and trigger a repaint.
        self._i=thearray.astype(numpy.uint8).copy('C')
        self.i=QtGui.QImage(self._i.data,self._i.shape[1],self._i.shape[0],self.imgconvarray[self._i.shape[2]])
        self.update()
        qapp.processEvents()
    def paintEvent(self, ev):
        # Scale the current frame to the window size.
        self.p = QtGui.QPainter()
        self.p.begin(self)
        self.p.drawImage(QtCore.QRect(0,0,self.width(),self.height()),
                         self.i,
                         QtCore.QRect(0,0,self.i.width(),self.i.height()))
        self.p.end()


# Track-selection dicts for FFMpegReader.open(): stream index, duration,
# and per-track options (pixel format for video).
TS_VIDEO_RGB24={ 'video1':(0, -1, {'pixel_format':PixelFormats.RGB24}), 'audio1':(1,-1,{})}
TS_VIDEO_BGR24={ 'video1':(0, -1, {'pixel_format':PixelFormats.BGR24}), 'audio1':(1,-1,{})}

## create the reader object
mp=FFMpegReader(0,False)

## open an audio video file
vf=sys.argv[1]
mp.open(vf,TS_VIDEO_RGB24)
tracks=mp.get_tracks()

## connect video and audio to their respective device
ld=LazyDisplayQt()
tracks[0].set_observer(ld.f)
ap=AlsaSoundLazyPlayer(tracks[1].get_samplerate(),tracks[1].get_channels(),tracks[0].get_fps())
tracks[1].set_observer(ap.push_wait)

#tracks[0].seek_to_seconds(10)

## play the movie !
mp.run()
nervous-laughter/qiime2
refs/heads/master
qiime2/core/type/grammar.py
3
# ---------------------------------------------------------------------------- # Copyright (c) 2016-2017, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- import types import itertools from qiime2.core.util import tuplize from ..util import ImmutableBase class _TypeBase(ImmutableBase): """Provides reflexive methods.""" def __ne__(self, other): return not self == other def __rmod__(self, predicate): raise TypeError("Predicate must be applied to the right-hand side of" " a type expression.") def __ror__(self, other): return self | other # union should be associative def __rand__(self, other): return self & other # intersection should be associative class CompositeType(_TypeBase): def __init__(self, name, field_names): # These classes aren't user-facing, but some light validation avoids # accidental issues. However, we don't want to waste a lot of time with # validation here, validation should happen elsewhere. if not len(field_names): raise ValueError("`field_names` cannot be an empty array-like.") self.name = name self.field_names = field_names self._freeze_() def __mod__(self, predicate): raise TypeError("Cannot apply predicate %r, %r is missing arguments" " for its fields." % (predicate, self)) def __or__(self, other): raise TypeError("Cannot union with %r, %r is missing arguments" " for its fields." % (other, self)) def __and__(self, other): raise TypeError("Cannot intersect with %r, %r is missing arguments" " for its fields." % (other, self)) def __getitem__(self, fields): fields = tuplize(fields) if len(fields) != len(self.field_names): raise TypeError("%r takes %d field(s), %d provided." 
% (self, len(self.field_names), len(fields))) for args in zip(self.field_names, fields): self._validate_field_(*args) return self._apply_fields_(fields=fields) def __repr__(self): return "%s[%s]" % (self.name, ', '.join('{%s}' % f for f in self.field_names)) def _validate_field_(self, name, value): """Called when a field is provided to a `CompositeType`. This method is designed to be overridden to influence the behavior of the grammar. It is recommended to call super as the default implementation includes useful type checks and errors. Parameters ---------- name : str The name of the field being set value : TypeExpression The value of the field being set Raises ------ TypeError Raised when the field is rejected. By default this is when a field is not provided a `TypeExpression`. """ if not isinstance(value, TypeExpression): if isinstance(value, self.__class__): raise TypeError("Incomplete type %r provided as a field to %r" % (value, self)) raise TypeError("%r cannot be used as a field to %r (not a type)." % (value, self)) def _apply_fields_(self, fields): """Called when a `CompositeType` is promoted to a `TypeExpression`. This method is designed to be overridden to influence the behaviour of the grammar. An overriding method should ensure that `self.name` is propogated and that the provided `fields` are passed. Parameters ---------- fields : Tuple[TypeExpression, ...] The fields which should be provided to the `TypeExpression` Returns ------- TypeExpression Typically this will return a subclass of `TypeExpression`. 
""" return TypeExpression(self.name, fields=fields) def iter_symbols(self): yield self.name def is_concrete(self): return False class TypeExpression(_TypeBase): def __init__(self, name, fields=(), predicate=None): self.name = name self.predicate = predicate self.fields = fields self._freeze_() def __hash__(self): return (hash(self.__class__.__name__) ^ hash(self.name) ^ hash(self.predicate) ^ hash(self.fields)) def __eq__(self, other): # Deep equality, but not semantic equality. if type(self) is not type(other): return NotImplemented return (self.name == other.name and self.predicate == other.predicate and self.fields == other.fields) def equals(self, other): # Different from __eq__ which has to match hashing but can't # consider semantic equality return self <= other <= self def __repr__(self): result = self.name if self.fields: result += '[%s]' % ', '.join(repr(f) for f in self.fields) if self.predicate: result += ' %% %r' % self.predicate return result def __getitem__(self, fields): raise TypeError("%r has no empty fields (not subscriptable)." % self) def _apply_fields_(self, fields): return self.__class__(self.name, fields=fields, predicate=self.predicate) def __contains__(self, value): return (self._is_element_(value) and ((not self.predicate) or value in self.predicate)) def _is_element_(self, value): return False def __mod__(self, predicate): if self.predicate: raise TypeError("%r already has a predicate." % self) if predicate is None: return self self._validate_predicate_(predicate) return self._apply_predicate_(predicate=predicate) def _validate_predicate_(self, predicate): if not isinstance(predicate, Predicate): raise TypeError("%r is not a predicate." 
% predicate) def _apply_predicate_(self, predicate): return self.__class__(self.name, fields=self.fields, predicate=predicate) def __or__(self, other): self._validate_union_(other, handshake=False) if self == other: return self return self._build_union_((self, other)) def _validate_union_(self, other, handshake=False): if not isinstance(other, TypeExpression): if isinstance(other, CompositeType): raise TypeError("Cannot union an incomplete type %r with %r." % (other, self)) else: raise TypeError("%r is not a type expression." % other) if not handshake: other._validate_union_(self, handshake=True) def _build_union_(self, members): return UnionTypeExpression(members) def __and__(self, other): self._validate_intersection_(other, handshake=False) if self == other: return other return self._build_intersection_((self, other)) def _validate_intersection_(self, other, handshake=False): if not isinstance(other, TypeExpression): if isinstance(other, CompositeType): raise TypeError("Cannot intersect an incomplete type %r with" " %r." % (other, self)) else: raise TypeError("%r is not a type expression." % other) if not handshake: other._validate_intersection_(self, handshake=True) def _build_intersection_(self, members): return IntersectionTypeExpression(members) def __le__(self, other): return all(any(s._aug_is_subtype(o) for o in other) for s in self) def __ge__(self, other): return all(any(o._aug_is_subtype(s) for s in self) for o in other) def _aug_is_subtype(self, other): r = self._is_subtype_(other) if r is NotImplemented: return other._is_supertype_(self) return r def _is_subtype_(self, other): if self.name != other.name: return False for f1, f2 in itertools.zip_longest(self.fields, other.fields): if not (f1 <= f2): return False if other.predicate and not self.predicate <= other.predicate: return False return True def _is_supertype_(self, other): # Invoked only when `other`'s `_is_subtype_` returned `NotImplemented` # that really shouldn't be needed most of the time. 
raise NotImplementedError def __iter__(self): yield from set(self._apply_fields_(fields=fields) for fields in itertools.product(*self.fields)) def is_concrete(self): return len(list(self)) == 1 def iter_symbols(self): yield self.name for field in self.fields: yield from field.iter_symbols() def to_ast(self): return { "type": 'expression', "name": self.name, "predicate": self.predicate.to_ast() if self.predicate else {}, "fields": [field.to_ast() for field in self.fields] } class _SetOperationBase(TypeExpression): _operator = '?' # Used for repr only - ? chosen as it is not a Python op. def __init__(self, members): m = [] for member in members: # We can flatten the object a little, which will avoid excessive # recursion (it would look like a cons-list otherwise) if type(member) is type(self): m.extend(member.members) else: m.append(member) self.members = frozenset(m) super().__init__('') # Unions/intersections do not have a name def __hash__(self): return super().__hash__() ^ hash(self.members) def __eq__(self, other): super_eq = super().__eq__(other) if super_eq is NotImplemented: return NotImplemented return super_eq and self.members == other.members def __repr__(self): return (" %s " % self._operator) \ .join(sorted([repr(m) for m in self.members])) def _validate_predicate_(self, predicate): raise TypeError("Cannot apply predicates to union/intersection types.") def to_ast(self): return { 'members': [m.to_ast() for m in self.members] } def __iter__(self): yield from set(itertools.chain.from_iterable(self.members)) class UnionTypeExpression(_SetOperationBase): _operator = '|' def _validate_intersection_(self, other, handshake=False): raise TypeError("Cannot intersect %r with %r." 
% (self, other)) def _build_union_(self, members): return self.__class__(members) def to_ast(self): r = super().to_ast() r['type'] = 'union' return r class Predicate(_TypeBase): def __init__(self, *args, **kwargs): self._truthy = any(map(bool, args)) or any(map(bool, kwargs.values())) self._freeze_() def __hash__(self): # This trivially satisfies the property: # x == x => hash(x) == hash(x) # Subclasses ought to override this with something less... collision-y. return 0 def __eq__(self, other): raise NotImplementedError def __contains__(self, value): return self._is_element_(value) def _is_element_(self, value): return True def __bool__(self): return self._truthy def __le__(self, other): if other is None: other = self.__class__() return self._is_subtype_(other) def __ge__(self, other): if other is None: other = self.__class__() return other._is_subtype_(self) def _aug_is_subtype(self, other): r = self._is_subtype_(other) if r is NotImplemented: return other._is_supertype_(self) return r def _is_subtype_(self, other): raise NotImplementedError def _is_supertype_(self, other): raise NotImplementedError def to_ast(self): return { 'type': 'predicate', 'name': self.__class__.__name__ } # TODO: finish these classes: class IntersectionTypeExpression(_SetOperationBase): _operator = '&' def _validate_union_(self, other, handshake=False): raise TypeError("Cannot union %r with %r." % (self, other)) def _build_intersection_(self, members): return self.__class__(members) def to_ast(self): r = super().to_ast() r['type'] = 'intersection' return r class MappingTypeExpression(TypeExpression): def __init__(self, name, mapping): if type(mapping) is not dict: # we really only want dict literals raise ValueError() if type(name) is not str: raise ValueError() for key in mapping: self._validate_member_(key) for value in mapping.values(): self._validate_member_(value) # Read only proxy of mapping, mutation to `mapping` will be reflected # but there isn't much we can do about that. 
Good use of this object # would involve a dict literal anyway. self.mapping = types.MappingProxyType(mapping) super().__init__(name) def __hash__(self): return super().__hash__() ^ hash(frozenset(self.mapping.items())) def __eq__(self, other): super_eq = super().__eq__(other) if super_eq is NotImplemented: return NotImplemented return super_eq and (set(self.mapping.items()) == set(other.mapping.items())) def _validate_predicate_(self, predicate): raise TypeError("Cannot apply predicates to type variables.") def _validate_intersection_(self, other, handshake=False): if type(self) != type(other): raise TypeError() if set(self.mapping) != set(other.mapping): raise TypeError() super()._validate_intersection_(other, handshake=handshake) def _validate_union_(self, other, handshake=False): # This has a reasonable definition (ensure disjoint sets on left-hand) # the opposite of intersection, but there isn't really a good use-case # for it at this time. raise TypeError("Cannot union type variables.") def to_ast(self): return { "type": "map", "mapping": [list(item) for item in self.mapping.items()] }
canaryhealth/jstc
refs/heads/master
jstc/engines/base.py
1
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <phil@canary.md>
# date: 2016/09/12
# copy: (C) Copyright 2016-EOT Canary Health, Inc., All Rights Reserved.
#------------------------------------------------------------------------------

import re


#------------------------------------------------------------------------------
class Engine(object):
    '''
    Base class for template engines. Provides collapsing of "ignorable"
    inter-marker whitespace and declares the pre-compilation hook that
    concrete engines must implement.
    '''

    mimetype = None
    extensions = None
    open_markers = ('<',)
    close_markers = ('/>', '>')

    #--------------------------------------------------------------------------
    def __init__(self, *args, **kw):
        super(Engine, self).__init__(*args, **kw)
        # Compile the whitespace-collapsing regex once per instance;
        # subclasses customize it by overriding the marker tuples or
        # `whitespace_cre` itself.
        self._ws_cre = self.whitespace_cre()

    #--------------------------------------------------------------------------
    def whitespace_cre(self):
        '''
        Build the compiled regex that locates a close-marker followed by a
        newline-containing whitespace run and then an open-marker (or the
        end of the text). An optional single whitespace character captured
        just before the close-marker is preserved by `whitespace_sub`.
        '''
        closers = '|'.join(re.escape(marker) for marker in self.close_markers)
        openers = '|'.join(re.escape(marker) for marker in self.open_markers)
        pattern = (
            '(?P<space>^|\\s)?'
            '(?P<close>' + closers + ')'
            '(\\s*\n\\s*(?P<open>' + openers + ')|$)')
        return re.compile(pattern, flags=re.DOTALL)

    #--------------------------------------------------------------------------
    def whitespace_sub(self, match):
        '''
        Replacement callback for `whitespace_cre`: drops the whitespace run
        between the two markers, moving the (at most one) captured leading
        space to sit between the close- and open-markers.
        '''
        kept_space = match.group('space') or ''
        opener = match.group('open') or ''
        return match.group('close') + kept_space + opener

    #--------------------------------------------------------------------------
    def whitespace(self, text, attrs):
        '''
        Remove "ignorable" whitespace from `text`. Note that `text` will
        already have been dedented and stripped.
        '''
        # todo: ok, this is a *SUPER* simple ignorable whitespace
        #       detection algorithm... this needs to be refactored
        #       somehow...
        return self._ws_cre.sub(self.whitespace_sub, text)

    #--------------------------------------------------------------------------
    def precompile(self, text, attrs):
        '''
        Pre-compiles the JavaScript template `text` for delivery to a
        JavaScript enabled client in a "pre-compiled" format.

        Pre-compilation means that the template character stream has
        been, at minimum, parsed into tokens and typically rendered
        into JavaScript native syntax in order to accelerate template
        parsing on the client via highly optimized JavaScript parsing
        code.

        If pre-compilation is not available (e.g. because the specified
        template type does not support pre-compilation or the necessary
        pre-requisites for pre-compilation are not available), then a
        `jstc.api.PrecompilerUnavailable` exception (or subclass)
        should be raised.

        The `attrs` specifies template attributes; see
        `jstc.compiler.Compiler.compile` for details.
        '''
        raise NotImplementedError()


#------------------------------------------------------------------------------
# end of $Id$
# $ChangeLog$
#------------------------------------------------------------------------------
imapp-pl/golem
refs/heads/develop
tests/golem/environments/test_environments_manager.py
3
import unittest

from golem.environments.environmentsmanager import EnvironmentsManager
from golem.environments.environment import Environment

import logging

logger = logging.getLogger(__name__)


class TestEnvironmentsManager(unittest.TestCase):
    """Tests for EnvironmentsManager lookup of environments by identifier."""

    def test_get_environment_by_id(self):
        """Registered environments must be retrievable by their own id."""
        em = EnvironmentsManager()

        # Three distinct environments, each stubbed to report a unique id.
        env1 = Environment()
        env2 = Environment()
        env3 = Environment()
        env1.get_id = lambda: "Env1"
        env2.get_id = lambda: "Env2"
        env3.get_id = lambda: "Env3"

        em.add_environment(env1)
        em.add_environment(env2)
        em.add_environment(env3)

        # assertEqual rather than assertTrue(a == b): on failure it reports
        # both operands instead of a bare "False is not true".
        self.assertEqual(env1, em.get_environment_by_id("Env1"))
        self.assertEqual(env2, em.get_environment_by_id("Env2"))
        self.assertEqual(env3, em.get_environment_by_id("Env3"))
cloudify-cosmo/cloudify-chef-plugin
refs/heads/master
chef_plugin/operations.py
2
######## # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. from cloudify import context from cloudify.decorators import operation as _operation from chef_plugin.chef_client import run_chef EXPECTED_OP_PREFIXES = ( 'cloudify.interfaces.lifecycle', 'cloudify.interfaces.relationship_lifecycle') def _extract_op(ctx): prefix, _, op = ctx.operation.name.rpartition('.') if prefix not in EXPECTED_OP_PREFIXES: ctx.logger.warn("Node operation is expected to start with '{0}' " "but starts with '{1}'".format( ' or '.join(EXPECTED_OP_PREFIXES), prefix)) return op @_operation def operation(ctx, **kwargs): if ctx.type == context.NODE_INSTANCE: properties = ctx.node.properties else: properties = ctx.source.node.properties if 'runlist' in properties['chef_config']: ctx.logger.info("Using explicitly provided Chef runlist") runlist = properties['chef_config']['runlist'] else: op = _extract_op(ctx) if op not in properties['chef_config']['runlists']: ctx.logger.warn("No Chef runlist for operation {0}".format(op)) ctx.logger.info("Using Chef runlist for operation {0}".format(op)) runlist = properties['chef_config']['runlists'].get(op) if isinstance(runlist, list): runlist = ','.join(runlist) ctx.logger.info("Chef runlist: {0}".format(runlist)) run_chef(ctx, runlist)
Ednilsonpalhares/SCEFA
refs/heads/master
SCEFA/urls.py
2
# Project URL configuration: routes the Django admin site plus the URLConfs
# of the three applications (appPonto, appAlunos, appPortas).
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls import url, include
from django.contrib import admin

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # NOTE(review): the three app includes all use the empty pattern r'',
    # so resolution order matters — a route defined in appPonto shadows an
    # identically named route in appAlunos/appPortas. Confirm this ordering
    # is intentional.
    url(r'', include('appPonto.urls')),
    url(r'', include('appAlunos.urls')),
    url(r'', include('appPortas.urls')),
]

# Append static-file URL patterns (effective in development/DEBUG setups).
urlpatterns += staticfiles_urlpatterns()
Jionglun/2015cd_midterm
refs/heads/master
static/Brython3.1.1-20150328-091302/Lib/test/test_re.py
718
# FIXME: brython: implement test.support #from test.support import verbose, run_unittest, gc_collect, bigmemtest, _2G, \ # cpython_only verbose = True # FIXME: brython: Not used in this module ? #import io import re # FIXME: brython: implement re.Scanner #from re import Scanner import sre_constants import sys import string import traceback # FIXME: brython: implement _weakref #from weakref import proxy # Misc tests from Tim Peters' re.doc # WARNING: Don't change details in these tests if you don't know # what you're doing. Some of these tests were carefully modeled to # cover most of the code. import unittest class ReTests(unittest.TestCase): # FIXME: brython: implement test.support # def test_keep_buffer(self): # # See bug 14212 # b = bytearray(b'x') # it = re.finditer(b'a', b) # with self.assertRaises(BufferError): # b.extend(b'x'*400) # list(it) # del it # gc_collect() # b.extend(b'x'*400) # FIXME: brython: implement _weakref # def test_weakref(self): # s = 'QabbbcR' # x = re.compile('ab+c') # y = proxy(x) # self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR')) def test_search_star_plus(self): self.assertEqual(re.search('x*', 'axx').span(0), (0, 0)) self.assertEqual(re.search('x*', 'axx').span(), (0, 0)) self.assertEqual(re.search('x+', 'axx').span(0), (1, 3)) self.assertEqual(re.search('x+', 'axx').span(), (1, 3)) self.assertEqual(re.search('x', 'aaa'), None) self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0)) self.assertEqual(re.match('a*', 'xxx').span(), (0, 0)) self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3)) self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3)) self.assertEqual(re.match('a+', 'xxx'), None) def bump_num(self, matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1) def test_basic_re_sub(self): self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x') self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'), '9.3 -3 24x100y') self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3), 
'9.3 -3 23x99y') self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n') self.assertEqual(re.sub('.', r"\n", 'x'), '\n') s = r"\1\1" self.assertEqual(re.sub('(.)', s, 'x'), 'xx') self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s) self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s) self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx') self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx') self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx') self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx') self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'), '\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D') self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a') self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), (chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7))) self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest') def test_bug_449964(self): # fails for group followed by other escape self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'), 'xx\bxx\b') def test_bug_449000(self): # Test for sub() on escaped characters self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') def test_bug_1661(self): # Verify that flags do not get silently ignored with compiled patterns pattern = re.compile('.') self.assertRaises(ValueError, re.match, pattern, 'A', re.I) self.assertRaises(ValueError, re.search, pattern, 'A', re.I) self.assertRaises(ValueError, re.findall, pattern, 'A', re.I) self.assertRaises(ValueError, re.compile, pattern, re.I) def test_bug_3629(self): # A regex that triggered a bug in the sre-code validator re.compile("(?P<quote>)(?(quote))") def test_sub_template_numeric_escape(self): # bug 776311 and friends self.assertEqual(re.sub('x', r'\0', 'x'), '\0') 
self.assertEqual(re.sub('x', r'\000', 'x'), '\000') self.assertEqual(re.sub('x', r'\001', 'x'), '\001') self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8') self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9') self.assertEqual(re.sub('x', r'\111', 'x'), '\111') self.assertEqual(re.sub('x', r'\117', 'x'), '\117') self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111') self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1') self.assertEqual(re.sub('x', r'\00', 'x'), '\x00') self.assertEqual(re.sub('x', r'\07', 'x'), '\x07') self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8') self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9') self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a') self.assertEqual(re.sub('x', r'\400', 'x'), '\0') self.assertEqual(re.sub('x', r'\777', 'x'), '\377') self.assertRaises(re.error, re.sub, 'x', r'\1', 'x') self.assertRaises(re.error, re.sub, 'x', r'\8', 'x') self.assertRaises(re.error, re.sub, 'x', r'\9', 'x') self.assertRaises(re.error, re.sub, 'x', r'\11', 'x') self.assertRaises(re.error, re.sub, 'x', r'\18', 'x') self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x') self.assertRaises(re.error, re.sub, 'x', r'\90', 'x') self.assertRaises(re.error, re.sub, 'x', r'\99', 'x') self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8' self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x') self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1' self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0' # in python2.3 (etc), these loop endlessly in sre_parser.py self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x') self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'), 'xz8') self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'), 'xza') def test_qualified_re_sub(self): self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb') self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa') def test_bug_114660(self): self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 
'hello there'), 'hello there') def test_bug_462270(self): # Test for empty sub() behaviour, see SF bug #462270 self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-') self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d') def test_symbolic_groups(self): re.compile('(?P<a>x)(?P=a)(?(a)y)') re.compile('(?P<a1>x)(?P=a1)(?(a1)y)') self.assertRaises(re.error, re.compile, '(?P<a>)(?P<a>)') self.assertRaises(re.error, re.compile, '(?Px)') self.assertRaises(re.error, re.compile, '(?P=)') self.assertRaises(re.error, re.compile, '(?P=1)') self.assertRaises(re.error, re.compile, '(?P=a)') self.assertRaises(re.error, re.compile, '(?P=a1)') self.assertRaises(re.error, re.compile, '(?P=a.)') self.assertRaises(re.error, re.compile, '(?P<)') self.assertRaises(re.error, re.compile, '(?P<>)') self.assertRaises(re.error, re.compile, '(?P<1>)') self.assertRaises(re.error, re.compile, '(?P<a.>)') self.assertRaises(re.error, re.compile, '(?())') self.assertRaises(re.error, re.compile, '(?(a))') self.assertRaises(re.error, re.compile, '(?(1a))') self.assertRaises(re.error, re.compile, '(?(a.))') # New valid/invalid identifiers in Python 3 re.compile('(?P<µ>x)(?P=µ)(?(µ)y)') re.compile('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)(?P=𝔘𝔫𝔦𝔠𝔬𝔡𝔢)(?(𝔘𝔫𝔦𝔠𝔬𝔡𝔢)y)') self.assertRaises(re.error, re.compile, '(?P<©>x)') def test_symbolic_refs(self): self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<>', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx') self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx') # New valid/invalid identifiers in Python 3 
self.assertEqual(re.sub('(?P<µ>x)', r'\g<µ>', 'xx'), 'xx') self.assertEqual(re.sub('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)', r'\g<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>', 'xx'), 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', r'\g<©>', 'xx') def test_re_subn(self): self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2)) self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1)) self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0)) self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4)) self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2)) def test_re_split(self): self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c']) self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c']) self.assertEqual(re.split("(:*)", ":a:b::c"), ['', ':', 'a', ':', 'b', '::', 'c']) self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c']) self.assertEqual(re.split("(:)*", ":a:b::c"), ['', ':', 'a', ':', 'b', ':', 'c']) self.assertEqual(re.split("([b:]+)", ":a:b::c"), ['', ':', 'a', ':b::', 'c']) self.assertEqual(re.split("(b)|(:+)", ":a:b::c"), ['', None, ':', 'a', None, ':', '', 'b', None, '', None, '::', 'c']) self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"), ['', 'a', '', '', 'c']) def test_qualified_re_split(self): self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c']) self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d']) self.assertEqual(re.split("(:)", ":a:b::c", 2), ['', ':', 'a', ':', 'b::c']) self.assertEqual(re.split("(:*)", ":a:b::c", 2), ['', ':', 'a', ':', 'b::c']) def test_re_findall(self): self.assertEqual(re.findall(":+", "abc"), []) self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"]) self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"]) self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""), (":", ":"), (":", "::")]) def test_bug_117612(self): self.assertEqual(re.findall(r"(a|(b))", "aba"), [("a", ""),("b", "b"),("a", "")]) def test_re_match(self): self.assertEqual(re.match('a', 'a').groups(), 
()) self.assertEqual(re.match('(a)', 'a').groups(), ('a',)) self.assertEqual(re.match(r'(a)', 'a').group(0), 'a') self.assertEqual(re.match(r'(a)', 'a').group(1), 'a') self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a')) pat = re.compile('((a)|(b))(c)?') self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None)) self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None)) self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c')) self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c')) self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c')) # A single group m = re.match('(a)', 'a') self.assertEqual(m.group(0), 'a') self.assertEqual(m.group(0), 'a') self.assertEqual(m.group(1), 'a') self.assertEqual(m.group(1, 1), ('a', 'a')) pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?') self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None)) self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'), (None, 'b', None)) self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c')) def test_re_groupref_exists(self): self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(), ('(', 'a')) self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(), (None, 'a')) self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None) self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(), ('a', 'b')) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(), (None, 'd')) self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(), (None, 'd')) self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(), ('a', '')) # Tests for bug #1177831: exercise groups other than the first group p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))') self.assertEqual(p.match('abc').groups(), ('a', 'b', 'c')) self.assertEqual(p.match('ad').groups(), ('a', None, 'd')) self.assertEqual(p.match('abd'), None) 
self.assertEqual(p.match('ac'), None) def test_re_groupref(self): self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(), ('|', 'a')) self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(), (None, 'a')) self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None) self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None) self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(), ('a', 'a')) self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(), (None, None)) def test_groupdict(self): self.assertEqual(re.match('(?P<first>first) (?P<second>second)', 'first second').groupdict(), {'first':'first', 'second':'second'}) def test_expand(self): self.assertEqual(re.match("(?P<first>first) (?P<second>second)", "first second") .expand(r"\2 \1 \g<second> \g<first>"), "second first second first") def test_repeat_minmax(self): self.assertEqual(re.match("^(\w){1}$", "abc"), None) self.assertEqual(re.match("^(\w){1}?$", "abc"), None) self.assertEqual(re.match("^(\w){1,2}$", "abc"), None) self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None) self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c") self.assertEqual(re.match("^x{1}$", "xxx"), None) self.assertEqual(re.match("^x{1}?$", "xxx"), None) self.assertEqual(re.match("^x{1,2}$", "xxx"), None) self.assertEqual(re.match("^x{1,2}?$", "xxx"), None) self.assertNotEqual(re.match("^x{3}$", "xxx"), None) self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None) self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None) self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), 
None) self.assertNotEqual(re.match("^x{3}?$", "xxx"), None) self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None) self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None) self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None) self.assertEqual(re.match("^x{}$", "xxx"), None) self.assertNotEqual(re.match("^x{}$", "x{}"), None) def test_getattr(self): self.assertEqual(re.compile("(?i)(a)(b)").pattern, "(?i)(a)(b)") self.assertEqual(re.compile("(?i)(a)(b)").flags, re.I | re.U) self.assertEqual(re.compile("(?i)(a)(b)").groups, 2) self.assertEqual(re.compile("(?i)(a)(b)").groupindex, {}) self.assertEqual(re.compile("(?i)(?P<first>a)(?P<other>b)").groupindex, {'first': 1, 'other': 2}) self.assertEqual(re.match("(a)", "a").pos, 0) self.assertEqual(re.match("(a)", "a").endpos, 1) self.assertEqual(re.match("(a)", "a").string, "a") self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1))) self.assertNotEqual(re.match("(a)", "a").re, None) def test_special_escapes(self): self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx").group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd").group(1), "bx") self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx", re.LOCALE).group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd", re.LOCALE).group(1), "bx") self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx", re.UNICODE).group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd", re.UNICODE).group(1), "bx") self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx").group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd").group(1), "bx") self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") 
self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a").group(0), "1aa! a") self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a", re.LOCALE).group(0), "1aa! a") self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a", re.UNICODE).group(0), "1aa! a") def test_string_boundaries(self): # See http://bugs.python.org/issue10713 self.assertEqual(re.search(r"\b(abc)\b", "abc").group(1), "abc") # There's a word boundary at the start of a string. self.assertTrue(re.match(r"\b", "abc")) # A non-empty string includes a non-boundary zero-length match. self.assertTrue(re.search(r"\B", "abc")) # There is no non-boundary match at the start of a string. self.assertFalse(re.match(r"\B", "abc")) # However, an empty string contains no word boundaries, and also no # non-boundaries. self.assertEqual(re.search(r"\B", ""), None) # This one is questionable and different from the perlre behaviour, # but describes current behavior. self.assertEqual(re.search(r"\b", ""), None) # A single word-character string has two boundaries, but no # non-boundary gaps. self.assertEqual(len(re.findall(r"\b", "a")), 2) self.assertEqual(len(re.findall(r"\B", "a")), 0) # If there are no words, there are no boundaries self.assertEqual(len(re.findall(r"\b", " ")), 0) self.assertEqual(len(re.findall(r"\b", " ")), 0) # Can match around the whitespace. 
self.assertEqual(len(re.findall(r"\B", " ")), 2) def test_bigcharset(self): self.assertEqual(re.match("([\u2222\u2223])", "\u2222").group(1), "\u2222") self.assertEqual(re.match("([\u2222\u2223])", "\u2222", re.UNICODE).group(1), "\u2222") def test_big_codesize(self): # Issue #1160 r = re.compile('|'.join(('%d'%x for x in range(10000)))) self.assertIsNotNone(r.match('1000')) self.assertIsNotNone(r.match('9999')) def test_anyall(self): self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0), "a\nb") self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0), "a\n\nb") def test_non_consuming(self): self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a") self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a") self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a") self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a") self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a") self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a") self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a") self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a") self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a") self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a") self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a") def test_ignore_case(self): self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b") self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb") self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b") self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb") self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a") self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa") self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a 
a") self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa") def test_category(self): self.assertEqual(re.match(r"(\s)", " ").group(1), " ") def test_getlower(self): import _sre self.assertEqual(_sre.getlower(ord('A'), 0), ord('a')) self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a')) self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a')) self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") def test_not_literal(self): self.assertEqual(re.search("\s([^a])", " b").group(1), "b") self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb") def test_search_coverage(self): self.assertEqual(re.search("\s(b)", " b").group(1), "b") self.assertEqual(re.search("a\s", "a ").group(0), "a ") def assertMatch(self, pattern, text, match=None, span=None, matcher=re.match): if match is None and span is None: # the pattern matches the whole text match = text span = (0, len(text)) elif match is None or span is None: raise ValueError('If match is not None, span should be specified ' '(and vice versa).') m = matcher(pattern, text) self.assertTrue(m) self.assertEqual(m.group(), match) self.assertEqual(m.span(), span) def test_re_escape(self): alnum_chars = string.ascii_letters + string.digits + '_' p = ''.join(chr(i) for i in range(256)) for c in p: if c in alnum_chars: self.assertEqual(re.escape(c), c) elif c == '\x00': self.assertEqual(re.escape(c), '\\000') else: self.assertEqual(re.escape(c), '\\' + c) self.assertMatch(re.escape(c), c) self.assertMatch(re.escape(p), p) def test_re_escape_byte(self): alnum_chars = (string.ascii_letters + string.digits + '_').encode('ascii') p = bytes(range(256)) for i in p: b = bytes([i]) if b in alnum_chars: self.assertEqual(re.escape(b), b) elif i == 0: self.assertEqual(re.escape(b), b'\\000') else: self.assertEqual(re.escape(b), b'\\' + b) self.assertMatch(re.escape(b), b) self.assertMatch(re.escape(p), p) def 
test_re_escape_non_ascii(self): s = 'xxx\u2620\u2620\u2620xxx' s_escaped = re.escape(s) self.assertEqual(s_escaped, 'xxx\\\u2620\\\u2620\\\u2620xxx') self.assertMatch(s_escaped, s) self.assertMatch('.%s+.' % re.escape('\u2620'), s, 'x\u2620\u2620\u2620x', (2, 7), re.search) def test_re_escape_non_ascii_bytes(self): b = 'y\u2620y\u2620y'.encode('utf-8') b_escaped = re.escape(b) self.assertEqual(b_escaped, b'y\\\xe2\\\x98\\\xa0y\\\xe2\\\x98\\\xa0y') self.assertMatch(b_escaped, b) res = re.findall(re.escape('\u2620'.encode('utf-8')), b) self.assertEqual(len(res), 2) def pickle_test(self, pickle): oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)') s = pickle.dumps(oldpat) newpat = pickle.loads(s) self.assertEqual(oldpat, newpat) def test_constants(self): self.assertEqual(re.I, re.IGNORECASE) self.assertEqual(re.L, re.LOCALE) self.assertEqual(re.M, re.MULTILINE) self.assertEqual(re.S, re.DOTALL) self.assertEqual(re.X, re.VERBOSE) def test_flags(self): for flag in [re.I, re.M, re.X, re.S, re.L]: self.assertNotEqual(re.compile('^pattern$', flag), None) def test_sre_character_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]: if i < 256: self.assertIsNotNone(re.match(r"\%03o" % i, chr(i))) self.assertIsNotNone(re.match(r"\%03o0" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"\%03o8" % i, chr(i)+"8")) self.assertIsNotNone(re.match(r"\x%02x" % i, chr(i))) self.assertIsNotNone(re.match(r"\x%02x0" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"\x%02xz" % i, chr(i)+"z")) if i < 0x10000: self.assertIsNotNone(re.match(r"\u%04x" % i, chr(i))) self.assertIsNotNone(re.match(r"\u%04x0" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"\u%04xz" % i, chr(i)+"z")) self.assertIsNotNone(re.match(r"\U%08x" % i, chr(i))) self.assertIsNotNone(re.match(r"\U%08x0" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"\U%08xz" % i, chr(i)+"z")) self.assertIsNotNone(re.match(r"\0", "\000")) self.assertIsNotNone(re.match(r"\08", "\0008")) 
self.assertIsNotNone(re.match(r"\01", "\001")) self.assertIsNotNone(re.match(r"\018", "\0018")) self.assertIsNotNone(re.match(r"\567", chr(0o167))) self.assertRaises(re.error, re.match, r"\911", "") self.assertRaises(re.error, re.match, r"\x1", "") self.assertRaises(re.error, re.match, r"\x1z", "") self.assertRaises(re.error, re.match, r"\u123", "") self.assertRaises(re.error, re.match, r"\u123z", "") self.assertRaises(re.error, re.match, r"\U0001234", "") self.assertRaises(re.error, re.match, r"\U0001234z", "") self.assertRaises(re.error, re.match, r"\U00110000", "") def test_sre_character_class_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]: if i < 256: self.assertIsNotNone(re.match(r"[\%o]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\%o8]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\%03o]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\%03o0]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\%03o8]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\x%02x]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\x%02x0]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\x%02xz]" % i, chr(i))) if i < 0x10000: self.assertIsNotNone(re.match(r"[\u%04x]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\u%04x0]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\u%04xz]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\U%08x]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\U%08x0]" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"[\U%08xz]" % i, chr(i)+"z")) self.assertIsNotNone(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e")) self.assertRaises(re.error, re.match, r"[\911]", "") self.assertRaises(re.error, re.match, r"[\x1z]", "") self.assertRaises(re.error, re.match, r"[\u123z]", "") self.assertRaises(re.error, re.match, r"[\U0001234z]", "") self.assertRaises(re.error, re.match, r"[\U00110000]", "") def test_sre_byte_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: self.assertIsNotNone(re.match((r"\%03o" % 
i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"\%03o0" % i).encode(), bytes([i])+b"0")) self.assertIsNotNone(re.match((r"\%03o8" % i).encode(), bytes([i])+b"8")) self.assertIsNotNone(re.match((r"\x%02x" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"\x%02x0" % i).encode(), bytes([i])+b"0")) self.assertIsNotNone(re.match((r"\x%02xz" % i).encode(), bytes([i])+b"z")) self.assertIsNotNone(re.match(br"\u", b'u')) self.assertIsNotNone(re.match(br"\U", b'U')) self.assertIsNotNone(re.match(br"\0", b"\000")) self.assertIsNotNone(re.match(br"\08", b"\0008")) self.assertIsNotNone(re.match(br"\01", b"\001")) self.assertIsNotNone(re.match(br"\018", b"\0018")) self.assertIsNotNone(re.match(br"\567", bytes([0o167]))) self.assertRaises(re.error, re.match, br"\911", b"") self.assertRaises(re.error, re.match, br"\x1", b"") self.assertRaises(re.error, re.match, br"\x1z", b"") def test_sre_byte_class_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: self.assertIsNotNone(re.match((r"[\%o]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\%o8]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\%03o]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\%03o0]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\%03o8]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\x%02x]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\x%02x0]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\x%02xz]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match(br"[\u]", b'u')) self.assertIsNotNone(re.match(br"[\U]", b'U')) self.assertRaises(re.error, re.match, br"[\911]", "") self.assertRaises(re.error, re.match, br"[\x1z]", "") def test_bug_113254(self): self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1) self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1) self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1)) def test_bug_527371(self): # bug described in 
patches 527371/672491 self.assertEqual(re.match(r'(a)?a','a').lastindex, None) self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1) self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a') self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a') self.assertEqual(re.match("((a))", "a").lastindex, 1) def test_bug_545855(self): # bug 545855 -- This pattern failed to cause a compile error as it # should, instead provoking a TypeError. self.assertRaises(re.error, re.compile, 'foo[a-') def test_bug_418626(self): # bugs 418626 at al. -- Testing Greg Chapman's addition of op code # SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of # pattern '*?' on a long string. self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001) self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0), 20003) self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001) # non-simple '*?' still used to hit the recursion limit, before the # non-recursive scheme was implemented. self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001) def test_bug_612074(self): pat="["+re.escape("\u2039")+"]" self.assertEqual(re.compile(pat) and 1, 1) def test_stack_overflow(self): # nasty cases that used to overflow the straightforward recursive # implementation of repeated groups. 
self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x') self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x') self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x') def test_unlimited_zero_width_repeat(self): # Issue #9669 self.assertIsNone(re.match(r'(?:a?)*y', 'z')) self.assertIsNone(re.match(r'(?:a?)+y', 'z')) self.assertIsNone(re.match(r'(?:a?){2,}y', 'z')) self.assertIsNone(re.match(r'(?:a?)*?y', 'z')) self.assertIsNone(re.match(r'(?:a?)+?y', 'z')) self.assertIsNone(re.match(r'(?:a?){2,}?y', 'z')) # def test_scanner(self): # def s_ident(scanner, token): return token # def s_operator(scanner, token): return "op%s" % token # def s_float(scanner, token): return float(token) # def s_int(scanner, token): return int(token) # # scanner = Scanner([ # (r"[a-zA-Z_]\w*", s_ident), # (r"\d+\.\d*", s_float), # (r"\d+", s_int), # (r"=|\+|-|\*|/", s_operator), # (r"\s+", None), # ]) # # self.assertNotEqual(scanner.scanner.scanner("").pattern, None) # # self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"), # (['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5, # 'op+', 'bar'], '')) def test_bug_448951(self): # bug 448951 (similar to 429357, but with single char match) # (Also test greedy matches.) 
for op in '','?','*': self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(), (None, None)) self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(), ('a:', 'a')) def test_bug_725106(self): # capturing groups in alternatives in repeats self.assertEqual(re.match('^((a)|b)*', 'abc').groups(), ('b', 'a')) self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(), ('c', 'b')) self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(), ('b', None)) self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(), ('b', None)) self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(), ('b', 'a')) self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(), ('c', 'b')) self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(), ('b', None)) self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(), ('b', None)) def test_bug_725149(self): # mark_stack_base restoring before restoring marks self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(), ('a', None)) self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(), ('a', None, None)) def test_bug_764548(self): # bug 764548, re.compile() barfs on str/unicode subclasses class my_unicode(str): pass pat = re.compile(my_unicode("abc")) self.assertEqual(pat.match("xyz"), None) def test_finditer(self): iter = re.finditer(r":+", "a:b::c:::d") self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) pat = re.compile(r":+") iter = pat.finditer("a:b::c:::d", 1, 10) self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) pat = re.compile(r":+") iter = pat.finditer("a:b::c:::d", pos=1, endpos=10) self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) pat = re.compile(r":+") iter = pat.finditer("a:b::c:::d", endpos=10, pos=1) self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) pat = re.compile(r":+") iter = pat.finditer("a:b::c:::d", pos=3, endpos=8) self.assertEqual([item.group(0) for item in iter], ["::", "::"]) def test_bug_926075(self): 
self.assertTrue(re.compile('bug_926075') is not re.compile(b'bug_926075')) def test_bug_931848(self): pattern = eval('"[\u002E\u3002\uFF0E\uFF61]"') self.assertEqual(re.compile(pattern).split("a.b.c"), ['a','b','c']) def test_bug_581080(self): iter = re.finditer(r"\s", "a b") self.assertEqual(next(iter).span(), (1,2)) self.assertRaises(StopIteration, next, iter) scanner = re.compile(r"\s").scanner("a b") self.assertEqual(scanner.search().span(), (1, 2)) self.assertEqual(scanner.search(), None) def test_bug_817234(self): iter = re.finditer(r".*", "asdf") self.assertEqual(next(iter).span(), (0, 4)) self.assertEqual(next(iter).span(), (4, 4)) self.assertRaises(StopIteration, next, iter) def test_bug_6561(self): # '\d' should match characters in Unicode category 'Nd' # (Number, Decimal Digit), but not those in 'Nl' (Number, # Letter) or 'No' (Number, Other). decimal_digits = [ '\u0037', # '\N{DIGIT SEVEN}', category 'Nd' '\u0e58', # '\N{THAI DIGIT SIX}', category 'Nd' '\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd' ] for x in decimal_digits: self.assertEqual(re.match('^\d$', x).group(0), x) not_decimal_digits = [ '\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl' '\u3039', # '\N{HANGZHOU NUMERAL TWENTY}', category 'Nl' '\u2082', # '\N{SUBSCRIPT TWO}', category 'No' '\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No' ] for x in not_decimal_digits: self.assertIsNone(re.match('^\d$', x)) def test_empty_array(self): # SF buf 1647541 import array for typecode in 'bBuhHiIlLfd': a = array.array(typecode) self.assertEqual(re.compile(b"bla").match(a), None) self.assertEqual(re.compile(b"").match(a).groups(), ()) def test_inline_flags(self): # Bug #1700 upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Bellow lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Bellow p = re.compile(upper_char, re.I | re.U) q = p.match(lower_char) self.assertNotEqual(q, None) p = re.compile(lower_char, re.I | re.U) q = p.match(upper_char) self.assertNotEqual(q, None) p 
= re.compile('(?i)' + upper_char, re.U) q = p.match(lower_char) self.assertNotEqual(q, None) p = re.compile('(?i)' + lower_char, re.U) q = p.match(upper_char) self.assertNotEqual(q, None) p = re.compile('(?iu)' + upper_char) q = p.match(lower_char) self.assertNotEqual(q, None) p = re.compile('(?iu)' + lower_char) q = p.match(upper_char) self.assertNotEqual(q, None) def test_dollar_matches_twice(self): "$ matches the end of string, and just before the terminating \n" pattern = re.compile('$') self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#') self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#') self.assertEqual(pattern.sub('#', '\n'), '#\n#') pattern = re.compile('$', re.MULTILINE) self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#' ) self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#') self.assertEqual(pattern.sub('#', '\n'), '#\n#') def test_bytes_str_mixing(self): # Mixing str and bytes is disallowed pat = re.compile('.') bpat = re.compile(b'.') self.assertRaises(TypeError, pat.match, b'b') self.assertRaises(TypeError, bpat.match, 'b') self.assertRaises(TypeError, pat.sub, b'b', 'c') self.assertRaises(TypeError, pat.sub, 'b', b'c') self.assertRaises(TypeError, pat.sub, b'b', b'c') self.assertRaises(TypeError, bpat.sub, b'b', 'c') self.assertRaises(TypeError, bpat.sub, 'b', b'c') self.assertRaises(TypeError, bpat.sub, 'b', 'c') def test_ascii_and_unicode_flag(self): # String patterns for flags in (0, re.UNICODE): pat = re.compile('\xc0', flags | re.IGNORECASE) self.assertNotEqual(pat.match('\xe0'), None) pat = re.compile('\w', flags) self.assertNotEqual(pat.match('\xe0'), None) pat = re.compile('\xc0', re.ASCII | re.IGNORECASE) self.assertEqual(pat.match('\xe0'), None) pat = re.compile('(?a)\xc0', re.IGNORECASE) self.assertEqual(pat.match('\xe0'), None) pat = re.compile('\w', re.ASCII) self.assertEqual(pat.match('\xe0'), None) pat = re.compile('(?a)\w') self.assertEqual(pat.match('\xe0'), None) # Bytes patterns for flags in (0, re.ASCII): 
pat = re.compile(b'\xc0', re.IGNORECASE) self.assertEqual(pat.match(b'\xe0'), None) pat = re.compile(b'\w') self.assertEqual(pat.match(b'\xe0'), None) # Incompatibilities self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE) self.assertRaises(ValueError, re.compile, b'(?u)\w') self.assertRaises(ValueError, re.compile, '\w', re.UNICODE | re.ASCII) self.assertRaises(ValueError, re.compile, '(?u)\w', re.ASCII) self.assertRaises(ValueError, re.compile, '(?a)\w', re.UNICODE) self.assertRaises(ValueError, re.compile, '(?au)\w') def test_bug_6509(self): # Replacement strings of both types must parse properly. # all strings pat = re.compile('a(\w)') self.assertEqual(pat.sub('b\\1', 'ac'), 'bc') pat = re.compile('a(.)') self.assertEqual(pat.sub('b\\1', 'a\u1234'), 'b\u1234') pat = re.compile('..') self.assertEqual(pat.sub(lambda m: 'str', 'a5'), 'str') # all bytes pat = re.compile(b'a(\w)') self.assertEqual(pat.sub(b'b\\1', b'ac'), b'bc') pat = re.compile(b'a(.)') self.assertEqual(pat.sub(b'b\\1', b'a\xCD'), b'b\xCD') pat = re.compile(b'..') self.assertEqual(pat.sub(lambda m: b'bytes', b'a5'), b'bytes') def test_dealloc(self): # issue 3299: check for segfault in debug build import _sre # the overflow limit is different on wide and narrow builds and it # depends on the definition of SRE_CODE (see sre.h). # 2**128 should be big enough to overflow on both. For smaller values # a RuntimeError is raised instead of OverflowError. 
long_overflow = 2**128 self.assertRaises(TypeError, re.finditer, "a", {}) self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow]) self.assertRaises(TypeError, _sre.compile, {}, 0, []) def test_search_dot_unicode(self): self.assertIsNotNone(re.search("123.*-", '123abc-')) self.assertIsNotNone(re.search("123.*-", '123\xe9-')) self.assertIsNotNone(re.search("123.*-", '123\u20ac-')) self.assertIsNotNone(re.search("123.*-", '123\U0010ffff-')) self.assertIsNotNone(re.search("123.*-", '123\xe9\u20ac\U0010ffff-')) def test_compile(self): # Test return value when given string and pattern as parameter pattern = re.compile('random pattern') self.assertIsInstance(pattern, re._pattern_type) same_pattern = re.compile(pattern) self.assertIsInstance(same_pattern, re._pattern_type) self.assertIs(same_pattern, pattern) # Test behaviour when not given a string or pattern as parameter self.assertRaises(TypeError, re.compile, 0) def test_bug_13899(self): # Issue #13899: re pattern r"[\A]" should work like "A" but matches # nothing. Ditto B and Z. self.assertEqual(re.findall(r'[\A\B\b\C\Z]', 'AB\bCZ'), ['A', 'B', '\b', 'C', 'Z']) # FIXME: brython: implement test.support # @bigmemtest(size=_2G, memuse=1) # def test_large_search(self, size): # # Issue #10182: indices were 32-bit-truncated. # s = 'a' * size # m = re.search('$', s) # self.assertIsNotNone(m) # self.assertEqual(m.start(), size) # self.assertEqual(m.end(), size) # FIXME: brython: implement test.support # The huge memuse is because of re.sub() using a list and a join() # to create the replacement result. # @bigmemtest(size=_2G, memuse=16 + 2) # def test_large_subn(self, size): # # Issue #10182: indices were 32-bit-truncated. # s = 'a' * size # r, n = re.subn('', '', s) # self.assertEqual(r, s) # self.assertEqual(n, size + 1) def test_bug_16688(self): # Issue 16688: Backreferences make case-insensitive regex fail on # non-ASCII strings. 
self.assertEqual(re.findall(r"(?i)(a)\1", "aa \u0100"), ['a']) self.assertEqual(re.match(r"(?s).{1,3}", "\u0100\u0100").span(), (0, 2)) def test_repeat_minmax_overflow(self): # Issue #13169 string = "x" * 100000 self.assertEqual(re.match(r".{65535}", string).span(), (0, 65535)) self.assertEqual(re.match(r".{,65535}", string).span(), (0, 65535)) self.assertEqual(re.match(r".{65535,}?", string).span(), (0, 65535)) self.assertEqual(re.match(r".{65536}", string).span(), (0, 65536)) self.assertEqual(re.match(r".{,65536}", string).span(), (0, 65536)) self.assertEqual(re.match(r".{65536,}?", string).span(), (0, 65536)) # 2**128 should be big enough to overflow both SRE_CODE and Py_ssize_t. self.assertRaises(OverflowError, re.compile, r".{%d}" % 2**128) self.assertRaises(OverflowError, re.compile, r".{,%d}" % 2**128) self.assertRaises(OverflowError, re.compile, r".{%d,}?" % 2**128) self.assertRaises(OverflowError, re.compile, r".{%d,%d}" % (2**129, 2**128)) # FIXME: brython: implement test.support # @cpython_only # def test_repeat_minmax_overflow_maxrepeat(self): # try: # from _sre import MAXREPEAT # except ImportError: # self.skipTest('requires _sre.MAXREPEAT constant') # string = "x" * 100000 # self.assertIsNone(re.match(r".{%d}" % (MAXREPEAT - 1), string)) # self.assertEqual(re.match(r".{,%d}" % (MAXREPEAT - 1), string).span(), # (0, 100000)) # self.assertIsNone(re.match(r".{%d,}?" % (MAXREPEAT - 1), string)) # self.assertRaises(OverflowError, re.compile, r".{%d}" % MAXREPEAT) # self.assertRaises(OverflowError, re.compile, r".{,%d}" % MAXREPEAT) # self.assertRaises(OverflowError, re.compile, r".{%d,}?" 
% MAXREPEAT) def test_backref_group_name_in_exception(self): # Issue 17341: Poor error message when compiling invalid regex with self.assertRaisesRegex(sre_constants.error, '<foo>'): re.compile('(?P=<foo>)') def test_group_name_in_exception(self): # Issue 17341: Poor error message when compiling invalid regex with self.assertRaisesRegex(sre_constants.error, '\?foo'): re.compile('(?P<?foo>)') def run_re_tests(): from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR if verbose: print('Running re_tests test suite') else: # To save time, only run the first and last 10 tests #tests = tests[:10] + tests[-10:] pass for t in tests: sys.stdout.flush() pattern = s = outcome = repl = expected = None if len(t) == 5: pattern, s, outcome, repl, expected = t elif len(t) == 3: pattern, s, outcome = t else: raise ValueError('Test tuples should have 3 or 5 fields', t) try: obj = re.compile(pattern) except re.error: if outcome == SYNTAX_ERROR: pass # Expected a syntax error else: print('=== Syntax error:', t) except KeyboardInterrupt: raise KeyboardInterrupt except: print('*** Unexpected error ***', t) if verbose: traceback.print_exc(file=sys.stdout) else: try: result = obj.search(s) except re.error as msg: print('=== Unexpected exception', t, repr(msg)) if outcome == SYNTAX_ERROR: # This should have been a syntax error; forget it. pass elif outcome == FAIL: if result is None: pass # No match, as expected else: print('=== Succeeded incorrectly', t) elif outcome == SUCCEED: if result is not None: # Matched, as expected, so now we compute the # result string and compare it to our expected result. 
start, end = result.span(0) vardict={'found': result.group(0), 'groups': result.group(), 'flags': result.re.flags} for i in range(1, 100): try: gi = result.group(i) # Special hack because else the string concat fails: if gi is None: gi = "None" except IndexError: gi = "Error" vardict['g%d' % i] = gi for i in result.re.groupindex.keys(): try: gi = result.group(i) if gi is None: gi = "None" except IndexError: gi = "Error" vardict[i] = gi repl = eval(repl, vardict) if repl != expected: print('=== grouping error', t, end=' ') print(repr(repl) + ' should be ' + repr(expected)) else: print('=== Failed incorrectly', t) # Try the match with both pattern and string converted to # bytes, and check that it still succeeds. try: bpat = bytes(pattern, "ascii") bs = bytes(s, "ascii") except UnicodeEncodeError: # skip non-ascii tests pass else: try: bpat = re.compile(bpat) except Exception: print('=== Fails on bytes pattern compile', t) if verbose: traceback.print_exc(file=sys.stdout) else: bytes_result = bpat.search(bs) if bytes_result is None: print('=== Fails on bytes pattern match', t) # Try the match with the search area limited to the extent # of the match and see if it still succeeds. \B will # break (because it won't match at the end or start of a # string), so we'll ignore patterns that feature it. if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \ and result is not None: obj = re.compile(pattern) result = obj.search(s, result.start(0), result.end(0) + 1) if result is None: print('=== Failed on range-limited match', t) # Try the match with IGNORECASE enabled, and check that it # still succeeds. obj = re.compile(pattern, re.IGNORECASE) result = obj.search(s) if result is None: print('=== Fails on case-insensitive match', t) # Try the match with LOCALE enabled, and check that it # still succeeds. 
if '(?u)' not in pattern: obj = re.compile(pattern, re.LOCALE) result = obj.search(s) if result is None: print('=== Fails on locale-sensitive match', t) # Try the match with UNICODE locale enabled, and check # that it still succeeds. obj = re.compile(pattern, re.UNICODE) result = obj.search(s) if result is None: print('=== Fails on unicode-sensitive match', t) def test_main(): # FIXME: brython: implement test.support # run_unittest(ReTests) run_re_tests() if __name__ == "__main__": test_main()
cyx1231st/nova
refs/heads/eventually-consistent-host-state-mitaka
nova/api/openstack/compute/schemas/create_backup.py
21
# Copyright 2014 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types create_backup = { 'type': 'object', 'properties': { 'createBackup': { 'type': 'object', 'properties': { 'name': parameter_types.name, 'backup_type': { 'type': 'string', }, 'rotation': parameter_types.non_negative_integer, 'metadata': { 'type': 'object', } }, 'required': ['name', 'backup_type', 'rotation'], 'additionalProperties': False, }, }, 'required': ['createBackup'], 'additionalProperties': False, } create_backup_v20 = copy.deepcopy(create_backup) create_backup_v20['properties'][ 'createBackup']['properties']['name'] = (parameter_types. name_with_leading_trailing_spaces)
Ideabin/Gist
refs/heads/master
gist.py
1
# -*- coding: utf-8 -*- import sublime import sublime_plugin import os import sys import json import functools import webbrowser import tempfile import traceback import threading import shutil PY3 = sys.version > '3' if PY3: from .request import * from .settings import * from .helpers import * from . import frontmatter else: from request import * from settings import * from helpers import * def plugin_loaded(): settings.loaded_settings = sublime.load_settings('Gist.sublime-settings') settings.get = settings.loaded_settings.get settings.set = settings.loaded_settings.set def catch_errors(fn): @functools.wraps(fn) def _fn(*args, **kwargs): try: return fn(*args, **kwargs) except MissingCredentialsException: sublime.error_message("Gist: GitHub token isn't provided in Gist.sublime-settings file. All other authorization methods is deprecated.") user_settings_path = os.path.join(sublime.packages_path(), 'User', 'Gist.sublime-settings') if not os.path.exists(user_settings_path): default_settings_path = os.path.join(sublime.packages_path(), 'Gist', 'Gist.sublime-settings') shutil.copy(default_settings_path, user_settings_path) sublime.active_window().open_file(user_settings_path) except: traceback.print_exc() sublime.error_message("Gist: unknown error (please, report a bug!)") return _fn def create_gist(public, description, files): for filename, text in list(files.items()): if not text: sublime.error_message("Gist: Unable to create a Gist with empty content") return file_data = dict((filename, {'content': frontmatter.loads(text).content}) for filename, text in list(files.items())) data = json.dumps({'description': description, 'public': public, 'files': file_data}) gist = api_request(settings.GISTS_URL, data) return gist def update_gist(gist_url, file_changes, auth_token=None, https_proxy=None, new_description=None): request = {'files': file_changes} # print('Request:', request) if new_description is not None: request['description'] = new_description data = 
json.dumps(request) # print('Data:', data) result = api_request(gist_url, data, token=auth_token, https_proxy=https_proxy, method="PATCH") if PY3: sublime.status_message("Gist updated") # can only be called by main thread in sublime text 2 # print('Result:', result) return result def open_gist(gist_url): gist = api_request(gist_url) # print('Gist:', gist) files = sorted(gist['files'].keys()) for gist_filename in files: allowedTypes = ['text', 'application'] type = gist['files'][gist_filename]['type'].split('/')[0] if type not in allowedTypes: continue view = sublime.active_window().new_file() gistify_view(view, gist, gist_filename) if PY3: view.run_command('append', { 'characters': gist['files'][gist_filename]['content'], }) else: edit = view.begin_edit() view.insert(edit, 0, gist['files'][gist_filename]['content']) view.end_edit(edit) if settings.get('supress_save_dialog'): view.set_scratch(True) if settings.get('save-update-hook'): view.retarget(tempfile.gettempdir() + '/' + gist_filename) # Save over it (to stop us reloading from that file in case it exists) # But don't actually do a gist update view.settings().set('do-update', False) view.run_command('save') set_syntax(view, gist['files'][gist_filename]) def insert_gist(gist_url): gist = api_request(gist_url) files = sorted(gist['files'].keys()) for gist_filename in files: view = sublime.active_window().active_view() is_auto_indent = view.settings().get('auto_indent') if PY3: if is_auto_indent == True: view.settings().set('auto_indent',False) view.run_command('insert', { 'characters': gist['files'][gist_filename]['content'], }) view.settings().set('auto_indent',True) else: view.run_command('insert', { 'characters': gist['files'][gist_filename]['content'], }) else: edit = view.begin_edit() for region in view.sel(): view.replace(edit, region, gist['files'][gist_filename]['content']) view.end_edit(edit) def insert_gist_embed(gist_url): gist = api_request(gist_url) files = sorted(gist['files'].keys()) for 
gist_filename in files: view = sublime.active_window().active_view() template = '<script src="{0}"></script>'.format(gist['files'][gist_filename]['raw_url']) if PY3: view.run_command('insert', { 'characters': template, }) else: edit = view.begin_edit() for region in view.sel(): view.replace(edit, region, template) view.end_edit(edit) class GistCommand(sublime_plugin.TextCommand): public = True def mode(self): return "Public" if self.public else "Private" @catch_errors def run(self, edit): regions = [region for region in self.view.sel() if not region.empty()] if len(regions) == 0: regions = [sublime.Region(0, self.view.size())] gistify = True else: gistify = False region_data = [self.view.substr(region) for region in regions] window = self.view.window() def on_gist_description(description): filename = os.path.basename(self.view.file_name() if self.view.file_name() else '') @catch_errors def on_gist_filename(filename): # We need to figure out the filenames. Right now, the following logic is used: # If there's only 1 selection, just pass whatever the user typed to Github. It'll rename empty files for us. # If there are multiple selections and user entered a filename, rename the files from foo.js to # foo (1).js, foo (2).js, etc. # If there are multiple selections and user didn't enter anything, post the files as # $SyntaxName 1, $SyntaxName 2, etc. 
if len(region_data) == 1: gist_data = {filename: region_data[0]} else: if filename: (namepart, extpart) = os.path.splitext(filename) make_filename = lambda num: "%s (%d)%s" % (namepart, num, extpart) else: syntax_name, _ = os.path.splitext(os.path.basename(self.view.settings().get('syntax'))) make_filename = lambda num: "%s %d" % (syntax_name, num) gist_data = dict((make_filename(idx), data) for idx, data in enumerate(region_data, 1)) gist = create_gist(self.public, description, gist_data) if not gist: return gist_html_url = gist['html_url'] sublime.set_clipboard(gist_html_url) sublime.status_message("%s Gist: %s" % (self.mode(), gist_html_url)) # Todo: PY3 check required? self.view.run_command('gist_set_id', {'gistid': gist['id']}) if gistify: gistify_view(self.view, gist, list(gist['files'].keys())[0]) # else: # open_gist(gist['url']) window.show_input_panel('Gist File Name: (optional):', filename, on_gist_filename, None, None) desc = frontmatter.loads(region_data[0]).get('desc') if desc: on_gist_description(desc) else: window.show_input_panel("Gist Description (optional):", '', on_gist_description, None, None) class GistViewCommand(object): """A base class for commands operating on a gistified view""" def is_enabled(self): return self.gist_url() is not None def gist_url(self): return self.view.settings().get("gist_url") def gist_html_url(self): return self.view.settings().get("gist_html_url") def gist_filename(self): return self.view.settings().get("gist_filename") def gist_description(self): return self.view.settings().get("gist_description") class GistSetId(sublime_plugin.TextCommand): def run(self, edit, gistid): region = self.view.find(r'(?s)^\s*---(.*)---\s*$', 0) content = self.view.substr(region).replace('id:', 'id: ' + gistid) self.view.replace(edit, region, content) class GistCopyUrl(GistViewCommand, sublime_plugin.TextCommand): def run(self, edit): sublime.set_clipboard(self.gist_html_url()) class GistOpenBrowser(GistViewCommand, 
sublime_plugin.TextCommand): def run(self, edit): webbrowser.open(self.gist_html_url()) class GistRenameFileCommand(GistViewCommand, sublime_plugin.TextCommand): def run(self, edit): old_filename = self.gist_filename() @catch_errors def on_filename(filename): if filename and filename != old_filename: text = self.view.substr(sublime.Region(0, self.view.size())) file_changes = {old_filename: {'filename': filename, 'content': text}} new_gist = update_gist(self.gist_url(), file_changes) gistify_view(self.view, new_gist, filename) sublime.status_message('Gist file renamed') self.view.window().show_input_panel('New File Name:', old_filename, on_filename, None, None) class GistChangeDescriptionCommand(GistViewCommand, sublime_plugin.TextCommand): def run(self, edit): @catch_errors def on_gist_description(description): if description and description != self.gist_description(): gist_url = self.gist_url() new_gist = update_gist(gist_url, {}, description) for window in sublime.windows(): for view in window.views(): if view.settings().get('gist_url') == gist_url: gistify_view(view, new_gist, view.settings().get('gist_filename')) sublime.status_message('Gist description changed') self.view.window().show_input_panel('New Description:', self.gist_description() or '', on_gist_description, None, None) class GistUpdateFileCommand(GistViewCommand, sublime_plugin.TextCommand): @catch_errors def run(self, edit): text = self.view.substr(sublime.Region(0, self.view.size())) changes = {self.gist_filename(): {'content': text}} update_gist(self.gist_url(), changes) sublime.status_message("Gist updated") class GistDeleteFileCommand(GistViewCommand, sublime_plugin.TextCommand): @catch_errors def run(self, edit): changes = {self.gist_filename(): None} update_gist(self.gist_url(), changes) ungistify_view(self.view) sublime.status_message("Gist file deleted") class GistDeleteCommand(GistViewCommand, sublime_plugin.TextCommand): @catch_errors def run(self, edit): gist_url = self.gist_url() 
api_request(gist_url, method='DELETE') for window in sublime.windows(): for view in window.views(): if view.settings().get("gist_url") == gist_url: ungistify_view(view) sublime.status_message("Gist deleted") class GistPrivateCommand(GistCommand): public = False class GistListCommandBase(object): gists = orgs = users = [] @catch_errors def run(self, *args): filtered = gists_filter(api_request(settings.GISTS_URL)) filtered_stars = gists_filter(api_request(settings.STARRED_GISTS_URL)) self.gists = filtered[0] + filtered_stars[0] gist_names = filtered[1] + list(map(lambda x: [u"★ " + x[0]], filtered_stars[1])) if settings.get('include_users'): self.users = list(settings.get('include_users')) gist_names = [["> " + user] for user in self.users] + gist_names if settings.get('include_orgs'): if settings.get('include_orgs') == True: self.orgs = [org.get("login") for org in api_request(settings.ORGS_URL)] else: self.orgs = settings.get('include_orgs') gist_names = [["> " + org] for org in self.orgs] + gist_names # print(gist_names) def on_gist_num(num): offOrgs = len(self.orgs) offUsers = offOrgs + len(self.users) if num < 0: pass elif num < offOrgs: self.gists = [] members = [member.get("login") for member in api_request(settings.ORG_MEMBERS_URL % self.orgs[num])] for member in members: self.gists += api_request(settings.USER_GISTS_URL % member) filtered = gists_filter(self.gists) self.gists = filtered[0] gist_names = filtered[1] # print(gist_names) self.orgs = self.users = [] self.get_window().show_quick_panel(gist_names, on_gist_num) elif num < offUsers: filtered = gists_filter(api_request(settings.USER_GISTS_URL % self.users[num - offOrgs])) self.gists = filtered[0] gist_names = filtered[1] # print(gist_names) self.orgs = self.users = [] self.get_window().show_quick_panel(gist_names, on_gist_num) else: self.handle_gist(self.gists[num - offUsers]) self.get_window().show_quick_panel(gist_names, on_gist_num) class GistListCommand(GistListCommandBase, 
sublime_plugin.WindowCommand): @catch_errors def handle_gist(self, gist): open_gist(gist['url']) def get_window(self): return self.window class GistListener(GistViewCommand, sublime_plugin.EventListener): @catch_errors def on_pre_save(self, view): if view.settings().get('gist_filename') != None: if settings.get('save-update-hook'): # we ignore the first update, it happens upon loading a gist if not view.settings().get('do-update'): view.settings().set('do-update', True) return text = view.substr(sublime.Region(0, view.size())) changes = {view.settings().get('gist_filename'): {'content': text}} gist_url = view.settings().get('gist_url') # Start update_gist in a thread so we don't stall the save threading.Thread(target=update_gist, args=(gist_url, changes, settings.get('token'), settings.get('https_proxy'))).start() class InsertGistListCommand(GistListCommandBase, sublime_plugin.WindowCommand): @catch_errors def handle_gist(self, gist): insert_gist(gist['url']) def get_window(self): return self.window class InsertGistEmbedListCommand(GistListCommandBase, sublime_plugin.WindowCommand): @catch_errors def handle_gist(self, gist): insert_gist_embed(gist['url']) def get_window(self): return self.window class GistAddFileCommand(GistListCommandBase, sublime_plugin.TextCommand): def is_enabled(self): return self.view.settings().get('gist_url') is None def handle_gist(self, gist): @catch_errors def on_filename(filename): if filename: text = self.view.substr(sublime.Region(0, self.view.size())) changes = {filename: {'content': text}} new_gist = update_gist(gist['url'], changes) gistify_view(self.view, new_gist, filename) sublime.status_message("File added to Gist") filename = os.path.basename(self.view.file_name() if self.view.file_name() else '') self.view.window().show_input_panel('File Name:', filename, on_filename, None, None) def get_window(self): return self.view.window()
youssef-emad/shogun
refs/heads/develop
applications/asp/signal_detectors.py
26
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Written (W) 2006-2009 Soeren Sonnenburg # Written (W) 2007 Gunnar Raetsch # Copyright (C) 2006-2009 Fraunhofer Institute FIRST and Max-Planck-Society # import sys import numpy import seqdict from shogun.Classifier import LibSVM from shogun.Features import StringCharFeatures,DNA from shogun.Kernel import WeightedDegreeStringKernel from shogun.Library import DynamicIntArray class svm_splice_model(object): def __init__(self, order, traindat, alphas, b, (window_left,offset,window_right), consensus): f=StringCharFeatures(traindat, DNA) wd_kernel = WeightedDegreeStringKernel(f,f, int(order)) wd_kernel.io.set_target_to_stdout() self.svm=LibSVM() self.svm.set_kernel(wd_kernel) self.svm.set_alphas(alphas) self.svm.set_support_vectors(numpy.arange(len(alphas), dtype=numpy.int32)) self.svm.set_bias(b) self.svm.io.set_target_to_stdout() self.svm.parallel.set_num_threads(self.svm.parallel.get_num_cpus()) self.svm.set_linadd_enabled(True) self.svm.set_batch_computation_enabled(True) self.window_left=int(window_left) self.window_right=int(window_right) self.consensus=consensus self.wd_kernel=wd_kernel self.traindat=f self.offset=offset def get_positions(self, sequence): positions=list() for cons in self.consensus: l=sequence.find(cons) while l>-1: if l<len(sequence)-self.window_right and l>self.window_left: positions.append(l+self.offset) l=sequence.find(cons, l+1) positions.sort() return positions def get_predictions_from_seqdict(self, seqdic, site): """ we need to generate a huge test features object containing all locations found in each seqdict-sequence and each location (this is necessary to efficiently (==fast,low memory) compute the splice outputs """ seqlen=self.window_right+self.window_left+2 for s in seqdic: 
position_list=DynamicIntArray() sequence=s.seq positions=s.preds[site].positions for j in xrange(len(positions)): i=positions[j] - self.offset -self.window_left position_list.append_element(i) t=StringCharFeatures([sequence], DNA) t.obtain_by_position_list(seqlen, position_list) self.wd_kernel.init(self.traindat, t) self.wd_kernel.io.enable_progress() l=self.svm.apply().get_values() self.wd_kernel.cleanup() sys.stdout.write("\n...done...\n") num=len(s.preds[site].positions) scores= num * [0] for j in xrange(num): scores[j]=l[j] s.preds[site].set_scores(scores) def get_positions_from_seqdict(self, seqdic, site): for d in seqdic: positions=list() sequence=d.seq for cons in self.consensus: l=sequence.find(cons) while l>-1: if l<len(sequence)-self.window_right-2 and l>self.window_left: positions.append(l+self.offset) l=sequence.find(cons, l+1) positions.sort() d.preds[site].set_positions(positions) def get_predictions(self, sequence, positions): seqlen=self.window_right+self.window_left+2 num=len(positions) position_list=DynamicIntArray() for j in xrange(num): i=positions[j] - self.offset - self.window_left position_list.append_element(i) t=StringCharFeatures([sequence], DNA) t.obtain_by_position_list(seqlen, position_list) self.wd_kernel.init(self.traindat, t) del t self.wd_kernel.io.enable_progress() l=self.svm.apply().get_values() self.wd_kernel.cleanup() sys.stdout.write("\n...done...\n") return l class signal_detectors(object): def __init__(self, model): don_consensus=['GC','GT'] self.acceptor=svm_splice_model(model.acc_splice_order, model.acc_splice_svs, numpy.array(model.acc_splice_alphas).flatten(), model.acc_splice_b, (model.acc_splice_window_left-2, 2, model.acc_splice_window_right+2), ['AG']) self.donor=svm_splice_model(model.don_splice_order, model.don_splice_svs, numpy.array(model.don_splice_alphas).flatten(), model.don_splice_b, (model.don_splice_window_left+1, 0, model.don_splice_window_right-1), don_consensus) def set_sequence(self, seq): 
self.acceptor.set_sequence(seq) self.donor.set_sequence(seq) def predict_acceptor_sites(self, seq): pos=self.acceptor.get_positions(seq) sys.stdout.write("computing svm output for acceptor positions\n") pred=self.acceptor.get_predictions(seq, pos) return (pos,pred) def predict_donor_sites(self,seq): pos=self.donor.get_positions(seq) sys.stdout.write("computing svm output for donor positions\n") pred=self.donor.get_predictions(seq, pos) return (pos,pred) def predict_acceptor_sites_from_seqdict(self, seqs): self.acceptor.get_positions_from_seqdict(seqs, 'acceptor') sys.stdout.write("computing svm output for acceptor positions\n") self.acceptor.get_predictions_from_seqdict(seqs, 'acceptor') def predict_donor_sites_from_seqdict(self, seqs): self.donor.get_positions_from_seqdict(seqs, 'donor') sys.stdout.write("computing svm output for donor positions\n") self.donor.get_predictions_from_seqdict(seqs, 'donor') def clear_acceptor(): del self.acceptor self.acceptor=None def clear_donor(): del self.acceptor self.acceptor=None
TNT-Samuel/Coding-Projects
refs/heads/master
DNS Server/Source - Copy/Lib/site-packages/toolz/tests/test_compatibility.py
28
from toolz.compatibility import map, filter, iteritems, iterkeys, itervalues def test_map_filter_are_lazy(): def bad(x): raise Exception() map(bad, [1, 2, 3]) filter(bad, [1, 2, 3]) def test_dict_iteration(): d = {'a': 1, 'b': 2, 'c': 3} assert not isinstance(iteritems(d), list) assert not isinstance(iterkeys(d), list) assert not isinstance(itervalues(d), list) assert set(iteritems(d)) == set(d.items()) assert set(iterkeys(d)) == set(d.keys()) assert set(itervalues(d)) == set(d.values())
FelixZYY/gyp
refs/heads/master
pylib/gyp/win_tool.py
33
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utility functions for Windows builds. These functions are executed via gyp-win-tool when using the ninja generator. """ import os import re import shutil import subprocess import stat import string import sys BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # A regex matching an argument corresponding to the output filename passed to # link.exe. _LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE) def main(args): executor = WinTool() exit_code = executor.Dispatch(args) if exit_code is not None: sys.exit(exit_code) class WinTool(object): """This class performs all the Windows tooling steps. The methods can either be executed directly, or dispatched from an argument list.""" def _UseSeparateMspdbsrv(self, env, args): """Allows to use a unique instance of mspdbsrv.exe per linker instead of a shared one.""" if len(args) < 1: raise Exception("Not enough arguments") if args[0] != 'link.exe': return # Use the output filename passed to the linker to generate an endpoint name # for mspdbsrv.exe. endpoint_name = None for arg in args: m = _LINK_EXE_OUT_ARG.match(arg) if m: endpoint_name = re.sub(r'\W+', '', '%s_%d' % (m.group('out'), os.getpid())) break if endpoint_name is None: return # Adds the appropriate environment variable. This will be read by link.exe # to know which instance of mspdbsrv.exe it should connect to (if it's # not set then the default endpoint is used). 
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name def Dispatch(self, args): """Dispatches a string command to a method.""" if len(args) < 1: raise Exception("Not enough arguments") method = "Exec%s" % self._CommandifyName(args[0]) return getattr(self, method)(*args[1:]) def _CommandifyName(self, name_string): """Transforms a tool name like recursive-mirror to RecursiveMirror.""" return name_string.title().replace('-', '') def _GetEnv(self, arch): """Gets the saved environment from a file for a given architecture.""" # The environment is saved as an "environment block" (see CreateProcess # and msvs_emulation for details). We convert to a dict here. # Drop last 2 NULs, one for list terminator, one for trailing vs. separator. pairs = open(arch).read()[:-2].split('\0') kvs = [item.split('=', 1) for item in pairs] return dict(kvs) def ExecStamp(self, path): """Simple stamp command.""" open(path, 'w').close() def ExecRecursiveMirror(self, source, dest): """Emulation of rm -rf out && cp -af in out.""" if os.path.exists(dest): if os.path.isdir(dest): def _on_error(fn, path, excinfo): # The operation failed, possibly because the file is set to # read-only. If that's why, make it writable and try the op again. if not os.access(path, os.W_OK): os.chmod(path, stat.S_IWRITE) fn(path) shutil.rmtree(dest, onerror=_on_error) else: if not os.access(dest, os.W_OK): # Attempt to make the file writable before deleting it. os.chmod(dest, stat.S_IWRITE) os.unlink(dest) if os.path.isdir(source): shutil.copytree(source, dest) else: shutil.copy2(source, dest) def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args): """Filter diagnostic output from link that looks like: ' Creating library ui.dll.lib and object ui.dll.exp' This happens when there are exports from the dll or exe. 
""" env = self._GetEnv(arch) if use_separate_mspdbsrv == 'True': self._UseSeparateMspdbsrv(env, args) link = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = link.communicate() for line in out.splitlines(): if not line.startswith(' Creating library '): print line return link.returncode def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname, mt, rc, intermediate_manifest, *manifests): """A wrapper for handling creating a manifest resource and then executing a link command.""" # The 'normal' way to do manifests is to have link generate a manifest # based on gathering dependencies from the object files, then merge that # manifest with other manifests supplied as sources, convert the merged # manifest to a resource, and then *relink*, including the compiled # version of the manifest resource. This breaks incremental linking, and # is generally overly complicated. Instead, we merge all the manifests # provided (along with one that includes what would normally be in the # linker-generated one, see msvs_emulation.py), and include that into the # first and only link. We still tell link to generate a manifest, but we # only use that to assert that our simpler process did not miss anything. 
variables = { 'python': sys.executable, 'arch': arch, 'out': out, 'ldcmd': ldcmd, 'resname': resname, 'mt': mt, 'rc': rc, 'intermediate_manifest': intermediate_manifest, 'manifests': ' '.join(manifests), } add_to_ld = '' if manifests: subprocess.check_call( '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo ' '-manifest %(manifests)s -out:%(out)s.manifest' % variables) if embed_manifest == 'True': subprocess.check_call( '%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest' ' %(out)s.manifest.rc %(resname)s' % variables) subprocess.check_call( '%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s ' '%(out)s.manifest.rc' % variables) add_to_ld = ' %(out)s.manifest.res' % variables subprocess.check_call(ldcmd + add_to_ld) # Run mt.exe on the theoretically complete manifest we generated, merging # it with the one the linker generated to confirm that the linker # generated one does not add anything. This is strictly unnecessary for # correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not # used in a #pragma comment. if manifests: # Merge the intermediate one with ours to .assert.manifest, then check # that .assert.manifest is identical to ours. subprocess.check_call( '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo ' '-manifest %(out)s.manifest %(intermediate_manifest)s ' '-out:%(out)s.assert.manifest' % variables) assert_manifest = '%(out)s.assert.manifest' % variables our_manifest = '%(out)s.manifest' % variables # Load and normalize the manifests. mt.exe sometimes removes whitespace, # and sometimes doesn't unfortunately. 
with open(our_manifest, 'rb') as our_f: with open(assert_manifest, 'rb') as assert_f: our_data = our_f.read().translate(None, string.whitespace) assert_data = assert_f.read().translate(None, string.whitespace) if our_data != assert_data: os.unlink(out) def dump(filename): sys.stderr.write('%s\n-----\n' % filename) with open(filename, 'rb') as f: sys.stderr.write(f.read() + '\n-----\n') dump(intermediate_manifest) dump(our_manifest) dump(assert_manifest) sys.stderr.write( 'Linker generated manifest "%s" added to final manifest "%s" ' '(result in "%s"). ' 'Were /MANIFEST switches used in #pragma statements? ' % ( intermediate_manifest, our_manifest, assert_manifest)) return 1 def ExecManifestWrapper(self, arch, *args): """Run manifest tool with environment set. Strip out undesirable warning (some XML blocks are recognized by the OS loader, but not the manifest tool).""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if line and 'manifest authoring warning 81010002' not in line: print line return popen.returncode def ExecManifestToRc(self, arch, *args): """Creates a resource file pointing a SxS assembly manifest. |args| is tuple containing path to resource file, path to manifest file and resource name which can be "1" (for executables) or "2" (for DLLs).""" manifest_path, resource_path, resource_name = args with open(resource_path, 'wb') as output: output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % ( resource_name, os.path.abspath(manifest_path).replace('\\', '/'))) def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl, *flags): """Filter noisy filenames output from MIDL compile step that isn't quietable via command line flags. 
""" args = ['midl', '/nologo'] + list(flags) + [ '/out', outdir, '/tlb', tlb, '/h', h, '/dlldata', dlldata, '/iid', iid, '/proxy', proxy, idl] env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() # Filter junk out of stdout, and write filtered versions. Output we want # to filter is pairs of lines that look like this: # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl # objidl.idl lines = out.splitlines() prefixes = ('Processing ', '64 bit Processing ') processing = set(os.path.basename(x) for x in lines if x.startswith(prefixes)) for line in lines: if not line.startswith(prefixes) and line not in processing: print line return popen.returncode def ExecAsmWrapper(self, arch, *args): """Filter logo banner from invocations of asm.exe.""" env = self._GetEnv(arch) # MSVS doesn't assemble x64 asm files. if arch == 'environment.x64': return 0 popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if (not line.startswith('Copyright (C) Microsoft Corporation') and not line.startswith('Microsoft (R) Macro Assembler') and not line.startswith(' Assembling: ') and line): print line return popen.returncode def ExecRcWrapper(self, arch, *args): """Filter logo banner from invocations of rc.exe. Older versions of RC don't support the /nologo flag.""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and not line.startswith('Copyright (C) Microsoft Corporation') and line): print line return popen.returncode def ExecActionWrapper(self, arch, rspfile, *dir): """Runs an action command line from a response file using the environment for |arch|. 
If |dir| is supplied, use that as the working directory.""" env = self._GetEnv(arch) # TODO(scottmg): This is a temporary hack to get some specific variables # through to actions that are set after gyp-time. http://crbug.com/333738. for k, v in os.environ.iteritems(): if k not in env: env[k] = v args = open(rspfile).read() dir = dir[0] if dir else None return subprocess.call(args, shell=True, env=env, cwd=dir) def ExecClCompile(self, project_dir, selected_files): """Executed by msvs-ninja projects when the 'ClCompile' target is used to build selected C/C++ files.""" project_dir = os.path.relpath(project_dir, BASE_DIR) selected_files = selected_files.split(';') ninja_targets = [os.path.join(project_dir, filename) + '^^' for filename in selected_files] cmd = ['ninja.exe'] cmd.extend(ninja_targets) return subprocess.call(cmd, shell=True, cwd=BASE_DIR) if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
tchellomello/home-assistant
refs/heads/dev
homeassistant/components/ads/cover.py
7
"""Support for ADS covers.""" import logging import voluptuous as vol from homeassistant.components.cover import ( ATTR_POSITION, DEVICE_CLASSES_SCHEMA, PLATFORM_SCHEMA, SUPPORT_CLOSE, SUPPORT_OPEN, SUPPORT_SET_POSITION, SUPPORT_STOP, CoverEntity, ) from homeassistant.const import CONF_DEVICE_CLASS, CONF_NAME import homeassistant.helpers.config_validation as cv from . import ( CONF_ADS_VAR, CONF_ADS_VAR_POSITION, DATA_ADS, STATE_KEY_POSITION, STATE_KEY_STATE, AdsEntity, ) _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "ADS Cover" CONF_ADS_VAR_SET_POS = "adsvar_set_position" CONF_ADS_VAR_OPEN = "adsvar_open" CONF_ADS_VAR_CLOSE = "adsvar_close" CONF_ADS_VAR_STOP = "adsvar_stop" PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Optional(CONF_ADS_VAR): cv.string, vol.Optional(CONF_ADS_VAR_POSITION): cv.string, vol.Optional(CONF_ADS_VAR_SET_POS): cv.string, vol.Optional(CONF_ADS_VAR_CLOSE): cv.string, vol.Optional(CONF_ADS_VAR_OPEN): cv.string, vol.Optional(CONF_ADS_VAR_STOP): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the cover platform for ADS.""" ads_hub = hass.data[DATA_ADS] ads_var_is_closed = config.get(CONF_ADS_VAR) ads_var_position = config.get(CONF_ADS_VAR_POSITION) ads_var_pos_set = config.get(CONF_ADS_VAR_SET_POS) ads_var_open = config.get(CONF_ADS_VAR_OPEN) ads_var_close = config.get(CONF_ADS_VAR_CLOSE) ads_var_stop = config.get(CONF_ADS_VAR_STOP) name = config[CONF_NAME] device_class = config.get(CONF_DEVICE_CLASS) add_entities( [ AdsCover( ads_hub, ads_var_is_closed, ads_var_position, ads_var_pos_set, ads_var_open, ads_var_close, ads_var_stop, name, device_class, ) ] ) class AdsCover(AdsEntity, CoverEntity): """Representation of ADS cover.""" def __init__( self, ads_hub, ads_var_is_closed, ads_var_position, ads_var_pos_set, ads_var_open, ads_var_close, ads_var_stop, name, device_class, ): 
"""Initialize AdsCover entity.""" super().__init__(ads_hub, name, ads_var_is_closed) if self._ads_var is None: if ads_var_position is not None: self._unique_id = ads_var_position elif ads_var_pos_set is not None: self._unique_id = ads_var_pos_set elif ads_var_open is not None: self._unique_id = ads_var_open self._state_dict[STATE_KEY_POSITION] = None self._ads_var_position = ads_var_position self._ads_var_pos_set = ads_var_pos_set self._ads_var_open = ads_var_open self._ads_var_close = ads_var_close self._ads_var_stop = ads_var_stop self._device_class = device_class async def async_added_to_hass(self): """Register device notification.""" if self._ads_var is not None: await self.async_initialize_device( self._ads_var, self._ads_hub.PLCTYPE_BOOL ) if self._ads_var_position is not None: await self.async_initialize_device( self._ads_var_position, self._ads_hub.PLCTYPE_BYTE, STATE_KEY_POSITION ) @property def device_class(self): """Return the class of this cover.""" return self._device_class @property def is_closed(self): """Return if the cover is closed.""" if self._ads_var is not None: return self._state_dict[STATE_KEY_STATE] if self._ads_var_position is not None: return self._state_dict[STATE_KEY_POSITION] == 0 return None @property def current_cover_position(self): """Return current position of cover.""" return self._state_dict[STATE_KEY_POSITION] @property def supported_features(self): """Flag supported features.""" supported_features = SUPPORT_OPEN | SUPPORT_CLOSE if self._ads_var_stop is not None: supported_features |= SUPPORT_STOP if self._ads_var_pos_set is not None: supported_features |= SUPPORT_SET_POSITION return supported_features def stop_cover(self, **kwargs): """Fire the stop action.""" if self._ads_var_stop: self._ads_hub.write_by_name( self._ads_var_stop, True, self._ads_hub.PLCTYPE_BOOL ) def set_cover_position(self, **kwargs): """Set cover position.""" position = kwargs[ATTR_POSITION] if self._ads_var_pos_set is not None: self._ads_hub.write_by_name( 
self._ads_var_pos_set, position, self._ads_hub.PLCTYPE_BYTE ) def open_cover(self, **kwargs): """Move the cover up.""" if self._ads_var_open is not None: self._ads_hub.write_by_name( self._ads_var_open, True, self._ads_hub.PLCTYPE_BOOL ) elif self._ads_var_pos_set is not None: self.set_cover_position(position=100) def close_cover(self, **kwargs): """Move the cover down.""" if self._ads_var_close is not None: self._ads_hub.write_by_name( self._ads_var_close, True, self._ads_hub.PLCTYPE_BOOL ) elif self._ads_var_pos_set is not None: self.set_cover_position(position=0) @property def available(self): """Return False if state has not been updated yet.""" if self._ads_var is not None or self._ads_var_position is not None: return ( self._state_dict[STATE_KEY_STATE] is not None or self._state_dict[STATE_KEY_POSITION] is not None ) return True
medunigraz/outpost
refs/heads/master
src/outpost/django/typo3/migrations/0004_auto_20170906_1021.py
1
# -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2017-09-06 08:21 from __future__ import unicode_literals from django.db import migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('typo3', '0003_auto_20170818_1538'), ] forward = [ ''' DROP VIEW IF EXISTS "public"."typo3_event"; ''', ''' DROP VIEW IF EXISTS "public"."typo3_news"; ''', ''' CREATE MATERIALIZED VIEW "public"."typo3_event" AS SELECT uid AS id, pid AS source_id, to_date(start_date, 'YYYYMMDD') + CASE allday WHEN 1 THEN interval '0 hours' ELSE start_time * interval '1 seconds' END AS "start", to_date(end_date, 'YYYYMMDD') + CASE allday WHEN 1 THEN interval '24 hours' ELSE end_time * interval '1 seconds' END AS "end", allday::boolean AS allday, html_unescape(title) AS title, calendar_id, CASE WHEN category_id > 0 THEN category_id ELSE NULL END AS category_id, organizer, location, html_unescape(teaser) AS teaser, html_unescape(description) AS description, CASE WHEN sys_language_uid > 0 THEN sys_language_uid ELSE NULL END AS language_id, tx_mugcal_register::boolean AS register, CASE WHEN tx_mugcal_registration_end > 0 THEN to_timestamp(tx_mugcal_registration_end) ELSE NULL END AS registration_end, tx_mugcal_attendingfees::boolean AS attending_fees, tx_mugcal_www AS link, tx_mugcal_dfppoints AS dfp_points, tx_mugcal_contact AS contact, tx_mugcal_contact_email AS email, to_timestamp(tstamp) AS last_modified FROM "typo3"."event" WHERE start_date != '0' AND end_date != '0' AND (starttime = 0 OR to_timestamp(starttime) > NOW()) AND to_date(end_date, 'YYYYMMDD') + CASE allday WHEN 1 THEN interval '24 hours' ELSE end_time * interval '1 seconds' END > NOW() AND deleted = 0 AND hidden = 0 WITH DATA; '''.format(typo3_fileadmin=settings.OUTPOST.get('typo3_fileadmin')), ''' CREATE MATERIALIZED VIEW "public"."typo3_news" AS SELECT uid AS id, pid AS source_id, CASE WHEN sys_language_uid > 0 THEN sys_language_uid ELSE NULL END AS language_id, CASE datetime WHEN 0 THEN 
NULL ELSE to_timestamp(datetime) END AS datetime, html_unescape(title) AS title, html_unescape(teaser) AS teaser, html_unescape(bodytext) AS body, CASE starttime WHEN 0 THEN NULL ELSE to_timestamp(starttime) END AS start, CASE endtime WHEN 0 THEN NULL ELSE to_timestamp(endtime) END AS end, author, author_email AS email, keywords, tags, istopnews = 1 AS topnews, to_timestamp(tstamp) AS last_modified FROM "typo3"."news" WHERE (starttime = 0 OR to_timestamp(starttime) < NOW()) AND (endtime = 0 OR to_timestamp(endtime) > NOW()) AND deleted = 0 AND hidden = 0 WITH DATA; '''.format(typo3_fileadmin=settings.OUTPOST.get('typo3_fileadmin')), ] reverse = [ ''' DROP MATERIALIZED VIEW IF EXISTS "public"."typo3_event"; ''', ''' DROP MATERIALIZED VIEW IF EXISTS "public"."typo3_news"; ''', ''' CREATE VIEW "public"."typo3_event" AS SELECT uid AS id, to_date(start_date, 'YYYYMMDD') + CASE allday WHEN 1 THEN interval '0 hours' ELSE start_time * interval '1 seconds' END AS "start", to_date(end_date, 'YYYYMMDD') + CASE allday WHEN 1 THEN interval '24 hours' ELSE end_time * interval '1 seconds' END AS "end", allday::boolean AS allday, title, calendar_id, CASE WHEN category_id > 0 THEN category_id ELSE NULL END AS category_id, organizer, location, teaser, description, CASE WHEN sys_language_uid > 0 THEN sys_language_uid ELSE NULL END AS language_id, tx_mugcal_register::boolean AS register, CASE WHEN tx_mugcal_registration_end > 0 THEN to_timestamp(tx_mugcal_registration_end) ELSE NULL END AS registration_end, tx_mugcal_attendingfees::boolean AS attending_fees, tx_mugcal_www AS url, tx_mugcal_dfppoints AS dfp_points, tx_mugcal_contact AS contact, tx_mugcal_contact_email AS email, tx_mugcal_target AS target FROM "typo3"."event" WHERE start_date != '0' AND end_date != '0' AND (starttime = 0 OR to_timestamp(starttime) > NOW()) AND to_date(end_date, 'YYYYMMDD') + CASE allday WHEN 1 THEN interval '24 hours' ELSE end_time * interval '1 seconds' END > NOW() AND deleted = 0 AND hidden = 0; ''', 
''' CREATE VIEW "public"."typo3_news" AS SELECT uid AS id, pid AS page, CASE WHEN sys_language_uid > 0 THEN sys_language_uid ELSE NULL END AS language_id, CASE datetime WHEN 0 THEN NULL ELSE to_timestamp(datetime) END AS datetime, title, teaser, bodytext AS body, author, author_email AS email, keywords, tags, istopnews = 1 AS topnews FROM "typo3"."news" WHERE (starttime = 0 OR to_timestamp(starttime) < NOW()) AND (endtime = 0 OR to_timestamp(endtime) > NOW()) AND deleted = 0 AND hidden = 0; ''', ] operations = [ migrations.RunSQL( forward, reverse ) ]
SlimRoms/kernel_motorola_ghost
refs/heads/lp5.1
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
12527
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

import errno, os

# futex(2) operation codes and flags, mirrored from <linux/futex.h>.
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Mask that strips the flag bits, leaving the bare futex command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

NSECS_PER_SEC = 1000000000


def avg(total, n):
    """Return the arithmetic mean of a running total over n samples."""
    return total / n


def nsecs(secs, nsecs):
    """Combine a (secs, nsecs) pair into a single nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs


def nsecs_secs(nsecs):
    """Return the whole-seconds part of a nanosecond count."""
    # Floor division keeps the historical integer result on Python 3 too.
    return nsecs // NSECS_PER_SEC


def nsecs_nsecs(nsecs):
    """Return the sub-second remainder of a nanosecond count."""
    return nsecs % NSECS_PER_SEC


def nsecs_str(nsecs):
    """Format a nanosecond count as 'SSSSS.NNNNNNNNN'."""
    # BUG FIX: the original assignment ended with a stray trailing comma,
    # which made this function return a 1-tuple instead of a string.
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))


def add_stats(dict, key, value):
    """Fold value into dict[key] = (min, max, avg, count).

    The parameter name 'dict' shadows the builtin but is kept for
    backward compatibility with existing callers.
    """
    if key not in dict:  # replaces Python-2-only dict.has_key()
        dict[key] = (value, value, value, 1)
    else:
        lo, hi, mean, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        # NOTE(review): running pairwise average, not a true mean over
        # all samples -- kept as-is to preserve existing behavior.
        mean = (mean + value) / 2
        dict[key] = (lo, hi, mean, count + 1)


def clear_term():
    """Clear the terminal via ANSI escapes (cursor home + erase display)."""
    print("\x1b[H\x1b[2J")


audit_package_warned = False

try:
    import audit
    # Map uname machine strings to audit machine ids so syscall numbers
    # can be translated to names for the running architecture.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64'  : audit.MACH_IA64,
        'ppc'   : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390'  : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386'  : audit.MACH_X86,
        'i586'  : audit.MACH_X86,
        'i686'  : audit.MACH_X86,
    }
    try:
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        # Older audit bindings lack MACH_ARMEB; ignore.
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    # audit bindings missing (or unknown machine): warn once and fall
    # back to numeric syscall ids in syscall_name().
    if not audit_package_warned:
        audit_package_warned = True
        print("Install the audit-libs-python package to get syscall names")


def syscall_name(id):
    """Return the syscall name for id, or the id itself as a string."""
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except:
        # audit/machine_id unavailable, or id unknown to audit.
        return str(id)


def strerror(nr):
    """Return the symbolic errno name for nr (sign is ignored)."""
    try:
        return errno.errorcode[abs(nr)]
    except:
        return "Unknown %d errno" % nr
pyocd/pyOCD
refs/heads/develop
pyocd/target/builtin/target_MK28FN2M0xxx15.py
3
# pyOCD debugger # Copyright (c) 2016,2018 Arm Limited # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..family.target_kinetis import Kinetis from ..family.flash_kinetis import Flash_Kinetis from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap) from ...debug.svd.loader import SVDFile FLASH_ALGO = { 'load_address' : 0x20000000, 'instructions' : [ 0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2, 0xb510482e, 0x5120f24c, 0xf64d81c1, 0x81c11128, 0xf0218801, 0x80010101, 0x44484829, 0xf856f000, 0xbf182800, 0xbd102001, 0x47702000, 0xb5104824, 0x44484924, 0xf926f000, 0x4821b920, 0x44482100, 0xf9daf000, 0x684a4920, 0x0270f442, 0xbd10604a, 0x4c1bb570, 0x444c4605, 0x4b1a4601, 0x68e24620, 0xf88ef000, 0x2300b928, 0x46204629, 0xf00068e2, 0x4915f91f, 0xf442684a, 0x604a0270, 0xb570bd70, 0x460b460c, 0x46014606, 0xb084480d, 0x44484615, 0xf8b8f000, 0x2000b958, 0xe9cd2101, 0x90021000, 0x462b4807, 0x46314622, 0xf0004448, 0x4906f963, 0xf442684a, 0x604a0270, 0xbd70b004, 0x40052000, 0x00000004, 0x6b65666b, 0x4001f000, 0xbf042800, 0x47702004, 0x6cc94926, 0x0e094a26, 0xf832447a, 0x03091011, 0x2064bf04, 0x22004770, 0x2100e9c0, 0x60812104, 0x60c10289, 0x780b491f, 0x7c80f44f, 0xf303fa0c, 0x78c96103, 0x1205e9c0, 0x47704610, 0xbf0e2800, 0x61812004, 0x47702000, 0xbf042800, 0x47702004, 0x42191e5b, 0x421abf0e, 0x47702065, 0x428b6803, 0x6840d806, 0x44184411, 0xbf244288, 0x47702000, 0x47702066, 0x4288490c, 
0x206bbf14, 0x47702000, 0x290fb140, 0x2a04d802, 0xe005d104, 0xbf982913, 0xd0012a08, 0x47702004, 0x47702000, 0x40048000, 0x0000036c, 0x40020028, 0x6b65666b, 0x4df0e92d, 0x46154606, 0x4618460c, 0xffdcf7ff, 0xbf182800, 0x8df0e8bd, 0x462a2310, 0x46304621, 0xffbcf7ff, 0xbf180007, 0x8df0e8bd, 0x1e451960, 0xfbb568f0, 0xfb00f1f0, 0xb1125211, 0x43481c49, 0x42ac1e45, 0xf8dfd817, 0x44f88034, 0xb030f8df, 0x0a09f04f, 0x0000f8d8, 0xf88b6004, 0xf000a007, 0x4607f917, 0x280069b0, 0x4780bf18, 0x68f0b91f, 0x42ac4404, 0x4638d9ee, 0x8df0e8bd, 0x0000027a, 0x40020000, 0xbf042a00, 0x47702004, 0x4df0e92d, 0x4614461d, 0x4607460e, 0x462a2308, 0xff7ef7ff, 0x0b00ea5f, 0xe8bdbf18, 0x2d008df0, 0xf8dfbf1e, 0x44f8804c, 0x0a07f04f, 0xf8d8d01c, 0x60060000, 0x1000f8d8, 0x0b04f854, 0xf8d86048, 0xf8541000, 0x60880b04, 0xf880480a, 0xf000a007, 0x4683f8d9, 0x280069b8, 0x4780bf18, 0x0f00f1bb, 0x3608d102, 0xd1e23d08, 0xe8bd4658, 0x00008df0, 0x00000212, 0x40020000, 0x4604b510, 0xf7ff4608, 0x2800ff5d, 0xbd10bf18, 0xbf042c00, 0xbd102004, 0x49032044, 0xe8bd71c8, 0xf0004010, 0x0000b8b3, 0x40020000, 0x4df0e92d, 0x4614469a, 0x4605460e, 0xf7ff2310, 0x2800ff2d, 0xe8bdbf18, 0xe9d58df0, 0xfbb00101, 0x4270f8f1, 0x0100f1c8, 0x42474008, 0xbf0842b7, 0x2c004447, 0xf8dfbf18, 0xd01cb044, 0x42a51bbd, 0x4625bf88, 0x490e0928, 0x68094479, 0x2101600e, 0x1007f88b, 0xf88b0a01, 0xf88b100b, 0xf88b000a, 0xf000a009, 0x2800f87d, 0xe8bdbf18, 0x1b648df0, 0x4447442e, 0x2000d1e2, 0x8df0e8bd, 0x40020000, 0x0000014c, 0xbf122800, 0x20042a00, 0x29084770, 0xe8dfd215, 0x0604f001, 0x0c0a0806, 0x68c0100e, 0x6840e00a, 0x6880e008, 0x6800e006, 0x2001e004, 0x6900e002, 0x6940e000, 0x20006010, 0x206a4770, 0x00004770, 0xbf042b00, 0x47702004, 0x4df0e92d, 0xe9dd461c, 0x46158709, 0x2304460e, 0xa020f8dd, 0xfec4f7ff, 0xbf182800, 0x8df0e8bd, 0xbf1a2d00, 0xb04cf8df, 0xe8bd44fb, 0xf8db8df0, 0x60060000, 0x21024810, 0xf88071c1, 0xf8dba00b, 0x68201000, 0xf0006088, 0xb150f825, 0x0f00f1b8, 0xf8c8bf18, 0x2f006000, 0x2100bf1c, 0xe8bd6039, 0x1f2d8df0, 0x0404f104, 
0x0604f106, 0xe8bdd1df, 0x00008df0, 0x000000a0, 0x40020000, 0xbf042800, 0x47702004, 0x48022240, 0x718171c2, 0xb802f000, 0x40020000, 0x2170480c, 0x21807001, 0x78017001, 0x0f80f011, 0x7800d0fb, 0x0f20f010, 0x2067bf1c, 0xf0104770, 0xbf1c0f10, 0x47702068, 0x0001f010, 0x2069bf18, 0x00004770, 0x40020000, 0x40020004, 0x00000000, 0x00080000, 0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000, 0x02000000, 0x00000000, ], 'pc_init' : 0x20000021, 'pc_unInit': 0x20000049, 'pc_program_page': 0x2000009F, 'pc_erase_sector': 0x20000071, 'pc_eraseAll' : 0x2000004D, 'static_base' : 0x20000000 + 0x00000020 + 0x0000046c, 'begin_stack' : 0x20000000 + 0x00000800, 'begin_data' : 0x20000000 + 0x00000A00, 'analyzer_supported' : True, 'analyzer_address' : 0x1ffff000, # Analyzer 0x1ffff000..0x1ffff600 'page_buffers' : [0x20003000, 0x20004000], # Enable double buffering 'min_program_length' : 8, } class K28F15(Kinetis): MEMORY_MAP = MemoryMap( FlashRegion( start=0, length=0x200000, blocksize=0x1000, is_boot_memory=True, algo=FLASH_ALGO, flash_class=Flash_Kinetis), RamRegion( start=0x1ffC0000, length=0x80000), RamRegion( start=0x34000000, length=0x80000), RamRegion( start=0x14000000, length=0x1000) ) def __init__(self, session): super(K28F15, self).__init__(session, self.MEMORY_MAP) self._svd_location = SVDFile.from_builtin("MK28FA15.xml")
madslonnberg/blog
refs/heads/master
node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/bbcode.py
362
# -*- coding: utf-8 -*-
"""
    pygments.formatters.bbcode
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    BBcode formatter.

    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.formatter import Formatter
from pygments.util import get_bool_opt

__all__ = ['BBCodeFormatter']


class BBCodeFormatter(Formatter):
    """
    Format tokens with BBcodes. These formatting codes are used by many
    bulletin boards, so you can highlight your sourcecode with pygments before
    posting it there.

    This formatter has no support for background colors and borders, as there
    are no common BBcode tags for that.

    Some board systems (e.g. phpBB) don't support colors in their [code] tag,
    so you can't use the highlighting together with that tag.
    Text in a [code] tag usually is shown with a monospace font (which this
    formatter can do with the ``monofont`` option) and no spaces (which you
    need for indentation) are removed.

    Additional options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).

    `codetag`
        If set to true, put the output into ``[code]`` tags (default:
        ``false``)

    `monofont`
        If set to true, add a tag to show the code with a monospace font
        (default: ``false``).
    """
    name = 'BBCode'
    aliases = ['bbcode', 'bb']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # codetag/monofont are plain booleans parsed from the option dict.
        self._code = get_bool_opt(options, 'codetag', False)
        self._mono = get_bool_opt(options, 'monofont', False)

        # Maps token type -> (start_markup, end_markup); filled eagerly so
        # format_unencoded() only does dictionary lookups per token run.
        self.styles = {}
        self._make_styles()

    def _make_styles(self):
        # Translate each style definition into opening/closing BBcode tags.
        # Closing tags are prepended to ``end`` so they nest in reverse
        # order of the opening tags.
        for ttype, ndef in self.style:
            start = end = ''
            if ndef['color']:
                start += '[color=#%s]' % ndef['color']
                end = '[/color]' + end
            if ndef['bold']:
                start += '[b]'
                end = '[/b]' + end
            if ndef['italic']:
                start += '[i]'
                end = '[/i]' + end
            if ndef['underline']:
                start += '[u]'
                end = '[/u]' + end
            # there are no common BBcodes for background-color and border
            self.styles[ttype] = start, end

    def format_unencoded(self, tokensource, outfile):
        """Write the token stream to ``outfile`` wrapped in BBcode markup."""
        if self._code:
            outfile.write('[code]')
        if self._mono:
            outfile.write('[font=monospace]')

        # Consecutive tokens with the same (resolved) type are merged into
        # one run so each run is wrapped in a single pair of tags.
        lastval = ''
        lasttype = None

        for ttype, value in tokensource:
            # Walk up the token hierarchy until a styled ancestor is found.
            while ttype not in self.styles:
                ttype = ttype.parent
            if ttype == lasttype:
                lastval += value
            else:
                if lastval:
                    start, end = self.styles[lasttype]
                    outfile.write(''.join((start, lastval, end)))
                lastval = value
                lasttype = ttype

        # Flush the final pending run.
        if lastval:
            start, end = self.styles[lasttype]
            outfile.write(''.join((start, lastval, end)))

        if self._mono:
            outfile.write('[/font]')
        if self._code:
            outfile.write('[/code]')
        if self._code or self._mono:
            outfile.write('\n')
dsajkl/123
refs/heads/master
common/lib/xmodule/xmodule/open_ended_grading_classes/__init__.py
248
# Package marker for xmodule.open_ended_grading_classes; records the
# original author of the package.
__author__ = 'vik'
nreimers/truecaser
refs/heads/master
EvaluateTruecaser.py
1
from Truecaser import * import cPickle import nltk import string def evaluateTrueCaser(testSentences, wordCasingLookup, uniDist, backwardBiDist, forwardBiDist, trigramDist): correctTokens = 0 totalTokens = 0 for sentence in testSentences: tokensCorrect = nltk.word_tokenize(sentence) tokens = [token.lower() for token in tokensCorrect] tokensTrueCase = getTrueCase(tokens, 'title', wordCasingLookup, uniDist, backwardBiDist, forwardBiDist, trigramDist) perfectMatch = True for idx in xrange(len(tokensCorrect)): totalTokens += 1 if tokensCorrect[idx] == tokensTrueCase[idx]: correctTokens += 1 else: perfectMatch = False if not perfectMatch: print tokensCorrect print tokensTrueCase print "-------------------" print "Accuracy: %.2f%%" % (correctTokens / float(totalTokens)*100) def defaultTruecaserEvaluation(wordCasingLookup, uniDist, backwardBiDist, forwardBiDist, trigramDist): testSentences = [ "Its website was launched on February 4, 2004 by Mark Zuckerberg with his Harvard College roommates and fellow students Eduardo Saverin, Andrew McCollum, Dustin Moskovitz, and Chris Hughes." ,"Facebook is a for-profit corporation and online social networking service based in Menlo Park, California, United States. " ,"The founders had initially limited the website's membership to Harvard students, but later expanded it to colleges in the Boston area, the Ivy League, and Stanford University. " ,"It gradually added support for students at various other universities and later to high school students. " ,"Since 2006, anyone in general aged 13 and older has been allowed to become a registered user of the website, though variations exist in the minimum age requirement, depending on applicable local laws." ,"Its name comes from the face book directories often given to American university students." ,"Because of the large volume of data that users submit to the service, Facebook has come under scrutiny for their privacy policies. Facebook, Inc. 
held its initial public offering in February 2012 and began selling stock to the public three months later, reaching an original peak market capitalization of $104 billion." ,"Zuckerberg wrote a program called Facemash on October 28, 2003 while attending Harvard University as a sophomore (second year student)." ,"Apple Inc. is an American multinational technology company headquartered in Cupertino, California, that designs, develops, and sells consumer electronics, computer software, and online services." ,"Its hardware products include the iPhone smartphone, the iPad tablet computer, the Mac personal computer, the iPod portable media player, and the Apple Watch smartwatch." ,"Apple's consumer software includes the OS X and iOS operating systems, the iTunes media player, the Safari web browser, and the iLife and iWork creativity and productivity suites." ,"Its online services include the iTunes Store, the iOS App Store and Mac App Store, and iCloud." ,"Microsoft Corporation (commonly referred to as Microsoft) is an American multinational technology company headquartered in Redmond, Washington, that develops, manufactures, licenses, supports and sells computer software, consumer electronics and personal computers and services." ,"Its best known software products are the Microsoft Windows line of operating systems, Microsoft Office office suite, and Internet Explorer and Edge web browsers." ,"Its flagship hardware products are the Xbox game consoles and the Microsoft Surface tablet lineup." ,"It is the world's largest software maker by revenue, and one of the world's most valuable companies." ,"Google is an American multinational technology company specializing in Internet-related services and products." ,"These include online advertising technologies, search, cloud computing, and software." ,"Most of its profits are derived from AdWords, an online advertising service that places advertising near the list of search results." 
,"Rapid growth since incorporation has triggered a chain of products, acquisitions and partnerships beyond Google's core search engine (Google Search)." ,"It offers online productivity software (Google Docs) including email (Gmail), a cloud storage service (Google Drive) and a social networking service (Google+)." ,"Desktop products include applications for web browsing (Google Chrome), organizing and editing photos (Google Photos), and instant messaging and video chat (Hangouts)." ,"The company leads the development of the Android mobile operating system and the browser-only Chrome OS for a class of netbooks known as Chromebooks and desktop PCs known as Chromeboxes." ,"Google has moved increasingly into communications hardware, partnering with major electronics manufacturers[20] in the production of its \"high-quality low-cost\" Nexus devices." ,"In 2012, a fiber-optic infrastructure was installed in Kansas City to facilitate a Google Fiber broadband service." ,"WhatsApp Messenger is a proprietary cross-platform, encrypted, instant messaging client for smartphones." ,"It uses the Internet to send text messages, documents, images, video, user location and audio messages to other users using standard cellular mobile numbers." ,"As of February 2016, WhatsApp had a user base of one billion, making it the most popular messaging application." ,"WhatsApp Inc., based in Mountain View, California, United States, was acquired by Facebook Inc. on February 19, 2014, for approximately US$19.3 billion" ,"Barack Hussein Obama II (born August 4, 1961) is an American politician serving as the 44th President of the United States." ,"He is the first African American to hold the office, as well as the first president born outside of the continental United States." ,"Born in Honolulu, Hawaii, Obama is a graduate of Columbia University and Harvard Law School, where he served as president of the Harvard Law Review." 
,"He was a community organizer in Chicago before earning his law degree." ,"He worked as a civil rights attorney and taught constitutional law at University of Chicago Law School between 1992 and 2004." ,"He served three terms representing the 13th District in the Illinois Senate from 1997 to 2004, and ran unsuccessfully in the Democratic primary for the United States House of Representatives in 2000 against incumbent Bobby Rush." ,"In 2004, Obama received national attention during his campaign to represent Illinois in the United States Senate with his victory in the March Democratic Party primary, his keynote address at the Democratic National Convention in July, and his election to the Senate in November." ,"He began his presidential campaign in 2007 and, after a close primary campaign against Hillary Clinton in 2008, he won sufficient delegates in the Democratic Party primaries to receive the presidential nomination." ,"He then defeated Republican nominee John McCain in the general election, and was inaugurated as president on January 20, 2009." ,"Nine months after his inauguration, Obama was named the 2009 Nobel Peace Prize laureate." ,"Albert Einstein was a German-born theoretical physicist. He developed the general theory of relativity, one of the two pillars of modern physics (alongside quantum mechanics)." ,"Einstein's work is also known for its influence on the philosophy of science." ,"Einstein is best known in popular culture for his mass-energy equivalence formula E = mc2 (which has been dubbed \"the world's most famous equation\")." ,"He received the 1921 Nobel Prize in Physics for his \"services to theoretical physics\", in particular his discovery of the law of the photoelectric effect, a pivotal step in the evolution of quantum theory." ,"Near the beginning of his career, Einstein thought that Newtonian mechanics was no longer enough to reconcile the laws of classical mechanics with the laws of the electromagnetic field." 
,"This led to the development of his special theory of relativity." ,"He realized, however, that the principle of relativity could also be extended to gravitational fields, and with his subsequent theory of gravitation in 1916, he published a paper on general relativity." ,"He continued to deal with problems of statistical mechanics and quantum theory, which led to his explanations of particle theory and the motion of molecules. He also investigated the thermal properties of light which laid the foundation of the photon theory of light." ,"In 1917, Einstein applied the general theory of relativity to model the large-scale structure of the universe." ,"Ulm is a city in the federal German state of Baden-Wuerttemberg, situated on the River Danube." ,"The city, whose population is estimated at almost 120,000 (2015), forms an urban district of its own and is the administrative seat of the Alb-Donau district." ,"Ulm, founded around 850, is rich in history and traditions as a former Free Imperial City." ,"Today, it is an economic centre due to its varied industries, and it is the seat of the University of Ulm." ,"Internationally, Ulm is primarily known for having the church with the tallest steeple in the world (161.53 m or 529.95 ft), the Gothic minster (Ulm Minster) and as the birthplace of Albert Einstein." ] evaluateTrueCaser(testSentences, wordCasingLookup, uniDist, backwardBiDist, forwardBiDist, trigramDist) if __name__ == "__main__": f = open('english_distributions.obj', 'rb') uniDist = cPickle.load(f) backwardBiDist = cPickle.load(f) forwardBiDist = cPickle.load(f) trigramDist = cPickle.load(f) wordCasingLookup = cPickle.load(f) f.close() defaultTruecaserEvaluation(wordCasingLookup, uniDist, backwardBiDist, forwardBiDist, trigramDist)
tempbottle/Rusthon
refs/heads/master
regtests/loop/yield.py
2
# NOTE(review): this file is Rusthon dialect (typed Python-like syntax),
# not valid CPython -- the `int a = 0` declarations and the `[]int()`
# typed-array literal are Rusthon extensions.
'''
generator function
'''

def fib(n:int) -> int:
	# Yields the first `n` Fibonacci numbers, then -1 as an end marker.
	int a = 0
	int b = 1
	int c = 0
	for x in range(n):
		#print('looping')
		yield a
		c = b
		b = a+b
		a = c

	yield -1  ## signals end

def main():
	# Collect the generated sequence into a typed int array.
	arr = []int()
	for n in fib(20):
		#print(n)
		arr.append( n )

	# TestError is presumably provided by the Rusthon regression-test
	# harness (asserts the condition is true) -- confirm against the runner.
	TestError( arr[0]==0 )
	TestError( arr[1]==1 )
	TestError( arr[2]==1 )
	TestError( arr[3]==2 )
	TestError( arr[4]==3 )
	TestError( arr[5]==5 )
	TestError( arr[6]==8 )
	TestError( arr[7]==13 )
	TestError( arr[8]==21 )
	TestError( arr[9]==34 )
	TestError( arr[10]==55 )
tonioo/modoboa
refs/heads/master
modoboa/limits/constants.py
1
# -*- coding: utf-8 -*- """Modoboa limits constants.""" from __future__ import unicode_literals import collections from django.utils.translation import ugettext_lazy as _ DEFAULT_USER_LIMITS = collections.OrderedDict(( ("domains", { "content_type": "admin.domain", "label": _("Domains"), "help": _("Maximum number of domains this user can create"), "required_role": "Resellers"}), ("domain_aliases", { "content_type": "admin.domainalias", "label": _("Domain aliases"), "help": _("Maximum number of domain aliases this user can create"), "required_role": "Resellers"}), ("mailboxes", { "content_type": "admin.mailbox", "label": _("Mailboxes"), "help": _("Maximum number of mailboxes this user can create")}), ("mailbox_aliases", { "content_type": "admin.alias", "label": _("Mailbox aliases"), "help": _("Maximum number of mailbox aliases this user " "can create")}), ("domain_admins", { "content_type": "core.user", "label": _("Domain admins"), "help": _("Maximum number of domain administrators this user " "can create"), "required_role": "Resellers", "extra_filters": {"groups__name": "DomainAdmins"}}), ("quota", { "content_type": "admin.domain", "label": _("Quota"), "help": _("Quota shared between domains of this reseller"), "required_role": "Resellers", "type": "sum", "field": "quota" }) )) DEFAULT_DOMAIN_LIMITS = collections.OrderedDict(( ("domain_aliases", { "relation": "domainalias_set", "label": _("Domain aliases"), "help": _("Maximum number of domain aliases allowed for this domain.") }), ("mailboxes", { "relation": "mailbox_set", "label": _("Mailboxes"), "help": _("Maximum number of mailboxes allowed for this domain.")}), ("mailbox_aliases", { "relation": "alias_set", "label": _("Mailbox aliases"), "help": _( "Maximum number of mailbox aliases allowed for this domain."), "extra_filters": {"internal": False} }), ("domain_admins", { "relation": "admins", "label": _("Domain admins"), "help": _("Maximum number of domain admins allowed for this domain."), }) ))
marcwebbie/passpie
refs/heads/master
tests/test_clipboard.py
3
import pytest from passpie import clipboard def test_clipboard_on_osx_ensure_commands(mocker): mocker.patch('passpie.clipboard.process.call') mock_ensure_commands = mocker.patch('passpie.clipboard.ensure_commands') commands = clipboard.OSX_COMMANDS clipboard._copy_osx('text') mock_ensure_commands.assert_called_once_with(commands) def test_clipboard_on__ensure_commands(mocker): mocker.patch('passpie.clipboard.process.call') mock_ensure_commands = mocker.patch('passpie.clipboard.ensure_commands') commands = clipboard.LINUX_COMMANDS clipboard._copy_linux('text') mock_ensure_commands.assert_called_once_with(commands) def test_copy_calls_copy_osx_when_on_darwin_system(mocker): mocker.patch('passpie.clipboard.process.call') mocker.patch('passpie.clipboard.platform.system', return_value='Darwin') mock_copy_osx = mocker.patch('passpie.clipboard._copy_osx') mock_copy_linux = mocker.patch('passpie.clipboard._copy_linux') mock_copy_windows = mocker.patch('passpie.clipboard._copy_windows') clipboard.copy('text') assert mock_copy_osx.called is True assert mock_copy_linux.called is False assert mock_copy_windows.called is False mock_copy_osx.assert_called_once_with('text', 0) def test_copy_calls_copy_linux_when_on_linux_system(mocker): mocker.patch('passpie.clipboard.process.call') mocker.patch('passpie.clipboard.platform.system', return_value='Linux') mock_copy_osx = mocker.patch('passpie.clipboard._copy_osx') mock_copy_linux = mocker.patch('passpie.clipboard._copy_linux') mock_copy_windows = mocker.patch('passpie.clipboard._copy_windows') clipboard.copy('text') assert mock_copy_linux.called is True assert mock_copy_osx.called is False assert mock_copy_windows.called is False mock_copy_linux.assert_called_once_with('text', 0) def test_copy_calls_copy_windows_when_on_windows_system(mocker): mocker.patch('passpie.clipboard.process.call') mocker.patch('passpie.clipboard.platform.system', return_value='Windows') mock_copy_osx = mocker.patch('passpie.clipboard._copy_osx') 
mock_copy_linux = mocker.patch('passpie.clipboard._copy_linux') mock_copy_windows = mocker.patch('passpie.clipboard._copy_windows') clipboard.copy('text') assert mock_copy_windows.called is True assert mock_copy_osx.called is False assert mock_copy_linux.called is False mock_copy_windows.assert_called_once_with('text', 0) def test_copy_calls_copy_cygwin_when_on_cygwin_system(mocker): mocker.patch('passpie.clipboard.platform.system', return_value='cygwin system') mock_copy_cygwin = mocker.patch('passpie.clipboard._copy_cygwin') text = 's3cr3t' clipboard.copy(text) assert mock_copy_cygwin.called mock_copy_cygwin.assert_called_once_with(text, 0) def test_logs_error_msg_when_platform_not_supported(mocker): mocker.patch('passpie.clipboard.platform.system', return_value='unknown') mock_logger = mocker.patch('passpie.clipboard.logging') clipboard.copy('text') assert mock_logger.error.called msg = "platform 'unknown' copy to clipboard not supported" mock_logger.error.assert_called_once_with(msg) def test_ensure_commands_logs_error_when_command_not_found(mocker): mocker.patch('passpie.clipboard.which', return_value=False) mock_logging = mocker.patch('passpie.clipboard.logging') clipboard.ensure_commands(clipboard.LINUX_COMMANDS) assert mock_logging.error.called def test_ensure_commands_returns_command(mocker): commands = {'xclip': ['xclip']} mocker.patch('passpie.clipboard.which', return_value=True) result = clipboard.ensure_commands(commands) assert result == commands['xclip'] def test_clear_sleep_for_delay_seconds(mocker): mocker.patch('passpie.clipboard.process') mocker.patch('passpie.clipboard.sys.stdout') mocker.patch('passpie.clipboard.print', create=True) mock_time = mocker.patch('passpie.clipboard.time') clipboard.clean('some command', delay=5) assert mock_time.sleep.call_count == 5 def test_clear_calls_command_to_clear_with_whitespace_char(mocker): mocker.patch('passpie.clipboard.sys.stdout') mocker.patch('passpie.clipboard.print', create=True) 
mocker.patch('passpie.clipboard.time') mock_process = mocker.patch('passpie.clipboard.process') clipboard.clean('some command', delay=5) assert mock_process.call.called mock_process.call.assert_called_once_with('some command', input='\b') def test_clear_is_called_when_clear_is_passed_to_copy_osx(mocker): mocker.patch('passpie.clipboard.ensure_commands', return_value='command') mocker.patch('passpie.clipboard.process') mock_clean = mocker.patch('passpie.clipboard.clean') clipboard._copy_osx('text', clear=5) assert mock_clean.called mock_clean.assert_called_once_with('command', delay=5) def test_clear_is_called_when_clear_is_passed_to_copy_linux(mocker): mocker.patch('passpie.clipboard.ensure_commands', return_value='command') mocker.patch('passpie.clipboard.process') mock_clean = mocker.patch('passpie.clipboard.clean') clipboard._copy_linux('text', clear=5) assert mock_clean.called mock_clean.assert_called_once_with('command', delay=5)
JianyuWang/neutron
refs/heads/master
neutron/extensions/l3.py
25
# Copyright 2012 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import resource_helper from neutron.common import exceptions as nexception from neutron.plugins.common import constants # L3 Exceptions class RouterNotFound(nexception.NotFound): message = _("Router %(router_id)s could not be found") class RouterInUse(nexception.InUse): message = _("Router %(router_id)s %(reason)s") def __init__(self, **kwargs): if 'reason' not in kwargs: kwargs['reason'] = "still has ports" super(RouterInUse, self).__init__(**kwargs) class RouterInterfaceNotFound(nexception.NotFound): message = _("Router %(router_id)s does not have " "an interface with id %(port_id)s") class RouterInterfaceNotFoundForSubnet(nexception.NotFound): message = _("Router %(router_id)s has no interface " "on subnet %(subnet_id)s") class RouterInterfaceInUseByFloatingIP(nexception.InUse): message = _("Router interface for subnet %(subnet_id)s on router " "%(router_id)s cannot be deleted, as it is required " "by one or more floating IPs.") class FloatingIPNotFound(nexception.NotFound): message = _("Floating IP %(floatingip_id)s could not be found") class ExternalGatewayForFloatingIPNotFound(nexception.NotFound): message = _("External network %(external_network_id)s is not reachable " "from subnet %(subnet_id)s. 
Therefore, cannot associate " "Port %(port_id)s with a Floating IP.") class FloatingIPPortAlreadyAssociated(nexception.InUse): message = _("Cannot associate floating IP %(floating_ip_address)s " "(%(fip_id)s) with port %(port_id)s " "using fixed IP %(fixed_ip)s, as that fixed IP already " "has a floating IP on external network %(net_id)s.") class RouterExternalGatewayInUseByFloatingIp(nexception.InUse): message = _("Gateway cannot be updated for router %(router_id)s, since a " "gateway to external network %(net_id)s is required by one or " "more floating IPs.") ROUTERS = 'routers' EXTERNAL_GW_INFO = 'external_gateway_info' RESOURCE_ATTRIBUTE_MAP = { ROUTERS: { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'name': {'allow_post': True, 'allow_put': True, 'validate': {'type:string': attr.NAME_MAX_LEN}, 'is_visible': True, 'default': ''}, 'admin_state_up': {'allow_post': True, 'allow_put': True, 'default': True, 'convert_to': attr.convert_to_boolean, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, EXTERNAL_GW_INFO: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'enforce_policy': True, 'validate': { 'type:dict_or_nodata': { 'network_id': {'type:uuid': None, 'required': True}, 'external_fixed_ips': { 'convert_list_to': attr.convert_kvp_list_to_dict, 'type:fixed_ips': None, 'default': None, 'required': False, } } }} }, 'floatingips': { 'id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True}, 'floating_ip_address': {'allow_post': True, 'allow_put': False, 'validate': {'type:ip_address_or_none': None}, 'is_visible': True, 'default': None, 'enforce_policy': True}, 'subnet_id': {'allow_post': True, 
'allow_put': False, 'validate': {'type:uuid_or_none': None}, 'is_visible': False, # Use False for input only attr 'default': None}, 'floating_network_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, 'router_id': {'allow_post': False, 'allow_put': False, 'validate': {'type:uuid_or_none': None}, 'is_visible': True, 'default': None}, 'port_id': {'allow_post': True, 'allow_put': True, 'validate': {'type:uuid_or_none': None}, 'is_visible': True, 'default': None, 'required_by_policy': True}, 'fixed_ip_address': {'allow_post': True, 'allow_put': True, 'validate': {'type:ip_address_or_none': None}, 'is_visible': True, 'default': None}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True}, 'status': {'allow_post': False, 'allow_put': False, 'is_visible': True}, }, } l3_quota_opts = [ cfg.IntOpt('quota_router', default=10, help=_('Number of routers allowed per tenant. ' 'A negative value means unlimited.')), cfg.IntOpt('quota_floatingip', default=50, help=_('Number of floating IPs allowed per tenant. 
' 'A negative value means unlimited.')), ] cfg.CONF.register_opts(l3_quota_opts, 'QUOTAS') class L3(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Neutron L3 Router" @classmethod def get_alias(cls): return "router" @classmethod def get_description(cls): return ("Router abstraction for basic L3 forwarding" " between L2 Neutron networks and access to external" " networks via a NAT gateway.") @classmethod def get_updated(cls): return "2012-07-20T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) plural_mappings['external_fixed_ips'] = 'external_fixed_ip' attr.PLURALS.update(plural_mappings) action_map = {'router': {'add_router_interface': 'PUT', 'remove_router_interface': 'PUT'}} return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, constants.L3_ROUTER_NAT, action_map=action_map, register_quota=True) def update_attributes_map(self, attributes): super(L3, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} class RouterPluginBase(object): @abc.abstractmethod def create_router(self, context, router): pass @abc.abstractmethod def update_router(self, context, id, router): pass @abc.abstractmethod def get_router(self, context, id, fields=None): pass @abc.abstractmethod def delete_router(self, context, id): pass @abc.abstractmethod def get_routers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def add_router_interface(self, context, router_id, interface_info): pass @abc.abstractmethod def remove_router_interface(self, context, router_id, interface_info): pass @abc.abstractmethod def create_floatingip(self, context, floatingip): pass @abc.abstractmethod def update_floatingip(self, context, 
id, floatingip): pass @abc.abstractmethod def get_floatingip(self, context, id, fields=None): pass @abc.abstractmethod def delete_floatingip(self, context, id): pass @abc.abstractmethod def get_floatingips(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass def get_routers_count(self, context, filters=None): raise NotImplementedError() def get_floatingips_count(self, context, filters=None): raise NotImplementedError()
Rudde/pyroscope
refs/heads/master
pyroscope/pyroscope/controllers/error.py
2
import cgi

from paste.urlparser import PkgResourcesParser
from pylons import request
from pylons.controllers.util import forward
from pylons.middleware import error_document_template
from webhelpers.html.builder import literal

from pyroscope.lib.base import BaseController


class ErrorController(BaseController):
    """
    Generates error documents as and when they are required.

    The ErrorDocuments middleware forwards to ErrorController when error
    related status codes are returned from the application.

    This behaviour can be altered by changing the parameters to the
    ErrorDocuments middleware in your config/middleware.py file.
    """

    def document(self):
        """Render the error document"""
        original_response = request.environ.get('pylons.original_response')
        # Prefer the body of the original (failed) response; fall back to a
        # ?message= query parameter, HTML-escaped since it is user-supplied.
        content = literal(original_response.body) or \
            cgi.escape(request.GET.get('message', ''))
        template_values = dict(
            prefix=request.environ.get('SCRIPT_NAME', ''),
            code=cgi.escape(request.GET.get('code',
                                            str(original_response.status_int))),
            message=content,
        )
        return error_document_template % template_values

    def img(self, id):
        """Serve Pylons' stock images"""
        return self._serve_file('media/img/%s' % id)

    def style(self, id):
        """Serve Pylons' stock stylesheets"""
        return self._serve_file('media/style/%s' % id)

    def _serve_file(self, path):
        """
        Call Paste's FileApp (a WSGI application) to serve the file
        at the specified path
        """
        request.environ['PATH_INFO'] = '/%s' % path
        return forward(PkgResourcesParser('pylons', 'pylons'))
rjeschmi/easybuild-easyblocks
refs/heads/master
easybuild/easyblocks/generic/binary.py
3
## # Copyright 2009-2013 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en), # the Hercules foundation (http://www.herculesstichting.be/in_English) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # http://github.com/hpcugent/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. ## """ General EasyBuild support for software with a binary installer @author: Stijn De Weirdt (Ghent University) @author: Dries Verdegem (Ghent University) @author: Kenneth Hoste (Ghent University) @author: Pieter De Baets (Ghent University) @author: Jens Timmerman (Ghent University) """ import shutil import os import stat from easybuild.framework.easyblock import EasyBlock from easybuild.framework.easyconfig import CUSTOM from easybuild.tools.filetools import run_cmd, rmtree2 class Binary(EasyBlock): """ Support for installing software that comes in binary form. Just copy the sources to the install dir, or use the specified install command. 
""" @staticmethod def extra_options(extra_vars=None): """Extra easyconfig parameters specific to Binary easyblock.""" extra_vars = dict(EasyBlock.extra_options(extra_vars)) extra_vars.update({ 'install_cmd': [None, "Install command to be used.", CUSTOM], }) return EasyBlock.extra_options(extra_vars) def extract_step(self): """Move all source files to the build directory""" self.src[0]['finalpath'] = self.builddir # copy source to build dir. for source in self.src: src = source['path'] dst = os.path.join(self.builddir, source['name']) try: shutil.copy2(src, self.builddir) os.chmod(dst, stat.S_IRWXU) except (OSError, IOError), err: self.log.exception("Couldn't copy %s to %s: %s" % (src, self.builddir, err)) def configure_step(self): """No configuration, this is binary software""" pass def build_step(self): """No compilation, this is binary software""" pass def install_step(self): """Copy all files in build directory to the install directory""" if self.cfg['install_cmd'] is None: try: # shutil.copytree doesn't allow the target directory to exist already rmtree2(self.installdir) shutil.copytree(self.cfg['start_dir'], self.installdir) except OSError, err: self.log.error("Failed to copy %s to %s: %s" % (self.cfg['start_dir'], self.installdir)) else: self.log.info("Installing %s using command '%s'..." % (self.name, self.cfg['install_cmd'])) run_cmd(self.cfg['install_cmd'], log_all=True, simple=True) def make_module_extra(self): """Add the install directory to the PATH.""" txt = super(Binary, self).make_module_extra() txt += self.moduleGenerator.prepend_paths("PATH", ['']) self.log.debug("make_module_extra added this: %s" % txt) return txt
braams/shtoom
refs/heads/master
shtoom/ui/mfcui/__init__.py
1
# This file is necessary to make this directory a package from main import *
sajuptpm/neutron-ipam
refs/heads/stable/icehouse
neutron/plugins/ml2/drivers/cisco/nexus/exceptions.py
36
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Exceptions used by Cisco Nexus ML2 mechanism driver."""

from neutron.common import exceptions

# NOTE(review): the _() translation marker used below is presumably installed
# as a builtin by neutron's gettext setup at startup — it is not imported in
# this module; confirm against the project's i18n bootstrap.


class CredentialNotFound(exceptions.NeutronException):
    """Credential with this ID cannot be found."""
    # Substituted with credential_id=... at raise time by NeutronException.
    message = _("Credential %(credential_id)s could not be found.")


class CredentialNameNotFound(exceptions.NeutronException):
    """Credential Name could not be found."""
    message = _("Credential %(credential_name)s could not be found.")


class CredentialAlreadyExists(exceptions.NeutronException):
    """Credential name already exists."""
    message = _("Credential %(credential_name)s already exists "
                "for tenant %(tenant_id)s.")


class NexusComputeHostNotConfigured(exceptions.NeutronException):
    """Connection to compute host is not configured."""
    message = _("Connection to %(host)s is not configured.")


class NexusConnectFailed(exceptions.NeutronException):
    """Failed to connect to Nexus switch."""
    message = _("Unable to connect to Nexus %(nexus_host)s. Reason: %(exc)s.")


class NexusConfigFailed(exceptions.NeutronException):
    """Failed to configure Nexus switch."""
    message = _("Failed to configure Nexus: %(config)s. Reason: %(exc)s.")


class NexusPortBindingNotFound(exceptions.NeutronException):
    """NexusPort Binding is not present."""
    message = _("Nexus Port Binding (%(filters)s) is not present")

    def __init__(self, **kwargs):
        # Render the arbitrary lookup filters as "key=value,key=value" so the
        # message shows exactly which binding query came up empty.
        filters = ','.join('%s=%s' % i for i in kwargs.items())
        super(NexusPortBindingNotFound, self).__init__(filters=filters)


class NexusMissingRequiredFields(exceptions.NeutronException):
    """Missing required fields to configure nexus switch."""
    message = _("Missing required field(s) to configure nexus switch: "
                "%(fields)s")


class NoNexusSviSwitch(exceptions.NeutronException):
    """No usable nexus switch found."""
    message = _("No usable Nexus switch found to create SVI interface.")


class SubnetNotSpecified(exceptions.NeutronException):
    """Subnet id not specified."""
    message = _("No subnet_id specified for router gateway.")


class SubnetInterfacePresent(exceptions.NeutronException):
    """Subnet SVI interface already exists."""
    message = _("Subnet %(subnet_id)s has an interface on %(router_id)s.")


class PortIdForNexusSvi(exceptions.NeutronException):
    """Port Id specified for Nexus SVI."""
    message = _('Nexus hardware router gateway only uses Subnet Ids.')
practicalswift/bitcoin
refs/heads/master
contrib/verifybinaries/verify.py
5
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Script for verifying Bitcoin Core release binaries

This script attempts to download the signature file SHA256SUMS.asc from
bitcoincore.org and bitcoin.org and compares them.
It first checks if the signature passes, and then downloads the files
specified in the file, and checks if the hashes of these files match those
that are specified in the signature file.
The script returns 0 if everything passes the checks. It returns 1 if either
the signature check or the hash check doesn't pass. If an error occurs the
return value is >= 2.
"""
from hashlib import sha256
import os
import subprocess
import sys
from textwrap import indent

WORKINGDIR = "/tmp/bitcoin_verify_binaries"
HASHFILE = "hashes.tmp"
HOST1 = "https://bitcoincore.org"
HOST2 = "https://bitcoin.org"
VERSIONPREFIX = "bitcoin-core-"
SIGNATUREFILENAME = "SHA256SUMS.asc"


def parse_version_string(version_str):
    """Split a version argument into (base, rc, platform) components.

    Accepts "[bitcoin-core-]<version>[-rcN][-platform]"; components that are
    not present come back as empty strings.
    """
    if version_str.startswith(VERSIONPREFIX):  # remove version prefix
        version_str = version_str[len(VERSIONPREFIX):]

    parts = version_str.split('-')
    version_base = parts[0]
    version_rc = ""
    version_os = ""
    if len(parts) == 2:  # "<version>-rcN" or "<version>-platform"
        if "rc" in parts[1]:
            version_rc = parts[1]
        else:
            version_os = parts[1]
    elif len(parts) == 3:  # "<version>-rcN-platform"
        version_rc = parts[1]
        version_os = parts[2]

    return version_base, version_rc, version_os


def download_with_wget(remote_file, local_file=None):
    """Download remote_file via wget; return (success, wget output)."""
    if local_file:
        wget_args = ['wget', '-O', local_file, remote_file]
    else:
        # use timestamping mechanism if local filename is not explicitly set
        wget_args = ['wget', '-N', remote_file]

    result = subprocess.run(wget_args,
                            stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    return result.returncode == 0, result.stdout.decode().rstrip()


def files_are_equal(filename1, filename2):
    """Return True if the two files have byte-identical contents."""
    with open(filename1, 'rb') as file1:
        contents1 = file1.read()
    with open(filename2, 'rb') as file2:
        contents2 = file2.read()
    return contents1 == contents2


def verify_with_gpg(signature_filename, output_filename):
    """Verify a clearsigned file with gpg, writing the signed data to
    output_filename; return (gpg return code, gpg output)."""
    result = subprocess.run(['gpg', '--yes', '--decrypt', '--output',
                             output_filename, signature_filename],
                            stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    return result.returncode, result.stdout.decode().rstrip()


def remove_files(filenames):
    """Delete each of the given files."""
    for filename in filenames:
        os.remove(filename)


def main(args):
    """Run the full download-and-verify flow; see module docstring for the
    meaning of the return codes."""
    # sanity check
    if len(args) < 1:
        print("Error: need to specify a version on the command line")
        return 3

    # determine remote dir dependent on provided version string
    version_base, version_rc, os_filter = parse_version_string(args[0])
    remote_dir = f"/bin/{VERSIONPREFIX}{version_base}/"
    if version_rc:
        remote_dir += f"test.{version_rc}/"
    remote_sigfile = remote_dir + SIGNATUREFILENAME

    # create working directory
    os.makedirs(WORKINGDIR, exist_ok=True)
    os.chdir(WORKINGDIR)

    # fetch first signature file
    sigfile1 = SIGNATUREFILENAME
    success, output = download_with_wget(HOST1 + remote_sigfile, sigfile1)
    if not success:
        print("Error: couldn't fetch signature file. "
              "Have you specified the version number in the following format?")
        print(f"[{VERSIONPREFIX}]<version>[-rc[0-9]][-platform] "
              f"(example: {VERSIONPREFIX}0.21.0-rc3-osx)")
        print("wget output:")
        print(indent(output, '\t'))
        return 4

    # fetch second signature file
    sigfile2 = SIGNATUREFILENAME + ".2"
    success, output = download_with_wget(HOST2 + remote_sigfile, sigfile2)
    if not success:
        print("bitcoin.org failed to provide signature file, "
              "but bitcoincore.org did?")
        print("wget output:")
        print(indent(output, '\t'))
        remove_files([sigfile1])
        return 5

    # ensure that both signature files are equal
    if not files_are_equal(sigfile1, sigfile2):
        print("bitcoin.org and bitcoincore.org signature files were not equal?")
        print(f"See files {WORKINGDIR}/{sigfile1} and {WORKINGDIR}/{sigfile2}")
        return 6

    # check signature and extract data into file
    retval, output = verify_with_gpg(sigfile1, HASHFILE)
    if retval != 0:
        if retval == 1:
            print("Bad signature.")
        elif retval == 2:
            print("gpg error. Do you have the Bitcoin Core binary release "
                  "signing key installed?")
        print("gpg output:")
        print(indent(output, '\t'))
        remove_files([sigfile1, sigfile2, HASHFILE])
        return 1

    # extract hashes/filenames of binaries to verify from hash file;
    # each line has the following format: "<hash> <binary_filename>"
    with open(HASHFILE, 'r', encoding='utf8') as hash_file:
        hashes_to_verify = [
            line.split()[:2] for line in hash_file if os_filter in line]
    remove_files([HASHFILE])
    if not hashes_to_verify:
        print("error: no files matched the platform specified")
        return 7

    # download binaries
    for _, binary_filename in hashes_to_verify:
        print(f"Downloading {binary_filename}")
        # BUG FIX: the download result used to be ignored, so a failed
        # download surfaced later as an unhandled FileNotFoundError when
        # hashing; fail cleanly with a diagnostic instead
        success, output = download_with_wget(HOST1 + remote_dir + binary_filename)
        if not success:
            print(f"Error: couldn't fetch binary file {binary_filename}")
            print("wget output:")
            print(indent(output, '\t'))
            return 8

    # verify hashes
    offending_files = []
    for hash_expected, binary_filename in hashes_to_verify:
        with open(binary_filename, 'rb') as binary_file:
            hash_calculated = sha256(binary_file.read()).hexdigest()
        if hash_calculated != hash_expected:
            offending_files.append(binary_filename)
    if offending_files:
        print("Hashes don't match.")
        print("Offending files:")
        print('\n'.join(offending_files))
        return 1
    verified_binaries = [entry[1] for entry in hashes_to_verify]

    # clean up files if desired
    if len(args) >= 2:
        print("Clean up the binaries")
        remove_files([sigfile1, sigfile2] + verified_binaries)
    else:
        print(f"Keep the binaries in {WORKINGDIR}")

    print("Verified hashes of")
    print('\n'.join(verified_binaries))
    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
wido/libcloud
refs/heads/trunk
libcloud/common/vultr.py
29
from libcloud.common.base import ConnectionKey, JsonResponse

__all__ = [
    'API_HOST',
    'VultrConnection',
    'VultrException',
    'VultrResponse',
]

# Endpoint for the Vultr API
API_HOST = 'api.vultr.com'


class VultrResponse(JsonResponse):
    """Parse a Vultr API response and raise VultrException on HTTP errors."""

    objects = None
    # Kept for backward compatibility with code reading the class attribute;
    # it is no longer mutated (see parse_body_and_errors).
    error_dict = {}
    errors = None
    ERROR_CODE_MAP = {
        400: "Invalid API location. Check the URL that you are using.",
        403: "Invalid or missing API key. Check that your API key is present" +
             " and matches your assigned key.",
        405: "Invalid HTTP method. Check that the method (POST|GET) matches" +
             " what the documentation indicates.",
        412: "Request failed. Check the response body for a more detailed" +
             " description.",
        500: "Internal server error. Try again at a later time.",
        503: "Rate limit hit. API requests are limited to an average of 1/s." +
             " Try your request again later.",
    }

    def __init__(self, response, connection):
        self.errors = []
        super(VultrResponse, self).__init__(response=response,
                                            connection=connection)
        self.objects, self.errors = self.parse_body_and_errors()
        if not self.success():
            raise self._make_excp(self.errors[0])

    def parse_body_and_errors(self):
        """
        Returns JSON data in a python list.
        """
        json_objects = []
        errors = []

        if self.status in self.ERROR_CODE_MAP:
            # BUG FIX: previously this mutated a class-level dict in place,
            # so all VultrResponse instances shared (and overwrote) a single
            # error dict; build a fresh per-instance dict instead.
            self.error_dict = {
                'ERRORCODE': self.status,
                'ERRORMESSAGE': self.ERROR_CODE_MAP[self.status],
            }
            errors.append(self.error_dict)

        js = super(VultrResponse, self).parse_body()
        if isinstance(js, dict):
            js = [js]

        json_objects.append(js)

        return (json_objects, errors)

    def _make_excp(self, error):
        """
        Convert API error to a VultrException instance
        """
        return VultrException(error['ERRORCODE'], error['ERRORMESSAGE'])

    def success(self):
        """A response is successful when no errors were collected."""
        return len(self.errors) == 0


class VultrConnection(ConnectionKey):
    """
    A connection to the Vultr API
    """
    host = API_HOST
    responseCls = VultrResponse

    def add_default_params(self, params):
        """
        Returns default params such as api_key which is
        needed to perform an action.Returns a dictionary.
        Example:/v1/server/upgrade_plan?api_key=self.key
        """
        params['api_key'] = self.key

        return params

    def add_default_headers(self, headers):
        """
        Returns default headers such as content-type.
        Returns a dictionary.
        """
        headers["Content-Type"] = "application/x-www-form-urlencoded"
        headers["Accept"] = "text/plain"

        return headers

    def set_path(self):
        # All Vultr API endpoints live under the /v/ prefix.
        self.path = '/v/'

        return self.path


class VultrException(Exception):
    """
    Error originating from the Vultr API
    """
    def __init__(self, code, message):
        self.code = code
        self.message = message
        self.args = (code, message)

    def __str__(self):
        return "(%u) %s" % (self.code, self.message)

    def __repr__(self):
        return "VultrException code %u '%s'" % (self.code, self.message)
mrpau/kolibri
refs/heads/develop
kolibri/core/auth/migrations/0010_auto_20180320_1320.py
4
# -*- coding: utf-8 -*- # Generated by Django 1.11.10 on 2018-03-20 20:20 from __future__ import unicode_literals from django.db import migrations from django.db import models class Migration(migrations.Migration): dependencies = [("kolibriauth", "0009_auto_20180301_1123")] operations = [ migrations.AlterField( model_name="role", name="kind", field=models.CharField( choices=[ ("admin", "Admin"), ("coach", "Coach"), ("classroom assignable coach", "Classroom Assignable Coach"), ], max_length=26, ), ) ]
nacelle/nacelle
refs/heads/master
nacelle/contrib/lockdown/dispatcher.py
2
""" Custom dispatcher for nacelle that implements lockdown support """ # marty mcfly imports from __future__ import absolute_import # stdlib imports import re # third-party imports import webapp2 from nacelle.conf import settings from nacelle.core.dispatcher import nacelle_dispatcher # local imports from .utils import check_auth def compile_url_exceptions(url_exceptions): return [re.compile(p) for p in url_exceptions] _default_url_exceptions = compile_url_exceptions(settings.LOCKDOWN_URL_EXCEPTIONS) def lockdown_dispatcher(router, request, response): """Override dispatch to provide lockdown support """ username = settings.LOCKDOWN_USERNAME password = settings.LOCKDOWN_PASSWORD # Don't lock down if the URL matches an exception pattern. unlocked_url = False for pattern in _default_url_exceptions: if pattern.search(request.path): unlocked_url = True break if not unlocked_url and not check_auth(request, username, password): msg = 'Could not verify your access level for that URL. ' \ 'You have to login with proper credentials' resp = webapp2.Response(msg) resp.set_status(401) resp.headers['WWW-Authenticate'] = 'Basic realm="Login Required"' return resp # Dispatch the request using nacelle's regular dispatcher. return nacelle_dispatcher(router, request, response)
vaaceves/repue-repositorio-universidad-empresa
refs/heads/master
home/forms.py
1
# Import the forms API from django
from django import forms
from django.utils.text import slugify
from django.forms import ModelForm
# Import the models
from models import Contenido


class FormCrearContenido(forms.ModelForm):
    """Form built from the Contenido model for creating content entries."""

    class Meta:
        """Inner class defining:
        - which model the form is built from (the 'model' attribute)
        - which fields the form exposes (the 'fields' attribute)"""
        model = Contenido
        fields = [
            'titulo',
            'tipo',
            'descripcion',
            'autores',
            'anoPublicacion',
            'pais',
            'evento',
            'libro',
            'tematica',
            'descarga',
            'video',
            'portada',
            'issuu',
            'slug',
        ]

    def save(self, commit=True):
        """Save the form, auto-generating the slug from the title.

        BUG FIX: the original defined a nested duplicate ``def save`` (so the
        outer save returned None without saving), called ``super(AddForm, ...)``
        referencing a nonexistent class (NameError at runtime), and read
        ``instance.title`` although the model field declared above is
        ``titulo``.
        """
        instance = super(FormCrearContenido, self).save(commit=False)
        instance.slug = slugify(instance.titulo)
        if commit:
            instance.save()
        return instance
grevutiu-gabriel/phantomjs
refs/heads/master
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/suggestnominations.py
119
# Copyright (c) 2011 Google Inc. All rights reserved. # Copyright (c) 2011 Code Aurora Forum. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from optparse import make_option import re from webkitpy.common.checkout.changelog import ChangeLogEntry from webkitpy.common.config.committers import CommitterList from webkitpy.tool.grammar import join_with_separators from webkitpy.tool.multicommandtool import Command class CommitLogError(Exception): def __init__(self): Exception.__init__(self) class CommitLogMissingReviewer(CommitLogError): def __init__(self): CommitLogError.__init__(self) class AbstractCommitLogCommand(Command): _leading_indent_regexp = re.compile(r"^[ ]{4}", re.MULTILINE) _reviewed_by_regexp = re.compile(ChangeLogEntry.reviewed_by_regexp, re.MULTILINE) _patch_by_regexp = re.compile(r'^Patch by (?P<name>.+?)\s+<(?P<email>[^<>]+)> on (?P<date>\d{4}-\d{2}-\d{2})$', re.MULTILINE) _committer_regexp = re.compile(r'^Author: (?P<email>\S+)\s+<[^>]+>$', re.MULTILINE) _date_regexp = re.compile(r'^Date: (?P<date>\d{4}-\d{2}-\d{2}) (?P<time>\d{2}:\d{2}:\d{2}) [\+\-]\d{4}$', re.MULTILINE) _revision_regexp = re.compile(r'^git-svn-id: http://svn.webkit.org/repository/webkit/trunk@(?P<svnid>\d+) (?P<gitid>[0-9a-f\-]{36})$', re.MULTILINE) def __init__(self, options=None): options = options or [] options += [ make_option("--max-commit-age", action="store", dest="max_commit_age", type="int", default=9, help="Specify maximum commit age to consider (in months)."), ] options = sorted(options, cmp=lambda a, b: cmp(a._long_opts, b._long_opts)) super(AbstractCommitLogCommand, self).__init__(options=options) # FIXME: This should probably be on the tool somewhere. self._committer_list = CommitterList() def _init_options(self, options): self.verbose = options.verbose self.max_commit_age = options.max_commit_age # FIXME: This should move to scm.py def _recent_commit_messages(self): git_log = self._tool.executive.run_command(['git', 'log', '--date=iso', '--since="%s months ago"' % self.max_commit_age]) messages = re.compile(r"^commit \w{40}$", re.MULTILINE).split(git_log)[1:] # Ignore the first message which will be empty. 
for message in messages: # Unindent all the lines (message, _) = self._leading_indent_regexp.subn("", message) yield message.lstrip() # Remove any leading newlines from the log message. def _author_name_from_email(self, email): contributor = self._committer_list.contributor_by_email(email) return contributor.full_name if contributor else None def _contributor_from_email(self, email): contributor = self._committer_list.contributor_by_email(email) return contributor if contributor else None def _parse_commit_message(self, commit_message): committer_match = self._committer_regexp.search(commit_message) if not committer_match: raise CommitLogError committer_email = committer_match.group('email') if not committer_email: raise CommitLogError committer = self._contributor_from_email(committer_email) if not committer: raise CommitLogError commit_date_match = self._date_regexp.search(commit_message) if not commit_date_match: raise CommitLogError commit_date = commit_date_match.group('date') revision_match = self._revision_regexp.search(commit_message) if not revision_match: raise CommitLogError revision = revision_match.group('svnid') # Look for "Patch by" line first, which is used for non-committer contributors; # otherwise, use committer info determined above. 
author_match = self._patch_by_regexp.search(commit_message) if not author_match: author_match = committer_match author_email = author_match.group('email') if not author_email: author_email = committer_email author_name = author_match.group('name') if 'name' in author_match.groupdict() else None if not author_name: author_name = self._author_name_from_email(author_email) if not author_name: raise CommitLogError contributor = self._contributor_from_email(author_email) if contributor and author_name != contributor.full_name and contributor.full_name: author_name = contributor.full_name reviewer_match = self._reviewed_by_regexp.search(commit_message) if not reviewer_match: raise CommitLogMissingReviewer reviewers = reviewer_match.group('reviewer') return { 'committer': committer, 'commit_date': commit_date, 'revision': revision, 'author_email': author_email, 'author_name': author_name, 'contributor': contributor, 'reviewers': reviewers, } class SuggestNominations(AbstractCommitLogCommand): name = "suggest-nominations" help_text = "Suggest contributors for committer/reviewer nominations" def __init__(self): options = [ make_option("--committer-minimum", action="store", dest="committer_minimum", type="int", default=10, help="Specify minimum patch count for Committer nominations."), make_option("--reviewer-minimum", action="store", dest="reviewer_minimum", type="int", default=80, help="Specify minimum patch count for Reviewer nominations."), make_option("--show-commits", action="store_true", dest="show_commits", default=False, help="Show commit history with nomination suggestions."), ] super(SuggestNominations, self).__init__(options=options) def _init_options(self, options): super(SuggestNominations, self)._init_options(options) self.committer_minimum = options.committer_minimum self.reviewer_minimum = options.reviewer_minimum self.show_commits = options.show_commits def _count_commit(self, commit, analysis): author_name = commit['author_name'] author_email = 
commit['author_email'] revision = commit['revision'] commit_date = commit['commit_date'] # See if we already have a contributor with this author_name or email counter_by_name = analysis['counters_by_name'].get(author_name) counter_by_email = analysis['counters_by_email'].get(author_email) if counter_by_name: if counter_by_email: if counter_by_name != counter_by_email: # Merge these two counters This is for the case where we had # John Smith (jsmith@gmail.com) and Jonathan Smith (jsmith@apple.com) # and just found a John Smith (jsmith@apple.com). Now we know the # two names are the same person counter_by_name['names'] |= counter_by_email['names'] counter_by_name['emails'] |= counter_by_email['emails'] counter_by_name['count'] += counter_by_email.get('count', 0) analysis['counters_by_email'][author_email] = counter_by_name else: # Add email to the existing counter analysis['counters_by_email'][author_email] = counter_by_name counter_by_name['emails'] |= set([author_email]) else: if counter_by_email: # Add name to the existing counter analysis['counters_by_name'][author_name] = counter_by_email counter_by_email['names'] |= set([author_name]) else: # Create new counter new_counter = {'names': set([author_name]), 'emails': set([author_email]), 'latest_name': author_name, 'latest_email': author_email, 'commits': ""} analysis['counters_by_name'][author_name] = new_counter analysis['counters_by_email'][author_email] = new_counter assert(analysis['counters_by_name'][author_name] == analysis['counters_by_email'][author_email]) counter = analysis['counters_by_name'][author_name] counter['count'] = counter.get('count', 0) + 1 if revision.isdigit(): revision = "http://trac.webkit.org/changeset/" + revision counter['commits'] += " commit: %s on %s by %s (%s)\n" % (revision, commit_date, author_name, author_email) def _count_recent_patches(self): analysis = { 'counters_by_name': {}, 'counters_by_email': {}, } for commit_message in self._recent_commit_messages(): try: 
self._count_commit(self._parse_commit_message(commit_message), analysis) except CommitLogError, exception: continue return analysis['counters_by_email'] def _collect_nominations(self, counters_by_email): nominations = [] for author_email, counter in counters_by_email.items(): if author_email != counter['latest_email']: continue roles = [] contributor = self._committer_list.contributor_by_email(author_email) author_name = counter['latest_name'] patch_count = counter['count'] if patch_count >= self.committer_minimum and (not contributor or not contributor.can_commit): roles.append("committer") if patch_count >= self.reviewer_minimum and contributor and contributor.can_commit and not contributor.can_review: roles.append("reviewer") if roles: nominations.append({ 'roles': roles, 'author_name': author_name, 'author_email': author_email, 'patch_count': patch_count, }) return nominations def _print_nominations(self, nominations, counters_by_email): def nomination_cmp(a_nomination, b_nomination): roles_result = cmp(a_nomination['roles'], b_nomination['roles']) if roles_result: return -roles_result count_result = cmp(a_nomination['patch_count'], b_nomination['patch_count']) if count_result: return -count_result return cmp(a_nomination['author_name'], b_nomination['author_name']) for nomination in sorted(nominations, nomination_cmp): # This is a little bit of a hack, but its convienent to just pass the nomination dictionary to the formating operator. 
nomination['roles_string'] = join_with_separators(nomination['roles']).upper() print "%(roles_string)s: %(author_name)s (%(author_email)s) has %(patch_count)s reviewed patches" % nomination counter = counters_by_email[nomination['author_email']] if self.show_commits: print counter['commits'] def _print_counts(self, counters_by_email): def counter_cmp(a_tuple, b_tuple): # split the tuples # the second element is the "counter" structure _, a_counter = a_tuple _, b_counter = b_tuple count_result = cmp(a_counter['count'], b_counter['count']) if count_result: return -count_result return cmp(a_counter['latest_name'].lower(), b_counter['latest_name'].lower()) for author_email, counter in sorted(counters_by_email.items(), counter_cmp): if author_email != counter['latest_email']: continue contributor = self._committer_list.contributor_by_email(author_email) author_name = counter['latest_name'] patch_count = counter['count'] counter['names'] = counter['names'] - set([author_name]) counter['emails'] = counter['emails'] - set([author_email]) alias_list = [] for alias in counter['names']: alias_list.append(alias) for alias in counter['emails']: alias_list.append(alias) if alias_list: print "CONTRIBUTOR: %s (%s) has %d reviewed patches %s" % (author_name, author_email, patch_count, "(aliases: " + ", ".join(alias_list) + ")") else: print "CONTRIBUTOR: %s (%s) has %d reviewed patches" % (author_name, author_email, patch_count) return def execute(self, options, args, tool): self._init_options(options) patch_counts = self._count_recent_patches() nominations = self._collect_nominations(patch_counts) self._print_nominations(nominations, patch_counts) if self.verbose: self._print_counts(patch_counts) if __name__ == "__main__": SuggestNominations()
chrisschuette/Nyx
refs/heads/master
third_party/gtest/test/gtest_shuffle_test.py
3023
#!/usr/bin/env python # # Copyright 2009 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Verifies that test shuffling works.""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import gtest_test_utils # Command to run the gtest_shuffle_test_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_') # The environment variables for test sharding. 
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

# Filter used for the FILTERED_* test lists below.
TEST_FILTER = 'A*.A:A*.B:C*'

# Test lists computed lazily (and cached) by CalculateTestLists(); each is
# the list of full test names run under the corresponding flag combination.
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []


# Helpers that build the command-line flags passed to gtest_shuffle_test_.
def AlsoRunDisabledTestsFlag():
  return '--gtest_also_run_disabled_tests'


def FilterFlag(test_filter):
  return '--gtest_filter=%s' % (test_filter,)


def RepeatFlag(n):
  return '--gtest_repeat=%s' % (n,)


def ShuffleFlag():
  return '--gtest_shuffle'


def RandomSeedFlag(n):
  return '--gtest_random_seed=%s' % (n,)


def RunAndReturnOutput(extra_env, args):
  """Runs the test program and returns its output."""

  environ_copy = os.environ.copy()
  environ_copy.update(extra_env)

  return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output


def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """

  # The test binary prints a '----' separator before each iteration's
  # test list; every other non-blank line is one full test name.
  test_iterations = []
  for line in RunAndReturnOutput(extra_env, args).split('\n'):
    if line.startswith('----'):
      tests = []
      test_iterations.append(tests)
    elif line.strip():
      tests.append(line.strip())  # 'TestCaseName.TestName'

  return test_iterations


def GetTestCases(tests):
  """Returns a list of test cases in the given full test names.

  Args:
    tests: a list of full test names

  Returns:
    A list of test cases from 'tests', in their original order.
    Consecutive duplicates are removed.
  """

  test_cases = []
  for test in tests:
    test_case = test.split('.')[0]
    if not test_case in test_cases:
      test_cases.append(test_case)

  return test_cases


def CalculateTestLists():
  """Calculates the list of tests run under different flags."""
  # Each list is populated only once (the 'if not ...' guards make this
  # function idempotent), so running the test binary is done at most once
  # per flag combination across all test methods.
  if not ALL_TESTS:
    ALL_TESTS.extend(
        GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])

  if not ACTIVE_TESTS:
    ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])

  if not FILTERED_TESTS:
    FILTERED_TESTS.extend(
        GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])

  if not SHARDED_TESTS:
    SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [])[0])

  if not SHUFFLED_ALL_TESTS:
    SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
        {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_ACTIVE_TESTS:
    SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_FILTERED_TESTS:
    SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])

  if not SHUFFLED_SHARDED_TESTS:
    SHUFFLED_SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [ShuffleFlag(), RandomSeedFlag(1)])[0])


class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling."""

  def setUp(self):
    CalculateTestLists()

  def testShufflePreservesNumberOfTests(self):
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

  def testShuffleChangesTestOrder(self):
    self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
    self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
    self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                 SHUFFLED_FILTERED_TESTS)
    self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                 SHUFFLED_SHARDED_TESTS)

  def testShuffleChangesTestCaseOrder(self):
    self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                 GetTestCases(SHUFFLED_ALL_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
        GetTestCases(SHUFFLED_ACTIVE_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
        GetTestCases(SHUFFLED_FILTERED_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
        GetTestCases(SHUFFLED_SHARDED_TESTS))

  def testShuffleDoesNotRepeatTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))

  def testShuffleDoesNotCreateNewTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

  def testShuffleIncludesAllTests(self):
    for test in ALL_TESTS:
      self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

  def testShuffleLeavesDeathTestsAtFront(self):
    # Death tests must all come before the first non-death test; a death
    # test seen after a non-death test is a shuffling bug.
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assert_(not non_death_test_found,
                     '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True

  def _VerifyTestCasesDoNotInterleave(self, tests):
    # Collect the test-case name each time it changes; if a case name
    # re-appears later in the list, count() > 1 and the check fails.
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      if test_cases and test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))

  def testShuffleDoesNotInterleaveTestCases(self):
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively.  Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers.  This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)

    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)

    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)

  def testShuffleGeneratesNewOrderInEachIteration(self):
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    self.assert_(tests_in_iteration1 != tests_in_iteration2,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration1 != tests_in_iteration3,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration2 != tests_in_iteration3,
                 tests_in_iteration2)

  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)

if __name__ == '__main__':
  gtest_test_utils.Main()
Captnoord/openpli-enigma2
refs/heads/master
lib/python/Tools/Transponder.py
12
from enigma import eDVBFrontendParametersSatellite, eDVBFrontendParametersCable, eDVBFrontendParametersTerrestrial from Components.NimManager import nimmanager def ConvertToHumanReadable(tp, type = None): ret = { } if type is None: type = tp.get("tuner_type", "None") if type == "DVB-S": ret["tuner_type"] = _("Satellite") ret["inversion"] = { eDVBFrontendParametersSatellite.Inversion_Unknown : _("Auto"), eDVBFrontendParametersSatellite.Inversion_On : _("On"), eDVBFrontendParametersSatellite.Inversion_Off : _("Off")}.get(tp.get("inversion")) ret["fec_inner"] = { eDVBFrontendParametersSatellite.FEC_None : _("None"), eDVBFrontendParametersSatellite.FEC_Auto : _("Auto"), eDVBFrontendParametersSatellite.FEC_1_2 : "1/2", eDVBFrontendParametersSatellite.FEC_2_3 : "2/3", eDVBFrontendParametersSatellite.FEC_3_4 : "3/4", eDVBFrontendParametersSatellite.FEC_5_6 : "5/6", eDVBFrontendParametersSatellite.FEC_6_7 : "6/7", eDVBFrontendParametersSatellite.FEC_7_8 : "7/8", eDVBFrontendParametersSatellite.FEC_3_5 : "3/5", eDVBFrontendParametersSatellite.FEC_4_5 : "4/5", eDVBFrontendParametersSatellite.FEC_8_9 : "8/9", eDVBFrontendParametersSatellite.FEC_9_10 : "9/10"}.get(tp.get("fec_inner")) ret["modulation"] = { eDVBFrontendParametersSatellite.Modulation_Auto : _("Auto"), eDVBFrontendParametersSatellite.Modulation_QPSK : "QPSK", eDVBFrontendParametersSatellite.Modulation_QAM16 : "QAM16", eDVBFrontendParametersSatellite.Modulation_8PSK : "8PSK"}.get(tp.get("modulation")) ret["orbital_position"] = nimmanager.getSatName(int(tp.get("orbital_position"))) ret["polarization"] = { eDVBFrontendParametersSatellite.Polarisation_Horizontal : _("Horizontal"), eDVBFrontendParametersSatellite.Polarisation_Vertical : _("Vertical"), eDVBFrontendParametersSatellite.Polarisation_CircularLeft : _("Circular left"), eDVBFrontendParametersSatellite.Polarisation_CircularRight : _("Circular right")}.get(tp.get("polarization")) ret["polarization_abbreviation"] = { 
eDVBFrontendParametersSatellite.Polarisation_Horizontal : "H", eDVBFrontendParametersSatellite.Polarisation_Vertical : "V", eDVBFrontendParametersSatellite.Polarisation_CircularLeft : "L", eDVBFrontendParametersSatellite.Polarisation_CircularRight : "R"}.get(tp.get("polarization")) ret["system"] = { eDVBFrontendParametersSatellite.System_DVB_S : "DVB-S", eDVBFrontendParametersSatellite.System_DVB_S2 : "DVB-S2"}.get(tp.get("system")) if ret["system"] == "DVB-S2": ret["rolloff"] = { eDVBFrontendParametersSatellite.RollOff_alpha_0_35 : "0.35", eDVBFrontendParametersSatellite.RollOff_alpha_0_25 : "0.25", eDVBFrontendParametersSatellite.RollOff_alpha_0_20 : "0.20", eDVBFrontendParametersSatellite.RollOff_auto : _("Auto")}.get(tp.get("rolloff")) ret["pilot"] = { eDVBFrontendParametersSatellite.Pilot_Unknown : _("Auto"), eDVBFrontendParametersSatellite.Pilot_On : _("On"), eDVBFrontendParametersSatellite.Pilot_Off : _("Off")}.get(tp.get("pilot")) elif type == "DVB-C": ret["tuner_type"] = _("Cable") ret["modulation"] = { eDVBFrontendParametersCable.Modulation_Auto: _("Auto"), eDVBFrontendParametersCable.Modulation_QAM16 : "QAM16", eDVBFrontendParametersCable.Modulation_QAM32 : "QAM32", eDVBFrontendParametersCable.Modulation_QAM64 : "QAM64", eDVBFrontendParametersCable.Modulation_QAM128 : "QAM128", eDVBFrontendParametersCable.Modulation_QAM256 : "QAM256"}.get(tp.get("modulation")) ret["inversion"] = { eDVBFrontendParametersCable.Inversion_Unknown : _("Auto"), eDVBFrontendParametersCable.Inversion_On : _("On"), eDVBFrontendParametersCable.Inversion_Off : _("Off")}.get(tp.get("inversion")) ret["fec_inner"] = { eDVBFrontendParametersCable.FEC_None : _("None"), eDVBFrontendParametersCable.FEC_Auto : _("Auto"), eDVBFrontendParametersCable.FEC_1_2 : "1/2", eDVBFrontendParametersCable.FEC_2_3 : "2/3", eDVBFrontendParametersCable.FEC_3_4 : "3/4", eDVBFrontendParametersCable.FEC_5_6 : "5/6", eDVBFrontendParametersCable.FEC_7_8 : "7/8", eDVBFrontendParametersCable.FEC_8_9 : "8/9", 
eDVBFrontendParametersCable.FEC_3_5 : "3/5", eDVBFrontendParametersCable.FEC_4_5 : "4/5", eDVBFrontendParametersCable.FEC_9_10 : "9/10"}.get(tp.get("fec_inner")) ret["system"] = { eDVBFrontendParametersCable.System_DVB_C_ANNEX_A : "DVB-C", eDVBFrontendParametersCable.System_DVB_C_ANNEX_C : "DVB-C ANNEX C"}.get(tp.get("system")) elif type == "DVB-T": ret["tuner_type"] = _("Terrestrial") ret["bandwidth"] = { 0 : _("Auto"), 10000000 : "10 MHz", 8000000 : "8 MHz", 7000000 : "7 MHz", 6000000 : "6 MHz", 5000000 : "5 MHz", 1712000 : "1.712 MHz"}.get(tp.get("bandwidth")) ret["code_rate_lp"] = { eDVBFrontendParametersTerrestrial.FEC_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.FEC_1_2 : "1/2", eDVBFrontendParametersTerrestrial.FEC_2_3 : "2/3", eDVBFrontendParametersTerrestrial.FEC_3_4 : "3/4", eDVBFrontendParametersTerrestrial.FEC_5_6 : "5/6", eDVBFrontendParametersTerrestrial.FEC_6_7 : "6/7", eDVBFrontendParametersTerrestrial.FEC_7_8 : "7/8", eDVBFrontendParametersTerrestrial.FEC_8_9 : "8/9"}.get(tp.get("code_rate_lp")) ret["code_rate_hp"] = { eDVBFrontendParametersTerrestrial.FEC_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.FEC_1_2 : "1/2", eDVBFrontendParametersTerrestrial.FEC_2_3 : "2/3", eDVBFrontendParametersTerrestrial.FEC_3_4 : "3/4", eDVBFrontendParametersTerrestrial.FEC_5_6 : "5/6", eDVBFrontendParametersTerrestrial.FEC_6_7 : "6/7", eDVBFrontendParametersTerrestrial.FEC_7_8 : "7/8", eDVBFrontendParametersTerrestrial.FEC_8_9 : "8/9"}.get(tp.get("code_rate_hp")) ret["constellation"] = { eDVBFrontendParametersTerrestrial.Modulation_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.Modulation_QPSK : "QPSK", eDVBFrontendParametersTerrestrial.Modulation_QAM16 : "QAM16", eDVBFrontendParametersTerrestrial.Modulation_QAM64 : "QAM64", eDVBFrontendParametersTerrestrial.Modulation_QAM256 : "QAM256"}.get(tp.get("constellation")) ret["transmission_mode"] = { eDVBFrontendParametersTerrestrial.TransmissionMode_Auto : _("Auto"), 
eDVBFrontendParametersTerrestrial.TransmissionMode_1k : "1k", eDVBFrontendParametersTerrestrial.TransmissionMode_2k : "2k", eDVBFrontendParametersTerrestrial.TransmissionMode_4k : "4k", eDVBFrontendParametersTerrestrial.TransmissionMode_8k : "8k", eDVBFrontendParametersTerrestrial.TransmissionMode_16k : "16k", eDVBFrontendParametersTerrestrial.TransmissionMode_32k : "32k"}.get(tp.get("transmission_mode")) ret["guard_interval"] = { eDVBFrontendParametersTerrestrial.GuardInterval_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.GuardInterval_19_256 : "19/256", eDVBFrontendParametersTerrestrial.GuardInterval_19_128 : "19/128", eDVBFrontendParametersTerrestrial.GuardInterval_1_128 : "1/128", eDVBFrontendParametersTerrestrial.GuardInterval_1_32 : "1/32", eDVBFrontendParametersTerrestrial.GuardInterval_1_16 : "1/16", eDVBFrontendParametersTerrestrial.GuardInterval_1_8 : "1/8", eDVBFrontendParametersTerrestrial.GuardInterval_1_4 : "1/4"}.get(tp.get("guard_interval")) ret["hierarchy_information"] = { eDVBFrontendParametersTerrestrial.Hierarchy_Auto : _("Auto"), eDVBFrontendParametersTerrestrial.Hierarchy_None : _("None"), eDVBFrontendParametersTerrestrial.Hierarchy_1 : "1", eDVBFrontendParametersTerrestrial.Hierarchy_2 : "2", eDVBFrontendParametersTerrestrial.Hierarchy_4 : "4"}.get(tp.get("hierarchy_information")) ret["inversion"] = { eDVBFrontendParametersTerrestrial.Inversion_Unknown : _("Auto"), eDVBFrontendParametersTerrestrial.Inversion_On : _("On"), eDVBFrontendParametersTerrestrial.Inversion_Off : _("Off")}.get(tp.get("inversion")) ret["system"] = { eDVBFrontendParametersTerrestrial.System_DVB_T_T2 : "DVB-T/T2", eDVBFrontendParametersTerrestrial.System_DVB_T : "DVB-T", eDVBFrontendParametersTerrestrial.System_DVB_T2 : "DVB-T2"}.get(tp.get("system")) elif type == "ATSC": ret["tuner_type"] = "ATSC" ret["modulation"] = { eDVBFrontendParametersATSC.Modulation_Auto: _("Auto"), eDVBFrontendParametersATSC.Modulation_QAM16 : "QAM16", 
eDVBFrontendParametersATSC.Modulation_QAM32 : "QAM32", eDVBFrontendParametersATSC.Modulation_QAM64 : "QAM64", eDVBFrontendParametersATSC.Modulation_QAM128 : "QAM128", eDVBFrontendParametersATSC.Modulation_QAM256 : "QAM256", eDVBFrontendParametersATSC.Modulation_VSB_8 : "VSB_8", eDVBFrontendParametersATSC.Modulation_VSB_16 : "VSB_16"}.get(tp.get("modulation")) ret["inversion"] = { eDVBFrontendParametersATSC.Inversion_Unknown : _("Auto"), eDVBFrontendParametersATSC.Inversion_On : _("On"), eDVBFrontendParametersATSC.Inversion_Off : _("Off")}.get(tp.get("inversion")) ret["system"] = { eDVBFrontendParametersATSC.System_ATSC : "ATSC", eDVBFrontendParametersATSC.System_DVB_C_ANNEX_B : "DVB-C ANNEX B"}.get(tp.get("system")) elif type != "None": print "ConvertToHumanReadable: no or unknown type in tpdata dict for type:", type for k,v in tp.items(): if k not in ret: ret[k] = v return ret
akhilari7/pa-dude
refs/heads/master
lib/python2.7/site-packages/django/test/client.py
132
from __future__ import unicode_literals

import json
import mimetypes
import os
import re
import sys
from copy import copy
from importlib import import_module
from io import BytesIO

from django.apps import apps
from django.conf import settings
from django.core import urlresolvers
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import ISO_8859_1, UTF_8, WSGIRequest
from django.core.signals import (
    got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.utils import six
from django.utils.encoding import force_bytes, force_str, uri_to_iri
from django.utils.functional import SimpleLazyObject, curry
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.six.moves.urllib.parse import urlparse, urlsplit

__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')


# Fixed multipart boundary used by the test client; tests may rely on it.
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')


class RedirectCycleError(Exception):
    """
    The test client has been asked to follow a redirect loop.
    """
    def __init__(self, message, last_response):
        super(RedirectCycleError, self).__init__(message)
        self.last_response = last_response
        self.redirect_chain = last_response.redirect_chain


class FakePayload(object):
    """
    A wrapper around BytesIO that restricts what can be read since data from
    the network can't be seeked and cannot be read outside of its content
    length. This makes sure that views can't do anything under the test
    client that wouldn't work in Real Life.
    """
    def __init__(self, content=None):
        self.__content = BytesIO()
        self.__len = 0
        self.read_started = False
        if content is not None:
            self.write(content)

    def __len__(self):
        return self.__len

    def read(self, num_bytes=None):
        # First read rewinds to the start; after that, reads are sequential
        # and writing is forbidden (see write()).
        if not self.read_started:
            self.__content.seek(0)
            self.read_started = True
        if num_bytes is None:
            num_bytes = self.__len or 0
        assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
        content = self.__content.read(num_bytes)
        self.__len -= num_bytes
        return content

    def write(self, content):
        if self.read_started:
            raise ValueError("Unable to write a payload after he's been read")
        content = force_bytes(content)
        self.__content.write(content)
        self.__len += len(content)


def closing_iterator_wrapper(iterable, close):
    # Wraps a streaming response iterator so close() fires only after the
    # content has been fully consumed.
    try:
        for item in iterable:
            yield item
    finally:
        request_finished.disconnect(close_old_connections)
        close()  # will fire request_finished
        request_finished.connect(close_old_connections)


class ClientHandler(BaseHandler):
    """
    A HTTP Handler that can be used for testing purposes. Uses the WSGI
    interface to compose requests, but returns the raw HttpResponse object
    with the originating WSGIRequest attached to its ``wsgi_request``
    attribute.
    """
    def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
        self.enforce_csrf_checks = enforce_csrf_checks
        super(ClientHandler, self).__init__(*args, **kwargs)

    def __call__(self, environ):
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            self.load_middleware()

        request_started.disconnect(close_old_connections)
        request_started.send(sender=self.__class__, environ=environ)
        request_started.connect(close_old_connections)
        request = WSGIRequest(environ)
        # sneaky little hack so that we can easily get round
        # CsrfViewMiddleware.  This makes life easier, and is probably
        # required for backwards compatibility with external tests against
        # admin views.
        request._dont_enforce_csrf_checks = not self.enforce_csrf_checks

        # Request goes through middleware.
        response = self.get_response(request)
        # Attach the originating request to the response so that it could be
        # later retrieved.
        response.wsgi_request = request

        # We're emulating a WSGI server; we must call the close method
        # on completion.
        if response.streaming:
            response.streaming_content = closing_iterator_wrapper(
                response.streaming_content, response.close)
        else:
            request_finished.disconnect(close_old_connections)
            response.close()  # will fire request_finished
            request_finished.connect(close_old_connections)

        return response


def store_rendered_templates(store, signal, sender, template, context, **kwargs):
    """
    Stores templates and contexts that are rendered.

    The context is copied so that it is an accurate representation at the
    time of rendering.
    """
    store.setdefault('templates', []).append(template)
    store.setdefault('context', ContextList()).append(copy(context))


def encode_multipart(boundary, data):
    """
    Encodes multipart POST data from a dictionary of form values.

    The key will be used as the form data name; the value will be transmitted
    as content. If the value is a file, the contents of the file will be sent
    as an application/octet-stream; otherwise, str(value) will be sent.
    """
    lines = []
    to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)

    # Not by any means perfect, but good enough for our purposes.
    is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)

    # Each bit of the multipart form data could be either a form value or a
    # file, or a *list* of form values and/or files. Remember that HTTP field
    # names can be duplicated!
    for (key, value) in data.items():
        if is_file(value):
            lines.extend(encode_file(boundary, key, value))
        elif not isinstance(value, six.string_types) and is_iterable(value):
            for item in value:
                if is_file(item):
                    lines.extend(encode_file(boundary, key, item))
                else:
                    lines.extend(to_bytes(val) for val in [
                        '--%s' % boundary,
                        'Content-Disposition: form-data; name="%s"' % key,
                        '',
                        item
                    ])
        else:
            lines.extend(to_bytes(val) for val in [
                '--%s' % boundary,
                'Content-Disposition: form-data; name="%s"' % key,
                '',
                value
            ])

    lines.extend([
        to_bytes('--%s--' % boundary),
        b'',
    ])
    return b'\r\n'.join(lines)


def encode_file(boundary, key, file):
    # Encodes a single file as one part of a multipart/form-data body;
    # returns the part as a list of byte strings (joined by the caller).
    to_bytes = lambda s: force_bytes(s, settings.DEFAULT_CHARSET)
    filename = os.path.basename(file.name) if hasattr(file, 'name') else ''
    if hasattr(file, 'content_type'):
        content_type = file.content_type
    elif filename:
        content_type = mimetypes.guess_type(filename)[0]
    else:
        content_type = None

    if content_type is None:
        content_type = 'application/octet-stream'
    if not filename:
        filename = key
    return [
        to_bytes('--%s' % boundary),
        to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
                 % (key, filename)),
        to_bytes('Content-Type: %s' % content_type),
        b'',
        to_bytes(file.read())
    ]


class RequestFactory(object):
    """
    Class that lets you create mock Request objects for use in testing.

    Usage:

    rf = RequestFactory()
    get_request = rf.get('/hello/')
    post_request = rf.post('/submit/', {'foo': 'bar'})

    Once you have a request object you can pass it to any view function,
    just as if that view had been hooked up using a URLconf.
    """
    def __init__(self, **defaults):
        self.defaults = defaults
        self.cookies = SimpleCookie()
        self.errors = BytesIO()

    def _base_environ(self, **request):
        """
        The base environment for a request.
        """
        # This is a minimal valid WSGI environ dictionary, plus:
        # - HTTP_COOKIE: for cookie support,
        # - REMOTE_ADDR: often useful, see #8551.
        # See http://www.python.org/dev/peps/pep-3333/#environ-variables
        environ = {
            'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
            'PATH_INFO': str('/'),
            'REMOTE_ADDR': str('127.0.0.1'),
            'REQUEST_METHOD': str('GET'),
            'SCRIPT_NAME': str(''),
            'SERVER_NAME': str('testserver'),
            'SERVER_PORT': str('80'),
            'SERVER_PROTOCOL': str('HTTP/1.1'),
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': str('http'),
            'wsgi.input': FakePayload(b''),
            'wsgi.errors': self.errors,
            'wsgi.multiprocess': True,
            'wsgi.multithread': False,
            'wsgi.run_once': False,
        }
        environ.update(self.defaults)
        environ.update(request)
        return environ

    def request(self, **request):
        "Construct a generic request object."
        return WSGIRequest(self._base_environ(**request))

    def _encode_data(self, data, content_type):
        # Serialize `data` as bytes according to `content_type`; multipart
        # data goes through encode_multipart(), anything else is byte-encoded
        # with the charset from the content type (or the default charset).
        if content_type is MULTIPART_CONTENT:
            return encode_multipart(BOUNDARY, data)
        else:
            # Encode the content so that the byte representation is correct.
            match = CONTENT_TYPE_RE.match(content_type)
            if match:
                charset = match.group(1)
            else:
                charset = settings.DEFAULT_CHARSET
            return force_bytes(data, encoding=charset)

    def _get_path(self, parsed):
        path = force_str(parsed[2])
        # If there are parameters, add them
        if parsed[3]:
            path += str(";") + force_str(parsed[3])
        path = uri_to_iri(path).encode(UTF_8)
        # Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
        # decoded with ISO-8859-1. We replicate this behavior here.
        # Refs comment in `get_bytes_from_wsgi()`.
        return path.decode(ISO_8859_1) if six.PY3 else path

    def get(self, path, data=None, secure=False, **extra):
        "Construct a GET request."
        data = {} if data is None else data
        r = {
            'QUERY_STRING': urlencode(data, doseq=True),
        }
        r.update(extra)
        return self.generic('GET', path, secure=secure, **r)

    def post(self, path, data=None, content_type=MULTIPART_CONTENT,
             secure=False, **extra):
        "Construct a POST request."
        data = {} if data is None else data
        post_data = self._encode_data(data, content_type)

        return self.generic('POST', path, post_data, content_type,
                            secure=secure, **extra)

    def head(self, path, data=None, secure=False, **extra):
        "Construct a HEAD request."
        data = {} if data is None else data
        r = {
            'QUERY_STRING': urlencode(data, doseq=True),
        }
        r.update(extra)
        return self.generic('HEAD', path, secure=secure, **r)

    def trace(self, path, secure=False, **extra):
        "Construct a TRACE request."
        return self.generic('TRACE', path, secure=secure, **extra)

    def options(self, path, data='', content_type='application/octet-stream',
                secure=False, **extra):
        "Construct an OPTIONS request."
        return self.generic('OPTIONS', path, data, content_type,
                            secure=secure, **extra)

    def put(self, path, data='', content_type='application/octet-stream',
            secure=False, **extra):
        "Construct a PUT request."
        return self.generic('PUT', path, data, content_type,
                            secure=secure, **extra)

    def patch(self, path, data='', content_type='application/octet-stream',
              secure=False, **extra):
        "Construct a PATCH request."
        return self.generic('PATCH', path, data, content_type,
                            secure=secure, **extra)

    def delete(self, path, data='', content_type='application/octet-stream',
               secure=False, **extra):
        "Construct a DELETE request."
        return self.generic('DELETE', path, data, content_type,
                            secure=secure, **extra)

    def generic(self, method, path, data='',
                content_type='application/octet-stream', secure=False,
                **extra):
        """Constructs an arbitrary HTTP request."""
        parsed = urlparse(force_str(path))
        data = force_bytes(data, settings.DEFAULT_CHARSET)
        r = {
            'PATH_INFO': self._get_path(parsed),
            'REQUEST_METHOD': str(method),
            'SERVER_PORT': str('443') if secure else str('80'),
            'wsgi.url_scheme': str('https') if secure else str('http'),
        }
        if data:
            r.update({
                'CONTENT_LENGTH': len(data),
                'CONTENT_TYPE': str(content_type),
                'wsgi.input': FakePayload(data),
            })
        r.update(extra)
        # If QUERY_STRING is absent or empty, we want to extract it from the URL.
        if not r.get('QUERY_STRING'):
            query_string = force_bytes(parsed[4])
            # WSGI requires latin-1 encoded strings. See get_path_info().
            if six.PY3:
                query_string = query_string.decode('iso-8859-1')
            r['QUERY_STRING'] = query_string
        return self.request(**r)


class Client(RequestFactory):
    """
    A class that can act as a client for testing purposes.

    It allows the user to compose GET and POST requests, and
    obtain the response that the server gave to those requests.
    The server Response objects are annotated with the details
    of the contexts and templates that were rendered during the
    process of serving the request.

    Client objects are stateful - they will retain cookie (and
    thus session) details for the lifetime of the Client instance.

    This is not intended as a replacement for Twill/Selenium or
    the like - it is here to allow testing against the
    contexts and templates produced by a view, rather than the
    HTML rendered to the end-user.
    """
    def __init__(self, enforce_csrf_checks=False, **defaults):
        super(Client, self).__init__(**defaults)
        self.handler = ClientHandler(enforce_csrf_checks)
        self.exc_info = None

    def store_exc_info(self, **kwargs):
        """
        Stores exceptions when they are generated by a view.
        """
        self.exc_info = sys.exc_info()

    def _session(self):
        """
        Obtains the current session variables.
        """
        if apps.is_installed('django.contrib.sessions'):
            engine = import_module(settings.SESSION_ENGINE)
            cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
            if cookie:
                return engine.SessionStore(cookie.value)
            else:
                # No session cookie yet: create and persist a fresh session
                # so that its key can be stored on the client's cookie jar.
                s = engine.SessionStore()
                s.save()
                self.cookies[settings.SESSION_COOKIE_NAME] = s.session_key
                return s
        return {}
    session = property(_session)

    def request(self, **request):
        """
        The master request method. Composes the environment dictionary
        and passes to the handler, returning the result of the handler.
        Assumes defaults for the query environment, which can be overridden
        using the arguments to the request.
        """
        environ = self._base_environ(**request)

        # Curry a data dictionary into an instance of the template renderer
        # callback function.
        data = {}
        on_template_render = curry(store_rendered_templates, data)
        signal_uid = "template-render-%s" % id(request)
        signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
        # Capture exceptions created by the handler.
        exception_uid = "request-exception-%s" % id(request)
        got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
        try:
            try:
                response = self.handler(environ)
            except TemplateDoesNotExist as e:
                # If the view raises an exception, Django will attempt to show
                # the 500.html template. If that template is not available,
                # we should ignore the error in favor of re-raising the
                # underlying exception that caused the 500 error. Any other
                # template found to be missing during view error handling
                # should be reported as-is.
                if e.args != ('500.html',):
                    raise

            # Look for a signalled exception, clear the current context
            # exception data, then re-raise the signalled exception.
            # Also make sure that the signalled exception is cleared from
            # the local cache!
            if self.exc_info:
                exc_info = self.exc_info
                self.exc_info = None
                six.reraise(*exc_info)

            # Save the client and request that stimulated the response.
            response.client = self
            response.request = request

            # Add any rendered template detail to the response.
            response.templates = data.get("templates", [])
            response.context = data.get("context")

            response.json = curry(self._parse_json, response)

            # Attach the ResolverMatch instance to the response
            response.resolver_match = SimpleLazyObject(
                lambda: urlresolvers.resolve(request['PATH_INFO']))

            # Flatten a single context. Not really necessary anymore thanks to
            # the __getattr__ flattening in ContextList, but has some edge-case
            # backwards-compatibility implications.
            if response.context and len(response.context) == 1:
                response.context = response.context[0]

            # Update persistent cookie data.
            if response.cookies:
                self.cookies.update(response.cookies)

            return response
        finally:
            signals.template_rendered.disconnect(dispatch_uid=signal_uid)
            got_request_exception.disconnect(dispatch_uid=exception_uid)

    def get(self, path, data=None, follow=False, secure=False, **extra):
        """
        Requests a response from the server using GET.
        """
        response = super(Client, self).get(path, data=data, secure=secure,
                                           **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response

    def post(self, path, data=None, content_type=MULTIPART_CONTENT,
             follow=False, secure=False, **extra):
        """
        Requests a response from the server using POST.
        """
        response = super(Client, self).post(path, data=data,
                                            content_type=content_type,
                                            secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response

    def head(self, path, data=None, follow=False, secure=False, **extra):
        """
        Request a response from the server using HEAD.
        """
        response = super(Client, self).head(path, data=data, secure=secure,
                                            **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response

    def options(self, path, data='', content_type='application/octet-stream',
                follow=False, secure=False, **extra):
        """
        Request a response from the server using OPTIONS.
        """
        response = super(Client, self).options(path, data=data,
                                               content_type=content_type,
                                               secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response

    def put(self, path, data='', content_type='application/octet-stream',
            follow=False, secure=False, **extra):
        """
        Send a resource to the server using PUT.
        """
        response = super(Client, self).put(path, data=data,
                                           content_type=content_type,
                                           secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response

    def patch(self, path, data='', content_type='application/octet-stream',
              follow=False, secure=False, **extra):
        """
        Send a resource to the server using PATCH.
        """
        response = super(Client, self).patch(path, data=data,
                                             content_type=content_type,
                                             secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response

    def delete(self, path, data='', content_type='application/octet-stream',
               follow=False, secure=False, **extra):
        """
        Send a DELETE request to the server.
        """
        response = super(Client, self).delete(path, data=data,
                                              content_type=content_type,
                                              secure=secure, **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response

    def trace(self, path, data='', follow=False, secure=False, **extra):
        """
        Send a TRACE request to the server.
        """
        response = super(Client, self).trace(path, data=data, secure=secure,
                                             **extra)
        if follow:
            response = self._handle_redirects(response, **extra)
        return response

    def login(self, **credentials):
        """
        Sets the Factory to appear as if it has successfully logged into a site.

        Returns True if login is possible; False if the provided credentials
        are incorrect, or the user is inactive, or if the sessions framework is
        not available.
        """
        from django.contrib.auth import authenticate
        user = authenticate(**credentials)
        if (user and user.is_active and
                apps.is_installed('django.contrib.sessions')):
            self._login(user)
            return True
        else:
            return False

    def force_login(self, user, backend=None):
        """
        Logs the given user in without running the authentication backends.

        `backend` defaults to the first entry of AUTHENTICATION_BACKENDS.
        """
        if backend is None:
            backend = settings.AUTHENTICATION_BACKENDS[0]
        user.backend = backend
        self._login(user)

    def _login(self, user):
        # Shared implementation of login()/force_login(): store the user in a
        # (possibly fresh) session and mirror the session key into the
        # client's cookie jar.
        from django.contrib.auth import login
        engine = import_module(settings.SESSION_ENGINE)

        # Create a fake request to store login details.
        request = HttpRequest()

        if self.session:
            request.session = self.session
        else:
            request.session = engine.SessionStore()
        login(request, user)

        # Save the session values.
        request.session.save()

        # Set the cookie to represent the session.
        session_cookie = settings.SESSION_COOKIE_NAME
        self.cookies[session_cookie] = request.session.session_key
        cookie_data = {
            'max-age': None,
            'path': '/',
            'domain': settings.SESSION_COOKIE_DOMAIN,
            'secure': settings.SESSION_COOKIE_SECURE or None,
            'expires': None,
        }
        self.cookies[session_cookie].update(cookie_data)

    def logout(self):
        """
        Removes the authenticated user's cookies and session object.

        Causes the authenticated user to be logged out.
        """
        from django.contrib.auth import get_user, logout

        request = HttpRequest()
        engine = import_module(settings.SESSION_ENGINE)
        if self.session:
            request.session = self.session
            request.user = get_user(request)
        else:
            request.session = engine.SessionStore()
        logout(request)
        self.cookies = SimpleCookie()

    def _parse_json(self, response, **extra):
        """
        Parses the response body as JSON; raises ValueError when the
        Content-Type header does not declare application/json.
        """
        if 'application/json' not in response.get('Content-Type'):
            raise ValueError(
                'Content-Type header is "{0}", not "application/json"'
                .format(response.get('Content-Type'))
            )
        return json.loads(response.content.decode(), **extra)

    def _handle_redirects(self, response, **extra):
        "Follows any redirects by requesting responses from the server using GET."
        response.redirect_chain = []
        while response.status_code in (301, 302, 303, 307):
            response_url = response.url
            redirect_chain = response.redirect_chain
            redirect_chain.append((response_url, response.status_code))

            url = urlsplit(response_url)
            if url.scheme:
                extra['wsgi.url_scheme'] = url.scheme
            if url.hostname:
                extra['SERVER_NAME'] = url.hostname
            if url.port:
                extra['SERVER_PORT'] = str(url.port)

            response = self.get(url.path, QueryDict(url.query), follow=False, **extra)
            response.redirect_chain = redirect_chain

            if redirect_chain[-1] in redirect_chain[:-1]:
                # Check that we're not redirecting to somewhere we've already
                # been to, to prevent loops.
                raise RedirectCycleError("Redirect loop detected.",
                                         last_response=response)
            if len(redirect_chain) > 20:
                # Such a lengthy chain likely also means a loop, but one with
                # a growing path, changing view, or changing query argument;
                # 20 is the value of "network.http.redirection-limit" from Firefox.
                raise RedirectCycleError("Too many redirects.",
                                         last_response=response)

        return response
storm-computers/odoo
refs/heads/9.0
addons/base_gengo/ir_translation.py
46
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from openerp import api
from openerp.osv import fields, osv
from openerp.exceptions import UserError

# Maps Odoo locale codes to (Gengo language code, human-readable name).
# Locales absent from this table are passed to Gengo unchanged.
LANG_CODE_MAPPING = {
    'ar_SY': ('ar', 'Arabic'),
    'id_ID': ('id', 'Indonesian'),
    'nl_NL': ('nl', 'Dutch'),
    'fr_CA': ('fr-ca', 'French (Canada)'),
    'pl_PL': ('pl', 'Polish'),
    'zh_TW': ('zh-tw', 'Chinese (Traditional)'),
    'sv_SE': ('sv', 'Swedish'),
    'ko_KR': ('ko', 'Korean'),
    'pt_PT': ('pt', 'Portuguese (Europe)'),
    'en_US': ('en', 'English'),
    'ja_JP': ('ja', 'Japanese'),
    'es_ES': ('es', 'Spanish (Spain)'),
    'zh_CN': ('zh', 'Chinese (Simplified)'),
    'de_DE': ('de', 'German'),
    'fr_FR': ('fr', 'French'),
    'fr_BE': ('fr', 'French'),
    'ru_RU': ('ru', 'Russian'),
    'it_IT': ('it', 'Italian'),
    'pt_BR': ('pt-br', 'Portuguese (Brazil)'),
    'th_TH': ('th', 'Thai'),
    'nb_NO': ('no', 'Norwegian'),
    'ro_RO': ('ro', 'Romanian'),
    'tr_TR': ('tr', 'Turkish'),
    'bg_BG': ('bg', 'Bulgarian'),
    'da_DK': ('da', 'Danish'),
    'en_GB': ('en-gb', 'English (British)'),
    'el_GR': ('el', 'Greek'),
    'vi_VN': ('vi', 'Vietnamese'),
    'he_IL': ('he', 'Hebrew'),
    'hu_HU': ('hu', 'Hungarian'),
    'fi_FI': ('fi', 'Finnish')
}


class ir_translation(osv.Model):
    """Extends ir.translation with Gengo ordering metadata so that better
    (human) Gengo translations win over machine ones when looking up terms.
    """
    _name = "ir.translation"
    _inherit = "ir.translation"

    _columns = {
        # Free-form log of Gengo activity for this translation record.
        'gengo_comment': fields.text("Comments & Activity Linked to Gengo"),
        # Identifier of the Gengo order that produced this translation.
        'order_id': fields.char('Gengo Order ID', size=32),
        "gengo_translation": fields.selection([('machine', 'Translation By Machine'),
                                               ('standard', 'Standard'),
                                               ('pro', 'Pro'),
                                               ('ultra', 'Ultra')], "Gengo Translation Service Level",
                                              help='You can select here the service level you want for an automatic translation using Gengo.'),
    }

    def _get_all_supported_languages(self, cr, uid, context=None):
        """Query the Gengo API and return {target language code: [tiers]}.

        Raises UserError when Gengo authentication fails.
        """
        flag, gengo = self.pool.get('base.gengo.translations').gengo_authentication(cr, uid, context=context)
        if not flag:
            # `gengo` holds the error message in the failure case.
            raise UserError(gengo)
        supported_langs = {}
        lang_pair = gengo.getServiceLanguagePairs(lc_src='en')
        if lang_pair['opstat'] == 'ok':
            for g_lang in lang_pair['response']:
                if g_lang['lc_tgt'] not in supported_langs:
                    supported_langs[g_lang['lc_tgt']] = []
                supported_langs[g_lang['lc_tgt']] += [g_lang['tier']]
        return supported_langs

    # NOTE(review): defined without `self` — when called as a bound method the
    # instance would land in `cr`. Looks like an upstream quirk; confirm call
    # sites before changing the signature.
    def _get_gengo_corresponding_language(cr, lang):
        # Fall back to the Odoo locale itself when no mapping exists.
        return lang in LANG_CODE_MAPPING and LANG_CODE_MAPPING[lang][0] or lang

    def _get_source_query(self, cr, uid, name, types, lang, source, res_id):
        """Rank source lookups so the best Gengo service level comes first
        (DESC: pro > ultra > standard > machine > untranslated)."""
        query, params = super(ir_translation, self)._get_source_query(cr, uid, name, types, lang, source, res_id)
        query += """
                    ORDER BY
                        CASE
                            WHEN gengo_translation=%s then 10
                            WHEN gengo_translation=%s then 20
                            WHEN gengo_translation=%s then 30
                            WHEN gengo_translation=%s then 40
                            ELSE 0
                        END DESC
                """
        params += ('machine', 'standard', 'ultra', 'pro',)
        return (query, params)

    @api.model
    def _get_terms_query(self, field, records):
        query, params = super(ir_translation, self)._get_terms_query(field, records)
        # order translations from worst to best
        query += """
                    ORDER BY
                        CASE
                            WHEN gengo_translation=%s then 10
                            WHEN gengo_translation=%s then 20
                            WHEN gengo_translation=%s then 30
                            WHEN gengo_translation=%s then 40
                            ELSE 0
                        END ASC
                """
        params += ('machine', 'standard', 'ultra', 'pro')
        return query, params
nitzmahone/tower-cli
refs/heads/master
tests/test_utils_datastructures.py
2
# Copyright 2014, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from tower_cli.utils.data_structures import OrderedDict

from tests.compat import unittest


class OrderedDictTests(unittest.TestCase):
    """Tests covering the OrderedDict subclass shipped with tower-cli."""

    def test_dunder_repr(self):
        """Verify that __repr__ renders entries with plain dict syntax,
        preserving insertion order.
        """
        ordered = OrderedDict()
        ordered['foo'] = 'spam'
        ordered['bar'] = 'eggs'
        expected = "{'foo': 'spam', 'bar': 'eggs'}"
        self.assertEqual(repr(ordered), expected)
sdague/home-assistant
refs/heads/dev
homeassistant/components/acmeda/__init__.py
15
"""The Rollease Acmeda Automate integration.""" import asyncio from homeassistant import config_entries, core from .const import DOMAIN from .hub import PulseHub CONF_HUBS = "hubs" PLATFORMS = ["cover", "sensor"] async def async_setup(hass: core.HomeAssistant, config: dict): """Set up the Rollease Acmeda Automate component.""" return True async def async_setup_entry( hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry ): """Set up Rollease Acmeda Automate hub from a config entry.""" hub = PulseHub(hass, config_entry) if not await hub.async_setup(): return False hass.data.setdefault(DOMAIN, {}) hass.data[DOMAIN][config_entry.entry_id] = hub for component in PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(config_entry, component) ) return True async def async_unload_entry( hass: core.HomeAssistant, config_entry: config_entries.ConfigEntry ): """Unload a config entry.""" hub = hass.data[DOMAIN][config_entry.entry_id] unload_ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload(config_entry, component) for component in PLATFORMS ] ) ) if not await hub.async_reset(): return False if unload_ok: hass.data[DOMAIN].pop(config_entry.entry_id) return unload_ok
40223244/cdb-2
refs/heads/master
static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/__init__.py
603
## pygame - Python Game Library
## Copyright (C) 2000-2001  Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
##
## Pete Shinners
## pete@shinners.org
'''Top-level Pygame module.

Pygame is a set of Python modules designed for writing games.
It is written on top of the excellent SDL library. This allows you
to create fully featured games and multimedia programs in the python
language. The package is highly portable, with games running on
Windows, MacOS, OS X, BeOS, FreeBSD, IRIX, and Linux.
'''

__docformat__ = 'restructuredtext'
__version__ = '$Id$'

import os
import sys


class MissingModule:
    # Placeholder bound to the name of an optional pygame submodule that
    # failed to import; raises only when the module is actually used.
    def __init__(self, name, info='', urgent=0):
        self.name = name
        self.info = str(info)
        self.urgent = urgent
        if urgent:
            self.warn()

    def __getattr__(self, var):
        # First attribute access on a non-urgent module emits the warning;
        # every access then fails with NotImplementedError.
        if not self.urgent:
            self.warn()
            self.urgent = 1
        MissingPygameModule = "%s module not available" % self.name
        raise NotImplementedError(MissingPygameModule)

    def __nonzero__(self):
        # Python 2 truthiness: a missing module is always falsy.
        return 0

    def warn(self):
        if self.urgent:
            type = 'import'
        else:
            type = 'use'
        message = '%s %s: %s' % (type, self.name, self.info)
        try:
            import warnings
            if self.urgent:
                level = 4
            else:
                level = 3
            warnings.warn(message, RuntimeWarning, level)
        except ImportError:
            # warnings itself may be unavailable on stripped-down runtimes.
            print(message)


#we need to import like this, each at a time. the cleanest way to import
#our modules is with the import command (not the __import__ function)

#first, the "required" modules
#from pygame.array import *  #brython fix me
from pygame.base import *
from pygame.constants import *
from pygame.version import *
from pygame.rect import Rect
import pygame.color
Color = pygame.color.Color
__version__ = ver

#added by earney
from . import time
from . import display
from . import constants
from . import event
from . import font
from . import mixer
from . import sprite
from .surface import Surface
from . import image
from . import mouse
from . import transform

#next, the "standard" modules
#we still allow them to be missing for stripped down pygame distributions
# NOTE(review): the block below is a dead triple-quoted string (Python 2
# syntax), kept disabled for the Brython port — not executable code.
'''
try: import pygame.cdrom
except (ImportError,IOError), msg:cdrom=MissingModule("cdrom", msg, 1)

try: import pygame.cursors
except (ImportError,IOError), msg:cursors=MissingModule("cursors", msg, 1)

try: import pygame.display
except (ImportError,IOError), msg:display=MissingModule("display", msg, 1)

try: import pygame.draw
except (ImportError,IOError), msg:draw=MissingModule("draw", msg, 1)

try: import pygame.event
except (ImportError,IOError), msg:event=MissingModule("event", msg, 1)

try: import pygame.image
except (ImportError,IOError), msg:image=MissingModule("image", msg, 1)

try: import pygame.joystick
except (ImportError,IOError), msg:joystick=MissingModule("joystick", msg, 1)

try: import pygame.key
except (ImportError,IOError), msg:key=MissingModule("key", msg, 1)

try: import pygame.mouse
except (ImportError,IOError), msg:mouse=MissingModule("mouse", msg, 1)

try: import pygame.sprite
except (ImportError,IOError), msg:sprite=MissingModule("sprite", msg, 1)

try: from pygame.surface import Surface
except (ImportError,IOError):Surface = lambda:Missing_Function

try: from pygame.overlay import Overlay
except (ImportError,IOError):Overlay = lambda:Missing_Function

try: import pygame.time
except (ImportError,IOError), msg:time=MissingModule("time", msg, 1)

try: import pygame.transform
except (ImportError,IOError), msg:transform=MissingModule("transform", msg, 1)

#lastly, the "optional" pygame modules
try:
    import pygame.font
    import pygame.sysfont
    pygame.font.SysFont = pygame.sysfont.SysFont
    pygame.font.get_fonts = pygame.sysfont.get_fonts
    pygame.font.match_font = pygame.sysfont.match_font
except (ImportError,IOError), msg:font=MissingModule("font", msg, 0)

try: import pygame.mixer
except (ImportError,IOError), msg:mixer=MissingModule("mixer", msg, 0)

#try: import pygame.movie
#except (ImportError,IOError), msg:movie=MissingModule("movie", msg, 0)

#try: import pygame.movieext
#except (ImportError,IOError), msg:movieext=MissingModule("movieext", msg, 0)

try: import pygame.surfarray
except (ImportError,IOError), msg:surfarray=MissingModule("surfarray", msg, 0)

try: import pygame.sndarray
except (ImportError,IOError), msg:sndarray=MissingModule("sndarray", msg, 0)

#try: import pygame.fastevent
#except (ImportError,IOError), msg:fastevent=MissingModule("fastevent", msg, 0)

#there's also a couple "internal" modules not needed
#by users, but putting them here helps "dependency finder"
#programs get everything they need (like py2exe)
try: import pygame.imageext; del pygame.imageext
except (ImportError,IOError):pass

try: import pygame.mixer_music; del pygame.mixer_music
except (ImportError,IOError):pass

def packager_imports():
    """
    Some additional things that py2app/py2exe will want to see
    """
    import OpenGL.GL
'''

#make Rects pickleable
import copyreg


def __rect_constructor(x,y,w,h):
    # Rebuilds a Rect from its pickled (x, y, w, h) tuple.
    return Rect(x,y,w,h)


def __rect_reduce(r):
    # Reduces a Rect to (constructor, args) for the pickle protocol.
    assert type(r) == Rect
    return __rect_constructor, (r.x, r.y, r.w, r.h)


copyreg.pickle(Rect, __rect_reduce, __rect_constructor)


#cleanup namespace
del pygame, os, sys, #TODO rwobject, surflock, MissingModule, copy_reg
grzes/djangae
refs/heads/master
djangae/contrib/backup/utils.py
1
from django.conf import settings SETTINGS_PREFIX = "DJANGAE_BACKUP_" def get_backup_setting(name, required=True, default=None): settings_name = "{}{}".format(SETTINGS_PREFIX, name) if required and not hasattr(settings, settings_name): raise Exception("{} is required".format(settings_name)) return getattr(settings, settings_name, default)
DefyVentures/edx-platform
refs/heads/master
common/static/js/vendor/mathjax-MathJax-c9db6ac/docs/source/conf.py
104
# -*- coding: utf-8 -*- # # MathJax documentation build configuration file, created by # sphinx-quickstart on Sun May 16 23:18:19 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'MathJax' copyright = u'2012 Design Science' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '2.0' # The full version, including alpha/beta/rc tags. release = '2.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. 
#today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' #highlight_language = 'javascript' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'mjtheme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['.'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. 
#html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True html_show_sourcelink = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'MathJaxdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). 
latex_documents = [ ( 'index', 'MathJax.tex', u'MathJax Documentation', u'Davide Cervone, Casey Stark, Robert Miner, Paul Topping', 'manual', ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True
ekkleesia3/google-app-engine-django
refs/heads/master
appengine_django/management/commands/rollback.py
60
#!/usr/bin/python2.4 # # Copyright 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import logging from django.core.management.base import BaseCommand def run_appcfg(): # import this so that we run through the checks at the beginning # and report the appropriate errors import appcfg # We don't really want to use that one though, it just executes this one from google.appengine.tools import appcfg # Reset the logging level to WARN as appcfg will spew tons of logs on INFO logging.getLogger().setLevel(logging.WARN) # Note: if we decide to change the name of this command to something other # than 'rollback' we will have to munge the args to replace whatever # we called it with 'rollback' new_args = sys.argv[:] new_args.append('.') appcfg.main(new_args) class Command(BaseCommand): """Calls the appcfg.py's rollback command for the current project. Any additional arguments are passed directly to appcfg.py. """ help = 'Calls appcfg.py rollback for the current project.' args = '[any appcfg.py options]' def run_from_argv(self, argv): run_appcfg()
franekp/millandict
refs/heads/master
ankidict/thirdparty/sqlalchemy/orm/session.py
2
# orm/session.py # Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Provides the Session class and related utilities.""" import weakref from .. import util, sql, engine, exc as sa_exc from ..sql import util as sql_util, expression from . import ( SessionExtension, attributes, exc, query, loading, identity ) from ..inspection import inspect from .base import ( object_mapper, class_mapper, _class_to_mapper, _state_mapper, object_state, _none_set, state_str, instance_str ) import itertools from . import persistence from .unitofwork import UOWTransaction from . import state as statelib import sys __all__ = ['Session', 'SessionTransaction', 'SessionExtension', 'sessionmaker'] _sessions = weakref.WeakValueDictionary() """Weak-referencing dictionary of :class:`.Session` objects. """ def _state_session(state): """Given an :class:`.InstanceState`, return the :class:`.Session` associated, if any. """ if state.session_id: try: return _sessions[state.session_id] except KeyError: pass return None class _SessionClassMethods(object): """Class-level methods for :class:`.Session`, :class:`.sessionmaker`.""" @classmethod def close_all(cls): """Close *all* sessions in memory.""" for sess in _sessions.values(): sess.close() @classmethod @util.dependencies("sqlalchemy.orm.util") def identity_key(cls, orm_util, *args, **kwargs): """Return an identity key. This is an alias of :func:`.util.identity_key`. """ return orm_util.identity_key(*args, **kwargs) @classmethod def object_session(cls, instance): """Return the :class:`.Session` to which an object belongs. This is an alias of :func:`.object_session`. 
""" return object_session(instance) ACTIVE = util.symbol('ACTIVE') PREPARED = util.symbol('PREPARED') COMMITTED = util.symbol('COMMITTED') DEACTIVE = util.symbol('DEACTIVE') CLOSED = util.symbol('CLOSED') class SessionTransaction(object): """A :class:`.Session`-level transaction. :class:`.SessionTransaction` is a mostly behind-the-scenes object not normally referenced directly by application code. It coordinates among multiple :class:`.Connection` objects, maintaining a database transaction for each one individually, committing or rolling them back all at once. It also provides optional two-phase commit behavior which can augment this coordination operation. The :attr:`.Session.transaction` attribute of :class:`.Session` refers to the current :class:`.SessionTransaction` object in use, if any. A :class:`.SessionTransaction` is associated with a :class:`.Session` in its default mode of ``autocommit=False`` immediately, associated with no database connections. As the :class:`.Session` is called upon to emit SQL on behalf of various :class:`.Engine` or :class:`.Connection` objects, a corresponding :class:`.Connection` and associated :class:`.Transaction` is added to a collection within the :class:`.SessionTransaction` object, becoming one of the connection/transaction pairs maintained by the :class:`.SessionTransaction`. The lifespan of the :class:`.SessionTransaction` ends when the :meth:`.Session.commit`, :meth:`.Session.rollback` or :meth:`.Session.close` methods are called. At this point, the :class:`.SessionTransaction` removes its association with its parent :class:`.Session`. A :class:`.Session` that is in ``autocommit=False`` mode will create a new :class:`.SessionTransaction` to replace it immediately, whereas a :class:`.Session` that's in ``autocommit=True`` mode will remain without a :class:`.SessionTransaction` until the :meth:`.Session.begin` method is called. Another detail of :class:`.SessionTransaction` behavior is that it is capable of "nesting". 
This means that the :meth:`.Session.begin` method can be called while an existing :class:`.SessionTransaction` is already present, producing a new :class:`.SessionTransaction` that temporarily replaces the parent :class:`.SessionTransaction`. When a :class:`.SessionTransaction` is produced as nested, it assigns itself to the :attr:`.Session.transaction` attribute. When it is ended via :meth:`.Session.commit` or :meth:`.Session.rollback`, it restores its parent :class:`.SessionTransaction` back onto the :attr:`.Session.transaction` attribute. The behavior is effectively a stack, where :attr:`.Session.transaction` refers to the current head of the stack. The purpose of this stack is to allow nesting of :meth:`.Session.rollback` or :meth:`.Session.commit` calls in context with various flavors of :meth:`.Session.begin`. This nesting behavior applies to when :meth:`.Session.begin_nested` is used to emit a SAVEPOINT transaction, and is also used to produce a so-called "subtransaction" which allows a block of code to use a begin/rollback/commit sequence regardless of whether or not its enclosing code block has begun a transaction. The :meth:`.flush` method, whether called explicitly or via autoflush, is the primary consumer of the "subtransaction" feature, in that it wishes to guarantee that it works within in a transaction block regardless of whether or not the :class:`.Session` is in transactional mode when the method is called. 
See also: :meth:`.Session.rollback` :meth:`.Session.commit` :meth:`.Session.begin` :meth:`.Session.begin_nested` :attr:`.Session.is_active` :meth:`.SessionEvents.after_commit` :meth:`.SessionEvents.after_rollback` :meth:`.SessionEvents.after_soft_rollback` """ _rollback_exception = None def __init__(self, session, parent=None, nested=False): self.session = session self._connections = {} self._parent = parent self.nested = nested self._state = ACTIVE if not parent and nested: raise sa_exc.InvalidRequestError( "Can't start a SAVEPOINT transaction when no existing " "transaction is in progress") if self.session._enable_transaction_accounting: self._take_snapshot() self.session.dispatch.after_transaction_create(self.session, self) @property def is_active(self): return self.session is not None and self._state is ACTIVE def _assert_active(self, prepared_ok=False, rollback_ok=False, deactive_ok=False, closed_msg="This transaction is closed"): if self._state is COMMITTED: raise sa_exc.InvalidRequestError( "This session is in 'committed' state; no further " "SQL can be emitted within this transaction." ) elif self._state is PREPARED: if not prepared_ok: raise sa_exc.InvalidRequestError( "This session is in 'prepared' state; no further " "SQL can be emitted within this transaction." ) elif self._state is DEACTIVE: if not deactive_ok and not rollback_ok: if self._rollback_exception: raise sa_exc.InvalidRequestError( "This Session's transaction has been rolled back " "due to a previous exception during flush." " To begin a new transaction with this Session, " "first issue Session.rollback()." " Original exception was: %s" % self._rollback_exception ) elif not deactive_ok: raise sa_exc.InvalidRequestError( "This Session's transaction has been rolled back " "by a nested rollback() call. To begin a new " "transaction, issue Session.rollback() first." 
) elif self._state is CLOSED: raise sa_exc.ResourceClosedError(closed_msg) @property def _is_transaction_boundary(self): return self.nested or not self._parent def connection(self, bindkey, execution_options=None, **kwargs): self._assert_active() bind = self.session.get_bind(bindkey, **kwargs) return self._connection_for_bind(bind, execution_options) def _begin(self, nested=False): self._assert_active() return SessionTransaction( self.session, self, nested=nested) def _iterate_self_and_parents(self, upto=None): current = self result = () while current: result += (current, ) if current._parent is upto: break elif current._parent is None: raise sa_exc.InvalidRequestError( "Transaction %s is not on the active transaction list" % ( upto)) else: current = current._parent return result def _take_snapshot(self): if not self._is_transaction_boundary: self._new = self._parent._new self._deleted = self._parent._deleted self._dirty = self._parent._dirty self._key_switches = self._parent._key_switches return if not self.session._flushing: self.session.flush() self._new = weakref.WeakKeyDictionary() self._deleted = weakref.WeakKeyDictionary() self._dirty = weakref.WeakKeyDictionary() self._key_switches = weakref.WeakKeyDictionary() def _restore_snapshot(self, dirty_only=False): """Restore the restoration state taken before a transaction began. Corresponds to a rollback. 
""" assert self._is_transaction_boundary self.session._expunge_states( set(self._new).union(self.session._new), to_transient=True) for s, (oldkey, newkey) in self._key_switches.items(): self.session.identity_map.safe_discard(s) s.key = oldkey self.session.identity_map.replace(s) for s in set(self._deleted).union(self.session._deleted): self.session._update_impl(s, revert_deletion=True) assert not self.session._deleted for s in self.session.identity_map.all_states(): if not dirty_only or s.modified or s in self._dirty: s._expire(s.dict, self.session.identity_map._modified) def _remove_snapshot(self): """Remove the restoration state taken before a transaction began. Corresponds to a commit. """ assert self._is_transaction_boundary if not self.nested and self.session.expire_on_commit: for s in self.session.identity_map.all_states(): s._expire(s.dict, self.session.identity_map._modified) statelib.InstanceState._detach_states( list(self._deleted), self.session) self._deleted.clear() elif self.nested: self._parent._new.update(self._new) self._parent._dirty.update(self._dirty) self._parent._deleted.update(self._deleted) self._parent._key_switches.update(self._key_switches) def _connection_for_bind(self, bind, execution_options): self._assert_active() if bind in self._connections: if execution_options: util.warn( "Connection is already established for the " "given bind; execution_options ignored") return self._connections[bind][0] if self._parent: conn = self._parent._connection_for_bind(bind, execution_options) if not self.nested: return conn else: if isinstance(bind, engine.Connection): conn = bind if conn.engine in self._connections: raise sa_exc.InvalidRequestError( "Session already has a Connection associated for the " "given Connection's Engine") else: conn = bind.contextual_connect() if execution_options: conn = conn.execution_options(**execution_options) if self.session.twophase and self._parent is None: transaction = conn.begin_twophase() elif self.nested: 
transaction = conn.begin_nested() else: transaction = conn.begin() self._connections[conn] = self._connections[conn.engine] = \ (conn, transaction, conn is not bind) self.session.dispatch.after_begin(self.session, self, conn) return conn def prepare(self): if self._parent is not None or not self.session.twophase: raise sa_exc.InvalidRequestError( "'twophase' mode not enabled, or not root transaction; " "can't prepare.") self._prepare_impl() def _prepare_impl(self): self._assert_active() if self._parent is None or self.nested: self.session.dispatch.before_commit(self.session) stx = self.session.transaction if stx is not self: for subtransaction in stx._iterate_self_and_parents(upto=self): subtransaction.commit() if not self.session._flushing: for _flush_guard in range(100): if self.session._is_clean(): break self.session.flush() else: raise exc.FlushError( "Over 100 subsequent flushes have occurred within " "session.commit() - is an after_flush() hook " "creating new objects?") if self._parent is None and self.session.twophase: try: for t in set(self._connections.values()): t[1].prepare() except: with util.safe_reraise(): self.rollback() self._state = PREPARED def commit(self): self._assert_active(prepared_ok=True) if self._state is not PREPARED: self._prepare_impl() if self._parent is None or self.nested: for t in set(self._connections.values()): t[1].commit() self._state = COMMITTED self.session.dispatch.after_commit(self.session) if self.session._enable_transaction_accounting: self._remove_snapshot() self.close() return self._parent def rollback(self, _capture_exception=False): self._assert_active(prepared_ok=True, rollback_ok=True) stx = self.session.transaction if stx is not self: for subtransaction in stx._iterate_self_and_parents(upto=self): subtransaction.close() boundary = self rollback_err = None if self._state in (ACTIVE, PREPARED): for transaction in self._iterate_self_and_parents(): if transaction._parent is None or transaction.nested: try: 
transaction._rollback_impl() except: rollback_err = sys.exc_info() transaction._state = DEACTIVE boundary = transaction break else: transaction._state = DEACTIVE sess = self.session if not rollback_err and sess._enable_transaction_accounting and \ not sess._is_clean(): # if items were added, deleted, or mutated # here, we need to re-restore the snapshot util.warn( "Session's state has been changed on " "a non-active transaction - this state " "will be discarded.") boundary._restore_snapshot(dirty_only=boundary.nested) self.close() if self._parent and _capture_exception: self._parent._rollback_exception = sys.exc_info()[1] if rollback_err: util.reraise(*rollback_err) sess.dispatch.after_soft_rollback(sess, self) return self._parent def _rollback_impl(self): try: for t in set(self._connections.values()): t[1].rollback() finally: if self.session._enable_transaction_accounting: self._restore_snapshot(dirty_only=self.nested) self.session.dispatch.after_rollback(self.session) def close(self, invalidate=False): self.session.transaction = self._parent if self._parent is None: for connection, transaction, autoclose in \ set(self._connections.values()): if invalidate: connection.invalidate() if autoclose: connection.close() else: transaction.close() self._state = CLOSED self.session.dispatch.after_transaction_end(self.session, self) if self._parent is None: if not self.session.autocommit: self.session.begin() self.session = None self._connections = None def __enter__(self): return self def __exit__(self, type, value, traceback): self._assert_active(deactive_ok=True, prepared_ok=True) if self.session.transaction is None: return if type is None: try: self.commit() except: with util.safe_reraise(): self.rollback() else: self.rollback() class Session(_SessionClassMethods): """Manages persistence operations for ORM-mapped objects. The Session's usage paradigm is described at :doc:`/orm/session`. 
""" public_methods = ( '__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested', 'close', 'commit', 'connection', 'delete', 'execute', 'expire', 'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind', 'is_modified', 'bulk_save_objects', 'bulk_insert_mappings', 'bulk_update_mappings', 'merge', 'query', 'refresh', 'rollback', 'scalar') def __init__(self, bind=None, autoflush=True, expire_on_commit=True, _enable_transaction_accounting=True, autocommit=False, twophase=False, weak_identity_map=True, binds=None, extension=None, info=None, query_cls=query.Query): """Construct a new Session. See also the :class:`.sessionmaker` function which is used to generate a :class:`.Session`-producing callable with a given set of arguments. :param autocommit: .. warning:: The autocommit flag is **not for general use**, and if it is used, queries should only be invoked within the span of a :meth:`.Session.begin` / :meth:`.Session.commit` pair. Executing queries outside of a demarcated transaction is a legacy mode of usage, and can in some cases lead to concurrent connection checkouts. Defaults to ``False``. When ``True``, the :class:`.Session` does not keep a persistent transaction running, and will acquire connections from the engine on an as-needed basis, returning them immediately after their use. Flushes will begin and commit (or possibly rollback) their own transaction if no transaction is present. When using this mode, the :meth:`.Session.begin` method is used to explicitly start transactions. .. seealso:: :ref:`session_autocommit` :param autoflush: When ``True``, all query operations will issue a :meth:`~.Session.flush` call to this ``Session`` before proceeding. This is a convenience feature so that :meth:`~.Session.flush` need not be called repeatedly in order for database queries to retrieve results. It's typical that ``autoflush`` is used in conjunction with ``autocommit=False``. 
In this scenario, explicit calls to :meth:`~.Session.flush` are rarely needed; you usually only need to call :meth:`~.Session.commit` (which flushes) to finalize changes. :param bind: An optional :class:`.Engine` or :class:`.Connection` to which this ``Session`` should be bound. When specified, all SQL operations performed by this session will execute via this connectable. :param binds: An optional dictionary which contains more granular "bind" information than the ``bind`` parameter provides. This dictionary can map individual :class`.Table` instances as well as :class:`~.Mapper` instances to individual :class:`.Engine` or :class:`.Connection` objects. Operations which proceed relative to a particular :class:`.Mapper` will consult this dictionary for the direct :class:`.Mapper` instance as well as the mapper's ``mapped_table`` attribute in order to locate a connectable to use. The full resolution is described in the :meth:`.Session.get_bind`. Usage looks like:: Session = sessionmaker(binds={ SomeMappedClass: create_engine('postgresql://engine1'), somemapper: create_engine('postgresql://engine2'), some_table: create_engine('postgresql://engine3'), }) Also see the :meth:`.Session.bind_mapper` and :meth:`.Session.bind_table` methods. :param \class_: Specify an alternate class other than ``sqlalchemy.orm.session.Session`` which should be used by the returned class. This is the only argument that is local to the :class:`.sessionmaker` function, and is not sent directly to the constructor for ``Session``. :param _enable_transaction_accounting: Defaults to ``True``. A legacy-only flag which when ``False`` disables *all* 0.5-style object accounting on transaction boundaries, including auto-expiry of instances on rollback and commit, maintenance of the "new" and "deleted" lists upon rollback, and autoflush of pending changes upon :meth:`~.Session.begin`, all of which are interdependent. :param expire_on_commit: Defaults to ``True``. 
When ``True``, all instances will be fully expired after each :meth:`~.commit`, so that all attribute/object access subsequent to a completed transaction will load from the most recent database state. :param extension: An optional :class:`~.SessionExtension` instance, or a list of such instances, which will receive pre- and post- commit and flush events, as well as a post-rollback event. **Deprecated.** Please see :class:`.SessionEvents`. :param info: optional dictionary of arbitrary data to be associated with this :class:`.Session`. Is available via the :attr:`.Session.info` attribute. Note the dictionary is copied at construction time so that modifications to the per- :class:`.Session` dictionary will be local to that :class:`.Session`. .. versionadded:: 0.9.0 :param query_cls: Class which should be used to create new Query objects, as returned by the :meth:`~.Session.query` method. Defaults to :class:`.Query`. :param twophase: When ``True``, all transactions will be started as a "two phase" transaction, i.e. using the "two phase" semantics of the database in use along with an XID. During a :meth:`~.commit`, after :meth:`~.flush` has been issued for all attached databases, the :meth:`~.TwoPhaseTransaction.prepare` method on each database's :class:`.TwoPhaseTransaction` will be called. This allows each database to roll back the entire transaction, before each transaction is committed. :param weak_identity_map: Defaults to ``True`` - when set to ``False``, objects placed in the :class:`.Session` will be strongly referenced until explicitly removed or the :class:`.Session` is closed. **Deprecated** - The strong reference identity map is legacy. See the recipe at :ref:`session_referencing_behavior` for an event-based approach to maintaining strong identity references. """ if weak_identity_map: self._identity_cls = identity.WeakInstanceDict else: util.warn_deprecated( "weak_identity_map=False is deprecated. 
" "See the documentation on 'Session Referencing Behavior' " "for an event-based approach to maintaining strong identity " "references.") self._identity_cls = identity.StrongInstanceDict self.identity_map = self._identity_cls() self._new = {} # InstanceState->object, strong refs object self._deleted = {} # same self.bind = bind self.__binds = {} self._flushing = False self._warn_on_events = False self.transaction = None self.hash_key = _new_sessionid() self.autoflush = autoflush self.autocommit = autocommit self.expire_on_commit = expire_on_commit self._enable_transaction_accounting = _enable_transaction_accounting self.twophase = twophase self._query_cls = query_cls if info: self.info.update(info) if extension: for ext in util.to_list(extension): SessionExtension._adapt_listener(self, ext) if binds is not None: for key, bind in binds.items(): self._add_bind(key, bind) if not self.autocommit: self.begin() _sessions[self.hash_key] = self connection_callable = None transaction = None """The current active or inactive :class:`.SessionTransaction`.""" @util.memoized_property def info(self): """A user-modifiable dictionary. The initial value of this dictionary can be populated using the ``info`` argument to the :class:`.Session` constructor or :class:`.sessionmaker` constructor or factory methods. The dictionary here is always local to this :class:`.Session` and can be modified independently of all other :class:`.Session` objects. .. versionadded:: 0.9.0 """ return {} def begin(self, subtransactions=False, nested=False): """Begin a transaction on this :class:`.Session`. If this Session is already within a transaction, either a plain transaction or nested transaction, an error is raised, unless ``subtransactions=True`` or ``nested=True`` is specified. The ``subtransactions=True`` flag indicates that this :meth:`~.Session.begin` can create a subtransaction if a transaction is already in progress. 
For documentation on subtransactions, please see :ref:`session_subtransactions`. The ``nested`` flag begins a SAVEPOINT transaction and is equivalent to calling :meth:`~.Session.begin_nested`. For documentation on SAVEPOINT transactions, please see :ref:`session_begin_nested`. """ if self.transaction is not None: if subtransactions or nested: self.transaction = self.transaction._begin( nested=nested) else: raise sa_exc.InvalidRequestError( "A transaction is already begun. Use " "subtransactions=True to allow subtransactions.") else: self.transaction = SessionTransaction( self, nested=nested) return self.transaction # needed for __enter__/__exit__ hook def begin_nested(self): """Begin a `nested` transaction on this Session. The target database(s) must support SQL SAVEPOINTs or a SQLAlchemy-supported vendor implementation of the idea. For documentation on SAVEPOINT transactions, please see :ref:`session_begin_nested`. """ return self.begin(nested=True) def rollback(self): """Rollback the current transaction in progress. If no transaction is in progress, this method is a pass-through. This method rolls back the current transaction or nested transaction regardless of subtransactions being in effect. All subtransactions up to the first real transaction are closed. Subtransactions occur when :meth:`.begin` is called multiple times. .. seealso:: :ref:`session_rollback` """ if self.transaction is None: pass else: self.transaction.rollback() def commit(self): """Flush pending changes and commit the current transaction. If no transaction is in progress, this method raises an :exc:`~sqlalchemy.exc.InvalidRequestError`. By default, the :class:`.Session` also expires all database loaded state on all ORM-managed attributes after transaction commit. This so that subsequent operations load the most recent data from the database. This behavior can be disabled using the ``expire_on_commit=False`` option to :class:`.sessionmaker` or the :class:`.Session` constructor. 
        If a subtransaction is in effect (which occurs when begin() is
        called multiple times), the subtransaction will be closed, and the
        next call to ``commit()`` will operate on the enclosing transaction.

        When using the :class:`.Session` in its default mode of
        ``autocommit=False``, a new transaction will be begun immediately
        after the commit, but note that the newly begun transaction does
        *not* use any connection resources until the first SQL is actually
        emitted.

        .. seealso::

            :ref:`session_committing`

        """
        if self.transaction is None:
            # autocommit=False: lazily create the transaction so commit()
            # on a fresh session behaves like an empty commit rather than
            # erroring
            if not self.autocommit:
                self.begin()
            else:
                raise sa_exc.InvalidRequestError("No transaction is begun.")

        self.transaction.commit()

    def prepare(self):
        """Prepare the current transaction in progress for two phase commit.

        If no transaction is in progress, this method raises an
        :exc:`~sqlalchemy.exc.InvalidRequestError`.

        Only root transactions of two phase sessions can be prepared. If
        the current transaction is not such, an
        :exc:`~sqlalchemy.exc.InvalidRequestError` is raised.

        """
        if self.transaction is None:
            if not self.autocommit:
                self.begin()
            else:
                raise sa_exc.InvalidRequestError("No transaction is begun.")

        self.transaction.prepare()

    def connection(self, mapper=None, clause=None, bind=None,
                   close_with_result=False, execution_options=None, **kw):
        """Return a :class:`.Connection` object corresponding to this
        :class:`.Session` object's transactional state.

        If this :class:`.Session` is configured with ``autocommit=False``,
        either the :class:`.Connection` corresponding to the current
        transaction is returned, or if no transaction is in progress, a new
        one is begun and the :class:`.Connection` returned (note that no
        transactional state is established with the DBAPI until the first
        SQL statement is emitted).

        Alternatively, if this :class:`.Session` is configured with
        ``autocommit=True``, an ad-hoc :class:`.Connection` is returned
        using :meth:`.Engine.contextual_connect` on the underlying
        :class:`.Engine`.

        Ambiguity in multi-bind or unbound :class:`.Session` objects can be
        resolved through any of the optional keyword arguments. This
        ultimately makes usage of the :meth:`.get_bind` method for
        resolution.

        :param bind:
          Optional :class:`.Engine` to be used as the bind. If this engine
          is already involved in an ongoing transaction, that connection
          will be used. This argument takes precedence over ``mapper``,
          ``clause``.

        :param mapper:
          Optional :func:`.mapper` mapped class, used to identify the
          appropriate bind. This argument takes precedence over ``clause``.

        :param clause:
            A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
            :func:`~.sql.expression.text`, etc.) which will be used to
            locate a bind, if a bind cannot otherwise be identified.

        :param close_with_result: Passed to :meth:`.Engine.connect`,
          indicating the :class:`.Connection` should be considered
          "single use", automatically closing when the first result set is
          closed. This flag only has an effect if this :class:`.Session` is
          configured with ``autocommit=True`` and does not already have a
          transaction in progress.

        :param execution_options: a dictionary of execution options that
         will be passed to :meth:`.Connection.execution_options`, **when the
         connection is first procured only**. If the connection is already
         present within the :class:`.Session`, a warning is emitted and the
         arguments are ignored.

         .. versionadded:: 0.9.9

         .. seealso::

            :ref:`session_transaction_isolation`

        :param \**kw:
          Additional keyword arguments are sent to :meth:`get_bind()`,
          allowing additional arguments to be passed to custom
          implementations of :meth:`get_bind`.
""" if bind is None: bind = self.get_bind(mapper, clause=clause, **kw) return self._connection_for_bind(bind, close_with_result=close_with_result, execution_options=execution_options) def _connection_for_bind(self, engine, execution_options=None, **kw): if self.transaction is not None: return self.transaction._connection_for_bind( engine, execution_options) else: conn = engine.contextual_connect(**kw) if execution_options: conn = conn.execution_options(**execution_options) return conn def execute(self, clause, params=None, mapper=None, bind=None, **kw): """Execute a SQL expression construct or string statement within the current transaction. Returns a :class:`.ResultProxy` representing results of the statement execution, in the same manner as that of an :class:`.Engine` or :class:`.Connection`. E.g.:: result = session.execute( user_table.select().where(user_table.c.id == 5) ) :meth:`~.Session.execute` accepts any executable clause construct, such as :func:`~.sql.expression.select`, :func:`~.sql.expression.insert`, :func:`~.sql.expression.update`, :func:`~.sql.expression.delete`, and :func:`~.sql.expression.text`. Plain SQL strings can be passed as well, which in the case of :meth:`.Session.execute` only will be interpreted the same as if it were passed via a :func:`~.expression.text` construct. That is, the following usage:: result = session.execute( "SELECT * FROM user WHERE id=:param", {"param":5} ) is equivalent to:: from sqlalchemy import text result = session.execute( text("SELECT * FROM user WHERE id=:param"), {"param":5} ) The second positional argument to :meth:`.Session.execute` is an optional parameter set. Similar to that of :meth:`.Connection.execute`, whether this is passed as a single dictionary, or a list of dictionaries, determines whether the DBAPI cursor's ``execute()`` or ``executemany()`` is used to execute the statement. 
An INSERT construct may be invoked for a single row:: result = session.execute( users.insert(), {"id": 7, "name": "somename"}) or for multiple rows:: result = session.execute(users.insert(), [ {"id": 7, "name": "somename7"}, {"id": 8, "name": "somename8"}, {"id": 9, "name": "somename9"} ]) The statement is executed within the current transactional context of this :class:`.Session`. The :class:`.Connection` which is used to execute the statement can also be acquired directly by calling the :meth:`.Session.connection` method. Both methods use a rule-based resolution scheme in order to determine the :class:`.Connection`, which in the average case is derived directly from the "bind" of the :class:`.Session` itself, and in other cases can be based on the :func:`.mapper` and :class:`.Table` objects passed to the method; see the documentation for :meth:`.Session.get_bind` for a full description of this scheme. The :meth:`.Session.execute` method does *not* invoke autoflush. The :class:`.ResultProxy` returned by the :meth:`.Session.execute` method is returned with the "close_with_result" flag set to true; the significance of this flag is that if this :class:`.Session` is autocommitting and does not have a transaction-dedicated :class:`.Connection` available, a temporary :class:`.Connection` is established for the statement execution, which is closed (meaning, returned to the connection pool) when the :class:`.ResultProxy` has consumed all available data. This applies *only* when the :class:`.Session` is configured with autocommit=True and no transaction has been started. :param clause: An executable statement (i.e. an :class:`.Executable` expression such as :func:`.expression.select`) or string SQL statement to be executed. :param params: Optional dictionary, or list of dictionaries, containing bound parameter values. If a single dictionary, single-row execution occurs; if a list of dictionaries, an "executemany" will be invoked. 
            The keys in each dictionary must correspond to parameter names
            present in the statement.

        :param mapper:
          Optional :func:`.mapper` or mapped class, used to identify the
          appropriate bind. This argument takes precedence over ``clause``
          when locating a bind. See :meth:`.Session.get_bind` for more
          details.

        :param bind:
          Optional :class:`.Engine` to be used as the bind. If this engine
          is already involved in an ongoing transaction, that connection
          will be used. This argument takes precedence over ``mapper`` and
          ``clause`` when locating a bind.

        :param \**kw:
          Additional keyword arguments are sent to
          :meth:`.Session.get_bind()` to allow extensibility of "bind"
          schemes.

        .. seealso::

            :ref:`sqlexpression_toplevel` - Tutorial on using Core SQL
            constructs.

            :ref:`connections_toplevel` - Further information on direct
            statement execution.

            :meth:`.Connection.execute` - core level statement execution
            method, which is :meth:`.Session.execute` ultimately uses
            in order to execute the statement.

        """
        # strings are coerced to text() constructs; other executables pass
        # through unchanged
        clause = expression._literal_as_text(clause)

        if bind is None:
            bind = self.get_bind(mapper, clause=clause, **kw)

        return self._connection_for_bind(
            bind, close_with_result=True).execute(clause, params or {})

    def scalar(self, clause, params=None, mapper=None, bind=None, **kw):
        """Like :meth:`~.Session.execute` but return a scalar result."""

        return self.execute(
            clause, params=params, mapper=mapper, bind=bind, **kw).scalar()

    def close(self):
        """Close this Session.

        This clears all items and ends any transaction in progress.

        If this session were created with ``autocommit=False``, a new
        transaction is immediately begun. Note that this new transaction
        does not use any connection resources until they are first needed.

        """
        self._close_impl(invalidate=False)

    def invalidate(self):
        """Close this Session, using connection invalidation.

        This is a variant of :meth:`.Session.close` that will additionally
        ensure that the :meth:`.Connection.invalidate` method will be called
        on all :class:`.Connection` objects. This can be called when the
        database is known to be in a state where the connections are no
        longer safe to be used.

        E.g.::

            try:
                sess = Session()
                sess.add(User())
                sess.commit()
            except gevent.Timeout:
                sess.invalidate()
                raise
            except:
                sess.rollback()
                raise

        This clears all items and ends any transaction in progress.

        If this session were created with ``autocommit=False``, a new
        transaction is immediately begun. Note that this new transaction
        does not use any connection resources until they are first needed.

        .. versionadded:: 0.9.9

        """
        self._close_impl(invalidate=True)

    def _close_impl(self, invalidate):
        # shared implementation for close() / invalidate(); ``invalidate``
        # selects connection invalidation over a normal release
        self.expunge_all()
        if self.transaction is not None:
            for transaction in self.transaction._iterate_self_and_parents():
                transaction.close(invalidate)

    def expunge_all(self):
        """Remove all object instances from this ``Session``.

        This is equivalent to calling ``expunge(obj)`` on all objects in
        this ``Session``.

        """
        # snapshot all states first; replacing identity_map/_new below
        # drops the session's references to them
        all_states = self.identity_map.all_states() + list(self._new)
        self.identity_map = self._identity_cls()
        self._new = {}
        self._deleted = {}

        statelib.InstanceState._detach_states(
            all_states, self
        )

    def _add_bind(self, key, bind):
        # ``key`` may be a mapper, a mapped or plain class, or a selectable;
        # inspect() normalizes, with a fallback for plain classes
        try:
            insp = inspect(key)
        except sa_exc.NoInspectionAvailable:
            if not isinstance(key, type):
                raise exc.ArgumentError(
                    "Not acceptable bind target: %s" % key)
            else:
                self.__binds[key] = bind
        else:
            if insp.is_selectable:
                self.__binds[insp] = bind
            elif insp.is_mapper:
                # bind the class as well as every table the mapper spans
                self.__binds[insp.class_] = bind
                for selectable in insp._all_tables:
                    self.__binds[selectable] = bind
            else:
                raise exc.ArgumentError(
                    "Not acceptable bind target: %s" % key)

    def bind_mapper(self, mapper, bind):
        """Associate a :class:`.Mapper` with a "bind", e.g. a
        :class:`.Engine` or :class:`.Connection`.

        The given mapper is added to a lookup used by the
        :meth:`.Session.get_bind` method.

        """
        self._add_bind(mapper, bind)

    def bind_table(self, table, bind):
        """Associate a :class:`.Table` with a "bind", e.g. a
        :class:`.Engine` or :class:`.Connection`.
        The given mapper is added to a lookup used by the
        :meth:`.Session.get_bind` method.

        """
        self._add_bind(table, bind)

    def get_bind(self, mapper=None, clause=None):
        """Return a "bind" to which this :class:`.Session` is bound.

        The "bind" is usually an instance of :class:`.Engine`, except in
        the case where the :class:`.Session` has been explicitly bound
        directly to a :class:`.Connection`.

        For a multiply-bound or unbound :class:`.Session`, the ``mapper``
        or ``clause`` arguments are used to determine the appropriate bind
        to return.

        Note that the "mapper" argument is usually present when
        :meth:`.Session.get_bind` is called via an ORM operation such as a
        :meth:`.Session.query`, each individual INSERT/UPDATE/DELETE
        operation within a :meth:`.Session.flush`, call, etc.

        The order of resolution is:

        1. if mapper given and session.binds is present, locate a bind
           based on mapper.

        2. if clause given and session.binds is present, locate a bind
           based on :class:`.Table` objects found in the given clause
           present in session.binds.

        3. if session.bind is present, return that.

        4. if clause given, attempt to return a bind linked to the
           :class:`.MetaData` ultimately associated with the clause.

        5. if mapper given, attempt to return a bind linked to the
           :class:`.MetaData` ultimately associated with the
           :class:`.Table` or other selectable to which the mapper is
           mapped.

        6. No bind can be found,
           :exc:`~sqlalchemy.exc.UnboundExecutionError` is raised.

        :param mapper:
          Optional :func:`.mapper` mapped class or instance of
          :class:`.Mapper`. The bind can be derived from a :class:`.Mapper`
          first by consulting the "binds" map associated with this
          :class:`.Session`, and secondly by consulting the
          :class:`.MetaData` associated with the :class:`.Table` to which
          the :class:`.Mapper` is mapped for a bind.

        :param clause:
            A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
            :func:`~.sql.expression.text`, etc.). If the ``mapper``
            argument is not present or could not produce a bind, the given
            expression construct will be searched for a bound element,
            typically a :class:`.Table` associated with bound
            :class:`.MetaData`.

        """
        # steps below follow the documented resolution order
        if mapper is clause is None:
            if self.bind:
                return self.bind
            else:
                raise sa_exc.UnboundExecutionError(
                    "This session is not bound to a single Engine or "
                    "Connection, and no context was provided to locate "
                    "a binding.")

        if mapper is not None:
            # normalize a mapped class to its Mapper via inspect()
            try:
                mapper = inspect(mapper)
            except sa_exc.NoInspectionAvailable:
                if isinstance(mapper, type):
                    raise exc.UnmappedClassError(mapper)
                else:
                    raise

        if self.__binds:
            if mapper:
                # walk the MRO so a bind registered on a base class applies
                for cls in mapper.class_.__mro__:
                    if cls in self.__binds:
                        return self.__binds[cls]
                if clause is None:
                    clause = mapper.mapped_table

            if clause is not None:
                for t in sql_util.find_tables(clause, include_crud=True):
                    if t in self.__binds:
                        return self.__binds[t]

        if self.bind:
            return self.bind

        if isinstance(clause, sql.expression.ClauseElement) and clause.bind:
            return clause.bind

        if mapper and mapper.mapped_table.bind:
            return mapper.mapped_table.bind

        # nothing found: build a descriptive error
        context = []
        if mapper is not None:
            context.append('mapper %s' % mapper)
        if clause is not None:
            context.append('SQL expression')

        raise sa_exc.UnboundExecutionError(
            "Could not locate a bind configured on %s or this Session" % (
                ', '.join(context)))

    def query(self, *entities, **kwargs):
        """Return a new :class:`.Query` object corresponding to this
        :class:`.Session`."""

        return self._query_cls(entities, self, **kwargs)

    @property
    @util.contextmanager
    def no_autoflush(self):
        """Return a context manager that disables autoflush.

        e.g.::

            with session.no_autoflush:

                some_object = SomeClass()
                session.add(some_object)
                # won't autoflush
                some_object.related_thing = session.query(SomeRelated).first()

        Operations that proceed within the ``with:`` block
        will not be subject to flushes occurring upon query
        access.
This is useful when initializing a series of objects which involve existing database queries, where the uncompleted object should not yet be flushed. .. versionadded:: 0.7.6 """ autoflush = self.autoflush self.autoflush = False yield self self.autoflush = autoflush def _autoflush(self): if self.autoflush and not self._flushing: try: self.flush() except sa_exc.StatementError as e: # note we are reraising StatementError as opposed to # raising FlushError with "chaining" to remain compatible # with code that catches StatementError, IntegrityError, # etc. e.add_detail( "raised as a result of Query-invoked autoflush; " "consider using a session.no_autoflush block if this " "flush is occurring prematurely") util.raise_from_cause(e) def refresh(self, instance, attribute_names=None, lockmode=None): """Expire and refresh the attributes on the given instance. A query will be issued to the database and all attributes will be refreshed with their current database value. Lazy-loaded relational attributes will remain lazily loaded, so that the instance-wide refresh operation will be followed immediately by the lazy load of that attribute. Eagerly-loaded relational attributes will eagerly load within the single refresh operation. Note that a highly isolated transaction will return the same values as were previously read in that same transaction, regardless of changes in database state outside of that transaction - usage of :meth:`~Session.refresh` usually only makes sense if non-ORM SQL statement were emitted in the ongoing transaction, or if autocommit mode is turned on. :param attribute_names: optional. An iterable collection of string attribute names indicating a subset of attributes to be refreshed. :param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query` as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`. .. 
        seealso::

            :ref:`session_expire` - introductory material

            :meth:`.Session.expire`

            :meth:`.Session.expire_all`

        """
        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE:
            raise exc.UnmappedInstanceError(instance)

        self._expire_state(state, attribute_names)

        # re-load the row by identity; a None result means the row no
        # longer exists in the database
        if loading.load_on_ident(
                self.query(object_mapper(instance)),
                state.key, refresh_state=state,
                lockmode=lockmode,
                only_load_props=attribute_names) is None:
            raise sa_exc.InvalidRequestError(
                "Could not refresh instance '%s'" %
                instance_str(instance))

    def expire_all(self):
        """Expires all persistent instances within this Session.

        When any attributes on a persistent instance is next accessed, a
        query will be issued using the :class:`.Session` object's current
        transactional context in order to load all expired attributes for
        the given instance. Note that a highly isolated transaction will
        return the same values as were previously read in that same
        transaction, regardless of changes in database state outside of
        that transaction.

        To expire individual objects and individual attributes on those
        objects, use :meth:`Session.expire`.

        The :class:`.Session` object's default behavior is to expire all
        state whenever the :meth:`Session.rollback` or
        :meth:`Session.commit` methods are called, so that new state can be
        loaded for the new transaction. For this reason, calling
        :meth:`Session.expire_all` should not be needed when autocommit is
        ``False``, assuming the transaction is isolated.

        .. seealso::

            :ref:`session_expire` - introductory material

            :meth:`.Session.expire`

            :meth:`.Session.refresh`

        """
        for state in self.identity_map.all_states():
            state._expire(state.dict, self.identity_map._modified)

    def expire(self, instance, attribute_names=None):
        """Expire the attributes on an instance.

        Marks the attributes of an instance as out of date. When an expired
        attribute is next accessed, a query will be issued to the
        :class:`.Session` object's current transactional context in order
        to load all expired attributes for the given instance. Note that a
        highly isolated transaction will return the same values as were
        previously read in that same transaction, regardless of changes in
        database state outside of that transaction.

        To expire all objects in the :class:`.Session` simultaneously, use
        :meth:`Session.expire_all`.

        The :class:`.Session` object's default behavior is to expire all
        state whenever the :meth:`Session.rollback` or
        :meth:`Session.commit` methods are called, so that new state can be
        loaded for the new transaction. For this reason, calling
        :meth:`Session.expire` only makes sense for the specific case that
        a non-ORM SQL statement was emitted in the current transaction.

        :param instance: The instance to be refreshed.

        :param attribute_names: optional list of string attribute names
          indicating a subset of attributes to be expired.

        ..
seealso:: :ref:`session_expire` - introductory material :meth:`.Session.expire` :meth:`.Session.refresh` """ try: state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) self._expire_state(state, attribute_names) def _expire_state(self, state, attribute_names): self._validate_persistent(state) if attribute_names: state._expire_attributes(state.dict, attribute_names) else: # pre-fetch the full cascade since the expire is going to # remove associations cascaded = list(state.manager.mapper.cascade_iterator( 'refresh-expire', state)) self._conditional_expire(state) for o, m, st_, dct_ in cascaded: self._conditional_expire(st_) def _conditional_expire(self, state): """Expire a state if persistent, else expunge if pending""" if state.key: state._expire(state.dict, self.identity_map._modified) elif state in self._new: self._new.pop(state) state._detach(self) @util.deprecated("0.7", "The non-weak-referencing identity map " "feature is no longer needed.") def prune(self): """Remove unreferenced instances cached in the identity map. Note that this method is only meaningful if "weak_identity_map" is set to False. The default weak identity map is self-pruning. Removes any object in this Session's identity map that is not referenced in user code, modified, new or scheduled for deletion. Returns the number of objects pruned. """ return self.identity_map.prune() def expunge(self, instance): """Remove the `instance` from this ``Session``. This will free all internal references to the instance. Cascading will be applied according to the *expunge* cascade rule. 
""" try: state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) if state.session_id is not self.hash_key: raise sa_exc.InvalidRequestError( "Instance %s is not present in this Session" % state_str(state)) cascaded = list(state.manager.mapper.cascade_iterator( 'expunge', state)) self._expunge_states( [state] + [st_ for o, m, st_, dct_ in cascaded] ) def _expunge_states(self, states, to_transient=False): for state in states: if state in self._new: self._new.pop(state) elif self.identity_map.contains_state(state): self.identity_map.safe_discard(state) self._deleted.pop(state, None) elif self.transaction: # state is "detached" from being deleted, but still present # in the transaction snapshot self.transaction._deleted.pop(state, None) statelib.InstanceState._detach_states( states, self, to_transient=to_transient) def _register_newly_persistent(self, states): pending_to_persistent = self.dispatch.pending_to_persistent or None for state in states: mapper = _state_mapper(state) # prevent against last minute dereferences of the object obj = state.obj() if obj is not None: instance_key = mapper._identity_key_from_state(state) if _none_set.intersection(instance_key[1]) and \ not mapper.allow_partial_pks or \ _none_set.issuperset(instance_key[1]): raise exc.FlushError( "Instance %s has a NULL identity key. If this is an " "auto-generated value, check that the database table " "allows generation of new primary key values, and " "that the mapped Column object is configured to " "expect these generated values. Ensure also that " "this flush() is not occurring at an inappropriate " "time, such aswithin a load() event." % state_str(state) ) if state.key is None: state.key = instance_key elif state.key != instance_key: # primary key switch. 
use safe_discard() in case another # state has already replaced this one in the identity # map (see test/orm/test_naturalpks.py ReversePKsTest) self.identity_map.safe_discard(state) if state in self.transaction._key_switches: orig_key = self.transaction._key_switches[state][0] else: orig_key = state.key self.transaction._key_switches[state] = ( orig_key, instance_key) state.key = instance_key self.identity_map.replace(state) statelib.InstanceState._commit_all_states( ((state, state.dict) for state in states), self.identity_map ) self._register_altered(states) if pending_to_persistent is not None: for state in states: pending_to_persistent(self, state.obj()) # remove from new last, might be the last strong ref for state in set(states).intersection(self._new): self._new.pop(state) def _register_altered(self, states): if self._enable_transaction_accounting and self.transaction: for state in states: if state in self._new: self.transaction._new[state] = True else: self.transaction._dirty[state] = True def _remove_newly_deleted(self, states): persistent_to_deleted = self.dispatch.persistent_to_deleted or None for state in states: if self._enable_transaction_accounting and self.transaction: self.transaction._deleted[state] = True self.identity_map.safe_discard(state) self._deleted.pop(state, None) state._deleted = True # can't call state._detach() here, because this state # is still in the transaction snapshot and needs to be # tracked as part of that if persistent_to_deleted is not None: persistent_to_deleted(self, state.obj()) def add(self, instance, _warn=True): """Place an object in the ``Session``. Its state will be persisted to the database on the next flush operation. Repeated calls to ``add()`` will be ignored. The opposite of ``add()`` is ``expunge()``. 
""" if _warn and self._warn_on_events: self._flush_warning("Session.add()") try: state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) self._save_or_update_state(state) def add_all(self, instances): """Add the given collection of instances to this ``Session``.""" if self._warn_on_events: self._flush_warning("Session.add_all()") for instance in instances: self.add(instance, _warn=False) def _save_or_update_state(self, state): self._save_or_update_impl(state) mapper = _state_mapper(state) for o, m, st_, dct_ in mapper.cascade_iterator( 'save-update', state, halt_on=self._contains_state): self._save_or_update_impl(st_) def delete(self, instance): """Mark an instance as deleted. The database delete operation occurs upon ``flush()``. """ if self._warn_on_events: self._flush_warning("Session.delete()") try: state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) self._delete_impl(state, instance, head=True) def _delete_impl(self, state, obj, head): if state.key is None: if head: raise sa_exc.InvalidRequestError( "Instance '%s' is not persisted" % state_str(state)) else: return to_attach = self._before_attach(state, obj) if state in self._deleted: return if to_attach: self.identity_map.add(state) self._after_attach(state, obj) if head: # grab the cascades before adding the item to the deleted list # so that autoflush does not delete the item # the strong reference to the instance itself is significant here cascade_states = list(state.manager.mapper.cascade_iterator( 'delete', state)) self._deleted[state] = obj if head: for o, m, st_, dct_ in cascade_states: self._delete_impl(st_, o, False) def merge(self, instance, load=True): """Copy the state of a given instance into a corresponding instance within this :class:`.Session`. 
        :meth:`.Session.merge` examines the primary key attributes of the
        source instance, and attempts to reconcile it with an instance of the
        same primary key in the session.  If not found locally, it attempts
        to load the object from the database based on primary key, and if
        none can be located, creates a new instance.  The state of each
        attribute on the source instance is then copied to the target
        instance.  The resulting target instance is then returned by the
        method; the original source instance is left unmodified, and
        un-associated with the :class:`.Session` if not already.

        This operation cascades to associated instances if the association is
        mapped with ``cascade="merge"``.

        See :ref:`unitofwork_merging` for a detailed discussion of merging.

        .. versionchanged:: 1.1 - :meth:`.Session.merge` will now reconcile
           pending objects with overlapping primary keys in the same way
           as persistent.  See :ref:`change_3601` for discussion.

        :param instance: Instance to be merged.
        :param load: Boolean, when False, :meth:`.merge` switches into
         a "high performance" mode which causes it to forego emitting history
         events as well as all database access.  This flag is used for
         cases such as transferring graphs of objects into a
         :class:`.Session` from a second level cache, or to transfer
         just-loaded objects into the :class:`.Session` owned by a worker
         thread or process without re-querying the database.

         The ``load=False`` use case adds the caveat that the given
         object has to be in a "clean" state, that is, has no pending changes
         to be flushed - even if the incoming object is detached from any
         :class:`.Session`.  This is so that when the merge operation
         populates local attributes and cascades to related objects and
         collections, the values can be "stamped" onto the target object
         as is, without generating any history or attribute events, and
         without the need to reconcile the incoming data with any existing
         related objects or collections that might not be loaded.
        The resulting objects from ``load=False`` are always produced as
        "clean", so it is only appropriate that the given objects should be
        "clean" as well, else this suggests a mis-use of the method.

        """

        if self._warn_on_events:
            self._flush_warning("Session.merge()")

        _recursive = {}
        _resolve_conflict_map = {}

        if load:
            # flush current contents if we expect to load data
            self._autoflush()

        object_mapper(instance)  # verify mapped
        autoflush = self.autoflush
        try:
            # disable autoflush for the duration of the merge so the
            # partially-populated target does not get flushed mid-operation
            self.autoflush = False
            return self._merge(
                attributes.instance_state(instance),
                attributes.instance_dict(instance),
                load=load, _recursive=_recursive,
                _resolve_conflict_map=_resolve_conflict_map)
        finally:
            self.autoflush = autoflush

    def _merge(self, state, state_dict, load=True, _recursive=None,
               _resolve_conflict_map=None):
        # Recursive implementation of merge(); ``_recursive`` maps
        # already-merged source states to their targets so that cycles
        # in the object graph terminate.
        mapper = _state_mapper(state)
        if state in _recursive:
            return _recursive[state]

        new_instance = False
        key = state.key

        if key is None:
            if not load:
                raise sa_exc.InvalidRequestError(
                    "merge() with load=False option does not support "
                    "objects transient (i.e. unpersisted) objects. flush() "
                    "all changes on mapped instances before merging with "
                    "load=False.")
            key = mapper._identity_key_from_state(state)
            key_is_persistent = attributes.NEVER_SET not in key[1]
        else:
            key_is_persistent = True

        if key in self.identity_map:
            merged = self.identity_map[key]
        elif key_is_persistent and key in _resolve_conflict_map:
            merged = _resolve_conflict_map[key]
        elif not load:
            if state.modified:
                raise sa_exc.InvalidRequestError(
                    "merge() with load=False option does not support "
                    "objects marked as 'dirty'. flush() all changes on "
                    "mapped instances before merging with load=False.")
            merged = mapper.class_manager.new_instance()
            merged_state = attributes.instance_state(merged)
            merged_state.key = key
            self._update_impl(merged_state)
            new_instance = True
        elif key_is_persistent and (
                not _none_set.intersection(key[1]) or
                (mapper.allow_partial_pks and
                 not _none_set.issuperset(key[1]))):
            # attempt to load the existing row from the database
            merged = self.query(mapper.class_).get(key[1])
        else:
            merged = None

        if merged is None:
            merged = mapper.class_manager.new_instance()
            merged_state = attributes.instance_state(merged)
            merged_dict = attributes.instance_dict(merged)
            new_instance = True
            self._save_or_update_state(merged_state)
        else:
            merged_state = attributes.instance_state(merged)
            merged_dict = attributes.instance_dict(merged)

        _recursive[state] = merged
        _resolve_conflict_map[key] = merged

        # check that we didn't just pull the exact same
        # state out.
        if state is not merged_state:
            # version check if applicable
            if mapper.version_id_col is not None:
                existing_version = mapper._get_state_attr_by_column(
                    state,
                    state_dict,
                    mapper.version_id_col,
                    passive=attributes.PASSIVE_NO_INITIALIZE)

                merged_version = mapper._get_state_attr_by_column(
                    merged_state,
                    merged_dict,
                    mapper.version_id_col,
                    passive=attributes.PASSIVE_NO_INITIALIZE)

                if existing_version is not attributes.PASSIVE_NO_RESULT and \
                        merged_version is not \
                        attributes.PASSIVE_NO_RESULT and \
                        existing_version != merged_version:
                    raise exc.StaleDataError(
                        "Version id '%s' on merged state %s "
                        "does not match existing version '%s'. "
                        "Leave the version attribute unset when "
                        "merging to update the most recent version."
                        % (
                            existing_version,
                            state_str(merged_state),
                            merged_version
                        ))

            merged_state.load_path = state.load_path
            merged_state.load_options = state.load_options

            # since we are copying load_options, we need to copy
            # the callables_ that would have been generated by those
            # load_options.
            # assumes that the callables we put in state.callables_
            # are not instance-specific (which they should not be)
            merged_state._copy_callables(state)

            for prop in mapper.iterate_properties:
                prop.merge(self, state, state_dict,
                           merged_state, merged_dict,
                           load, _recursive, _resolve_conflict_map)

        if not load:
            # remove any history
            merged_state._commit_all(merged_dict, self.identity_map)

        if new_instance:
            merged_state.manager.dispatch.load(merged_state, None)
        return merged

    def _validate_persistent(self, state):
        # Raise if the given state is not present in the identity map.
        if not self.identity_map.contains_state(state):
            raise sa_exc.InvalidRequestError(
                "Instance '%s' is not persistent within this Session" %
                state_str(state))

    def _save_impl(self, state):
        # Register a transient state as pending in this Session.
        if state.key is not None:
            raise sa_exc.InvalidRequestError(
                "Object '%s' already has an identity - "
                "it can't be registered as pending" % state_str(state))

        obj = state.obj()
        to_attach = self._before_attach(state, obj)
        if state not in self._new:
            self._new[state] = obj
            state.insert_order = len(self._new)
        if to_attach:
            self._after_attach(state, obj)

    def _update_impl(self, state, revert_deletion=False):
        # Place a detached state (or, with revert_deletion=True, one
        # previously marked deleted) back into the identity map as
        # persistent.
        if state.key is None:
            raise sa_exc.InvalidRequestError(
                "Instance '%s' is not persisted" %
                state_str(state))

        if state._deleted:
            if revert_deletion:
                if not state._attached:
                    return
                del state._deleted
            else:
                raise sa_exc.InvalidRequestError(
                    "Instance '%s' has been deleted. "
                    "Use the make_transient() "
                    "function to send this object back "
                    "to the transient state."
                    % state_str(state)
                )

        obj = state.obj()
        to_attach = self._before_attach(state, obj)

        self._deleted.pop(state, None)
        if revert_deletion:
            self.identity_map.replace(state)
        else:
            self.identity_map.add(state)

        if to_attach:
            self._after_attach(state, obj)
        elif revert_deletion:
            self.dispatch.deleted_to_persistent(self, obj)

    def _save_or_update_impl(self, state):
        # Dispatch to _save_impl() for transient states,
        # _update_impl() for detached ones.
        if state.key is None:
            self._save_impl(state)
        else:
            self._update_impl(state)

    def enable_relationship_loading(self, obj):
        """Associate an object with this :class:`.Session` for related
        object loading.

        .. warning::

            :meth:`.enable_relationship_loading` exists to serve special
            use cases and is not recommended for general use.

        Accesses of attributes mapped with :func:`.relationship`
        will attempt to load a value from the database using this
        :class:`.Session` as the source of connectivity.  The values
        will be loaded based on foreign key values present on this
        object - it follows that this functionality generally only works
        for many-to-one-relationships.

        The object will be attached to this session, but will
        **not** participate in any persistence operations; its state
        for almost all purposes will remain either "transient" or
        "detached", except for the case of relationship loading.

        Also note that backrefs will often not work as expected.
        Altering a relationship-bound attribute on the target object
        may not fire off a backref event, if the effective value
        is what was already loaded from a foreign-key-holding value.

        The :meth:`.Session.enable_relationship_loading` method is
        similar to the ``load_on_pending`` flag on :func:`.relationship`.
        Unlike that flag, :meth:`.Session.enable_relationship_loading` allows
        an object to remain transient while still being able to load
        related items.

        To make a transient object associated with a :class:`.Session`
        via :meth:`.Session.enable_relationship_loading` pending, add
        it to the :class:`.Session` using :meth:`.Session.add` normally.
        :meth:`.Session.enable_relationship_loading` does not improve
        behavior when the ORM is used normally - object references should be
        constructed at the object level, not at the foreign key level, so
        that they are present in an ordinary way before flush()
        proceeds.  This method is not intended for general use.

        .. versionadded:: 0.8

        .. seealso::

            ``load_on_pending`` at :func:`.relationship` - this flag
            allows per-relationship loading of many-to-ones on items that
            are pending.

        """
        state = attributes.instance_state(obj)
        to_attach = self._before_attach(state, obj)
        state._load_pending = True
        if to_attach:
            self._after_attach(state, obj)

    def _before_attach(self, state, obj):
        # Return True if the state is newly attached to this Session;
        # raise if it is already attached to a different live Session.
        if state.session_id == self.hash_key:
            return False

        if state.session_id and state.session_id in _sessions:
            raise sa_exc.InvalidRequestError(
                "Object '%s' is already attached to session '%s' "
                "(this is '%s')" % (state_str(state),
                                    state.session_id, self.hash_key))

        self.dispatch.before_attach(self, obj)

        return True

    def _after_attach(self, state, obj):
        # Finalize attachment: record the session id, hold a strong
        # reference to modified objects, and fire the appropriate
        # state-transition events.
        state.session_id = self.hash_key
        if state.modified and state._strong_obj is None:
            state._strong_obj = obj
        self.dispatch.after_attach(self, obj)

        if state.key:
            self.dispatch.detached_to_persistent(self, obj)
        else:
            self.dispatch.transient_to_pending(self, obj)

    def __contains__(self, instance):
        """Return True if the instance is associated with this session.

        The instance may be pending or persistent within the Session for a
        result of True.

        """
        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE:
            raise exc.UnmappedInstanceError(instance)
        return self._contains_state(state)

    def __iter__(self):
        """Iterate over all pending or persistent instances within this
        Session.

        """
        return iter(
            list(self._new.values()) + list(self.identity_map.values()))

    def _contains_state(self, state):
        # True if the state is either pending or in the identity map.
        return state in self._new or self.identity_map.contains_state(state)

    def flush(self, objects=None):
        """Flush all the object changes to the database.
        Writes out all pending object creations, deletions and modifications
        to the database as INSERTs, DELETEs, UPDATEs, etc.  Operations are
        automatically ordered by the Session's unit of work dependency
        solver.

        Database operations will be issued in the current transactional
        context and do not affect the state of the transaction, unless an
        error occurs, in which case the entire transaction is rolled back.
        You may flush() as often as you like within a transaction to move
        changes from Python to the database's transaction buffer.

        For ``autocommit`` Sessions with no active manual transaction,
        flush() will create a transaction on the fly that surrounds the
        entire set of operations into the flush.

        :param objects: Optional; restricts the flush operation to operate
          only on elements that are in the given collection.

          This feature is for an extremely narrow set of use cases where
          particular objects may need to be operated upon before the
          full flush() occurs.  It is not intended for general use.

        """

        if self._flushing:
            raise sa_exc.InvalidRequestError("Session is already flushing")

        if self._is_clean():
            return
        try:
            self._flushing = True
            self._flush(objects)
        finally:
            self._flushing = False

    def _flush_warning(self, method):
        # Emit a warning when a Session operation is invoked from within
        # the execution stage of a flush.
        util.warn(
            "Usage of the '%s' operation is not currently supported "
            "within the execution stage of the flush process. "
            "Results may not be consistent. Consider using alternative "
            "event listeners or connection-level operations instead."
            % method)

    def _is_clean(self):
        # True when nothing is modified, deleted or pending.
        return not self.identity_map.check_modified() and \
            not self._deleted and \
            not self._new

    def _flush(self, objects=None):
        # Actual flush implementation: gather new/dirty/deleted states,
        # register them with a UOWTransaction, and execute it within a
        # subtransaction.

        dirty = self._dirty_states
        if not dirty and not self._deleted and not self._new:
            self.identity_map._modified.clear()
            return

        flush_context = UOWTransaction(self)

        if self.dispatch.before_flush:
            self.dispatch.before_flush(self, flush_context, objects)
            # re-establish "dirty states" in case the listeners
            # added
            dirty = self._dirty_states

        deleted = set(self._deleted)
        new = set(self._new)

        dirty = set(dirty).difference(deleted)

        # create the set of all objects we want to operate upon
        if objects:
            # specific list passed in
            objset = set()
            for o in objects:
                try:
                    state = attributes.instance_state(o)
                except exc.NO_STATE:
                    raise exc.UnmappedInstanceError(o)
                objset.add(state)
        else:
            objset = None

        # store objects whose fate has been decided
        processed = set()

        # put all saves/updates into the flush context. detect top-level
        # orphans and throw them into deleted.
        if objset:
            proc = new.union(dirty).intersection(objset).difference(deleted)
        else:
            proc = new.union(dirty).difference(deleted)

        for state in proc:
            is_orphan = (
                _state_mapper(state)._is_orphan(state) and
                state.has_identity)
            flush_context.register_object(state, isdelete=is_orphan)
            processed.add(state)

        # put all remaining deletes into the flush context.
        if objset:
            proc = deleted.intersection(objset).difference(processed)
        else:
            proc = deleted.difference(processed)

        for state in proc:
            flush_context.register_object(state, isdelete=True)

        if not flush_context.has_work:
            return

        flush_context.transaction = transaction = self.begin(
            subtransactions=True)
        try:
            self._warn_on_events = True
            try:
                flush_context.execute()
            finally:
                self._warn_on_events = False

            self.dispatch.after_flush(self, flush_context)

            flush_context.finalize_flush_changes()

            if not objects and self.identity_map._modified:
                len_ = len(self.identity_map._modified)

                statelib.InstanceState._commit_all_states(
                    [(state, state.dict) for state in
                     self.identity_map._modified],
                    instance_dict=self.identity_map)
                util.warn("Attribute history events accumulated on %d "
                          "previously clean instances "
                          "within inner-flush event handlers have been "
                          "reset, and will not result in database updates. "
                          "Consider using set_committed_value() within "
                          "inner-flush event handlers to avoid this warning."
                          % len_)

            # useful assertions:
            # if not objects:
            #    assert not self.identity_map._modified
            # else:
            #    assert self.identity_map._modified == \
            #            self.identity_map._modified.difference(objects)

            self.dispatch.after_flush_postexec(self, flush_context)

            transaction.commit()

        except:
            with util.safe_reraise():
                transaction.rollback(_capture_exception=True)

    def bulk_save_objects(
            self, objects, return_defaults=False, update_changed_only=True):
        """Perform a bulk save of the given list of objects.

        The bulk save feature allows mapped objects to be used as the
        source of simple INSERT and UPDATE operations which can be more
        easily grouped together into higher performing "executemany"
        operations; the extraction of data from the objects is also
        performed using a lower-latency process that ignores whether or not
        attributes have actually been modified in the case of UPDATEs, and
        also ignores SQL expressions.
        The objects as given are not added to the session and no additional
        state is established on them, unless the ``return_defaults`` flag
        is also set, in which case primary key attributes and server-side
        default values will be populated.

        .. versionadded:: 1.0.0

        .. warning::

            The bulk save feature allows for a lower-latency INSERT/UPDATE
            of rows at the expense of most other unit-of-work features.
            Features such as object management, relationship handling,
            and SQL clause support are **silently omitted** in favor of raw
            INSERT/UPDATES of records.

            **Please read the list of caveats at** :ref:`bulk_operations`
            **before using this method, and fully test and confirm the
            functionality of all code developed using these systems.**

        :param objects: a list of mapped object instances.  The mapped
         objects are persisted as is, and are **not** associated with the
         :class:`.Session` afterwards.

         For each object, whether the object is sent as an INSERT or an
         UPDATE is dependent on the same rules used by the :class:`.Session`
         in traditional operation; if the object has the
         :attr:`.InstanceState.key`
         attribute set, then the object is assumed to be "detached" and
         will result in an UPDATE.  Otherwise, an INSERT is used.

         In the case of an UPDATE, statements are grouped based on which
         attributes have changed, and are thus to be the subject of each
         SET clause.  If ``update_changed_only`` is False, then all
         attributes present within each object are applied to the UPDATE
         statement, which may help in allowing the statements to be grouped
         together into a larger executemany(), and will also reduce the
         overhead of checking history on attributes.

        :param return_defaults: when True, rows that are missing values
         which generate defaults, namely integer primary key defaults and
         sequences, will be inserted **one at a time**, so that the primary
         key value is available.
         In particular this will allow joined-inheritance and other
         multi-table mappings to insert correctly without the need to
         provide primary key values ahead of time; however,
         :paramref:`.Session.bulk_save_objects.return_defaults` **greatly
         reduces the performance gains** of the method overall.

        :param update_changed_only: when True, UPDATE statements are rendered
         based on those attributes in each state that have logged changes.
         When False, all attributes present are rendered into the SET clause
         with the exception of primary key attributes.

        .. seealso::

            :ref:`bulk_operations`

            :meth:`.Session.bulk_insert_mappings`

            :meth:`.Session.bulk_update_mappings`

        """
        # group the incoming objects by (mapper, INSERT-vs-UPDATE) so that
        # each contiguous run becomes one bulk operation
        for (mapper, isupdate), states in itertools.groupby(
            (attributes.instance_state(obj) for obj in objects),
            lambda state: (state.mapper, state.key is not None)
        ):
            self._bulk_save_mappings(
                mapper, states, isupdate, True,
                return_defaults, update_changed_only, False)

    def bulk_insert_mappings(
            self, mapper, mappings, return_defaults=False,
            render_nulls=False):
        """Perform a bulk insert of the given list of mapping dictionaries.

        The bulk insert feature allows plain Python dictionaries to be used
        as the source of simple INSERT operations which can be more easily
        grouped together into higher performing "executemany"
        operations.  Using dictionaries, there is no "history" or session
        state management features in use, reducing latency when inserting
        large numbers of simple rows.

        The values within the dictionaries as given are typically passed
        without modification into Core :meth:`.Insert` constructs, after
        organizing the values within them across the tables to which
        the given mapper is mapped.

        .. versionadded:: 1.0.0

        .. warning::

            The bulk insert feature allows for a lower-latency INSERT
            of rows at the expense of most other unit-of-work features.
            Features such as object management, relationship handling,
            and SQL clause support are **silently omitted** in favor
            of raw INSERT of records.
            **Please read the list of caveats at** :ref:`bulk_operations`
            **before using this method, and fully test and confirm the
            functionality of all code developed using these systems.**

        :param mapper: a mapped class, or the actual :class:`.Mapper`
         object, representing the single kind of object represented within
         the mapping list.

        :param mappings: a list of dictionaries, each one containing the
         state of the mapped row to be inserted, in terms of the attribute
         names on the mapped class.  If the mapping refers to multiple
         tables, such as a joined-inheritance mapping, each dictionary
         must contain all keys to be populated into all tables.

        :param return_defaults: when True, rows that are missing values
         which generate defaults, namely integer primary key defaults and
         sequences, will be inserted **one at a time**, so that the primary
         key value is available.  In particular this will allow
         joined-inheritance and other multi-table mappings to insert
         correctly without the need to provide primary key values ahead of
         time; however,
         :paramref:`.Session.bulk_insert_mappings.return_defaults`
         **greatly reduces the performance gains** of the method overall.
         If the rows to be inserted only refer to a single table, then
         there is no reason this flag should be set as the returned
         default information is not used.

        :param render_nulls: When True, a value of ``None`` will result
         in a NULL value being included in the INSERT statement, rather
         than the column being omitted from the INSERT.  This allows all
         the rows being INSERTed to have the identical set of columns which
         allows the full set of rows to be batched to the DBAPI.  Normally,
         each column-set that contains a different combination of NULL
         values than the previous row must omit a different series of
         columns from the rendered INSERT statement, which means it must
         be emitted as a separate statement.
         By passing this flag, the full set of rows are guaranteed to be
         batchable into one batch; the cost however is that server-side
         defaults which are invoked by an omitted column will be skipped,
         so care must be taken to ensure that these are not necessary.

         .. warning::

            When this flag is set, **server side default SQL values will
            not be invoked** for those columns that are inserted as NULL;
            the NULL value will be sent explicitly.  Care must be taken
            to ensure that no server-side default functions need to be
            invoked for the operation as a whole.

         .. versionadded:: 1.1

        .. seealso::

            :ref:`bulk_operations`

            :meth:`.Session.bulk_save_objects`

            :meth:`.Session.bulk_update_mappings`

        """
        self._bulk_save_mappings(
            mapper, mappings, False, False,
            return_defaults, False, render_nulls)

    def bulk_update_mappings(self, mapper, mappings):
        """Perform a bulk update of the given list of mapping dictionaries.

        The bulk update feature allows plain Python dictionaries to be used
        as the source of simple UPDATE operations which can be more easily
        grouped together into higher performing "executemany"
        operations.  Using dictionaries, there is no "history" or session
        state management features in use, reducing latency when updating
        large numbers of simple rows.

        .. versionadded:: 1.0.0

        .. warning::

            The bulk update feature allows for a lower-latency UPDATE
            of rows at the expense of most other unit-of-work features.
            Features such as object management, relationship handling,
            and SQL clause support are **silently omitted** in favor
            of raw UPDATES of records.

            **Please read the list of caveats at** :ref:`bulk_operations`
            **before using this method, and fully test and confirm the
            functionality of all code developed using these systems.**

        :param mapper: a mapped class, or the actual :class:`.Mapper`
         object, representing the single kind of object represented within
         the mapping list.
        :param mappings: a list of dictionaries, each one containing the
         state of the mapped row to be updated, in terms of the attribute
         names on the mapped class.  If the mapping refers to multiple
         tables, such as a joined-inheritance mapping, each dictionary may
         contain keys corresponding to all tables.  All those keys which
         are present and are not part of the primary key are applied to the
         SET clause of the UPDATE statement; the primary key values, which
         are required, are applied to the WHERE clause.

        .. seealso::

            :ref:`bulk_operations`

            :meth:`.Session.bulk_insert_mappings`

            :meth:`.Session.bulk_save_objects`

        """
        self._bulk_save_mappings(
            mapper, mappings, True, False, False, False, False)

    def _bulk_save_mappings(
            self, mapper, mappings, isupdate, isstates,
            return_defaults, update_changed_only, render_nulls):
        # Common implementation for the bulk_* methods: run the bulk
        # persistence routine inside a subtransaction, guarding against
        # re-entrant flushes via self._flushing.
        mapper = _class_to_mapper(mapper)
        self._flushing = True

        transaction = self.begin(
            subtransactions=True)
        try:
            if isupdate:
                persistence._bulk_update(
                    mapper, mappings, transaction,
                    isstates, update_changed_only)
            else:
                persistence._bulk_insert(
                    mapper, mappings, transaction,
                    isstates, return_defaults, render_nulls)
            transaction.commit()

        except:
            with util.safe_reraise():
                transaction.rollback(_capture_exception=True)
        finally:
            self._flushing = False

    def is_modified(self, instance, include_collections=True,
                    passive=True):
        """Return ``True`` if the given instance has locally
        modified attributes.

        This method retrieves the history for each instrumented
        attribute on the instance and performs a comparison of the current
        value to its previously committed value, if any.

        It is in effect a more expensive and accurate
        version of checking for the given instance in the
        :attr:`.Session.dirty` collection; a full test for
        each attribute's net "dirty" status is performed.

        E.g.::

            return session.is_modified(someobject)

        ..
        versionchanged:: 0.8
            When using SQLAlchemy 0.7 and earlier, the ``passive``
            flag should **always** be explicitly set to ``True``,
            else SQL loads/autoflushes may proceed which can affect
            the modified state itself:
            ``session.is_modified(someobject, passive=True)``\ .
            In 0.8 and above, the behavior is corrected and
            this flag is ignored.

        A few caveats to this method apply:

        * Instances present in the :attr:`.Session.dirty` collection may
          report ``False`` when tested with this method.  This is because
          the object may have received change events via attribute mutation,
          thus placing it in :attr:`.Session.dirty`, but ultimately the state
          is the same as that loaded from the database, resulting in no net
          change here.

        * Scalar attributes may not have recorded the previously set
          value when a new value was applied, if the attribute was not
          loaded, or was expired, at the time the new value was received -
          in these cases, the attribute is assumed to have a change, even
          if there is ultimately no net change against its database value.
          SQLAlchemy in most cases does not need the "old" value when a
          set event occurs, so it skips the expense of a SQL call if the
          old value isn't present, based on the assumption that an UPDATE
          of the scalar value is usually needed, and in those few cases
          where it isn't, is less expensive on average than issuing a
          defensive SELECT.

          The "old" value is fetched unconditionally upon set only if the
          attribute container has the ``active_history`` flag set to
          ``True``.  This flag is set typically for primary key attributes
          and scalar object references that are not a simple many-to-one.
          To set this flag for any arbitrary mapped column, use the
          ``active_history`` argument with :func:`.column_property`.

        :param instance: mapped instance to be tested for pending changes.
        :param include_collections: Indicates if multivalued collections
         should be included in the operation.  Setting this to ``False``
         is a way to detect only local-column based properties (i.e.
         scalar columns or many-to-one foreign keys) that would result in
         an UPDATE for this instance upon flush.

        :param passive:
         .. versionchanged:: 0.8
             Ignored for backwards compatibility.
             When using SQLAlchemy 0.7 and earlier, this flag should always
             be set to ``True``.

        """
        state = object_state(instance)

        if not state.modified:
            return False

        dict_ = state.dict

        for attr in state.manager.attributes:
            # skip collections when requested, and attributes that don't
            # support history at all
            if \
                    (
                        not include_collections and
                        hasattr(attr.impl, 'get_collection')
                    ) or not hasattr(attr.impl, 'get_history'):
                continue

            (added, unchanged, deleted) = \
                attr.impl.get_history(state, dict_,
                                      passive=attributes.NO_CHANGE)

            if added or deleted:
                return True
        else:
            return False

    @property
    def is_active(self):
        """True if this :class:`.Session` is in "transaction mode" and
        is not in "partial rollback" state.

        The :class:`.Session` in its default mode of ``autocommit=False``
        is essentially always in "transaction mode", in that a
        :class:`.SessionTransaction` is associated with it as soon as
        it is instantiated.  This :class:`.SessionTransaction` is
        immediately replaced with a new one as soon as it is ended, due to
        a rollback, commit, or close operation.

        "Transaction mode" does *not* indicate whether
        or not actual database connection resources are in use; the
        :class:`.SessionTransaction` object coordinates among zero or more
        actual database transactions, and starts out with none, accumulating
        individual DBAPI connections as different data sources are used
        within its scope.  The best way to track when a particular
        :class:`.Session` has actually begun to use DBAPI resources is to
        implement a listener using the :meth:`.SessionEvents.after_begin`
        method, which will deliver both the :class:`.Session` as well as the
        target :class:`.Connection` to a user-defined event listener.

        The "partial rollback" state refers to when an "inner" transaction,
        typically used during a flush, encounters an error and emits a
        rollback of the DBAPI connection.
At this point, the :class:`.Session` is in "partial rollback" and awaits for the user to call :meth:`.Session.rollback`, in order to close out the transaction stack. It is in this "partial rollback" period that the :attr:`.is_active` flag returns False. After the call to :meth:`.Session.rollback`, the :class:`.SessionTransaction` is replaced with a new one and :attr:`.is_active` returns ``True`` again. When a :class:`.Session` is used in ``autocommit=True`` mode, the :class:`.SessionTransaction` is only instantiated within the scope of a flush call, or when :meth:`.Session.begin` is called. So :attr:`.is_active` will always be ``False`` outside of a flush or :meth:`.Session.begin` block in this mode, and will be ``True`` within the :meth:`.Session.begin` block as long as it doesn't enter "partial rollback" state. From all the above, it follows that the only purpose to this flag is for application frameworks that wish to detect is a "rollback" is necessary within a generic error handling routine, for :class:`.Session` objects that would otherwise be in "partial rollback" mode. In a typical integration case, this is also not necessary as it is standard practice to emit :meth:`.Session.rollback` unconditionally within the outermost exception catch. To track the transactional state of a :class:`.Session` fully, use event listeners, primarily the :meth:`.SessionEvents.after_begin`, :meth:`.SessionEvents.after_commit`, :meth:`.SessionEvents.after_rollback` and related events. """ return self.transaction and self.transaction.is_active identity_map = None """A mapping of object identities to objects themselves. Iterating through ``Session.identity_map.values()`` provides access to the full set of persistent objects (i.e., those that have row identity) currently in the session. .. seealso:: :func:`.identity_key` - helper function to produce the keys used in this dictionary. """ @property def _dirty_states(self): """The set of all persistent states considered dirty. 
This method returns all states that were modified including those that were possibly deleted. """ return self.identity_map._dirty_states() @property def dirty(self): """The set of all persistent instances considered dirty. E.g.:: some_mapped_object in session.dirty Instances are considered dirty when they were modified but not deleted. Note that this 'dirty' calculation is 'optimistic'; most attribute-setting or collection modification operations will mark an instance as 'dirty' and place it in this set, even if there is no net change to the attribute's value. At flush time, the value of each attribute is compared to its previously saved value, and if there's no net change, no SQL operation will occur (this is a more expensive operation so it's only done at flush time). To check if an instance has actionable net changes to its attributes, use the :meth:`.Session.is_modified` method. """ return util.IdentitySet( [state.obj() for state in self._dirty_states if state not in self._deleted]) @property def deleted(self): "The set of all instances marked as 'deleted' within this ``Session``" return util.IdentitySet(list(self._deleted.values())) @property def new(self): "The set of all instances marked as 'new' within this ``Session``." return util.IdentitySet(list(self._new.values())) class sessionmaker(_SessionClassMethods): """A configurable :class:`.Session` factory. The :class:`.sessionmaker` factory generates new :class:`.Session` objects when called, creating them given the configurational arguments established here. 
e.g.:: # global scope Session = sessionmaker(autoflush=False) # later, in a local scope, create and use a session: sess = Session() Any keyword arguments sent to the constructor itself will override the "configured" keywords:: Session = sessionmaker() # bind an individual session to a connection sess = Session(bind=connection) The class also includes a method :meth:`.configure`, which can be used to specify additional keyword arguments to the factory, which will take effect for subsequent :class:`.Session` objects generated. This is usually used to associate one or more :class:`.Engine` objects with an existing :class:`.sessionmaker` factory before it is first used:: # application starts Session = sessionmaker() # ... later engine = create_engine('sqlite:///foo.db') Session.configure(bind=engine) sess = Session() .. seealso: :ref:`session_getting` - introductory text on creating sessions using :class:`.sessionmaker`. """ def __init__(self, bind=None, class_=Session, autoflush=True, autocommit=False, expire_on_commit=True, info=None, **kw): """Construct a new :class:`.sessionmaker`. All arguments here except for ``class_`` correspond to arguments accepted by :class:`.Session` directly. See the :meth:`.Session.__init__` docstring for more details on parameters. :param bind: a :class:`.Engine` or other :class:`.Connectable` with which newly created :class:`.Session` objects will be associated. :param class_: class to use in order to create new :class:`.Session` objects. Defaults to :class:`.Session`. :param autoflush: The autoflush setting to use with newly created :class:`.Session` objects. :param autocommit: The autocommit setting to use with newly created :class:`.Session` objects. :param expire_on_commit=True: the expire_on_commit setting to use with newly created :class:`.Session` objects. :param info: optional dictionary of information that will be available via :attr:`.Session.info`. 
Note this dictionary is *updated*, not replaced, when the ``info`` parameter is specified to the specific :class:`.Session` construction operation. .. versionadded:: 0.9.0 :param \**kw: all other keyword arguments are passed to the constructor of newly created :class:`.Session` objects. """ kw['bind'] = bind kw['autoflush'] = autoflush kw['autocommit'] = autocommit kw['expire_on_commit'] = expire_on_commit if info is not None: kw['info'] = info self.kw = kw # make our own subclass of the given class, so that # events can be associated with it specifically. self.class_ = type(class_.__name__, (class_,), {}) def __call__(self, **local_kw): """Produce a new :class:`.Session` object using the configuration established in this :class:`.sessionmaker`. In Python, the ``__call__`` method is invoked on an object when it is "called" in the same way as a function:: Session = sessionmaker() session = Session() # invokes sessionmaker.__call__() """ for k, v in self.kw.items(): if k == 'info' and 'info' in local_kw: d = v.copy() d.update(local_kw['info']) local_kw['info'] = d else: local_kw.setdefault(k, v) return self.class_(**local_kw) def configure(self, **new_kw): """(Re)configure the arguments for this sessionmaker. e.g.:: Session = sessionmaker() Session.configure(bind=create_engine('sqlite://')) """ self.kw.update(new_kw) def __repr__(self): return "%s(class_=%r,%s)" % ( self.__class__.__name__, self.class_.__name__, ", ".join("%s=%r" % (k, v) for k, v in self.kw.items()) ) def make_transient(instance): """Alter the state of the given instance so that it is :term:`transient`. .. note:: :func:`.make_transient` is a special-case function for advanced use cases only. The given mapped instance is assumed to be in the :term:`persistent` or :term:`detached` state. The function will remove its association with any :class:`.Session` as well as its :attr:`.InstanceState.identity`. 
The effect is that the object will behave as though it were newly constructed, except retaining any attribute / collection values that were loaded at the time of the call. The :attr:`.InstanceState.deleted` flag is also reset if this object had been deleted as a result of using :meth:`.Session.delete`. .. warning:: :func:`.make_transient` does **not** "unexpire" or otherwise eagerly load ORM-mapped attributes that are not currently loaded at the time the function is called. This includes attributes which: * were expired via :meth:`.Session.expire` * were expired as the natural effect of committing a session transaction, e.g. :meth:`.Session.commit` * are normally :term:`lazy loaded` but are not currently loaded * are "deferred" via :ref:`deferred` and are not yet loaded * were not present in the query which loaded this object, such as that which is common in joined table inheritance and other scenarios. After :func:`.make_transient` is called, unloaded attributes such as those above will normally resolve to the value ``None`` when accessed, or an empty collection for a collection-oriented attribute. As the object is transient and un-associated with any database identity, it will no longer retrieve these values. .. seealso:: :func:`.make_transient_to_detached` """ state = attributes.instance_state(instance) s = _state_session(state) if s: s._expunge_states([state]) # remove expired state state.expired_attributes.clear() # remove deferred callables if state.callables: del state.callables if state.key: del state.key if state._deleted: del state._deleted def make_transient_to_detached(instance): """Make the given transient instance :term:`detached`. .. note:: :func:`.make_transient_to_detached` is a special-case function for advanced use cases only. All attribute history on the given instance will be reset as though the instance were freshly loaded from a query. Missing attributes will be marked as expired. 
The primary key attributes of the object, which are required, will be made into the "key" of the instance. The object can then be added to a session, or merged possibly with the load=False flag, at which point it will look as if it were loaded that way, without emitting SQL. This is a special use case function that differs from a normal call to :meth:`.Session.merge` in that a given persistent state can be manufactured without any SQL calls. .. versionadded:: 0.9.5 .. seealso:: :func:`.make_transient` """ state = attributes.instance_state(instance) if state.session_id or state.key: raise sa_exc.InvalidRequestError( "Given object must be transient") state.key = state.mapper._identity_key_from_state(state) if state._deleted: del state._deleted state._commit_all(state.dict) state._expire_attributes(state.dict, state.unloaded) def object_session(instance): """Return the :class:`.Session` to which the given instance belongs. This is essentially the same as the :attr:`.InstanceState.session` accessor. See that attribute for details. """ try: state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) else: return _state_session(state) _new_sessionid = util.counter()
dcjohnson1989/selenium
refs/heads/master
py/test/selenium/test_prompts.py
65
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from selenium import selenium import unittest import time class TestPrompts(unittest.TestCase): def setUp(self): self.selenium = selenium("localhost", \ 4444, "*firefoxproxy", "http://www.w3schools.com") self.selenium.start() def test_alert(self): sel = self.selenium sel.open("/js/tryit.asp?filename=tryjs_alert") sel.select_frame("view") sel.click("css=input[value='Show alert box']") self.assertEqual(sel.get_alert(), "Hello! 
I am an alert box!") def test_confirm_accept(self): sel = self.selenium sel.open("/js/tryit.asp?filename=tryjs_confirm") sel.select_frame("view") sel.choose_ok_on_next_confirmation() sel.click("css=input[value='Show a confirm box']") self.assertEqual(sel.get_alert(), "You pressed OK!") def test_confirm_cancel(self): sel = self.selenium sel.open("/js/tryit.asp?filename=tryjs_confirm") sel.select_frame("view") sel.choose_ok_on_next_confirmation() sel.click("css=input[value='Show a confirm box']") self.assertEqual(sel.get_alert(), "You pressed OK!") def test_prompt(self): sel = self.selenium sel.open("/js/tryit.asp?filename=tryjs_prompt") sel.select_frame("view") sel.answer_on_next_prompt('Flying Monkey') sel.click("css=input[value='Show prompt box']") self.assertEqual(sel.get_html_source(), '<head></head><body>Hello Flying Monkey! How are you today?</body>') def tearDown(self): self.selenium.stop() if __name__ == "__main__": unittest.main()
lgscofield/odoo
refs/heads/8.0
addons/event_sale/__init__.py
428
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import event_sale
AutorestCI/azure-sdk-for-python
refs/heads/master
azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/models/application_insights_component_quota_status.py
2
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class ApplicationInsightsComponentQuotaStatus(Model): """An Application Insights component daily data volume cap status. Variables are only populated by the server, and will be ignored when sending a request. :ivar app_id: The Application ID for the Application Insights component. :vartype app_id: str :ivar should_be_throttled: The daily data volume cap is met, and data ingestion will be stopped. :vartype should_be_throttled: bool :ivar expiration_time: Date and time when the daily data volume cap will be reset, and data ingestion will resume. :vartype expiration_time: str """ _validation = { 'app_id': {'readonly': True}, 'should_be_throttled': {'readonly': True}, 'expiration_time': {'readonly': True}, } _attribute_map = { 'app_id': {'key': 'AppId', 'type': 'str'}, 'should_be_throttled': {'key': 'ShouldBeThrottled', 'type': 'bool'}, 'expiration_time': {'key': 'ExpirationTime', 'type': 'str'}, } def __init__(self): super(ApplicationInsightsComponentQuotaStatus, self).__init__() self.app_id = None self.should_be_throttled = None self.expiration_time = None
severb/flowy-website
refs/heads/master
js/Lib/unittest/test/testmock/testhelpers.py
4
import unittest from unittest.mock import ( call, _Call, create_autospec, MagicMock, Mock, ANY, _CallList, patch, PropertyMock ) from datetime import datetime class SomeClass(object): def one(self, a, b): pass def two(self): pass def three(self, a=None): pass class AnyTest(unittest.TestCase): def test_any(self): self.assertEqual(ANY, object()) mock = Mock() mock(ANY) mock.assert_called_with(ANY) mock = Mock() mock(foo=ANY) mock.assert_called_with(foo=ANY) def test_repr(self): self.assertEqual(repr(ANY), '<ANY>') self.assertEqual(str(ANY), '<ANY>') def test_any_and_datetime(self): mock = Mock() mock(datetime.now(), foo=datetime.now()) mock.assert_called_with(ANY, foo=ANY) def test_any_mock_calls_comparison_order(self): mock = Mock() d = datetime.now() class Foo(object): def __eq__(self, other): return False def __ne__(self, other): return True for d in datetime.now(), Foo(): mock.reset_mock() mock(d, foo=d, bar=d) mock.method(d, zinga=d, alpha=d) mock().method(a1=d, z99=d) expected = [ call(ANY, foo=ANY, bar=ANY), call.method(ANY, zinga=ANY, alpha=ANY), call(), call().method(a1=ANY, z99=ANY) ] self.assertEqual(expected, mock.mock_calls) self.assertEqual(mock.mock_calls, expected) class CallTest(unittest.TestCase): def test_call_with_call(self): kall = _Call() self.assertEqual(kall, _Call()) self.assertEqual(kall, _Call(('',))) self.assertEqual(kall, _Call(((),))) self.assertEqual(kall, _Call(({},))) self.assertEqual(kall, _Call(('', ()))) self.assertEqual(kall, _Call(('', {}))) self.assertEqual(kall, _Call(('', (), {}))) self.assertEqual(kall, _Call(('foo',))) self.assertEqual(kall, _Call(('bar', ()))) self.assertEqual(kall, _Call(('baz', {}))) self.assertEqual(kall, _Call(('spam', (), {}))) kall = _Call(((1, 2, 3),)) self.assertEqual(kall, _Call(((1, 2, 3),))) self.assertEqual(kall, _Call(('', (1, 2, 3)))) self.assertEqual(kall, _Call(((1, 2, 3), {}))) self.assertEqual(kall, _Call(('', (1, 2, 3), {}))) kall = _Call(((1, 2, 4),)) self.assertNotEqual(kall, _Call(('', 
(1, 2, 3)))) self.assertNotEqual(kall, _Call(('', (1, 2, 3), {}))) kall = _Call(('foo', (1, 2, 4),)) self.assertNotEqual(kall, _Call(('', (1, 2, 4)))) self.assertNotEqual(kall, _Call(('', (1, 2, 4), {}))) self.assertNotEqual(kall, _Call(('bar', (1, 2, 4)))) self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {}))) kall = _Call(({'a': 3},)) self.assertEqual(kall, _Call(('', (), {'a': 3}))) self.assertEqual(kall, _Call(('', {'a': 3}))) self.assertEqual(kall, _Call(((), {'a': 3}))) self.assertEqual(kall, _Call(({'a': 3},))) def test_empty__Call(self): args = _Call() self.assertEqual(args, ()) self.assertEqual(args, ('foo',)) self.assertEqual(args, ((),)) self.assertEqual(args, ('foo', ())) self.assertEqual(args, ('foo',(), {})) self.assertEqual(args, ('foo', {})) self.assertEqual(args, ({},)) def test_named_empty_call(self): args = _Call(('foo', (), {})) self.assertEqual(args, ('foo',)) self.assertEqual(args, ('foo', ())) self.assertEqual(args, ('foo',(), {})) self.assertEqual(args, ('foo', {})) self.assertNotEqual(args, ((),)) self.assertNotEqual(args, ()) self.assertNotEqual(args, ({},)) self.assertNotEqual(args, ('bar',)) self.assertNotEqual(args, ('bar', ())) self.assertNotEqual(args, ('bar', {})) def test_call_with_args(self): args = _Call(((1, 2, 3), {})) self.assertEqual(args, ((1, 2, 3),)) self.assertEqual(args, ('foo', (1, 2, 3))) self.assertEqual(args, ('foo', (1, 2, 3), {})) self.assertEqual(args, ((1, 2, 3), {})) def test_named_call_with_args(self): args = _Call(('foo', (1, 2, 3), {})) self.assertEqual(args, ('foo', (1, 2, 3))) self.assertEqual(args, ('foo', (1, 2, 3), {})) self.assertNotEqual(args, ((1, 2, 3),)) self.assertNotEqual(args, ((1, 2, 3), {})) def test_call_with_kwargs(self): args = _Call(((), dict(a=3, b=4))) self.assertEqual(args, (dict(a=3, b=4),)) self.assertEqual(args, ('foo', dict(a=3, b=4))) self.assertEqual(args, ('foo', (), dict(a=3, b=4))) self.assertEqual(args, ((), dict(a=3, b=4))) def test_named_call_with_kwargs(self): args = 
_Call(('foo', (), dict(a=3, b=4))) self.assertEqual(args, ('foo', dict(a=3, b=4))) self.assertEqual(args, ('foo', (), dict(a=3, b=4))) self.assertNotEqual(args, (dict(a=3, b=4),)) self.assertNotEqual(args, ((), dict(a=3, b=4))) def test_call_with_args_call_empty_name(self): args = _Call(((1, 2, 3), {})) self.assertEqual(args, call(1, 2, 3)) self.assertEqual(call(1, 2, 3), args) self.assertTrue(call(1, 2, 3) in [args]) def test_call_ne(self): self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2)) self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3)) self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3)) def test_call_non_tuples(self): kall = _Call(((1, 2, 3),)) for value in 1, None, self, int: self.assertNotEqual(kall, value) self.assertFalse(kall == value) def test_repr(self): self.assertEqual(repr(_Call()), 'call()') self.assertEqual(repr(_Call(('foo',))), 'call.foo()') self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))), "call(1, 2, 3, a='b')") self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))), "call.bar(1, 2, 3, a='b')") self.assertEqual(repr(call), 'call') self.assertEqual(str(call), 'call') self.assertEqual(repr(call()), 'call()') self.assertEqual(repr(call(1)), 'call(1)') self.assertEqual(repr(call(zz='thing')), "call(zz='thing')") self.assertEqual(repr(call().foo), 'call().foo') self.assertEqual(repr(call(1).foo.bar(a=3).bing), 'call().foo.bar().bing') self.assertEqual( repr(call().foo(1, 2, a=3)), "call().foo(1, 2, a=3)" ) self.assertEqual(repr(call()()), "call()()") self.assertEqual(repr(call(1)(2)), "call()(2)") self.assertEqual( repr(call()().bar().baz.beep(1)), "call()().bar().baz.beep(1)" ) def test_call(self): self.assertEqual(call(), ('', (), {})) self.assertEqual(call('foo', 'bar', one=3, two=4), ('', ('foo', 'bar'), {'one': 3, 'two': 4})) mock = Mock() mock(1, 2, 3) mock(a=3, b=6) self.assertEqual(mock.call_args_list, [call(1, 2, 3), call(a=3, b=6)]) def test_attribute_call(self): self.assertEqual(call.foo(1), ('foo', (1,), {})) 
self.assertEqual(call.bar.baz(fish='eggs'), ('bar.baz', (), {'fish': 'eggs'})) mock = Mock() mock.foo(1, 2 ,3) mock.bar.baz(a=3, b=6) self.assertEqual(mock.method_calls, [call.foo(1, 2, 3), call.bar.baz(a=3, b=6)]) def test_extended_call(self): result = call(1).foo(2).bar(3, a=4) self.assertEqual(result, ('().foo().bar', (3,), dict(a=4))) mock = MagicMock() mock(1, 2, a=3, b=4) self.assertEqual(mock.call_args, call(1, 2, a=3, b=4)) self.assertNotEqual(mock.call_args, call(1, 2, 3)) self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)]) self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)]) mock = MagicMock() mock.foo(1).bar()().baz.beep(a=6) last_call = call.foo(1).bar()().baz.beep(a=6) self.assertEqual(mock.mock_calls[-1], last_call) self.assertEqual(mock.mock_calls, last_call.call_list()) def test_call_list(self): mock = MagicMock() mock(1) self.assertEqual(call(1).call_list(), mock.mock_calls) mock = MagicMock() mock(1).method(2) self.assertEqual(call(1).method(2).call_list(), mock.mock_calls) mock = MagicMock() mock(1).method(2)(3) self.assertEqual(call(1).method(2)(3).call_list(), mock.mock_calls) mock = MagicMock() int(mock(1).method(2)(3).foo.bar.baz(4)(5)) kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__() self.assertEqual(kall.call_list(), mock.mock_calls) def test_call_any(self): self.assertEqual(call, ANY) m = MagicMock() int(m) self.assertEqual(m.mock_calls, [ANY]) self.assertEqual([ANY], m.mock_calls) def test_two_args_call(self): args = _Call(((1, 2), {'a': 3}), two=True) self.assertEqual(len(args), 2) self.assertEqual(args[0], (1, 2)) self.assertEqual(args[1], {'a': 3}) other_args = _Call(((1, 2), {'a': 3})) self.assertEqual(args, other_args) class SpecSignatureTest(unittest.TestCase): def _check_someclass_mock(self, mock): self.assertRaises(AttributeError, getattr, mock, 'foo') mock.one(1, 2) mock.one.assert_called_with(1, 2) self.assertRaises(AssertionError, mock.one.assert_called_with, 3, 4) self.assertRaises(TypeError, mock.one, 1) 
mock.two() mock.two.assert_called_with() self.assertRaises(AssertionError, mock.two.assert_called_with, 3) self.assertRaises(TypeError, mock.two, 1) mock.three() mock.three.assert_called_with() self.assertRaises(AssertionError, mock.three.assert_called_with, 3) self.assertRaises(TypeError, mock.three, 3, 2) mock.three(1) mock.three.assert_called_with(1) mock.three(a=1) mock.three.assert_called_with(a=1) def test_basic(self): for spec in (SomeClass, SomeClass()): mock = create_autospec(spec) self._check_someclass_mock(mock) def test_create_autospec_return_value(self): def f(): pass mock = create_autospec(f, return_value='foo') self.assertEqual(mock(), 'foo') class Foo(object): pass mock = create_autospec(Foo, return_value='foo') self.assertEqual(mock(), 'foo') def test_autospec_reset_mock(self): m = create_autospec(int) int(m) m.reset_mock() self.assertEqual(m.__int__.call_count, 0) def test_mocking_unbound_methods(self): class Foo(object): def foo(self, foo): pass p = patch.object(Foo, 'foo') mock_foo = p.start() Foo().foo(1) mock_foo.assert_called_with(1) def test_create_autospec_unbound_methods(self): # see mock issue 128 # this is expected to fail until the issue is fixed return class Foo(object): def foo(self): pass klass = create_autospec(Foo) instance = klass() self.assertRaises(TypeError, instance.foo, 1) # Note: no type checking on the "self" parameter klass.foo(1) klass.foo.assert_called_with(1) self.assertRaises(TypeError, klass.foo) def test_create_autospec_keyword_arguments(self): class Foo(object): a = 3 m = create_autospec(Foo, a='3') self.assertEqual(m.a, '3') def test_create_autospec_keyword_only_arguments(self): def foo(a, *, b=None): pass m = create_autospec(foo) m(1) m.assert_called_with(1) self.assertRaises(TypeError, m, 1, 2) m(2, b=3) m.assert_called_with(2, b=3) def test_function_as_instance_attribute(self): obj = SomeClass() def f(a): pass obj.f = f mock = create_autospec(obj) mock.f('bing') mock.f.assert_called_with('bing') def 
test_spec_as_list(self): # because spec as a list of strings in the mock constructor means # something very different we treat a list instance as the type. mock = create_autospec([]) mock.append('foo') mock.append.assert_called_with('foo') self.assertRaises(AttributeError, getattr, mock, 'foo') class Foo(object): foo = [] mock = create_autospec(Foo) mock.foo.append(3) mock.foo.append.assert_called_with(3) self.assertRaises(AttributeError, getattr, mock.foo, 'foo') def test_attributes(self): class Sub(SomeClass): attr = SomeClass() sub_mock = create_autospec(Sub) for mock in (sub_mock, sub_mock.attr): self._check_someclass_mock(mock) def test_builtin_functions_types(self): # we could replace builtin functions / methods with a function # with *args / **kwargs signature. Using the builtin method type # as a spec seems to work fairly well though. class BuiltinSubclass(list): def bar(self, arg): pass sorted = sorted attr = {} mock = create_autospec(BuiltinSubclass) mock.append(3) mock.append.assert_called_with(3) self.assertRaises(AttributeError, getattr, mock.append, 'foo') mock.bar('foo') mock.bar.assert_called_with('foo') self.assertRaises(TypeError, mock.bar, 'foo', 'bar') self.assertRaises(AttributeError, getattr, mock.bar, 'foo') mock.sorted([1, 2]) mock.sorted.assert_called_with([1, 2]) self.assertRaises(AttributeError, getattr, mock.sorted, 'foo') mock.attr.pop(3) mock.attr.pop.assert_called_with(3) self.assertRaises(AttributeError, getattr, mock.attr, 'foo') def test_method_calls(self): class Sub(SomeClass): attr = SomeClass() mock = create_autospec(Sub) mock.one(1, 2) mock.two() mock.three(3) expected = [call.one(1, 2), call.two(), call.three(3)] self.assertEqual(mock.method_calls, expected) mock.attr.one(1, 2) mock.attr.two() mock.attr.three(3) expected.extend( [call.attr.one(1, 2), call.attr.two(), call.attr.three(3)] ) self.assertEqual(mock.method_calls, expected) def test_magic_methods(self): class BuiltinSubclass(list): attr = {} mock = 
create_autospec(BuiltinSubclass) self.assertEqual(list(mock), []) self.assertRaises(TypeError, int, mock) self.assertRaises(TypeError, int, mock.attr) self.assertEqual(list(mock), []) self.assertIsInstance(mock['foo'], MagicMock) self.assertIsInstance(mock.attr['foo'], MagicMock) def test_spec_set(self): class Sub(SomeClass): attr = SomeClass() for spec in (Sub, Sub()): mock = create_autospec(spec, spec_set=True) self._check_someclass_mock(mock) self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar') self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar') def test_descriptors(self): class Foo(object): @classmethod def f(cls, a, b): pass @staticmethod def g(a, b): pass class Bar(Foo): pass class Baz(SomeClass, Bar): pass for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()): mock = create_autospec(spec) mock.f(1, 2) mock.f.assert_called_once_with(1, 2) mock.g(3, 4) mock.g.assert_called_once_with(3, 4) def test_recursive(self): class A(object): def a(self): pass foo = 'foo bar baz' bar = foo A.B = A mock = create_autospec(A) mock() self.assertFalse(mock.B.called) mock.a() mock.B.a() self.assertEqual(mock.method_calls, [call.a(), call.B.a()]) self.assertIs(A.foo, A.bar) self.assertIsNot(mock.foo, mock.bar) mock.foo.lower() self.assertRaises(AssertionError, mock.bar.lower.assert_called_with) def test_spec_inheritance_for_classes(self): class Foo(object): def a(self): pass class Bar(object): def f(self): pass class_mock = create_autospec(Foo) self.assertIsNot(class_mock, class_mock()) for this_mock in class_mock, class_mock(): this_mock.a() this_mock.a.assert_called_with() self.assertRaises(TypeError, this_mock.a, 'foo') self.assertRaises(AttributeError, getattr, this_mock, 'b') instance_mock = create_autospec(Foo()) instance_mock.a() instance_mock.a.assert_called_with() self.assertRaises(TypeError, instance_mock.a, 'foo') self.assertRaises(AttributeError, getattr, instance_mock, 'b') # The return value isn't isn't callable self.assertRaises(TypeError, 
instance_mock) instance_mock.Bar.f() instance_mock.Bar.f.assert_called_with() self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g') instance_mock.Bar().f() instance_mock.Bar().f.assert_called_with() self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g') def test_inherit(self): class Foo(object): a = 3 Foo.Foo = Foo # class mock = create_autospec(Foo) instance = mock() self.assertRaises(AttributeError, getattr, instance, 'b') attr_instance = mock.Foo() self.assertRaises(AttributeError, getattr, attr_instance, 'b') # instance mock = create_autospec(Foo()) self.assertRaises(AttributeError, getattr, mock, 'b') self.assertRaises(TypeError, mock) # attribute instance call_result = mock.Foo() self.assertRaises(AttributeError, getattr, call_result, 'b') def test_builtins(self): # used to fail with infinite recursion create_autospec(1) create_autospec(int) create_autospec('foo') create_autospec(str) create_autospec({}) create_autospec(dict) create_autospec([]) create_autospec(list) create_autospec(set()) create_autospec(set) create_autospec(1.0) create_autospec(float) create_autospec(1j) create_autospec(complex) create_autospec(False) create_autospec(True) def test_function(self): def f(a, b): pass mock = create_autospec(f) self.assertRaises(TypeError, mock) mock(1, 2) mock.assert_called_with(1, 2) f.f = f mock = create_autospec(f) self.assertRaises(TypeError, mock.f) mock.f(3, 4) mock.f.assert_called_with(3, 4) def test_skip_attributeerrors(self): class Raiser(object): def __get__(self, obj, type=None): if obj is None: raise AttributeError('Can only be accessed via an instance') class RaiserClass(object): raiser = Raiser() @staticmethod def existing(a, b): return a + b s = create_autospec(RaiserClass) self.assertRaises(TypeError, lambda x: s.existing(1, 2, 3)) s.existing(1, 2) self.assertRaises(AttributeError, lambda: s.nonexisting) # check we can fetch the raiser attribute and it has no spec obj = s.raiser obj.foo, obj.bar def 
test_signature_class(self): class Foo(object): def __init__(self, a, b=3): pass mock = create_autospec(Foo) self.assertRaises(TypeError, mock) mock(1) mock.assert_called_once_with(1) mock(4, 5) mock.assert_called_with(4, 5) def test_class_with_no_init(self): # this used to raise an exception # due to trying to get a signature from object.__init__ class Foo(object): pass create_autospec(Foo) def test_signature_callable(self): class Callable(object): def __init__(self): pass def __call__(self, a): pass mock = create_autospec(Callable) mock() mock.assert_called_once_with() self.assertRaises(TypeError, mock, 'a') instance = mock() self.assertRaises(TypeError, instance) instance(a='a') instance.assert_called_once_with(a='a') instance('a') instance.assert_called_with('a') mock = create_autospec(Callable()) mock(a='a') mock.assert_called_once_with(a='a') self.assertRaises(TypeError, mock) mock('a') mock.assert_called_with('a') def test_signature_noncallable(self): class NonCallable(object): def __init__(self): pass mock = create_autospec(NonCallable) instance = mock() mock.assert_called_once_with() self.assertRaises(TypeError, mock, 'a') self.assertRaises(TypeError, instance) self.assertRaises(TypeError, instance, 'a') mock = create_autospec(NonCallable()) self.assertRaises(TypeError, mock) self.assertRaises(TypeError, mock, 'a') def test_create_autospec_none(self): class Foo(object): bar = None mock = create_autospec(Foo) none = mock.bar self.assertNotIsInstance(none, type(None)) none.foo() none.foo.assert_called_once_with() def test_autospec_functions_with_self_in_odd_place(self): class Foo(object): def f(a, self): pass a = create_autospec(Foo) a.f(self=10) a.f.assert_called_with(self=10) def test_autospec_property(self): class Foo(object): @property def foo(self): return 3 foo = create_autospec(Foo) mock_property = foo.foo # no spec on properties self.assertTrue(isinstance(mock_property, MagicMock)) mock_property(1, 2, 3) mock_property.abc(4, 5, 6) 
mock_property.assert_called_once_with(1, 2, 3) mock_property.abc.assert_called_once_with(4, 5, 6) def test_autospec_slots(self): class Foo(object): __slots__ = ['a'] foo = create_autospec(Foo) mock_slot = foo.a # no spec on slots mock_slot(1, 2, 3) mock_slot.abc(4, 5, 6) mock_slot.assert_called_once_with(1, 2, 3) mock_slot.abc.assert_called_once_with(4, 5, 6) class TestCallList(unittest.TestCase): def test_args_list_contains_call_list(self): mock = Mock() self.assertIsInstance(mock.call_args_list, _CallList) mock(1, 2) mock(a=3) mock(3, 4) mock(b=6) for kall in call(1, 2), call(a=3), call(3, 4), call(b=6): self.assertTrue(kall in mock.call_args_list) calls = [call(a=3), call(3, 4)] self.assertTrue(calls in mock.call_args_list) calls = [call(1, 2), call(a=3)] self.assertTrue(calls in mock.call_args_list) calls = [call(3, 4), call(b=6)] self.assertTrue(calls in mock.call_args_list) calls = [call(3, 4)] self.assertTrue(calls in mock.call_args_list) self.assertFalse(call('fish') in mock.call_args_list) self.assertFalse([call('fish')] in mock.call_args_list) def test_call_list_str(self): mock = Mock() mock(1, 2) mock.foo(a=3) mock.foo.bar().baz('fish', cat='dog') expected = ( "[call(1, 2),\n" " call.foo(a=3),\n" " call.foo.bar(),\n" " call.foo.bar().baz('fish', cat='dog')]" ) self.assertEqual(str(mock.mock_calls), expected) def test_propertymock(self): p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock) mock = p.start() try: SomeClass.one mock.assert_called_once_with() s = SomeClass() s.one mock.assert_called_with() self.assertEqual(mock.mock_calls, [call(), call()]) s.one = 3 self.assertEqual(mock.mock_calls, [call(), call(), call(3)]) finally: p.stop() def test_propertymock_returnvalue(self): m = MagicMock() p = PropertyMock() type(m).foo = p returned = m.foo p.assert_called_once_with() self.assertIsInstance(returned, MagicMock) self.assertNotIsInstance(returned, PropertyMock) if __name__ == '__main__': unittest.main()
sriki18/scipy
refs/heads/master
scipy/stats/tests/test_kdeoth.py
41
from __future__ import division, print_function, absolute_import from scipy import stats import numpy as np from numpy.testing import assert_almost_equal, assert_, assert_raises, \ assert_array_almost_equal, assert_array_almost_equal_nulp, run_module_suite def test_kde_1d(): #some basic tests comparing to normal distribution np.random.seed(8765678) n_basesample = 500 xn = np.random.randn(n_basesample) xnmean = xn.mean() xnstd = xn.std(ddof=1) # get kde for original sample gkde = stats.gaussian_kde(xn) # evaluate the density function for the kde for some points xs = np.linspace(-7,7,501) kdepdf = gkde.evaluate(xs) normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd) intervall = xs[1] - xs[0] assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01) prob1 = gkde.integrate_box_1d(xnmean, np.inf) prob2 = gkde.integrate_box_1d(-np.inf, xnmean) assert_almost_equal(prob1, 0.5, decimal=1) assert_almost_equal(prob2, 0.5, decimal=1) assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13) assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13) assert_almost_equal(gkde.integrate_kde(gkde), (kdepdf**2).sum()*intervall, decimal=2) assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2), (kdepdf*normpdf).sum()*intervall, decimal=2) def test_kde_2d(): #some basic tests comparing to normal distribution np.random.seed(8765678) n_basesample = 500 mean = np.array([1.0, 3.0]) covariance = np.array([[1.0, 2.0], [2.0, 6.0]]) # Need transpose (shape (2, 500)) for kde xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T # get kde for original sample gkde = stats.gaussian_kde(xn) # evaluate the density function for the kde for some points x, y = np.mgrid[-7:7:500j, -7:7:500j] grid_coords = np.vstack([x.ravel(), y.ravel()]) kdepdf = gkde.evaluate(grid_coords) kdepdf = kdepdf.reshape(500, 500) normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance) intervall = y.ravel()[1] - y.ravel()[0] 
assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01) small = -1e100 large = 1e100 prob1 = gkde.integrate_box([small, mean[1]], [large, large]) prob2 = gkde.integrate_box([small, small], [large, mean[1]]) assert_almost_equal(prob1, 0.5, decimal=1) assert_almost_equal(prob2, 0.5, decimal=1) assert_almost_equal(gkde.integrate_kde(gkde), (kdepdf**2).sum()*(intervall**2), decimal=2) assert_almost_equal(gkde.integrate_gaussian(mean, covariance), (kdepdf*normpdf).sum()*(intervall**2), decimal=2) def test_kde_bandwidth_method(): def scotts_factor(kde_obj): """Same as default, just check that it works.""" return np.power(kde_obj.n, -1./(kde_obj.d+4)) np.random.seed(8765678) n_basesample = 50 xn = np.random.randn(n_basesample) # Default gkde = stats.gaussian_kde(xn) # Supply a callable gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor) # Supply a scalar gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor) xs = np.linspace(-7,7,51) kdepdf = gkde.evaluate(xs) kdepdf2 = gkde2.evaluate(xs) assert_almost_equal(kdepdf, kdepdf2) kdepdf3 = gkde3.evaluate(xs) assert_almost_equal(kdepdf, kdepdf3) assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring') # Subclasses that should stay working (extracted from various sources). # Unfortunately the earlier design of gaussian_kde made it necessary for users # to create these kinds of subclasses, or call _compute_covariance() directly. 
class _kde_subclass1(stats.gaussian_kde): def __init__(self, dataset): self.dataset = np.atleast_2d(dataset) self.d, self.n = self.dataset.shape self.covariance_factor = self.scotts_factor self._compute_covariance() class _kde_subclass2(stats.gaussian_kde): def __init__(self, dataset): self.covariance_factor = self.scotts_factor super(_kde_subclass2, self).__init__(dataset) class _kde_subclass3(stats.gaussian_kde): def __init__(self, dataset, covariance): self.covariance = covariance stats.gaussian_kde.__init__(self, dataset) def _compute_covariance(self): self.inv_cov = np.linalg.inv(self.covariance) self._norm_factor = np.sqrt(np.linalg.det(2*np.pi * self.covariance)) \ * self.n class _kde_subclass4(stats.gaussian_kde): def covariance_factor(self): return 0.5 * self.silverman_factor() def test_gaussian_kde_subclassing(): x1 = np.array([-7, -5, 1, 4, 5], dtype=float) xs = np.linspace(-10, 10, num=50) # gaussian_kde itself kde = stats.gaussian_kde(x1) ys = kde(xs) # subclass 1 kde1 = _kde_subclass1(x1) y1 = kde1(xs) assert_array_almost_equal_nulp(ys, y1, nulp=10) # subclass 2 kde2 = _kde_subclass2(x1) y2 = kde2(xs) assert_array_almost_equal_nulp(ys, y2, nulp=10) # subclass 3 kde3 = _kde_subclass3(x1, kde.covariance) y3 = kde3(xs) assert_array_almost_equal_nulp(ys, y3, nulp=10) # subclass 4 kde4 = _kde_subclass4(x1) y4 = kde4(x1) y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017] assert_array_almost_equal(y_expected, y4, decimal=6) # Not a subclass, but check for use of _compute_covariance() kde5 = kde kde5.covariance_factor = lambda: kde.factor kde5._compute_covariance() y5 = kde5(xs) assert_array_almost_equal_nulp(ys, y5, nulp=10) def test_gaussian_kde_covariance_caching(): x1 = np.array([-7, -5, 1, 4, 5], dtype=float) xs = np.linspace(-10, 10, num=5) # These expected values are from scipy 0.10, before some changes to # gaussian_kde. They were not compared with any external reference. 
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475] # Set the bandwidth, then reset it to the default. kde = stats.gaussian_kde(x1) kde.set_bandwidth(bw_method=0.5) kde.set_bandwidth(bw_method='scott') y2 = kde(xs) assert_array_almost_equal(y_expected, y2, decimal=7) def test_gaussian_kde_monkeypatch(): """Ugly, but people may rely on this. See scipy pull request 123, specifically the linked ML thread "Width of the Gaussian in stats.kde". If it is necessary to break this later on, that is to be discussed on ML. """ x1 = np.array([-7, -5, 1, 4, 5], dtype=float) xs = np.linspace(-10, 10, num=50) # The old monkeypatched version to get at Silverman's Rule. kde = stats.gaussian_kde(x1) kde.covariance_factor = kde.silverman_factor kde._compute_covariance() y1 = kde(xs) # The new saner version. kde2 = stats.gaussian_kde(x1, bw_method='silverman') y2 = kde2(xs) assert_array_almost_equal_nulp(y1, y2, nulp=10) def test_kde_integer_input(): """Regression test for #1181.""" x1 = np.arange(5) kde = stats.gaussian_kde(x1) y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721] assert_array_almost_equal(kde(x1), y_expected, decimal=6) def test_pdf_logpdf(): np.random.seed(1) n_basesample = 50 xn = np.random.randn(n_basesample) # Default gkde = stats.gaussian_kde(xn) xs = np.linspace(-15, 12, 25) pdf = gkde.evaluate(xs) pdf2 = gkde.pdf(xs) assert_almost_equal(pdf, pdf2, decimal=12) logpdf = np.log(pdf) logpdf2 = gkde.logpdf(xs) assert_almost_equal(logpdf, logpdf2, decimal=12) if __name__ == "__main__": run_module_suite()
phalt/django
refs/heads/master
django/contrib/staticfiles/views.py
581
""" Views and functions for serving static files. These are only to be used during development, and SHOULD NOT be used in a production setting. """ import os import posixpath from django.conf import settings from django.contrib.staticfiles import finders from django.http import Http404 from django.utils.six.moves.urllib.parse import unquote from django.views import static def serve(request, path, insecure=False, **kwargs): """ Serve static files below a given point in the directory structure or from locations inferred from the staticfiles finders. To use, put a URL pattern such as:: from django.contrib.staticfiles import views url(r'^(?P<path>.*)$', views.serve) in your URLconf. It uses the django.views.static.serve() view to serve the found files. """ if not settings.DEBUG and not insecure: raise Http404 normalized_path = posixpath.normpath(unquote(path)).lstrip('/') absolute_path = finders.find(normalized_path) if not absolute_path: if path.endswith('/') or path == '': raise Http404("Directory indexes are not allowed here.") raise Http404("'%s' could not be found" % path) document_root, path = os.path.split(absolute_path) return static.serve(request, path, document_root=document_root, **kwargs)
Yingmin-Li/pyspider
refs/heads/master
pyspider/webui/index.py
59
#!/usr/bin/env python # -*- encoding: utf-8 -*- # vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8: # Author: Binux<i@binux.me> # http://binux.me # Created on 2014-02-22 23:20:39 import socket from flask import render_template, request, json from flask.ext import login from .app import app index_fields = ['name', 'group', 'status', 'comments', 'rate', 'burst', 'updatetime'] @app.route('/') def index(): projectdb = app.config['projectdb'] return render_template("index.html", projects=projectdb.get_all(fields=index_fields)) @app.route('/queues') def get_queues(): def try_get_qsize(queue): if queue is None: return 'None' try: return queue.qsize() except NotImplementedError: return 'Not Available For OSX' except Exception as e: return "%r" % e result = {} queues = app.config.get('queues', {}) for key in queues: result[key] = try_get_qsize(queues[key]) return json.dumps(result), 200, {'Content-Type': 'application/json'} @app.route('/update', methods=['POST', ]) def project_update(): projectdb = app.config['projectdb'] project = request.form['pk'] name = request.form['name'] value = request.form['value'] project_info = projectdb.get(project, fields=('name', 'group')) if not project_info: return "no such project.", 404 if 'lock' in projectdb.split_group(project_info.get('group')) \ and not login.current_user.is_active(): return app.login_response if name not in ('group', 'status', 'rate'): return 'unknown field: %s' % name, 400 if name == 'rate': value = value.split('/') if len(value) != 2: return 'format error: rate/burst', 400 rate = float(value[0]) burst = float(value[1]) update = { 'rate': min(rate, app.config.get('max_rate', rate)), 'burst': min(burst, app.config.get('max_burst', burst)), } else: update = { name: value } ret = projectdb.update(project, update) if ret: rpc = app.config['scheduler_rpc'] if rpc is not None: try: rpc.update_project() except socket.error as e: app.logger.warning('connect to scheduler rpc error: %r', e) return 'rpc error', 200 return 'ok', 
200 else: return 'update error', 500 @app.route('/counter') def counter(): rpc = app.config['scheduler_rpc'] if rpc is None: return json.dumps({}) result = {} try: for project, counter in rpc.counter('5m_time', 'avg').items(): result.setdefault(project, {})['5m_time'] = counter for project, counter in rpc.counter('5m', 'sum').items(): result.setdefault(project, {})['5m'] = counter for project, counter in rpc.counter('1h', 'sum').items(): result.setdefault(project, {})['1h'] = counter for project, counter in rpc.counter('1d', 'sum').items(): result.setdefault(project, {})['1d'] = counter for project, counter in rpc.counter('all', 'sum').items(): result.setdefault(project, {})['all'] = counter except socket.error as e: app.logger.warning('connect to scheduler rpc error: %r', e) return json.dumps({}), 200, {'Content-Type': 'application/json'} return json.dumps(result), 200, {'Content-Type': 'application/json'} @app.route('/run', methods=['POST', ]) def runtask(): rpc = app.config['scheduler_rpc'] if rpc is None: return json.dumps({}) projectdb = app.config['projectdb'] project = request.form['project'] project_info = projectdb.get(project, fields=('name', 'group')) if not project_info: return "no such project.", 404 if 'lock' in projectdb.split_group(project_info.get('group')) \ and not login.current_user.is_active(): return app.login_response newtask = { "project": project, "taskid": "on_start", "url": "data:,on_start", "process": { "callback": "on_start", }, "schedule": { "age": 0, "priority": 9, "force_update": True, }, } try: ret = rpc.newtask(newtask) except socket.error as e: app.logger.warning('connect to scheduler rpc error: %r', e) return json.dumps({"result": False}), 200, {'Content-Type': 'application/json'} return json.dumps({"result": ret}), 200, {'Content-Type': 'application/json'} @app.route('/robots.txt') def robots(): return """User-agent: * Disallow: / Allow: /$ Allow: /debug Disallow: /debug/*?taskid=* """, 200, {'Content-Type': 'text/plain'}
apanju/odoo
refs/heads/8.0
openerp/addons/base/tests/test_func.py
379
# -*- coding: utf-8 -*- import functools import unittest2 from openerp.tools.func import compose class TestCompose(unittest2.TestCase): def test_basic(self): str_add = compose(str, lambda a, b: a + b) self.assertEqual( str_add(1, 2), "3") def test_decorator(self): """ ensure compose() can be partially applied as a decorator """ @functools.partial(compose, unicode) def mul(a, b): return a * b self.assertEqual(mul(5, 42), u"210")
sudhanshuptl/Machine-Learning
refs/heads/master
TeST/plot2.py
1
__auther__='Sudhanshu Patel' import sklearn.datasets as data from sklearn.svm import SVC import numpy as np # Get digits Dataset**** data=data.load_digits() X=data.data #feature vector Y=data.target #Label Vector # ************************** #** Plot import matplotlib.pyplot as plt for x, y in zip(X, Y): plt.scatter( x, y, color='b' ) plt.show() if __name__=='__main__': pass
jsvelasquezv/agroind-mobile
refs/heads/master
platforms/browser/www/node_modules/node-gyp/gyp/pylib/gyp/input_test.py
1841
#!/usr/bin/env python # Copyright 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for the input.py file.""" import gyp.input import unittest import sys class TestFindCycles(unittest.TestCase): def setUp(self): self.nodes = {} for x in ('a', 'b', 'c', 'd', 'e'): self.nodes[x] = gyp.input.DependencyGraphNode(x) def _create_dependency(self, dependent, dependency): dependent.dependencies.append(dependency) dependency.dependents.append(dependent) def test_no_cycle_empty_graph(self): for label, node in self.nodes.iteritems(): self.assertEquals([], node.FindCycles()) def test_no_cycle_line(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['c']) self._create_dependency(self.nodes['c'], self.nodes['d']) for label, node in self.nodes.iteritems(): self.assertEquals([], node.FindCycles()) def test_no_cycle_dag(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['a'], self.nodes['c']) self._create_dependency(self.nodes['b'], self.nodes['c']) for label, node in self.nodes.iteritems(): self.assertEquals([], node.FindCycles()) def test_cycle_self_reference(self): self._create_dependency(self.nodes['a'], self.nodes['a']) self.assertEquals([[self.nodes['a'], self.nodes['a']]], self.nodes['a'].FindCycles()) def test_cycle_two_nodes(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['a']) self.assertEquals([[self.nodes['a'], self.nodes['b'], self.nodes['a']]], self.nodes['a'].FindCycles()) self.assertEquals([[self.nodes['b'], self.nodes['a'], self.nodes['b']]], self.nodes['b'].FindCycles()) def test_two_cycles(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['a']) self._create_dependency(self.nodes['b'], self.nodes['c']) 
self._create_dependency(self.nodes['c'], self.nodes['b']) cycles = self.nodes['a'].FindCycles() self.assertTrue( [self.nodes['a'], self.nodes['b'], self.nodes['a']] in cycles) self.assertTrue( [self.nodes['b'], self.nodes['c'], self.nodes['b']] in cycles) self.assertEquals(2, len(cycles)) def test_big_cycle(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['c']) self._create_dependency(self.nodes['c'], self.nodes['d']) self._create_dependency(self.nodes['d'], self.nodes['e']) self._create_dependency(self.nodes['e'], self.nodes['a']) self.assertEquals([[self.nodes['a'], self.nodes['b'], self.nodes['c'], self.nodes['d'], self.nodes['e'], self.nodes['a']]], self.nodes['a'].FindCycles()) if __name__ == '__main__': unittest.main()
karolmajta/officetune
refs/heads/master
server/src/officetune/officetune/settings.py
1
# Django settings for officetune project. import os PROJECT_DIR = os.path.dirname(__file__) DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': 'db.sqlite3', # Or path to database file if using sqlite3. # The following settings are not used with sqlite3: 'USER': '', 'PASSWORD': '', 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. 'PORT': '', # Set to empty string for default. } } # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = [] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. 
# Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = '2jn3w0_35ukzfn@q!3ndr+^-czet-rn00+s+ckqcq#qv5!6ak6' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'officetune.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'officetune.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
os.path.join(PROJECT_DIR, 'templates'), ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Uncomment the next line to enable the admin: 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', 'rest_framework', 'officetune', ) SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } LOGIN_URL = '/login/' LOGIN_REDIRECT_URL = '/'
RITC3/RC3_CTFD
refs/heads/master
populate.py
1
#!/usr/bin/python # -*- coding: utf-8 -*- from CTFd.models import Users, Solves, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Teams from CTFd import create_app from random import randint import datetime import random import hashlib import os import sys app = create_app() USER_AMOUNT = 50 TEAM_AMOUNT = 10 CHAL_AMOUNT = 20 categories = [ 'Exploitation', 'Reversing', 'Web', 'Forensics', 'Scripting', 'Cryptography', 'Networking', ] lorems = [ 'Lorem', 'ipsum', 'dolor', 'sit', 'amet,', 'consectetur', 'adipiscing', 'elit.', 'Proin', 'fringilla', 'elit', 'velit,', 'sed', 'scelerisque', 'tellus', 'dapibus', 'vel.', 'Aenean', 'at', 'urna', 'porta,', 'fringilla', 'erat', 'eget,', 'lobortis', 'quam.', 'Praesent', 'luctus,', 'quam', 'at', 'consequat', 'luctus,', 'mauris', 'sem', 'pretium', 'metus,', 'eu', 'viverra', 'dui', 'leo', 'in', 'tortor.', 'Cras', 'iaculis', 'enim', 'erat,', 'sed', 'gravida', 'velit', 'consectetur', 'a.', 'Duis', 'eget', 'fermentum', 'elit.', 'Vivamus', 'laoreet', 'elementum', 'massa,', 'ut', 'sodales', 'mi', 'gravida', 'at.', 'Vivamus', 'dignissim', 'in', 'eros', 'non', 'iaculis.', 'Vivamus', 'nec', 'sem', 'fringilla,', 'semper', 'lectus', 'in,', 'malesuada', 'tellus.', 'Vestibulum', 'mattis', 'commodo', 'enim', 'sit', 'amet', 'scelerisque.', 'Proin', 'at', 'condimentum', 'nisi,', 'nec', 'fringilla', 'ante.', 'Vestibulum', 'sit', 'amet', 'neque', 'sit', 'amet', 'elit', 'placerat', 'interdum', 'egestas', 'ac', 'malesuada', 'quis', 'arcu', 'ac', 'blandit.', 'Vivamus', 'in', 'massa', 'a', 'purus', 'bibendum', 'sagittis.', 'Nunc', 'venenatis', 'lacus', 'sed', 'nulla', 'dapibus,', 'consequat', 'laoreet', 'nisi', 'faucibus.', 'Nam', 'consequat', 'viverra', 'nibh', 'a', 'cursus.', 'Phasellus', 'tristique', 'justo', 'vitae', 'rutrum', 'pharetra.', 'Sed', 'sed', 'porttitor', 'lacus.', 'Nam', 'ornare', 'sit', 'amet', 'nisi', 'imperdiet', 'vulputate.', 'Maecenas', 'hendrerit', 'ullamcorper', 'elit,', 'sed', 'pellentesque', 'lacus', 'bibendum', 'sit', 
'amet.', 'Aliquam', 'consectetur', 'odio', 'quis', 'tellus', 'ornare,', 'id', 'malesuada', 'dui', 'rhoncus.', 'Quisque', 'fringilla', 'pellentesque', 'nulla', 'id', 'congue.', 'Nulla', 'ultricies', 'dolor', 'tristique', 'facilisis', 'at', 'accumsan', 'nisi.', 'Praesent', 'commodo,', 'mauris', 'sit', 'amet', 'placerat', 'condimentum,', 'nibh', 'leo', 'pulvinar', 'justo,', 'vel', 'dignissim', 'mi', 'dolor', 'et', 'est.', 'Nulla', 'facilisi.', 'Sed', 'nunc', 'est,', 'lobortis', 'id', 'diam', 'nec,', 'vulputate', 'varius', 'orci.', 'Maecenas', 'iaculis', 'vehicula', 'eros', 'eu', 'congue.', 'Nam', 'tempor', 'commodo', 'lobortis.', 'Donec', 'eget', 'posuere', 'dolor,', 'ut', 'rhoncus', 'tortor.', 'Donec', 'et', 'quam', 'quis', 'urna', 'rhoncus', 'fermentum', 'et', 'ut', 'tellus.', 'Aliquam', 'erat', 'volutpat.', 'Morbi', 'porttitor', 'ante', 'nec', 'porta', 'mollis.', 'Ut', 'sodales', 'pellentesque', 'rutrum.', 'Nullam', 'elit', 'eros,', 'sollicitudin', 'ac', 'rutrum', 'sit', 'amet,', 'eleifend', 'vel', 'nulla.', 'Morbi', 'quis', 'lacinia', 'nisi.', 'Integer', 'at', 'neque', 'vel', 'velit', 'tincidunt', 'elementum', 'lobortis', 'sit', 'amet', 'tellus.', 'Nunc', 'volutpat', 'diam', 'ac', 'diam', 'lacinia,', 'id', 'molestie', 'quam', 'eu', 'ultricies', 'ligula.', 'Duis', 'iaculis', 'massa', 'massa,', 'eget', 'venenatis', 'dolor', 'fermentum', 'laoreet.', 'Nam', 'posuere,', 'erat', 'quis', 'tempor', 'consequat,', 'purus', 'erat', 'hendrerit', 'arcu,', 'nec', 'aliquam', 'ligula', 'augue', 'vitae', 'felis.', 'Vestibulum', 'tincidunt', 'ipsum', 'vel', 'pharetra', 'lacinia.', 'Quisque', 'dignissim,', 'arcu', 'non', 'feugiat', 'semper,', 'felis', 'est', 'commodo', 'lorem,', 'malesuada', 'elementum', 'nibh', 'lectus', 'porttitor', 'nisi.', 'Duis', 'non', 'lacinia', 'nisl.', 'Etiam', 'ante', 'nisl,', 'mattis', 'eget', 'convallis', 'vel,', 'ullamcorper', 'ac', 'nisl.', 'Duis', 'eu', 'massa', 'at', 'urna', 'laoreet', 'convallis.', 'Donec', 'tincidunt', 'sapien', 'sit', 'amet', 
'varius', 'eu', 'dignissim', 'tortor,', 'elementum', 'gravida', 'eros.', 'Cras', 'viverra', 'accumsan', 'erat,', 'et', 'euismod', 'dui', 'placerat', 'ac.', 'Ut', 'tortor', 'arcu,', 'euismod', 'vitae', 'aliquam', 'in,', 'interdum', 'vitae', 'magna.', 'Vestibulum', 'leo', 'ante,', 'posuere', 'eget', 'est', 'non,', 'adipiscing', 'ultrices', 'erat.', 'Donec', 'suscipit', 'felis', 'molestie,', 'ultricies', 'dui', 'a,', 'facilisis', 'magna.', 'Cum', 'sociis', 'natoque', 'penatibus', 'et', 'magnis', 'dis', 'parturient', 'montes,', 'nascetur', 'ridiculus', 'mus.', 'Nulla', 'quis', 'odio', 'sit', 'amet', 'ante', 'tristique', 'accumsan', 'ut', 'iaculis', 'neque.', 'Vivamus', 'in', 'venenatis', 'enim.', 'Nunc', 'dignissim', 'justo', 'neque,', 'sed', 'ultricies', 'justo', 'dictum', 'in.', 'Nulla', 'eget', 'nunc', 'ac', 'arcu', 'vestibulum', 'bibendum', 'vitae', 'quis', 'tellus.', 'Morbi', 'bibendum,', 'quam', 'ac', 'cursus', 'posuere,', 'purus', 'lectus', 'tempor', 'est,', 'eu', 'iaculis', 'quam', 'enim', 'a', 'nibh.', 'Etiam', 'consequat', ] hipsters = [ 'Ethnic', 'narwhal', 'pickled', 'Odd', 'Future', 'cliche', 'VHS', 'whatever', 'Etsy', 'American', 'Apparel', 'kitsch', 'wolf', 'mlkshk', 'fashion', 'axe', 'ethnic', 'banh', 'mi', 'cornhole', 'scenester', 'Echo', 'Park', 'Dreamcatcher', 'tofu', 'fap', 'selvage', 'authentic', 'cliche', 'High', 'Life', 'brunch', 'pork', 'belly', 'viral', 'XOXO', 'drinking', 'vinegar', 'bitters', 'Wayfarers', 'gastropub', 'dreamcatcher', 'chillwave', 'Shoreditch', 'kale', 'chips', 'swag', 'street', 'art', 'put', 'a', 'bird', 'on', 'it', 'Vice', 'synth', 'cliche', 'retro', 'Master', 'cleanse', 'ugh', 'Austin', 'slow-carb', 'small', 'batch', 'Hashtag', 'food', 'truck', 'deep', 'v', 'semiotics', 'chia', 'normcore', 'bicycle', 'rights', 'Austin', 'drinking', 'vinegar', 'hella', 'readymade', 'farm-to-table', 'Wes', 'Anderson', 'put', 'a', 'bird', 'on', 'it', 'freegan', 'Synth', 'lo-fi', 'food', 'truck', 'chambray', 'Shoreditch', 'cliche', 'kogiSynth', 
'lo-fi', 'fap', 'single-origin', 'coffee', 'brunch', 'butcher', 'Pickled', 'Etsy', 'locavore', 'forage', 'pug', 'stumptown', 'occupy', 'PBR&B', 'actually', 'shabby', 'chic', 'church-key', 'disrupt', 'lomo', 'hoodie', 'Tumblr', 'biodiesel', 'Pinterest', 'butcher', 'Hella', 'Carles', 'pour-over', 'YOLO', 'VHS', 'literally', 'Selvage', 'narwhal', 'flexitarian', 'wayfarers', 'kitsch', 'bespoke', 'sriracha', 'Banh', 'mi', '8-bit', 'cornhole', 'viral', 'Tonx', 'keytar', 'gastropub', 'YOLO', 'hashtag', 'food', 'truck', '3', 'wolf', 'moonFingerstache', 'flexitarian', 'craft', 'beer', 'shabby', 'chic', '8-bit', 'try-hard', 'semiotics', 'Helvetica', 'keytar', 'PBR', 'four', 'loko', 'scenester', 'keytar', '3', 'wolf', 'moon', 'sriracha', 'gluten-free', 'literally', 'try-hard', 'put', 'a', 'bird', 'on', 'it', 'cornhole', 'blog', 'fanny', 'pack', 'Mumblecore', 'pickled', 'distillery', 'butcher', 'Ennui', 'tote', 'bag', 'letterpress', 'disrupt', 'keffiyeh', 'art', 'party', 'aesthetic', 'Helvetica', 'stumptown', 'Wes', 'Anderson', 'next', 'level', "McSweeney's", 'cornhole', 'Schlitz', 'skateboard', 'pop-up', 'Chillwave', 'biodiesel', 'semiotics', 'seitan', 'authentic', 'bicycle', 'rights', 'wolf', 'pork', 'belly', 'letterpress', 'locavore', 'whatever', 'fixie', 'viral', 'mustache', 'beard', 'Hashtag', 'sustainable', 'lomo', 'cardigan', 'lo-fiWilliamsburg', 'craft', 'beer', 'bitters', 'iPhone', 'gastropub', 'messenger', 'bag', 'Organic', 'post-ironic', 'fingerstache', 'ennui', 'banh', 'mi', 'Art', 'party', 'bitters', 'twee', 'bespoke', 'church-key', 'Intelligentsia', 'sriracha', 'Echo', 'Park', 'Tofu', 'locavore', 'street', 'art', 'freegan', 'farm-to-table', 'distillery', 'hoodie', 'swag', 'ugh', 'YOLO', 'VHS', 'Cred', 'hella', 'readymade', 'distillery', 'Banh', 'mi', 'Echo', 'Park', "McSweeney's,", 'mlkshk', 'photo', 'booth', 'swag', 'Odd', 'Future', 'squid', 'Tonx', 'craft', 'beer', 'High', 'Life', 'tousled', 'PBR', 'you', 'probably', "haven't", 'heard', 'of', 'them', 
'locavore', 'PBR&B', 'street', 'art', 'pop-up', ] names = [ 'James', 'John', 'Robert', 'Michael', 'William', 'David', 'Richard', 'Joseph', 'Charles', 'Thomas', 'Christopher', 'Daniel', 'Matthew', 'Donald', 'Anthony', 'Paul', 'Mark', 'George', 'Steven', 'Kenneth', 'Andrew', 'Edward', 'Brian', 'Joshua', 'Kevin', 'Ronald', 'Timothy', 'Jason', 'Jeffrey', 'Gary', 'Ryan', 'Nicholas', 'Eric', 'Stephen', 'Jacob', 'Larry', 'Frank', 'Jonathan', 'Scott', 'Justin', 'Raymond', 'Brandon', 'Gregory', 'Samuel', 'Patrick', 'Benjamin', 'Jack', 'Dennis', 'Jerry', 'Alexander', 'Tyler', 'Douglas', 'Henry', 'Peter', 'Walter', 'Aaron', 'Jose', 'Adam', 'Harold', 'Zachary', 'Nathan', 'Carl', 'Kyle', 'Arthur', 'Gerald', 'Lawrence', 'Roger', 'Albert', 'Keith', 'Jeremy', 'Terry', 'Joe', 'Sean', 'Willie', 'Jesse', 'Ralph', 'Billy', 'Austin', 'Bruce', 'Christian', 'Roy', 'Bryan', 'Eugene', 'Louis', 'Harry', 'Wayne', 'Ethan', 'Jordan', 'Russell', 'Alan', 'Philip', 'Randy', 'Juan', 'Howard', 'Vincent', 'Bobby', 'Dylan', 'Johnny', 'Phillip', 'Craig', 'Mary', 'Patricia', 'Elizabeth', 'Jennifer', 'Linda', 'Barbara', 'Susan', 'Margaret', 'Jessica', 'Dorothy', 'Sarah', 'Karen', 'Nancy', 'Betty', 'Lisa', 'Sandra', 'Helen', 'Donna', 'Ashley', 'Kimberly', 'Carol', 'Michelle', 'Amanda', 'Emily', 'Melissa', 'Laura', 'Deborah', 'Stephanie', 'Rebecca', 'Sharon', 'Cynthia', 'Ruth', 'Kathleen', 'Anna', 'Shirley', 'Amy', 'Angela', 'Virginia', 'Brenda', 'Pamela', 'Catherine', 'Katherine', 'Nicole', 'Christine', 'Janet', 'Debra', 'Carolyn', 'Samantha', 'Rachel', 'Heather', 'Maria', 'Diane', 'Frances', 'Joyce', 'Julie', 'Martha', 'Joan', 'Evelyn', 'Kelly', 'Christina', 'Emma', 'Lauren', 'Alice', 'Judith', 'Marie', 'Doris', 'Ann', 'Jean', 'Victoria', 'Cheryl', 'Megan', 'Kathryn', 'Andrea', 'Jacqueline', 'Gloria', 'Teresa', 'Janice', 'Sara', 'Rose', 'Julia', 'Hannah', 'Theresa', 'Judy', 'Mildred', 'Grace', 'Beverly', 'Denise', 'Marilyn', 'Amber', 'Danielle', 'Brittany', 'Diana', 'Jane', 'Lori', 'Olivia', 'Tiffany', 
'Kathy', 'Tammy', 'Crystal', 'Madison', ] emails = [ '@gmail.com', '@yahoo.com', '@outlook.com', '@hotmail.com', '@mailinator.com', '@poly.edu', '@nyu.edu' ] extensions = [ '.doc', '.log', '.msg', '.rtf', '.txt', '.wpd', '.wps', '.123', '.csv', '.dat', '.db ', '.dll', '.mdb', '.pps', '.ppt', '.sql', '.wks', '.xls', '.xml', '.mng', '.pct', '.bmp', '.gif', '.jpe', '.jpg', '.png', '.psd', '.psp', '.tif', '.ai ', '.drw', '.dxf', '.eps', '.ps ', '.svg', '.3dm', '.3dm', '.ind', '.pdf', '.qxd', '.qxp', '.aac', '.aif', '.iff', '.m3u', '.mid', '.mid', '.mp3', '.mpa', '.ra ', '.ram', '.wav', '.wma', '.3gp', '.asf', '.asx', '.avi', '.mov', '.mp4', '.mpg', '.qt ', '.rm ', '.swf', '.wmv', '.asp', '.css', '.htm', '.htm', '.js ', '.jsp', '.php', '.xht', '.fnt', '.fon', '.otf', '.ttf', '.8bi', '.plu', '.xll', '.cab', '.cpl', '.cur', '.dmp', '.drv', '.key', '.lnk', '.sys', '.cfg', '.ini', '.reg', '.app', '.bat', '.cgi', '.com', '.exe', '.pif', '.vb ', '.ws ', '.deb', '.gz ', '.pkg', '.rar', '.sea', '.sit', '.sit', '.zip', '.bin', '.hqx', '.0 E', '.mim', '.uue', '.cpp', '.jav', '.pl ', '.bak', '.gho', '.old', '.ori', '.tmp', '.dmg', '.iso', '.toa', '.vcd', '.gam', '.nes', '.rom', '.sav', '.msi', ] def gen_sentence(): return ' '.join(random.sample(lorems, 50)) def gen_name(): return random.choice(names) def gen_email(): return random.choice(emails) def gen_category(): return random.choice(categories) def gen_value(): return random.choice(range(100, 500, 50)) def gen_word(): return random.choice(hipsters) def gen_file(): return gen_word() + random.choice(extensions) def random_date(start, end): return start + datetime.timedelta( seconds=randint(0, int((end - start).total_seconds()))) if __name__ == '__main__': with app.app_context(): db = app.db ### Generating Challenges print("GENERATING CHALLENGES") for x in range(CHAL_AMOUNT): word = gen_word() flags = [{'flag': word, 'type': 0}] db.session.add(Challenges(word, gen_sentence(), gen_value(), gen_category(), flags)) 
db.session.commit() db.session.add(Keys(x + 1, word, 0)) db.session.commit() ### Generating Files print("GENERATING FILES") AMT_CHALS_WITH_FILES = int(CHAL_AMOUNT * (3.0 / 4.0)) for x in range(AMT_CHALS_WITH_FILES): chal = random.randint(1, CHAL_AMOUNT) filename = gen_file() md5hash = hashlib.md5(filename).hexdigest() db.session.add(Files(chal, os.path.join('static/uploads', md5hash, filename))) db.session.commit() ### Generating Users print("GENERATING USERS") used = [] count = 0 while count < USER_AMOUNT: name = gen_name() if name not in used: used.append(name) team = Users(name, name.lower() + gen_email(), 'password') team.verified = True db.session.add(team) count += 1 db.session.commit() ### Generating Teams print("GENERATING TEAMS") used_names = [] used_users = [] count = 0 while count < TEAM_AMOUNT: name = gen_word() + ' ' + gen_word() user_id = random.randint(0, 50) if name not in used_names and user_id not in used_users: team = Teams(name, user_id) db.session.add(team) db.session.flush() user = Users.query.filter_by(id=user_id).first() user.teamid = team.id used_users.append(user_id) used_names.append(name) count += 1 db.session.commit() for user_id in range(1, 51): user = Users.query.filter_by(id=user_id).first() user.teamid = (user_id % 5) + 1 db.session.commit() ### Generating Solves print("GENERATING SOLVES") for x in range(USER_AMOUNT): used = [] base_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=-10000) for y in range(random.randint(1, CHAL_AMOUNT)): chalid = random.randint(1, CHAL_AMOUNT) if chalid not in used: used.append(chalid) solve = Solves(chalid, x + 1, '127.0.0.1', gen_word()) new_base = random_date(base_time, base_time + datetime.timedelta(minutes=random.randint(30, 60))) solve.date = new_base base_time = new_base db.session.add(solve) db.session.commit() ### Generating Wrong Keys print("GENERATING WRONG KEYS") for x in range(USER_AMOUNT): used = [] base_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=-10000) 
for y in range(random.randint(1, CHAL_AMOUNT * 20)): chalid = random.randint(1, CHAL_AMOUNT) if chalid not in used: used.append(chalid) wrong = WrongKeys(x + 1, chalid, gen_word()) new_base = random_date(base_time, base_time + datetime.timedelta(minutes=random.randint(30, 60))) wrong.date = new_base base_time = new_base db.session.add(wrong) db.session.commit() db.session.close()
yask123/django
refs/heads/master
tests/urlpatterns_reverse/views_broken.py
501
# I just raise an AttributeError to confuse the view loading mechanism raise AttributeError('I am here to confuse django.core.urlresolvers.get_callable')
fosstp/fosstp
refs/heads/master
alembic/env.py
1
from __future__ import with_statement from alembic import context from sqlalchemy import engine_from_config, pool from logging.config import fileConfig # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel from pyramid_sqlalchemy import BaseObject from fosstp.models.user import * from fosstp.models.workshop import * from fosstp.models.about import * from fosstp.models.forum import * from fosstp.models.news import * from fosstp.models.link import * from fosstp.models.planet import * target_metadata = BaseObject.metadata # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure( url=url, target_metadata=target_metadata, literal_binds=True) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. 
""" connectable = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online()
rajsadho/django
refs/heads/master
tests/migrations/test_executor.py
80
from django.apps.registry import apps as global_apps from django.db import connection from django.db.migrations.exceptions import InvalidMigrationPlan from django.db.migrations.executor import MigrationExecutor from django.db.migrations.graph import MigrationGraph from django.db.migrations.recorder import MigrationRecorder from django.db.utils import DatabaseError from django.test import TestCase, modify_settings, override_settings from .test_base import MigrationTestBase @modify_settings(INSTALLED_APPS={'append': 'migrations2'}) class ExecutorTests(MigrationTestBase): """ Tests the migration executor (full end-to-end running). Bear in mind that if these are failing you should fix the other test failures first, as they may be propagating into here. """ available_apps = ["migrations", "migrations2", "django.contrib.auth", "django.contrib.contenttypes"] @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_run(self): """ Tests running a simple set of migrations. """ executor = MigrationExecutor(connection) # Let's look at the plan first and make sure it's up to scratch plan = executor.migration_plan([("migrations", "0002_second")]) self.assertEqual( plan, [ (executor.loader.graph.nodes["migrations", "0001_initial"], False), (executor.loader.graph.nodes["migrations", "0002_second"], False), ], ) # Were the tables there before? self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_book") # Alright, let's try running it executor.migrate([("migrations", "0002_second")]) # Are the tables there now? 
self.assertTableExists("migrations_author") self.assertTableExists("migrations_book") # Rebuild the graph to reflect the new DB state executor.loader.build_graph() # Alright, let's undo what we did plan = executor.migration_plan([("migrations", None)]) self.assertEqual( plan, [ (executor.loader.graph.nodes["migrations", "0002_second"], True), (executor.loader.graph.nodes["migrations", "0001_initial"], True), ], ) executor.migrate([("migrations", None)]) # Are the tables gone? self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_book") @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}) def test_run_with_squashed(self): """ Tests running a squashed migration from zero (should ignore what it replaces) """ executor = MigrationExecutor(connection) # Check our leaf node is the squashed one leaves = [key for key in executor.loader.graph.leaf_nodes() if key[0] == "migrations"] self.assertEqual(leaves, [("migrations", "0001_squashed_0002")]) # Check the plan plan = executor.migration_plan([("migrations", "0001_squashed_0002")]) self.assertEqual( plan, [ (executor.loader.graph.nodes["migrations", "0001_squashed_0002"], False), ], ) # Were the tables there before? self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_book") # Alright, let's try running it executor.migrate([("migrations", "0001_squashed_0002")]) # Are the tables there now? self.assertTableExists("migrations_author") self.assertTableExists("migrations_book") # Rebuild the graph to reflect the new DB state executor.loader.build_graph() # Alright, let's undo what we did. Should also just use squashed. plan = executor.migration_plan([("migrations", None)]) self.assertEqual( plan, [ (executor.loader.graph.nodes["migrations", "0001_squashed_0002"], True), ], ) executor.migrate([("migrations", None)]) # Are the tables gone? 
self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_book") @override_settings(MIGRATION_MODULES={ "migrations": "migrations.test_migrations", "migrations2": "migrations2.test_migrations_2", }) def test_empty_plan(self): """ Tests that re-planning a full migration of a fully-migrated set doesn't perform spurious unmigrations and remigrations. There was previously a bug where the executor just always performed the backwards plan for applied migrations - which even for the most recent migration in an app, might include other, dependent apps, and these were being unmigrated. """ # Make the initial plan, check it executor = MigrationExecutor(connection) plan = executor.migration_plan([ ("migrations", "0002_second"), ("migrations2", "0001_initial"), ]) self.assertEqual( plan, [ (executor.loader.graph.nodes["migrations", "0001_initial"], False), (executor.loader.graph.nodes["migrations", "0002_second"], False), (executor.loader.graph.nodes["migrations2", "0001_initial"], False), ], ) # Fake-apply all migrations executor.migrate([ ("migrations", "0002_second"), ("migrations2", "0001_initial") ], fake=True) # Rebuild the graph to reflect the new DB state executor.loader.build_graph() # Now plan a second time and make sure it's empty plan = executor.migration_plan([ ("migrations", "0002_second"), ("migrations2", "0001_initial"), ]) self.assertEqual(plan, []) # Erase all the fake records executor.recorder.record_unapplied("migrations2", "0001_initial") executor.recorder.record_unapplied("migrations", "0002_second") executor.recorder.record_unapplied("migrations", "0001_initial") @override_settings(MIGRATION_MODULES={ "migrations": "migrations.test_migrations", "migrations2": "migrations2.test_migrations_2_no_deps", }) def test_mixed_plan_not_supported(self): """ Although the MigrationExecutor interfaces allows for mixed migration plans (combined forwards and backwards migrations) this is not supported. 
""" # Prepare for mixed plan executor = MigrationExecutor(connection) plan = executor.migration_plan([("migrations", "0002_second")]) self.assertEqual( plan, [ (executor.loader.graph.nodes["migrations", "0001_initial"], False), (executor.loader.graph.nodes["migrations", "0002_second"], False), ], ) executor.migrate(None, plan) # Rebuild the graph to reflect the new DB state executor.loader.build_graph() self.assertIn(('migrations', '0001_initial'), executor.loader.applied_migrations) self.assertIn(('migrations', '0002_second'), executor.loader.applied_migrations) self.assertNotIn(('migrations2', '0001_initial'), executor.loader.applied_migrations) # Generate mixed plan plan = executor.migration_plan([ ("migrations", None), ("migrations2", "0001_initial"), ]) msg = ( 'Migration plans with both forwards and backwards migrations are ' 'not supported. Please split your migration process into separate ' 'plans of only forwards OR backwards migrations.' ) with self.assertRaisesMessage(InvalidMigrationPlan, msg) as cm: executor.migrate(None, plan) self.assertEqual( cm.exception.args[1], [ (executor.loader.graph.nodes["migrations", "0002_second"], True), (executor.loader.graph.nodes["migrations", "0001_initial"], True), (executor.loader.graph.nodes["migrations2", "0001_initial"], False), ], ) # Rebuild the graph to reflect the new DB state executor.loader.build_graph() executor.migrate([ ("migrations", None), ("migrations2", None), ]) # Are the tables gone? self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_book") self.assertTableNotExists("migrations2_otherauthor") @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_soft_apply(self): """ Tests detection of initial migrations already having been applied. 
""" state = {"faked": None} def fake_storer(phase, migration=None, fake=None): state["faked"] = fake executor = MigrationExecutor(connection, progress_callback=fake_storer) # Were the tables there before? self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") # Run it normally self.assertEqual( executor.migration_plan([("migrations", "0001_initial")]), [ (executor.loader.graph.nodes["migrations", "0001_initial"], False), ], ) executor.migrate([("migrations", "0001_initial")]) # Are the tables there now? self.assertTableExists("migrations_author") self.assertTableExists("migrations_tribble") # We shouldn't have faked that one self.assertEqual(state["faked"], False) # Rebuild the graph to reflect the new DB state executor.loader.build_graph() # Fake-reverse that executor.migrate([("migrations", None)], fake=True) # Are the tables still there? self.assertTableExists("migrations_author") self.assertTableExists("migrations_tribble") # Make sure that was faked self.assertEqual(state["faked"], True) # Finally, migrate forwards; this should fake-apply our initial migration executor.loader.build_graph() self.assertEqual( executor.migration_plan([("migrations", "0001_initial")]), [ (executor.loader.graph.nodes["migrations", "0001_initial"], False), ], ) # Applying the migration should raise a database level error # because we haven't given the --fake-initial option with self.assertRaises(DatabaseError): executor.migrate([("migrations", "0001_initial")]) # Reset the faked state state = {"faked": None} # Allow faking of initial CreateModel operations executor.migrate([("migrations", "0001_initial")], fake_initial=True) self.assertEqual(state["faked"], True) # And migrate back to clean up the database executor.loader.build_graph() executor.migrate([("migrations", None)]) self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") @override_settings( MIGRATION_MODULES={ "migrations": 
"migrations.test_migrations_custom_user", "django.contrib.auth": "django.contrib.auth.migrations", }, AUTH_USER_MODEL="migrations.Author", ) def test_custom_user(self): """ Regression test for #22325 - references to a custom user model defined in the same app are not resolved correctly. """ executor = MigrationExecutor(connection) self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") # Migrate forwards executor.migrate([("migrations", "0001_initial")]) self.assertTableExists("migrations_author") self.assertTableExists("migrations_tribble") # Make sure the soft-application detection works (#23093) # Change table_names to not return auth_user during this as # it wouldn't be there in a normal run, and ensure migrations.Author # exists in the global app registry temporarily. old_table_names = connection.introspection.table_names connection.introspection.table_names = lambda c: [x for x in old_table_names(c) if x != "auth_user"] migrations_apps = executor.loader.project_state(("migrations", "0001_initial")).apps global_apps.get_app_config("migrations").models["author"] = migrations_apps.get_model("migrations", "author") try: migration = executor.loader.get_migration("auth", "0001_initial") self.assertEqual(executor.detect_soft_applied(None, migration)[0], True) finally: connection.introspection.table_names = old_table_names del global_apps.get_app_config("migrations").models["author"] # And migrate back to clean up the database executor.loader.build_graph() executor.migrate([("migrations", None)]) self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") @override_settings( INSTALLED_APPS=[ "migrations.migrations_test_apps.lookuperror_a", "migrations.migrations_test_apps.lookuperror_b", "migrations.migrations_test_apps.lookuperror_c" ] ) def test_unrelated_model_lookups_forwards(self): """ #24123 - Tests that all models of apps already applied which are unrelated to the first app being applied 
are part of the initial model state. """ try: executor = MigrationExecutor(connection) self.assertTableNotExists("lookuperror_a_a1") self.assertTableNotExists("lookuperror_b_b1") self.assertTableNotExists("lookuperror_c_c1") executor.migrate([("lookuperror_b", "0003_b3")]) self.assertTableExists("lookuperror_b_b3") # Rebuild the graph to reflect the new DB state executor.loader.build_graph() # Migrate forwards -- This led to a lookup LookupErrors because # lookuperror_b.B2 is already applied executor.migrate([ ("lookuperror_a", "0004_a4"), ("lookuperror_c", "0003_c3"), ]) self.assertTableExists("lookuperror_a_a4") self.assertTableExists("lookuperror_c_c3") # Rebuild the graph to reflect the new DB state executor.loader.build_graph() finally: # Cleanup executor.migrate([ ("lookuperror_a", None), ("lookuperror_b", None), ("lookuperror_c", None), ]) self.assertTableNotExists("lookuperror_a_a1") self.assertTableNotExists("lookuperror_b_b1") self.assertTableNotExists("lookuperror_c_c1") @override_settings( INSTALLED_APPS=[ "migrations.migrations_test_apps.lookuperror_a", "migrations.migrations_test_apps.lookuperror_b", "migrations.migrations_test_apps.lookuperror_c" ] ) def test_unrelated_model_lookups_backwards(self): """ #24123 - Tests that all models of apps being unapplied which are unrelated to the first app being unapplied are part of the initial model state. 
""" try: executor = MigrationExecutor(connection) self.assertTableNotExists("lookuperror_a_a1") self.assertTableNotExists("lookuperror_b_b1") self.assertTableNotExists("lookuperror_c_c1") executor.migrate([ ("lookuperror_a", "0004_a4"), ("lookuperror_b", "0003_b3"), ("lookuperror_c", "0003_c3"), ]) self.assertTableExists("lookuperror_b_b3") self.assertTableExists("lookuperror_a_a4") self.assertTableExists("lookuperror_c_c3") # Rebuild the graph to reflect the new DB state executor.loader.build_graph() # Migrate backwards -- This led to a lookup LookupErrors because # lookuperror_b.B2 is not in the initial state (unrelated to app c) executor.migrate([("lookuperror_a", None)]) # Rebuild the graph to reflect the new DB state executor.loader.build_graph() finally: # Cleanup executor.migrate([ ("lookuperror_b", None), ("lookuperror_c", None) ]) self.assertTableNotExists("lookuperror_a_a1") self.assertTableNotExists("lookuperror_b_b1") self.assertTableNotExists("lookuperror_c_c1") @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_process_callback(self): """ #24129 - Tests callback process """ call_args_list = [] def callback(*args): call_args_list.append(args) executor = MigrationExecutor(connection, progress_callback=callback) # Were the tables there before? 
self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") executor.migrate([ ("migrations", "0001_initial"), ("migrations", "0002_second"), ]) # Rebuild the graph to reflect the new DB state executor.loader.build_graph() executor.migrate([ ("migrations", None), ("migrations", None), ]) self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") migrations = executor.loader.graph.nodes expected = [ ("render_start", ), ("render_success", ), ("apply_start", migrations['migrations', '0001_initial'], False), ("apply_success", migrations['migrations', '0001_initial'], False), ("apply_start", migrations['migrations', '0002_second'], False), ("apply_success", migrations['migrations', '0002_second'], False), ("render_start", ), ("render_success", ), ("unapply_start", migrations['migrations', '0002_second'], False), ("unapply_success", migrations['migrations', '0002_second'], False), ("unapply_start", migrations['migrations', '0001_initial'], False), ("unapply_success", migrations['migrations', '0001_initial'], False), ] self.assertEqual(call_args_list, expected) @override_settings( INSTALLED_APPS=[ "migrations.migrations_test_apps.alter_fk.author_app", "migrations.migrations_test_apps.alter_fk.book_app", ] ) def test_alter_id_type_with_fk(self): try: executor = MigrationExecutor(connection) self.assertTableNotExists("author_app_author") self.assertTableNotExists("book_app_book") # Apply initial migrations executor.migrate([ ("author_app", "0001_initial"), ("book_app", "0001_initial"), ]) self.assertTableExists("author_app_author") self.assertTableExists("book_app_book") # Rebuild the graph to reflect the new DB state executor.loader.build_graph() # Apply PK type alteration executor.migrate([("author_app", "0002_alter_id")]) # Rebuild the graph to reflect the new DB state executor.loader.build_graph() finally: # We can't simply unapply the migrations here because there is no # implicit cast from VARCHAR 
to INT on the database level. with connection.schema_editor() as editor: editor.execute(editor.sql_delete_table % {"table": "book_app_book"}) editor.execute(editor.sql_delete_table % {"table": "author_app_author"}) self.assertTableNotExists("author_app_author") self.assertTableNotExists("book_app_book") @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}) def test_apply_all_replaced_marks_replacement_as_applied(self): """ Applying all replaced migrations marks replacement as applied (#24628). """ recorder = MigrationRecorder(connection) # Place the database in a state where the replaced migrations are # partially applied: 0001 is applied, 0002 is not. recorder.record_applied("migrations", "0001_initial") executor = MigrationExecutor(connection) # Use fake because we don't actually have the first migration # applied, so the second will fail. And there's no need to actually # create/modify tables here, we're just testing the # MigrationRecord, which works the same with or without fake. executor.migrate([("migrations", "0002_second")], fake=True) # Because we've now applied 0001 and 0002 both, their squashed # replacement should be marked as applied. self.assertIn( ("migrations", "0001_squashed_0002"), recorder.applied_migrations(), ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}) def test_migrate_marks_replacement_applied_even_if_it_did_nothing(self): """ A new squash migration will be marked as applied even if all its replaced migrations were previously already applied (#24628). 
""" recorder = MigrationRecorder(connection) # Record all replaced migrations as applied recorder.record_applied("migrations", "0001_initial") recorder.record_applied("migrations", "0002_second") executor = MigrationExecutor(connection) executor.migrate([("migrations", "0001_squashed_0002")]) # Because 0001 and 0002 are both applied, even though this migrate run # didn't apply anything new, their squashed replacement should be # marked as applied. self.assertIn( ("migrations", "0001_squashed_0002"), recorder.applied_migrations(), ) class FakeLoader(object): def __init__(self, graph, applied): self.graph = graph self.applied_migrations = applied class FakeMigration(object): """Really all we need is any object with a debug-useful repr.""" def __init__(self, name): self.name = name def __repr__(self): return 'M<%s>' % self.name class ExecutorUnitTests(TestCase): """(More) isolated unit tests for executor methods.""" def test_minimize_rollbacks(self): """ Minimize unnecessary rollbacks in connected apps. When you say "./manage.py migrate appA 0001", rather than migrating to just after appA-0001 in the linearized migration plan (which could roll back migrations in other apps that depend on appA 0001, but don't need to be rolled back since we're not rolling back appA 0001), we migrate to just before appA-0002. """ a1_impl = FakeMigration('a1') a1 = ('a', '1') a2_impl = FakeMigration('a2') a2 = ('a', '2') b1_impl = FakeMigration('b1') b1 = ('b', '1') graph = MigrationGraph() graph.add_node(a1, a1_impl) graph.add_node(a2, a2_impl) graph.add_node(b1, b1_impl) graph.add_dependency(None, b1, a1) graph.add_dependency(None, a2, a1) executor = MigrationExecutor(None) executor.loader = FakeLoader(graph, {a1, b1, a2}) plan = executor.migration_plan({a1}) self.assertEqual(plan, [(a2_impl, True)]) def test_minimize_rollbacks_branchy(self): """ Minimize rollbacks when target has multiple in-app children. 
a: 1 <---- 3 <--\ \ \- 2 <--- 4 \ \ b: \- 1 <--- 2 """ a1_impl = FakeMigration('a1') a1 = ('a', '1') a2_impl = FakeMigration('a2') a2 = ('a', '2') a3_impl = FakeMigration('a3') a3 = ('a', '3') a4_impl = FakeMigration('a4') a4 = ('a', '4') b1_impl = FakeMigration('b1') b1 = ('b', '1') b2_impl = FakeMigration('b2') b2 = ('b', '2') graph = MigrationGraph() graph.add_node(a1, a1_impl) graph.add_node(a2, a2_impl) graph.add_node(a3, a3_impl) graph.add_node(a4, a4_impl) graph.add_node(b1, b1_impl) graph.add_node(b2, b2_impl) graph.add_dependency(None, a2, a1) graph.add_dependency(None, a3, a1) graph.add_dependency(None, a4, a2) graph.add_dependency(None, a4, a3) graph.add_dependency(None, b2, b1) graph.add_dependency(None, b1, a1) graph.add_dependency(None, b2, a2) executor = MigrationExecutor(None) executor.loader = FakeLoader(graph, {a1, b1, a2, b2, a3, a4}) plan = executor.migration_plan({a1}) should_be_rolled_back = [b2_impl, a4_impl, a2_impl, a3_impl] exp = [(m, True) for m in should_be_rolled_back] self.assertEqual(plan, exp) def test_backwards_nothing_to_do(self): """ If the current state satisfies the given target, do nothing. a: 1 <--- 2 b: \- 1 c: \- 1 If a1 is applied already and a2 is not, and we're asked to migrate to a1, don't apply or unapply b1 or c1, regardless of their current state. """ a1_impl = FakeMigration('a1') a1 = ('a', '1') a2_impl = FakeMigration('a2') a2 = ('a', '2') b1_impl = FakeMigration('b1') b1 = ('b', '1') c1_impl = FakeMigration('c1') c1 = ('c', '1') graph = MigrationGraph() graph.add_node(a1, a1_impl) graph.add_node(a2, a2_impl) graph.add_node(b1, b1_impl) graph.add_node(c1, c1_impl) graph.add_dependency(None, a2, a1) graph.add_dependency(None, b1, a1) graph.add_dependency(None, c1, a1) executor = MigrationExecutor(None) executor.loader = FakeLoader(graph, {a1, b1}) plan = executor.migration_plan({a1}) self.assertEqual(plan, [])
BartoszCichecki/onlinepython
refs/heads/master
onlinepython/pypy-2.4.0-win32/lib-python/2.7/plat-mac/lib-scriptpackages/SystemEvents/Disk_Folder_File_Suite.py
81
"""Suite Disk-Folder-File Suite: Terms and Events for controlling Disks, Folders, and Files Level 1, version 1 Generated from /System/Library/CoreServices/System Events.app AETE/AEUT resource version 1/0, language 0, script 0 """ import aetools import MacOS _code = 'cdis' class Disk_Folder_File_Suite_Events: _argmap_move = { 'to' : 'insh', } def move(self, _object, _attributes={}, **_arguments): """move: Move disk item(s) to a new location. Required argument: the object for the command Keyword argument to: The new location for the disk item(s). Keyword argument _attributes: AppleEvent attribute dictionary Returns: the reply for the command """ _code = 'core' _subcode = 'move' aetools.keysubst(_arguments, self._argmap_move) _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.get('errn', 0): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] class application(aetools.ComponentItem): """application - The Disk-Folder-File Suite host program """ want = 'capp' class _Prop__3c_Inheritance_3e_(aetools.NProperty): """<Inheritance> - All of the properties of the superclass. """ which = 'c@#^' want = 'capp' _3c_Inheritance_3e_ = _Prop__3c_Inheritance_3e_() class _Prop_folder_actions_enabled(aetools.NProperty): """folder actions enabled - Are Folder Actions currently being processed? 
""" which = 'faen' want = 'bool' folder_actions_enabled = _Prop_folder_actions_enabled() class _Prop_properties(aetools.NProperty): """properties - every property of the Disk-Folder-File Suite host program """ which = 'pALL' want = '****' properties = _Prop_properties() # element 'cdis' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'cfol' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'cobj' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'cwin' as ['name', 'indx', 'rele', 'rang', 'test', 'ID '] # element 'docu' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'file' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'foac' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'logi' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'pcap' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'pcda' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'prcs' as ['name', 'indx', 'rele', 'rang', 'test'] applications = application class disk(aetools.ComponentItem): """disk - A disk in the file system """ want = 'cdis' class _Prop_POSIX_path(aetools.NProperty): """POSIX path - the POSIX file system path of the disk """ which = 'posx' want = 'utxt' class _Prop_capacity(aetools.NProperty): """capacity - the total number of bytes (free or used) on the disk """ which = 'capa' want = 'magn' class _Prop_ejectable(aetools.NProperty): """ejectable - Can the media be ejected (floppies, CD's, and so on)? """ which = 'isej' want = 'bool' class _Prop_format(aetools.NProperty): """format - the file system format of this disk """ which = 'dfmt' want = 'edfm' class _Prop_free_space(aetools.NProperty): """free space - the number of free bytes left on the disk """ which = 'frsp' want = 'magn' class _Prop_ignore_privileges(aetools.NProperty): """ignore privileges - Ignore permissions on this disk? """ which = 'igpr' want = 'bool' class _Prop_local_volume(aetools.NProperty): """local volume - Is the media a local volume (as opposed to a file server? 
""" which = 'isrv' want = 'bool' class _Prop_name(aetools.NProperty): """name - the name of the disk """ which = 'pnam' want = 'utxt' class _Prop_path(aetools.NProperty): """path - the file system path of the disk """ which = 'ppth' want = 'utxt' class _Prop_startup(aetools.NProperty): """startup - Is this disk the boot disk? """ which = 'istd' want = 'bool' class _Prop_volume(aetools.NProperty): """volume - the volume on which the folder resides """ which = 'volu' want = 'utxt' # element 'cfol' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'cobj' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'file' as ['name', 'indx', 'rele', 'rang', 'test'] disks = disk class folder(aetools.ComponentItem): """folder - A folder in the file system """ want = 'cfol' # element 'cfol' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'cfol' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'cobj' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'cobj' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'file' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'file' as ['name', 'indx', 'rele', 'rang', 'test'] folders = folder class item(aetools.ComponentItem): """item - An item in the file system """ want = 'cobj' class _Prop_busy_status(aetools.NProperty): """busy status - Is the item busy? """ which = 'busy' want = 'bool' class _Prop_creation_date(aetools.NProperty): """creation date - the date on which the item was created """ which = 'ascd' want = '****' class _Prop_modification_date(aetools.NProperty): """modification date - the date on which the item was last modified """ which = 'asmo' want = '****' class _Prop_name_extension(aetools.NProperty): """name extension - the extension portion of the name """ which = 'extn' want = 'utxt' class _Prop_package_folder(aetools.NProperty): """package folder - Is the item a package? 
""" which = 'pkgf' want = 'bool' class _Prop_url(aetools.NProperty): """url - the url of the item """ which = 'url ' want = 'utxt' class _Prop_visible(aetools.NProperty): """visible - Is the item visible? """ which = 'visi' want = 'bool' # element 'cfol' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'cobj' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'file' as ['name', 'indx', 'rele', 'rang', 'test'] items = item class file(aetools.ComponentItem): """file - A file in the file system """ want = 'file' class _Prop_creator_type(aetools.NProperty): """creator type - the OSType identifying the application that created the item """ which = 'fcrt' want = 'utxt' class _Prop_file_type(aetools.NProperty): """file type - the OSType identifying the type of data contained in the item """ which = 'asty' want = 'utxt' class _Prop_physical_size(aetools.NProperty): """physical size - the actual space used by the file on disk """ which = 'phys' want = 'magn' class _Prop_product_version(aetools.NProperty): """product version - the version of the product (visible at the top of the ?et Info?window) """ which = 'ver2' want = 'utxt' class _Prop_size(aetools.NProperty): """size - the logical size of the file """ which = 'ptsz' want = 'magn' class _Prop_stationery(aetools.NProperty): """stationery - Is the file a stationery pad? 
""" which = 'pspd' want = 'bool' class _Prop_version(aetools.NProperty): """version - the version of the file (visible at the bottom of the ?et Info?window) """ which = 'vers' want = 'utxt' # element 'cfol' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'cobj' as ['name', 'indx', 'rele', 'rang', 'test'] # element 'file' as ['name', 'indx', 'rele', 'rang', 'test'] files = file application._superclassnames = [] import Standard_Suite import Folder_Actions_Suite import Login_Items_Suite import Processes_Suite application._privpropdict = { '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_, 'folder_actions_enabled' : _Prop_folder_actions_enabled, 'properties' : _Prop_properties, } application._privelemdict = { 'application_process' : Processes_Suite.application_process, 'desk_accessory_process' : Processes_Suite.desk_accessory_process, 'disk' : disk, 'document' : Standard_Suite.document, 'file' : file, 'folder' : folder, 'folder_action' : Folder_Actions_Suite.folder_action, 'item' : item, 'login_item' : Login_Items_Suite.login_item, 'process' : Processes_Suite.process, 'window' : Standard_Suite.window, } disk._superclassnames = ['item'] disk._privpropdict = { 'POSIX_path' : _Prop_POSIX_path, '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_, 'capacity' : _Prop_capacity, 'ejectable' : _Prop_ejectable, 'format' : _Prop_format, 'free_space' : _Prop_free_space, 'ignore_privileges' : _Prop_ignore_privileges, 'local_volume' : _Prop_local_volume, 'name' : _Prop_name, 'path' : _Prop_path, 'properties' : _Prop_properties, 'startup' : _Prop_startup, 'volume' : _Prop_volume, } disk._privelemdict = { 'file' : file, 'folder' : folder, 'item' : item, } folder._superclassnames = ['item'] folder._privpropdict = { 'POSIX_path' : _Prop_POSIX_path, '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_, 'name' : _Prop_name, 'path' : _Prop_path, 'properties' : _Prop_properties, 'volume' : _Prop_volume, } folder._privelemdict = { 'file' : file, 'file' : file, 'folder' : folder, 'folder' : 
folder, 'item' : item, 'item' : item, } item._superclassnames = [] item._privpropdict = { 'POSIX_path' : _Prop_POSIX_path, '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_, 'busy_status' : _Prop_busy_status, 'creation_date' : _Prop_creation_date, 'modification_date' : _Prop_modification_date, 'name' : _Prop_name, 'name_extension' : _Prop_name_extension, 'package_folder' : _Prop_package_folder, 'path' : _Prop_path, 'properties' : _Prop_properties, 'url' : _Prop_url, 'visible' : _Prop_visible, 'volume' : _Prop_volume, } item._privelemdict = { 'file' : file, 'folder' : folder, 'item' : item, } file._superclassnames = ['item'] file._privpropdict = { 'POSIX_path' : _Prop_POSIX_path, '_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_, 'creator_type' : _Prop_creator_type, 'file_type' : _Prop_file_type, 'name' : _Prop_name, 'path' : _Prop_path, 'physical_size' : _Prop_physical_size, 'product_version' : _Prop_product_version, 'properties' : _Prop_properties, 'size' : _Prop_size, 'stationery' : _Prop_stationery, 'version' : _Prop_version, 'volume' : _Prop_volume, } file._privelemdict = { 'file' : file, 'folder' : folder, 'item' : item, } _Enum_edfm = { 'MS_2d_DOS_format' : 'dfms', # MS-DOS format 'Apple_Photo_format' : 'dfph', # Apple Photo format 'ISO_9660_format' : 'df96', # ISO 9660 format 'QuickTake_format' : 'dfqt', # QuickTake format 'AppleShare_format' : 'dfas', # AppleShare format 'High_Sierra_format' : 'dfhs', # High Sierra format 'Mac_OS_Extended_format' : 'dfh+', # Mac OS Extended format 'UDF_format' : 'dfud', # UDF format 'unknown_format' : 'df??', # unknown format 'audio_format' : 'dfau', # audio format 'Mac_OS_format' : 'dfhf', # Mac OS format 'UFS_format' : 'dfuf', # UFS format 'NFS_format' : 'dfnf', # NFS format 'ProDOS_format' : 'dfpr', # ProDOS format 'WebDAV_format' : 'dfwd', # WebDAV format } # # Indices of types declared in this module # _classdeclarations = { 'capp' : application, 'cdis' : disk, 'cfol' : folder, 'cobj' : item, 'file' : file, } 
_propdeclarations = { 'ascd' : _Prop_creation_date, 'asmo' : _Prop_modification_date, 'asty' : _Prop_file_type, 'busy' : _Prop_busy_status, 'c@#^' : _Prop__3c_Inheritance_3e_, 'capa' : _Prop_capacity, 'dfmt' : _Prop_format, 'extn' : _Prop_name_extension, 'faen' : _Prop_folder_actions_enabled, 'fcrt' : _Prop_creator_type, 'frsp' : _Prop_free_space, 'igpr' : _Prop_ignore_privileges, 'isej' : _Prop_ejectable, 'isrv' : _Prop_local_volume, 'istd' : _Prop_startup, 'pALL' : _Prop_properties, 'phys' : _Prop_physical_size, 'pkgf' : _Prop_package_folder, 'pnam' : _Prop_name, 'posx' : _Prop_POSIX_path, 'ppth' : _Prop_path, 'pspd' : _Prop_stationery, 'ptsz' : _Prop_size, 'url ' : _Prop_url, 'ver2' : _Prop_product_version, 'vers' : _Prop_version, 'visi' : _Prop_visible, 'volu' : _Prop_volume, } _compdeclarations = { } _enumdeclarations = { 'edfm' : _Enum_edfm, }
andimarafioti/intercomunicador
refs/heads/master
helpers/worker/worker.py
1
import threading __author__ = 'Dev6' ''' This class simplifies thread usage. Examples: 1 - Worker.call(aFunction).withArgs(arg1, arg2..argN).start() / Runs a normal thread starting at aFunction 2 - Worker.call(aFunction).withArgs(arg1, arg2..argN).asDaemon.start() / Same as before, but uses a daemon thread 3 - Worker.call(aFunction).withArgs(arg1, arg2..argN).every(T).asDaemon.start() / Runs a thread every T seconds 4 - Worker.call(aFunction).withArgs(arg1, arg2..argN).after(T).asDaemon.start() / Runs a thread after T seconds NOTE: The 'call' method should be called first ALWAYS!! CronicWorker - Calling the 'every(seconds)' function returns a CronicWorker with the original Worker attributes. DeferredWorker - Calling the 'after(seconds)' function returns a DeferredWorker with the original Worker attributes. NOTE: Calling 'start()' more than once on a DeferredWorker will try to 'cancel()' the first thread before launching a new one ''' class Worker(object): def __init__(self): self._thread = None self._isDaemon = False self._function = None self._callback = lambda: None self._arguments = () @staticmethod def call(function): worker = Worker() worker._function = function return worker def withArgs(self, *args): self._arguments = args return self def withCallback(self, callback): self._callback = callback return self @property def asDaemon(self): self._isDaemon = True return self def start(self): this = self callback = self._callback def callFunctionAndThenCallback(*args): this._function(*args) callback() self._thread = threading.Thread(target=callFunctionAndThenCallback, args=self._arguments) self._thread.daemon = self._isDaemon self._thread.start() return self def isWorking(self): return self._thread.isAlive() if self._thread else False def join(self): if self.isWorking(): self._thread.join() def every(self, seconds): from helpers.worker.cronicWorker import CronicWorker cronicWorker = CronicWorker.fromWorker(self) cronicWorker._repeatInterval = seconds return 
cronicWorker def after(self, seconds): from helpers.worker.deferredWorker import DeferredWorker deferredWorker = DeferredWorker.fromWorker(self) deferredWorker._waitTime = seconds return deferredWorker def _reset(self): self._thread = None self._isDaemon = False self._function = None self._callback = lambda: None self._arguments = ()
vitan/hue
refs/heads/master
desktop/core/ext-py/Paste-1.7.2/paste/auth/grantip.py
28
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php """ Grant roles and logins based on IP address. """ from paste.util import ip4 class GrantIPMiddleware(object): """ On each request, ``ip_map`` is checked against ``REMOTE_ADDR`` and logins and roles are assigned based on that. ``ip_map`` is a map of {ip_mask: (username, roles)}. Either ``username`` or ``roles`` may be None. Roles may also be prefixed with ``-``, like ``'-system'`` meaning that role should be revoked. ``'__remove__'`` for a username will remove the username. If ``clobber_username`` is true (default) then any user specification will override the current value of ``REMOTE_USER``. ``'__remove__'`` will always clobber the username. ``ip_mask`` is something that `paste.util.ip4:IP4Range <class-paste.util.ip4.IP4Range.html>`_ can parse. Simple IP addresses, IP/mask, ip<->ip ranges, and hostnames are allowed. """ def __init__(self, app, ip_map, clobber_username=True): self.app = app self.ip_map = [] for key, value in ip_map.items(): self.ip_map.append((ip4.IP4Range(key), self._convert_user_role(value[0], value[1]))) self.clobber_username = clobber_username def _convert_user_role(self, username, roles): if roles and isinstance(roles, basestring): roles = roles.split(',') return (username, roles) def __call__(self, environ, start_response): addr = ip4.ip2int(environ['REMOTE_ADDR'], False) remove_user = False add_roles = [] for range, (username, roles) in self.ip_map: if addr in range: if roles: add_roles.extend(roles) if username == '__remove__': remove_user = True elif username: if (not environ.get('REMOTE_USER') or self.clobber_username): environ['REMOTE_USER'] = username if (remove_user and 'REMOTE_USER' in environ): del environ['REMOTE_USER'] if roles: self._set_roles(environ, add_roles) return self.app(environ, start_response) def _set_roles(self, environ, roles): cur_roles = 
environ.get('REMOTE_USER_TOKENS', '').split(',') # Get rid of empty roles: cur_roles = filter(None, cur_roles) remove_roles = [] for role in roles: if role.startswith('-'): remove_roles.append(role[1:]) else: if role not in cur_roles: cur_roles.append(role) for role in remove_roles: if role in cur_roles: cur_roles.remove(role) environ['REMOTE_USER_TOKENS'] = ','.join(cur_roles) def make_grantip(app, global_conf, clobber_username=False, **kw): """ Grant roles or usernames based on IP addresses. Config looks like this:: [filter:grant] use = egg:Paste#grantip clobber_username = true # Give localhost system role (no username): 127.0.0.1 = -:system # Give everyone in 192.168.0.* editor role: 192.168.0.0/24 = -:editor # Give one IP the username joe: 192.168.0.7 = joe # And one IP is should not be logged in: 192.168.0.10 = __remove__:-editor """ from paste.deploy.converters import asbool clobber_username = asbool(clobber_username) ip_map = {} for key, value in kw.items(): if ':' in value: username, role = value.split(':', 1) else: username = value role = '' if username == '-': username = '' if role == '-': role = '' ip_map[key] = value return GrantIPMiddleware(app, ip_map, clobber_username)
silviolima/EstudoAppengine
refs/heads/master
tekton/tekton-master/test/web_test/home.py
2
__author__ = 'renzo' def index(): pass
p4datasystems/CarnotKE
refs/heads/master
jyhton/out/production/jyhton/support.py
13
import sys is_jython = sys.platform[:4] == "java" import re, exceptions, thread, os, shutil import support_config as cfg if is_jython: import jarray from java.io import FileInputStream from java.io import FileOutputStream from java.util.jar import JarEntry from java.util.jar import JarFile from java.util.jar import JarInputStream from java.util.jar import JarOutputStream from java.util.jar import Manifest UNIX = os.pathsep == ":" WIN = os.pathsep == ";" test_jythonc = 1 if not UNIX ^ WIN: raise TestError("Unknown platform") class TestError(exceptions.Exception): def __init__(self, args): exceptions.Exception.__init__(self, args) class TestWarning(exceptions.Exception): def __init__(self, args): exceptions.Exception.__init__(self, args) class TestSkip(exceptions.Exception): def __init__(self, args): exceptions.Exception.__init__(self, args) def compare(s, pattern): m = re.search(pattern, str(s)) if m is None: raise TestError("string compare error\n '" + str(s) + "'\n '" + pattern + "'") def StreamReader(instream, outstream): while 1: ch = instream.read() if ch == -1: break outstream.write(ch) def execCmd(cmd, kw): __doc__ = """execute a command, and wait for its results returns 0 if everything was ok raises a TestError if the command did not end normally""" if kw.has_key("verbose") and kw["verbose"]: print cmd import java r = java.lang.Runtime.getRuntime() e = getattr(r, "exec") p = e(cmd) if kw.has_key("output"): outstream = java.io.FileOutputStream(kw['output']) else: outstream = java.lang.System.out if kw.has_key("error"): errstream = java.io.FileOutputStream(kw['error']) else: errstream = java.lang.System.out thread.start_new_thread(StreamReader, (p.inputStream, outstream)) thread.start_new_thread(StreamReader, (p.errorStream, errstream)) ret = p.waitFor() if ret != 0 and not kw.has_key("expectError"): raise TestError, "%s failed with %d" % (cmd, ret) return ret def compileJava(src, **kw): classfile = src.replace('.java', '.class') if not 'force' in kw and 
os.path.exists(classfile) and os.stat(src).st_mtime < os.stat(classfile).st_mtime: return 0 classpath = cfg.classpath if "classpath" in kw: classpath = os.pathsep.join([cfg.classpath, kw["classpath"]]) if UNIX: cmd = "%s/bin/javac -classpath %s %s" % (cfg.java_home, classpath, src) elif WIN: src = src.replace("/", "\\") cmd = 'cmd /C "%s/bin/javac.exe -classpath %s %s"' % (cfg.java_home, classpath, src) return execCmd(cmd, kw) def runJava(cls, **kw): classpath = cfg.classpath if "classpath" in kw: classpath = os.pathsep.join([cfg.classpath, kw["classpath"]]) if kw.get('pass_jython_home', 0): defs = "-Dpython.home=%s" % cfg.jython_home else: defs = '' if UNIX: cmd = ['/bin/sh', '-c', "%s/bin/java -classpath %s %s %s" % (cfg.java_home, classpath, defs, cls)] elif WIN: cmd = 'cmd /C "%s/bin/java.exe -classpath %s %s %s"' % (cfg.java_home, classpath, defs, cls) return execCmd(cmd, kw) def runJavaJar(jar, *args, **kw): argString = " ".join(args) if UNIX: cmd = ['/bin/sh', '-c', "%s/bin/java -jar %s %s" % (cfg.java_home, jar, argString)] elif WIN: cmd = 'cmd /C "%s/bin/java.exe -jar %s %s"' % (cfg.java_home, jar, argString) return execCmd(cmd, kw) def runJython(cls, **kw): javaargs = '' if 'javaargs' in kw: javaargs = kw['javaargs'] classpath = cfg.classpath if "classpath" in kw: classpath = os.pathsep.join([cfg.classpath, kw["classpath"]]) if UNIX: cmd = "%s/bin/java -classpath %s %s -Dpython.home=%s org.python.util.jython %s" % (cfg.java_home, classpath, javaargs, cfg.jython_home, cls) elif WIN: cmd = 'cmd /C "%s/bin/java.exe -classpath %s %s -Dpython.home=%s org.python.util.jython %s"' % (cfg.java_home, classpath, javaargs, cfg.jython_home, cls) return execCmd(cmd, kw) def compileJPythonc(*files, **kw): if not test_jythonc: raise TestSkip('Skipping pythonc') if os.path.isdir("jpywork") and not kw.has_key("keep"): shutil.rmtree("jpywork", 1) cmd = "-i " if kw.has_key("core"): cmd = cmd + "--core " if kw.has_key("deep"): cmd = cmd + "--deep " if kw.has_key("all"): cmd = 
cmd + "--all " if kw.has_key("package"): cmd = cmd + "--package %s " % kw['package'] if kw.has_key("addpackages"): cmd = cmd + "--addpackages %s " % kw['addpackages'] if kw.has_key("jar"): cmd = cmd + "--jar %s " % kw['jar'] if os.path.isfile(kw['jar']): os.remove(kw['jar']) cmd = cmd + " ".join(files) classpath = cfg.classpath if "classpath" in kw: classpath = os.pathsep.join([cfg.classpath, kw["classpath"]]) jythonc = "%s/Tools/jythonc/jythonc.py %s" % (cfg.jython_home, cmd) if UNIX: cmd = "%s/bin/java -classpath %s -Dpython.home=%s org.python.util.jython %s" % (cfg.java_home, classpath, cfg.jython_home, jythonc) elif WIN: cmd = 'cmd /C "%s/bin/java.exe -classpath \"%s\" -Dpython.home=%s org.python.util.jython %s"' % (cfg.java_home, classpath, cfg.jython_home, jythonc) return execCmd(cmd, kw) def grep(file, text, count=0): f = open(file, "r") lines = f.readlines() f.close() result = [] for line in lines: if re.search(text, line): result.append(line) if count: return len(result) return result class JarPacker: __doc__ = """helper class to pack stuff into a jar file - the terms 'file' and 'dir' mean java.io.File here """ def __init__(self, jarFile, bufsize=1024): self._jarFile = jarFile self._bufsize = bufsize self._manifest = None self._jarOutputStream = None def close(self): self.getJarOutputStream().close() def addManifestFile(self, manifestFile): __doc__ = """only one manifest file can be added""" self.addManifest(Manifest(FileInputStream(manifestFile))) def addManifest(self, manifest): if not self._manifest: self._manifest = manifest def addFile(self, file, parentDirName=None): buffer = jarray.zeros(self._bufsize, 'b') inputStream = FileInputStream(file) jarEntryName = file.getName() if parentDirName: jarEntryName = parentDirName + "/" + jarEntryName self.getJarOutputStream().putNextEntry(JarEntry(jarEntryName)) read = inputStream.read(buffer) while read <> -1: self.getJarOutputStream().write(buffer, 0, read) read = inputStream.read(buffer) 
self.getJarOutputStream().closeEntry() inputStream.close() def addDirectory(self, dir, parentDirName=None): if not dir.isDirectory(): return filesInDir = dir.listFiles() for currentFile in filesInDir: if currentFile.isFile(): if parentDirName: self.addFile(currentFile, parentDirName + "/" + dir.getName()) else: self.addFile(currentFile, dir.getName()) else: if parentDirName: newParentDirName = parentDirName + "/" + dir.getName() else: newParentDirName = dir.getName() self.addDirectory(currentFile, newParentDirName) def addJarFile(self, jarFile): __doc__ = """if you want to add a .jar file with a MANIFEST, add it first""" jarJarFile = JarFile(jarFile) self.addManifest(jarJarFile.getManifest()) jarJarFile.close() jarInputStream = JarInputStream(FileInputStream(jarFile)) jarEntry = jarInputStream.getNextJarEntry() while jarEntry: self.getJarOutputStream().putNextEntry(jarEntry) buffer = jarray.zeros(self._bufsize, 'b') read = jarInputStream.read(buffer) while read <> -1: self.getJarOutputStream().write(buffer, 0, read) read = jarInputStream.read(buffer) self.getJarOutputStream().closeEntry() jarEntry = jarInputStream.getNextJarEntry() def getJarOutputStream(self): if not self._jarOutputStream: if self._manifest: self._jarOutputStream = JarOutputStream(FileOutputStream(self._jarFile), self._manifest) else: self._jarOutputStream = JarOutputStream(FileOutputStream(self._jarFile)) return self._jarOutputStream
thomasgilgenast/gilgistatus-nonrel
refs/heads/master
django/utils/dates.py
488
"Commonly-used date structures" from django.utils.translation import ugettext_lazy as _, pgettext_lazy WEEKDAYS = { 0:_('Monday'), 1:_('Tuesday'), 2:_('Wednesday'), 3:_('Thursday'), 4:_('Friday'), 5:_('Saturday'), 6:_('Sunday') } WEEKDAYS_ABBR = { 0:_('Mon'), 1:_('Tue'), 2:_('Wed'), 3:_('Thu'), 4:_('Fri'), 5:_('Sat'), 6:_('Sun') } WEEKDAYS_REV = { 'monday':0, 'tuesday':1, 'wednesday':2, 'thursday':3, 'friday':4, 'saturday':5, 'sunday':6 } MONTHS = { 1:_('January'), 2:_('February'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'), 7:_('July'), 8:_('August'), 9:_('September'), 10:_('October'), 11:_('November'), 12:_('December') } MONTHS_3 = { 1:_('jan'), 2:_('feb'), 3:_('mar'), 4:_('apr'), 5:_('may'), 6:_('jun'), 7:_('jul'), 8:_('aug'), 9:_('sep'), 10:_('oct'), 11:_('nov'), 12:_('dec') } MONTHS_3_REV = { 'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8, 'sep':9, 'oct':10, 'nov':11, 'dec':12 } MONTHS_AP = { # month names in Associated Press style 1: pgettext_lazy('abbrev. month', 'Jan.'), 2: pgettext_lazy('abbrev. month', 'Feb.'), 3: pgettext_lazy('abbrev. month', 'March'), 4: pgettext_lazy('abbrev. month', 'April'), 5: pgettext_lazy('abbrev. month', 'May'), 6: pgettext_lazy('abbrev. month', 'June'), 7: pgettext_lazy('abbrev. month', 'July'), 8: pgettext_lazy('abbrev. month', 'Aug.'), 9: pgettext_lazy('abbrev. month', 'Sept.'), 10: pgettext_lazy('abbrev. month', 'Oct.'), 11: pgettext_lazy('abbrev. month', 'Nov.'), 12: pgettext_lazy('abbrev. month', 'Dec.') } MONTHS_ALT = { # required for long date representation by some locales 1: pgettext_lazy('alt. month', 'January'), 2: pgettext_lazy('alt. month', 'February'), 3: pgettext_lazy('alt. month', 'March'), 4: pgettext_lazy('alt. month', 'April'), 5: pgettext_lazy('alt. month', 'May'), 6: pgettext_lazy('alt. month', 'June'), 7: pgettext_lazy('alt. month', 'July'), 8: pgettext_lazy('alt. month', 'August'), 9: pgettext_lazy('alt. month', 'September'), 10: pgettext_lazy('alt. 
month', 'October'), 11: pgettext_lazy('alt. month', 'November'), 12: pgettext_lazy('alt. month', 'December') }
CyanogenMod/android_kernel_motorola_msm8610
refs/heads/cm-14.1
tools/perf/scripts/python/net_dropmonitor.py
1258
# Monitor the system for dropped packets and proudce a report of drop locations and counts import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * drop_log = {} kallsyms = [] def get_kallsyms_table(): global kallsyms try: f = open("/proc/kallsyms", "r") linecount = 0 for line in f: linecount = linecount+1 f.seek(0) except: return j = 0 for line in f: loc = int(line.split()[0], 16) name = line.split()[2] j = j +1 if ((j % 100) == 0): print "\r" + str(j) + "/" + str(linecount), kallsyms.append({ 'loc': loc, 'name' : name}) print "\r" + str(j) + "/" + str(linecount) kallsyms.sort() return def get_sym(sloc): loc = int(sloc) for i in kallsyms[::-1]: if loc >= i['loc']: return (i['name'], loc - i['loc']) return (None, 0) def print_drop_table(): print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") for i in drop_log.keys(): (sym, off) = get_sym(i) if sym == None: sym = i print "%25s %25s %25s" % (sym, off, drop_log[i]) def trace_begin(): print "Starting trace (Ctrl-C to dump results)" def trace_end(): print "Gathering kallsyms data" get_kallsyms_table() print_drop_table() # called from perf, when it finds a correspoinding event def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, location, protocol): slocation = str(location) try: drop_log[slocation] = drop_log[slocation] + 1 except: drop_log[slocation] = 1
mgogoulos/libcloud
refs/heads/trunk
libcloud/backup/drivers/dummy.py
31
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from libcloud.backup.base import BackupDriver class DummyBackupDriver(BackupDriver): """ Dummy Backup driver. >>> from libcloud.backup.drivers.dummy import DummyBackupDriver >>> driver = DummyBackupDriver('key', 'secret') >>> driver.name 'Dummy Backup Provider' """ name = 'Dummy Backup Provider' website = 'http://example.com' def __init__(self, api_key, api_secret): """ :param api_key: API key or username to used (required) :type api_key: ``str`` :param api_secret: Secret password to be used (required) :type api_secret: ``str`` :rtype: ``None`` """
kanagasabapathi/python-for-android
refs/heads/master
python-modules/twisted/twisted/test/test_stringtransport.py
56
# Copyright (c) 2009-2010 Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.test.proto_helpers}. """ from zope.interface.verify import verifyObject from twisted.internet.interfaces import (ITransport, IPushProducer, IConsumer, IReactorTCP, IReactorSSL, IReactorUNIX, IAddress, IListeningPort, IConnector) from twisted.internet.address import IPv4Address from twisted.trial.unittest import TestCase from twisted.test.proto_helpers import (StringTransport, MemoryReactor, RaisingMemoryReactor) from twisted.internet.protocol import ClientFactory, Factory class StringTransportTests(TestCase): """ Tests for L{twisted.test.proto_helpers.StringTransport}. """ def setUp(self): self.transport = StringTransport() def test_interfaces(self): """ L{StringTransport} instances provide L{ITransport}, L{IPushProducer}, and L{IConsumer}. """ self.assertTrue(verifyObject(ITransport, self.transport)) self.assertTrue(verifyObject(IPushProducer, self.transport)) self.assertTrue(verifyObject(IConsumer, self.transport)) def test_registerProducer(self): """ L{StringTransport.registerProducer} records the arguments supplied to it as instance attributes. """ producer = object() streaming = object() self.transport.registerProducer(producer, streaming) self.assertIdentical(self.transport.producer, producer) self.assertIdentical(self.transport.streaming, streaming) def test_disallowedRegisterProducer(self): """ L{StringTransport.registerProducer} raises L{RuntimeError} if a producer is already registered. """ producer = object() self.transport.registerProducer(producer, True) self.assertRaises( RuntimeError, self.transport.registerProducer, object(), False) self.assertIdentical(self.transport.producer, producer) self.assertTrue(self.transport.streaming) def test_unregisterProducer(self): """ L{StringTransport.unregisterProducer} causes the transport to forget about the registered producer and makes it possible to register a new one. 
""" oldProducer = object() newProducer = object() self.transport.registerProducer(oldProducer, False) self.transport.unregisterProducer() self.assertIdentical(self.transport.producer, None) self.transport.registerProducer(newProducer, True) self.assertIdentical(self.transport.producer, newProducer) self.assertTrue(self.transport.streaming) def test_invalidUnregisterProducer(self): """ L{StringTransport.unregisterProducer} raises L{RuntimeError} if called when no producer is registered. """ self.assertRaises(RuntimeError, self.transport.unregisterProducer) def test_initialProducerState(self): """ L{StringTransport.producerState} is initially C{'producing'}. """ self.assertEqual(self.transport.producerState, 'producing') def test_pauseProducing(self): """ L{StringTransport.pauseProducing} changes the C{producerState} of the transport to C{'paused'}. """ self.transport.pauseProducing() self.assertEqual(self.transport.producerState, 'paused') def test_resumeProducing(self): """ L{StringTransport.resumeProducing} changes the C{producerState} of the transport to C{'producing'}. """ self.transport.pauseProducing() self.transport.resumeProducing() self.assertEqual(self.transport.producerState, 'producing') def test_stopProducing(self): """ L{StringTransport.stopProducing} changes the C{'producerState'} of the transport to C{'stopped'}. """ self.transport.stopProducing() self.assertEqual(self.transport.producerState, 'stopped') def test_stoppedTransportCannotPause(self): """ L{StringTransport.pauseProducing} raises L{RuntimeError} if the transport has been stopped. """ self.transport.stopProducing() self.assertRaises(RuntimeError, self.transport.pauseProducing) def test_stoppedTransportCannotResume(self): """ L{StringTransport.resumeProducing} raises L{RuntimeError} if the transport has been stopped. 
""" self.transport.stopProducing() self.assertRaises(RuntimeError, self.transport.resumeProducing) def test_disconnectingTransportCannotPause(self): """ L{StringTransport.pauseProducing} raises L{RuntimeError} if the transport is being disconnected. """ self.transport.loseConnection() self.assertRaises(RuntimeError, self.transport.pauseProducing) def test_disconnectingTransportCannotResume(self): """ L{StringTransport.resumeProducing} raises L{RuntimeError} if the transport is being disconnected. """ self.transport.loseConnection() self.assertRaises(RuntimeError, self.transport.resumeProducing) def test_loseConnectionSetsDisconnecting(self): """ L{StringTransport.loseConnection} toggles the C{disconnecting} instance variable to C{True}. """ self.assertFalse(self.transport.disconnecting) self.transport.loseConnection() self.assertTrue(self.transport.disconnecting) def test_specifiedHostAddress(self): """ If a host address is passed to L{StringTransport.__init__}, that value is returned from L{StringTransport.getHost}. """ address = object() self.assertIdentical(StringTransport(address).getHost(), address) def test_specifiedPeerAddress(self): """ If a peer address is passed to L{StringTransport.__init__}, that value is returned from L{StringTransport.getPeer}. """ address = object() self.assertIdentical( StringTransport(peerAddress=address).getPeer(), address) def test_defaultHostAddress(self): """ If no host address is passed to L{StringTransport.__init__}, an L{IPv4Address} is returned from L{StringTransport.getHost}. """ address = StringTransport().getHost() self.assertIsInstance(address, IPv4Address) def test_defaultPeerAddress(self): """ If no peer address is passed to L{StringTransport.__init__}, an L{IPv4Address} is returned from L{StringTransport.getPeer}. """ address = StringTransport().getPeer() self.assertIsInstance(address, IPv4Address) class ReactorTests(TestCase): """ Tests for L{MemoryReactor} and L{RaisingMemoryReactor}. 
""" def test_memoryReactorProvides(self): """ L{MemoryReactor} provides all of the attributes described by the interfaces it advertises. """ memoryReactor = MemoryReactor() verifyObject(IReactorTCP, memoryReactor) verifyObject(IReactorSSL, memoryReactor) verifyObject(IReactorUNIX, memoryReactor) def test_raisingReactorProvides(self): """ L{RaisingMemoryReactor} provides all of the attributes described by the interfaces it advertises. """ raisingReactor = RaisingMemoryReactor() verifyObject(IReactorTCP, raisingReactor) verifyObject(IReactorSSL, raisingReactor) verifyObject(IReactorUNIX, raisingReactor) def test_connectDestination(self): """ L{MemoryReactor.connectTCP}, L{MemoryReactor.connectSSL}, and L{MemoryReactor.connectUNIX} will return an L{IConnector} whose C{getDestination} method returns an L{IAddress} with attributes which reflect the values passed. """ memoryReactor = MemoryReactor() for connector in [memoryReactor.connectTCP( "test.example.com", 8321, ClientFactory()), memoryReactor.connectSSL( "test.example.com", 8321, ClientFactory(), None)]: verifyObject(IConnector, connector) address = connector.getDestination() verifyObject(IAddress, address) self.assertEquals(address.host, "test.example.com") self.assertEquals(address.port, 8321) connector = memoryReactor.connectUNIX("/fake/path", ClientFactory()) verifyObject(IConnector, connector) address = connector.getDestination() verifyObject(IAddress, address) self.assertEquals(address.name, "/fake/path") def test_listenDefaultHost(self): """ L{MemoryReactor.listenTCP}, L{MemoryReactor.listenSSL} and L{MemoryReactor.listenUNIX} will return an L{IListeningPort} whose C{getHost} method returns an L{IAddress}; C{listenTCP} and C{listenSSL} will have a default host of C{'0.0.0.0'}, and a port that reflects the value passed, and C{listenUNIX} will have a name that reflects the path passed. 
""" memoryReactor = MemoryReactor() for port in [memoryReactor.listenTCP(8242, Factory()), memoryReactor.listenSSL(8242, Factory(), None)]: verifyObject(IListeningPort, port) address = port.getHost() verifyObject(IAddress, address) self.assertEquals(address.host, '0.0.0.0') self.assertEquals(address.port, 8242) port = memoryReactor.listenUNIX("/path/to/socket", Factory()) verifyObject(IListeningPort, port) address = port.getHost() verifyObject(IAddress, address) self.assertEquals(address.name, "/path/to/socket")
pixelated-project/pixelated-user-agent
refs/heads/master
service/test/perf/tags/__init__.py
159
# # Copyright (c) 2014 ThoughtWorks, Inc. # # Pixelated is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pixelated is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
sarvex/django
refs/heads/master
tests/admin_autodiscover/admin.py
513
# Test fixture: an app admin module that registers a model and then fails on
# import. Used to verify that admin.autodiscover() propagates (rather than
# silently swallows) errors raised while importing an app's admin module.
from django.contrib import admin

from .models import Story

admin.site.register(Story)

# Deliberate failure -- autodiscover() must surface this exception to the test.
raise Exception("Bad admin module")
akeym/cyder
refs/heads/master
vendor-local/src/django-extensions/build/lib/django_extensions/management/commands/passwd.py
41
# NOTE: this file is Python 2 code (print statements); do not run under py3.
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
import getpass


class Command(BaseCommand):
    help = "Clone of the UNIX program ``passwd'', for django.contrib.auth."

    requires_model_validation = False

    def handle(self, *args, **options):
        """Interactively change the password of one django.contrib.auth user.

        Accepts zero or one positional argument: the username. With no
        argument, defaults to the OS user running the command (like passwd).
        Raises CommandError on bad arguments, unknown user, or an empty
        password pair; returns a success message string otherwise.
        """
        if len(args) > 1:
            raise CommandError("need exactly one or zero arguments for username")

        if args:
            username, = args
        else:
            # No username given: fall back to the current OS login name.
            username = getpass.getuser()

        try:
            u = User.objects.get(username=username)
        except User.DoesNotExist:
            raise CommandError("user %s does not exist" % username)

        print "Changing password for user", u.username
        # Loop until two non-empty, matching passwords are entered. The loop
        # is entered at least once because both start as the empty string.
        p1 = p2 = ""
        while "" in (p1, p2) or p1 != p2:
            p1 = getpass.getpass()
            p2 = getpass.getpass("Password (again): ")
            if p1 != p2:
                print "Passwords do not match, try again"
            elif "" in (p1, p2):
                # An empty entry (after a matching pair check) aborts.
                raise CommandError("aborted")

        u.set_password(p1)
        u.save()

        return "Password changed successfully for user %s\n" % u.username
antoinecarme/pyaf
refs/heads/master
tests/artificial/transf_Integration/trend_Lag1Trend/cycle_12/ar_12/test_artificial_1024_Integration_Lag1Trend_12_12_0.py
1
# Auto-generated pyaf benchmark case (file name encodes the parameters).
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art


# Build a synthetic daily series of 1024 points with a Lag1Trend trend,
# a 12-period cycle, AR order 12 and an "Integration" transform -- no noise
# (sigma=0) and no exogenous variables -- then run the standard pyaf
# modelling pipeline over it.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 12, transform = "Integration", sigma = 0.0, exog_count = 0, ar_order = 12);
wesleywiser/cargo
refs/heads/master
src/etc/dl-snapshot.py
1
"""Download and unpack the Cargo snapshot tarball for a given target triple.

Usage: dl-snapshot.py <target-triple>

Reads src/snapshots.txt (first line: snapshot date; following lines:
"<platform> <sha1>" for each supported target), downloads the matching
tarball with curl, verifies its SHA-1, and unpacks it into target/snapshot
with the leading "cargo-nightly-<triple>/" directory stripped.
"""
import distutils.spawn
import hashlib
import os
import subprocess
import sys
import tarfile
import shutil
import contextlib


def sha1_of(path):
    """Return the hex SHA-1 digest of the file at *path*.

    Uses a context manager so the file handle is closed promptly (the
    original open(...).read() leaked the handle until GC).
    """
    with open(path, 'rb') as f:
        return hashlib.sha1(f.read()).hexdigest()


with open('src/snapshots.txt') as f:
    lines = f.readlines()

date = lines[0]
linux32 = lines[1]
linux64 = lines[2]
mac32 = lines[3]
mac64 = lines[4]
win32 = lines[5]
win64 = lines[6]

triple = sys.argv[1]

# Split the triple into arch[/vendor]/os; the vendor component is optional
# (e.g. "i686-linux" vs "i686-unknown-linux-gnu").
ts = triple.split('-')
arch = ts[0]
if len(ts) == 2:
    vendor = 'unknown'
    target_os = ts[1]
else:
    vendor = ts[1]
    target_os = ts[2]

intel32 = (arch == 'i686') or (arch == 'i586')

# Map the requested triple onto its snapshot line and the canonical triple
# used in the tarball name.
me = None
if target_os == 'linux':
    if intel32:
        me = linux32
        new_triple = 'i686-unknown-linux-gnu'
    elif arch == 'x86_64':
        me = linux64
        new_triple = 'x86_64-unknown-linux-gnu'
elif target_os == 'darwin':
    if intel32:
        me = mac32
        new_triple = 'i686-apple-darwin'
    elif arch == 'x86_64':
        me = mac64
        new_triple = 'x86_64-apple-darwin'
elif target_os == 'windows':
    if intel32:
        me = win32
        new_triple = 'i686-pc-windows-gnu'
    elif arch == 'x86_64':
        me = win64
        new_triple = 'x86_64-pc-windows-gnu'

if me is None:
    raise Exception("no snapshot for the triple: " + triple)
triple = new_triple

# Renamed from "platform, hash" -- "hash" shadowed the builtin.
platform_name, expected_sha1 = me.strip().split()

tarball = 'cargo-nightly-' + triple + '.tar.gz'
url = 'https://static-rust-lang-org.s3.amazonaws.com/cargo-dist/' + date.strip() + '/' + tarball
dl_path = "target/dl/" + tarball
dst = "target/snapshot"

if not os.path.isdir('target/dl'):
    os.makedirs('target/dl')
if os.path.isdir(dst):
    shutil.rmtree(dst)

# Skip the download when a previously fetched tarball already matches the
# expected checksum.
exists = False
if os.path.exists(dl_path):
    if sha1_of(dl_path) == expected_sha1:
        print("file already present %s (%s)" % (dl_path, expected_sha1,))
        exists = True

if not exists:
    ret = subprocess.call(["curl", "-o", dl_path, url])
    if ret != 0:
        raise Exception("failed to fetch url")
    if sha1_of(dl_path) != expected_sha1:
        raise Exception("failed to verify the checksum of the snapshot")

# Unpack, moving every entry up one level to strip the leading
# "cargo-nightly-<triple>/" directory, then delete the now-empty wrapper.
with contextlib.closing(tarfile.open(dl_path)) as tar:
    for p in tar.getnames():
        name = p.replace("cargo-nightly-" + triple + "/", "", 1)
        fp = os.path.join(dst, name)
        print("extracting " + p)
        tar.extract(p, dst)
        tp = os.path.join(dst, p)
        # Don't clobber an already-created directory with itself.
        if os.path.isdir(tp) and os.path.exists(fp):
            continue
        shutil.move(tp, fp)

shutil.rmtree(os.path.join(dst, 'cargo-nightly-' + triple))
dhruvsrivastava/OJ
refs/heads/master
flask/lib/python2.7/site-packages/setuptools/command/develop.py
450
from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsError, DistutilsOptionError
import os
import glob
from pkg_resources import Distribution, PathMetadata, normalize_path
from setuptools.command.easy_install import easy_install
from setuptools.compat import PY3
import setuptools


class develop(easy_install):
    """Set up package for development"""

    description = "install package in 'development mode'"

    user_options = easy_install.user_options + [
        ("uninstall", "u", "Uninstall this source package"),
        ("egg-path=", None, "Set the path to be used in the .egg-link file"),
    ]

    boolean_options = easy_install.boolean_options + ['uninstall']

    command_consumes_arguments = False  # override base

    def run(self):
        # Dispatch on the --uninstall flag; warn_deprecated_options runs in
        # both modes.
        if self.uninstall:
            self.multi_version = True
            self.uninstall_link()
        else:
            self.install_for_development()
        self.warn_deprecated_options()

    def initialize_options(self):
        self.uninstall = None
        self.egg_path = None
        easy_install.initialize_options(self)
        self.setup_path = None
        self.always_copy_from = '.'  # always copy eggs installed in curdir

    def finalize_options(self):
        """Resolve the egg-link target, egg path and setup path, validating
        that --egg-path is consistent with the install directory."""
        ei = self.get_finalized_command("egg_info")
        if ei.broken_egg_info:
            template = "Please rename %r to %r before using 'develop'"
            args = ei.egg_info, ei.broken_egg_info
            raise DistutilsError(template % args)
        self.args = [ei.egg_name]

        easy_install.finalize_options(self)
        self.expand_basedirs()
        self.expand_dirs()
        # pick up setup-dir .egg files only: no .egg-info
        self.package_index.scan(glob.glob('*.egg'))

        self.egg_link = os.path.join(self.install_dir, ei.egg_name + '.egg-link')
        self.egg_base = ei.egg_base
        if self.egg_path is None:
            self.egg_path = os.path.abspath(ei.egg_base)

        target = normalize_path(self.egg_base)
        egg_path = normalize_path(os.path.join(self.install_dir, self.egg_path))
        if egg_path != target:
            raise DistutilsOptionError(
                "--egg-path must be a relative path from the install"
                " directory to " + target
            )

        # Make a distribution for the package's source
        self.dist = Distribution(
            target,
            PathMetadata(target, os.path.abspath(ei.egg_info)),
            project_name=ei.egg_name
        )

        # Compute the relative path from the egg link back to the setup
        # script directory (one ".." per path component, plus one).
        p = self.egg_base.replace(os.sep, '/')
        if p != os.curdir:
            p = '../' * (p.count('/') + 1)
        self.setup_path = p
        p = normalize_path(os.path.join(self.install_dir, self.egg_path, p))
        if p != normalize_path(os.curdir):
            raise DistutilsOptionError(
                "Can't get a consistent path to setup script from"
                " installation directory", p, normalize_path(os.curdir))

    def install_for_development(self):
        """Build the project in place (or in a build dir when 2to3 is
        needed) and write an .egg-link pointing at it."""
        if PY3 and getattr(self.distribution, 'use_2to3', False):
            # If we run 2to3 we can not do this inplace:

            # Ensure metadata is up-to-date
            self.reinitialize_command('build_py', inplace=0)
            self.run_command('build_py')
            bpy_cmd = self.get_finalized_command("build_py")
            build_path = normalize_path(bpy_cmd.build_lib)

            # Build extensions
            self.reinitialize_command('egg_info', egg_base=build_path)
            self.run_command('egg_info')

            self.reinitialize_command('build_ext', inplace=0)
            self.run_command('build_ext')

            # Fixup egg-link and easy-install.pth
            ei_cmd = self.get_finalized_command("egg_info")
            self.egg_path = build_path
            self.dist.location = build_path  # XXX
            self.dist._provider = PathMetadata(build_path, ei_cmd.egg_info)
        else:
            # Without 2to3 inplace works fine:
            self.run_command('egg_info')

            # Build extensions in-place
            self.reinitialize_command('build_ext', inplace=1)
            self.run_command('build_ext')

        self.install_site_py()  # ensure that target dir is site-safe

        if setuptools.bootstrap_install_from:
            self.easy_install(setuptools.bootstrap_install_from)
            setuptools.bootstrap_install_from = None

        # create an .egg-link in the installation dir, pointing to our egg
        log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
        if not self.dry_run:
            f = open(self.egg_link, "w")
            f.write(self.egg_path + "\n" + self.setup_path)
            f.close()
        # postprocess the installed distro, fixing up .pth, installing scripts,
        # and handling requirements
        self.process_distribution(None, self.dist, not self.no_deps)

    def uninstall_link(self):
        """Remove the .egg-link file and the .pth entry, refusing when the
        link points somewhere other than this checkout."""
        if os.path.exists(self.egg_link):
            log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
            egg_link_file = open(self.egg_link)
            contents = [line.rstrip() for line in egg_link_file]
            egg_link_file.close()
            if contents not in ([self.egg_path], [self.egg_path, self.setup_path]):
                log.warn("Link points to %s: uninstall aborted", contents)
                return
            if not self.dry_run:
                os.unlink(self.egg_link)
        if not self.dry_run:
            self.update_pth(self.dist)  # remove any .pth link to us
        if self.distribution.scripts:
            # XXX should also check for entry point scripts!
            log.warn("Note: you must uninstall or replace scripts manually!")

    def install_egg_scripts(self, dist):
        """For our own distribution, install wrapper scripts pointing at the
        source tree instead of copying the scripts."""
        if dist is not self.dist:
            # Installing a dependency, so fall back to normal behavior
            return easy_install.install_egg_scripts(self, dist)

        # create wrapper scripts in the script dir, pointing to dist.scripts

        # new-style...
        self.install_wrapper_scripts(dist)

        # ...and old-style
        for script_name in self.distribution.scripts or []:
            script_path = os.path.abspath(convert_path(script_name))
            script_name = os.path.basename(script_path)
            f = open(script_path, 'rU')
            script_text = f.read()
            f.close()
            self.install_script(dist, script_name, script_text, script_path)
emorozov/django-select2
refs/heads/master
testapp/testapp/testmain/views.py
10
"""Test/demo views for the django-select2 test application.

Each view exercises one widget/field combination from ``.forms`` and follows
the same pattern: render an unbound form on GET, validate-and-save on POST,
then redirect to 'home'.
"""
import json

from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404

from .forms import EmployeeForm, DeptForm, MixedForm, InitialValueForm, QuestionForm, QuestionNonAutoForm, WordsForm, SchoolForm, \
    GetSearchTestForm, AnotherWordForm
from .models import Employee, Dept, Question, WordList, School, Tag


def test_single_value_model_field(request):
    # Index page listing employees; each row links to the edit view below.
    return render(request, 'list.html', {
        'title': 'Employees',
        'href': 'test_single_value_model_field1',
        'object_list': Employee.objects.all(),
        'create_new_href': ''
    })


def test_single_value_model_field1(request, id):
    # Edit a single Employee (single-valued select field).
    emp = get_object_or_404(Employee, pk=id)
    if request.POST:
        form = EmployeeForm(data=request.POST, instance=emp)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('home'))
    else:
        form = EmployeeForm(instance=emp)
    return render(request, 'form.html', {'form': form})


def test_multi_values_model_field(request):
    # Index page listing departments.
    return render(request, 'list.html', {
        'title': 'Departments',
        'href': 'test_multi_values_model_field1',
        'object_list': Dept.objects.all(),
        'create_new_href': ''
    })


def test_multi_values_model_field1(request, id):
    # Edit a Dept (multi-valued select field).
    dept = get_object_or_404(Dept, pk=id)
    if request.POST:
        form = DeptForm(data=request.POST, instance=dept)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('home'))
    else:
        form = DeptForm(instance=dept)
    return render(request, 'form.html', {'form': form})


def test_mixed_form(request):
    # Validation is triggered only for its side effects (error rendering);
    # the form is never saved.
    if request.POST:
        form = MixedForm(request.POST)
        form.is_valid()
    else:
        form = MixedForm()
    return render(request, 'form.html', {'form': form})


def test_init_values(request):
    # Renders a form whose fields carry initial values.
    return render(request, 'form.html', {'form': InitialValueForm()})


def test_list_questions(request):
    # Index page for the tagging tests (auto and non-auto variants).
    return render(request, 'list.html', {
        'title': 'Questions',
        'href': 'test_tagging',
        'href_non_auto': 'test_tagging_non_auto',
        'object_list': Question.objects.all(),
        'create_new_href': 'test_tagging_new'
    })


def test_tagging_new(request):
    # "Create" is just the edit view with no primary key.
    return test_tagging(request, None)


def test_tagging(request, id):
    # Create (id is None) or edit a Question with the auto tag field.
    if id is None:
        question = Question()
    else:
        question = get_object_or_404(Question, pk=id)
    if request.POST:
        form = QuestionForm(data=request.POST, instance=question)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('home'))
    else:
        form = QuestionForm(instance=question)
    return render(request, 'form.html', {'form': form})


def test_tagging_non_auto(request, id):
    # Same as test_tagging but with the non-auto (AJAX endpoint) tag field.
    if id is None:
        question = Question()
    else:
        question = get_object_or_404(Question, pk=id)
    if request.POST:
        form = QuestionNonAutoForm(data=request.POST, instance=question)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('home'))
    else:
        form = QuestionNonAutoForm(instance=question)
    return render(request, 'form.html', {'form': form})


def test_tagging_tags(request):
    # JSON endpoint backing the non-auto tag field: all tags as
    # {id, text} pairs in the select2 response envelope.
    tags = Tag.objects.all()
    results = [{'id': t.id, 'text': t.tag} for t in tags]
    return HttpResponse(json.dumps({'err': 'nil', 'results': results}),
                        content_type='application/json')


def test_auto_multivalue_field(request):
    # Edits a singleton School row (created on first visit).
    try:
        s = School.objects.get(id=1)
    except School.DoesNotExist:
        s = School(id=1)
    if request.POST:
        form = SchoolForm(data=request.POST, instance=s)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('home'))
    else:
        form = SchoolForm(instance=s)
    return render(request, 'form.html', {'form': form})


def test_auto_heavy_perf(request):
    # Heavy-field performance test against a singleton WordList row.
    try:
        word = WordList.objects.get(kind='Word_Of_Day')
    except WordList.DoesNotExist:
        word = WordList(kind='Word_Of_Day')
    if request.POST:
        form = WordsForm(data=request.POST, instance=word)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('home'))
    else:
        form = WordsForm(instance=word)
    return render(request, 'form.html', {'form': form})


def test_get_search_form(request):
    """ Test a search form using GET. Issue#66 """
    if request.GET:
        form = GetSearchTestForm(request.GET)
        # FIX: previously ``results`` was only assigned inside is_valid(),
        # so an invalid bound form raised NameError at render time. Default
        # to an empty queryset before validation.
        results = Employee.objects.none()
        if form.is_valid():
            results = Employee.objects.all()
            if form.cleaned_data['name'] != []:
                results = results.filter(name__in = form.cleaned_data['name'])
            if form.cleaned_data['dept'] != []:
                results = results.filter(dept__in = form.cleaned_data['dept'])
    else:
        form = GetSearchTestForm()
        results = Employee.objects.none()
    return render(request, 'formget.html', {'form': form, 'results' : results})


def test_issue_73(request):
    # Regression view for issue 73. NOTE(review): the POST branch binds the
    # form without ``instance=word`` -- presumably deliberate for the
    # regression being tested; confirm before "fixing".
    try:
        word = WordList.objects.get(kind='Word_Of_Day')
    except WordList.DoesNotExist:
        word = WordList(kind='Word_Of_Day')
    if request.POST:
        form = AnotherWordForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('home'))
    else:
        form = AnotherWordForm(instance=word)
    return render(request, 'form.html', {'form': form})
maxsocl/django
refs/heads/master
tests/template_tests/syntax_tests/test_builtins.py
521
from django.test import SimpleTestCase

from ..utils import setup


class BuiltinsTests(SimpleTestCase):
    """The template-language builtins True/False/None render as their
    string representations when used as plain variables."""

    @setup({'builtins01': '{{ True }}'})
    def test_builtins01(self):
        """``{{ True }}`` renders as the string 'True'."""
        rendered = self.engine.render_to_string('builtins01')
        self.assertEqual(rendered, 'True')

    @setup({'builtins02': '{{ False }}'})
    def test_builtins02(self):
        """``{{ False }}`` renders as the string 'False'."""
        rendered = self.engine.render_to_string('builtins02')
        self.assertEqual(rendered, 'False')

    @setup({'builtins03': '{{ None }}'})
    def test_builtins03(self):
        """``{{ None }}`` renders as the string 'None'."""
        rendered = self.engine.render_to_string('builtins03')
        self.assertEqual(rendered, 'None')
mlperf/training_results_v0.7
refs/heads/master
Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/python/tvm/relay/_make.py
2
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ The constructors for all Relay AST nodes exposed from C++. This module includes MyPy type signatures for all of the exposed modules. """ from .._ffi.function import _init_api _init_api("relay._make", __name__)
MDI2017/hide_and_seek
refs/heads/master
jugadores/jugador.py
1
from pygame import *

from gui.ficha import Ficha


class Jugador:
    """Player with the display name and avatar we assign to it.

    (Docstring translated from Spanish: "Clase que crea un jugador con
    nombre y avatar que le designemos".)
    """

    def __init__(self, nombre, avatar):
        # nombre: the player's display name.
        self.nombre = nombre
        # avatar: the image/identifier shown for this player in the UI.
        self.avatar = avatar
        # The player's board piece (a gui.ficha.Ficha). Starts unassigned;
        # presumably set later by the game -- confirm against callers.
        self.ficha = None
geekboxzone/lollipop_external_chromium_org
refs/heads/geekbox
build/android/pylib/device/device_blacklist.py
79
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import json
import os
import threading

from pylib import constants

_BLACKLIST_JSON = os.path.join(
    constants.DIR_SOURCE_ROOT,
    os.environ.get('CHROMIUM_OUT_DIR', 'out'),
    'bad_devices.json')

# Note that this only protects against concurrent accesses to the blacklist
# within a process.
_blacklist_lock = threading.RLock()


def ReadBlacklist():
  """Reads the blacklist from the _BLACKLIST_JSON file.

  Returns:
    A list containing bad devices.
  """
  with _blacklist_lock:
    if os.path.exists(_BLACKLIST_JSON):
      with open(_BLACKLIST_JSON, 'r') as blacklist_file:
        return json.load(blacklist_file)
    # No blacklist file yet: nothing is blacklisted.
    return []


def WriteBlacklist(blacklist):
  """Writes the provided blacklist to the _BLACKLIST_JSON file.

  Args:
    blacklist: list of bad devices to write to the _BLACKLIST_JSON file.
  """
  with _blacklist_lock:
    unique_devices = set(blacklist)
    with open(_BLACKLIST_JSON, 'w') as blacklist_file:
      json.dump(list(unique_devices), blacklist_file)


def ExtendBlacklist(devices):
  """Adds devices to _BLACKLIST_JSON file.

  Args:
    devices: list of bad devices to be added to the _BLACKLIST_JSON file.
  """
  with _blacklist_lock:
    # Read-modify-write under the lock; WriteBlacklist deduplicates.
    WriteBlacklist(ReadBlacklist() + list(devices))


def ResetBlacklist():
  """Erases the _BLACKLIST_JSON file if it exists."""
  with _blacklist_lock:
    if os.path.exists(_BLACKLIST_JSON):
      os.remove(_BLACKLIST_JSON)
GrandmasterK/XScheduler
refs/heads/master
venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py
527
# Copyright 2014 Donald Stufft # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function import abc import functools import itertools import re from ._compat import string_types, with_metaclass from .version import Version, LegacyVersion, parse class InvalidSpecifier(ValueError): """ An invalid specifier was found, users should refer to PEP 440. """ class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): @abc.abstractmethod def __str__(self): """ Returns the str representation of this Specifier like object. This should be representative of the Specifier itself. """ @abc.abstractmethod def __hash__(self): """ Returns a hash value for this Specifier like object. """ @abc.abstractmethod def __eq__(self, other): """ Returns a boolean representing whether or not the two Specifier like objects are equal. """ @abc.abstractmethod def __ne__(self, other): """ Returns a boolean representing whether or not the two Specifier like objects are not equal. """ @abc.abstractproperty def prereleases(self): """ Returns whether or not pre-releases as a whole are allowed by this specifier. """ @prereleases.setter def prereleases(self, value): """ Sets whether or not pre-releases as a whole are allowed by this specifier. """ @abc.abstractmethod def contains(self, item, prereleases=None): """ Determines if the given item is contained within this specifier. 
""" @abc.abstractmethod def filter(self, iterable, prereleases=None): """ Takes an iterable of items and filters them so that only items which are contained within this specifier are allowed in it. """ class _IndividualSpecifier(BaseSpecifier): _operators = {} def __init__(self, spec="", prereleases=None): match = self._regex.search(spec) if not match: raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) self._spec = ( match.group("operator").strip(), match.group("version").strip(), ) # Store whether or not this Specifier should accept prereleases self._prereleases = prereleases def __repr__(self): pre = ( ", prereleases={0!r}".format(self.prereleases) if self._prereleases is not None else "" ) return "<{0}({1!r}{2})>".format( self.__class__.__name__, str(self), pre, ) def __str__(self): return "{0}{1}".format(*self._spec) def __hash__(self): return hash(self._spec) def __eq__(self, other): if isinstance(other, string_types): try: other = self.__class__(other) except InvalidSpecifier: return NotImplemented elif not isinstance(other, self.__class__): return NotImplemented return self._spec == other._spec def __ne__(self, other): if isinstance(other, string_types): try: other = self.__class__(other) except InvalidSpecifier: return NotImplemented elif not isinstance(other, self.__class__): return NotImplemented return self._spec != other._spec def _get_operator(self, op): return getattr(self, "_compare_{0}".format(self._operators[op])) def _coerce_version(self, version): if not isinstance(version, (LegacyVersion, Version)): version = parse(version) return version @property def operator(self): return self._spec[0] @property def version(self): return self._spec[1] @property def prereleases(self): return self._prereleases @prereleases.setter def prereleases(self, value): self._prereleases = value def __contains__(self, item): return self.contains(item) def contains(self, item, prereleases=None): # Determine if prereleases are to be allowed or not. 
if prereleases is None: prereleases = self.prereleases # Normalize item to a Version or LegacyVersion, this allows us to have # a shortcut for ``"2.0" in Specifier(">=2") item = self._coerce_version(item) # Determine if we should be supporting prereleases in this specifier # or not, if we do not support prereleases than we can short circuit # logic if this version is a prereleases. if item.is_prerelease and not prereleases: return False # Actually do the comparison to determine if this item is contained # within this Specifier or not. return self._get_operator(self.operator)(item, self.version) def filter(self, iterable, prereleases=None): yielded = False found_prereleases = [] kw = {"prereleases": prereleases if prereleases is not None else True} # Attempt to iterate over all the values in the iterable and if any of # them match, yield them. for version in iterable: parsed_version = self._coerce_version(version) if self.contains(parsed_version, **kw): # If our version is a prerelease, and we were not set to allow # prereleases, then we'll store it for later incase nothing # else matches this specifier. if (parsed_version.is_prerelease and not (prereleases or self.prereleases)): found_prereleases.append(version) # Either this is not a prerelease, or we should have been # accepting prereleases from the begining. else: yielded = True yield version # Now that we've iterated over everything, determine if we've yielded # any values, and if we have not and we have any prereleases stored up # then we will go ahead and yield the prereleases. if not yielded and found_prereleases: for version in found_prereleases: yield version class LegacySpecifier(_IndividualSpecifier): _regex = re.compile( r""" ^ \s* (?P<operator>(==|!=|<=|>=|<|>)) \s* (?P<version> [^\s]* # We just match everything, except for whitespace since this # is a "legacy" specifier and the version string can be just # about anything. 
) \s* $ """, re.VERBOSE | re.IGNORECASE, ) _operators = { "==": "equal", "!=": "not_equal", "<=": "less_than_equal", ">=": "greater_than_equal", "<": "less_than", ">": "greater_than", } def _coerce_version(self, version): if not isinstance(version, LegacyVersion): version = LegacyVersion(str(version)) return version def _compare_equal(self, prospective, spec): return prospective == self._coerce_version(spec) def _compare_not_equal(self, prospective, spec): return prospective != self._coerce_version(spec) def _compare_less_than_equal(self, prospective, spec): return prospective <= self._coerce_version(spec) def _compare_greater_than_equal(self, prospective, spec): return prospective >= self._coerce_version(spec) def _compare_less_than(self, prospective, spec): return prospective < self._coerce_version(spec) def _compare_greater_than(self, prospective, spec): return prospective > self._coerce_version(spec) def _require_version_compare(fn): @functools.wraps(fn) def wrapped(self, prospective, spec): if not isinstance(prospective, Version): return False return fn(self, prospective, spec) return wrapped class Specifier(_IndividualSpecifier): _regex = re.compile( r""" ^ \s* (?P<operator>(~=|==|!=|<=|>=|<|>|===)) (?P<version> (?: # The identity operators allow for an escape hatch that will # do an exact string match of the version you wish to install. # This will not be parsed by PEP 440 and we cannot determine # any semantic meaning from it. This operator is discouraged # but included entirely as an escape hatch. (?<====) # Only match for the identity operator \s* [^\s]* # We just match everything, except for whitespace # since we are only testing for strict identity. ) | (?: # The (non)equality operators allow for wild card and local # versions to be specified so we have to define these two # operators separately to enable that. (?<===|!=) # Only match for equals and not equals \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? 
(a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? # You cannot use a wild card and a dev or local version # together so group them with a | and make them optional. (?: (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local | \.\* # Wild card syntax of .* )? ) | (?: # The compatible operator requires at least two digits in the # release segment. (?<=~=) # Only match for the compatible operator \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) | (?: # All other operators only allow a sub set of what the # (non)equality operators do. Specifically they do not allow # local versions to be specified nor do they allow the prefix # matching wild cards. (?<!==|!=|~=) # We have special cases for these # operators so we want to make sure they # don't match here. \s* v? (?:[0-9]+!)? # epoch [0-9]+(?:\.[0-9]+)* # release (?: # pre release [-_\.]? (a|b|c|rc|alpha|beta|pre|preview) [-_\.]? [0-9]* )? (?: # post release (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) )? (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) ) \s* $ """, re.VERBOSE | re.IGNORECASE, ) _operators = { "~=": "compatible", "==": "equal", "!=": "not_equal", "<=": "less_than_equal", ">=": "greater_than_equal", "<": "less_than", ">": "greater_than", "===": "arbitrary", } @_require_version_compare def _compare_compatible(self, prospective, spec): # Compatible releases have an equivalent combination of >= and ==. That # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to # implement this in terms of the other specifiers instead of # implementing it ourselves. The only thing we need to do is construct # the other specifiers. 
# We want everything but the last item in the version, but we want to # ignore post and dev releases and we want to treat the pre-release as # it's own separate segment. prefix = ".".join( list( itertools.takewhile( lambda x: (not x.startswith("post") and not x.startswith("dev")), _version_split(spec), ) )[:-1] ) # Add the prefix notation to the end of our string prefix += ".*" return (self._get_operator(">=")(prospective, spec) and self._get_operator("==")(prospective, prefix)) @_require_version_compare def _compare_equal(self, prospective, spec): # We need special logic to handle prefix matching if spec.endswith(".*"): # Split the spec out by dots, and pretend that there is an implicit # dot in between a release segment and a pre-release segment. spec = _version_split(spec[:-2]) # Remove the trailing .* # Split the prospective version out by dots, and pretend that there # is an implicit dot in between a release segment and a pre-release # segment. prospective = _version_split(str(prospective)) # Shorten the prospective version to be the same length as the spec # so that we can determine if the specifier is a prefix of the # prospective version or not. prospective = prospective[:len(spec)] # Pad out our two sides with zeros so that they both equal the same # length. spec, prospective = _pad_version(spec, prospective) else: # Convert our spec string into a Version spec = Version(spec) # If the specifier does not have a local segment, then we want to # act as if the prospective version also does not have a local # segment. 
if not spec.local: prospective = Version(prospective.public) return prospective == spec @_require_version_compare def _compare_not_equal(self, prospective, spec): return not self._compare_equal(prospective, spec) @_require_version_compare def _compare_less_than_equal(self, prospective, spec): return prospective <= Version(spec) @_require_version_compare def _compare_greater_than_equal(self, prospective, spec): return prospective >= Version(spec) @_require_version_compare def _compare_less_than(self, prospective, spec): # Convert our spec to a Version instance, since we'll want to work with # it as a version. spec = Version(spec) # Check to see if the prospective version is less than the spec # version. If it's not we can short circuit and just return False now # instead of doing extra unneeded work. if not prospective < spec: return False # This special case is here so that, unless the specifier itself # includes is a pre-release version, that we do not accept pre-release # versions for the version mentioned in the specifier (e.g. <3.1 should # not match 3.1.dev0, but should match 3.0.dev0). if not spec.is_prerelease and prospective.is_prerelease: if Version(prospective.base_version) == Version(spec.base_version): return False # If we've gotten to here, it means that prospective version is both # less than the spec version *and* it's not a pre-release of the same # version in the spec. return True @_require_version_compare def _compare_greater_than(self, prospective, spec): # Convert our spec to a Version instance, since we'll want to work with # it as a version. spec = Version(spec) # Check to see if the prospective version is greater than the spec # version. If it's not we can short circuit and just return False now # instead of doing extra unneeded work. 
if not prospective > spec: return False # This special case is here so that, unless the specifier itself # includes is a post-release version, that we do not accept # post-release versions for the version mentioned in the specifier # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). if not spec.is_postrelease and prospective.is_postrelease: if Version(prospective.base_version) == Version(spec.base_version): return False # Ensure that we do not allow a local version of the version mentioned # in the specifier, which is techincally greater than, to match. if prospective.local is not None: if Version(prospective.base_version) == Version(spec.base_version): return False # If we've gotten to here, it means that prospective version is both # greater than the spec version *and* it's not a pre-release of the # same version in the spec. return True def _compare_arbitrary(self, prospective, spec): return str(prospective).lower() == str(spec).lower() @property def prereleases(self): # If there is an explicit prereleases set for this, then we'll just # blindly use that. if self._prereleases is not None: return self._prereleases # Look at all of our specifiers and determine if they are inclusive # operators, and if they are if they are including an explicit # prerelease. operator, version = self._spec if operator in ["==", ">=", "<=", "~=", "==="]: # The == specifier can include a trailing .*, if it does we # want to remove before parsing. if operator == "==" and version.endswith(".*"): version = version[:-2] # Parse the version, and if it is a pre-release than this # specifier allows pre-releases. 
if parse(version).is_prerelease: return True return False @prereleases.setter def prereleases(self, value): self._prereleases = value _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") def _version_split(version): result = [] for item in version.split("."): match = _prefix_regex.search(item) if match: result.extend(match.groups()) else: result.append(item) return result def _pad_version(left, right): left_split, right_split = [], [] # Get the release segment of our versions left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) # Get the rest of our versions left_split.append(left[len(left_split):]) right_split.append(left[len(right_split):]) # Insert our padding left_split.insert( 1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])), ) right_split.insert( 1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])), ) return ( list(itertools.chain(*left_split)), list(itertools.chain(*right_split)), ) class SpecifierSet(BaseSpecifier): def __init__(self, specifiers="", prereleases=None): # Split on , to break each indidivual specifier into it's own item, and # strip each item to remove leading/trailing whitespace. specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] # Parsed each individual specifier, attempting first to make it a # Specifier and falling back to a LegacySpecifier. parsed = set() for specifier in specifiers: try: parsed.add(Specifier(specifier)) except InvalidSpecifier: parsed.add(LegacySpecifier(specifier)) # Turn our parsed specifiers into a frozen set and save them for later. self._specs = frozenset(parsed) # Store our prereleases value so we can use it later to determine if # we accept prereleases or not. 
self._prereleases = prereleases def __repr__(self): pre = ( ", prereleases={0!r}".format(self.prereleases) if self._prereleases is not None else "" ) return "<SpecifierSet({0!r}{1})>".format(str(self), pre) def __str__(self): return ",".join(sorted(str(s) for s in self._specs)) def __hash__(self): return hash(self._specs) def __and__(self, other): if isinstance(other, string_types): other = SpecifierSet(other) elif not isinstance(other, SpecifierSet): return NotImplemented specifier = SpecifierSet() specifier._specs = frozenset(self._specs | other._specs) if self._prereleases is None and other._prereleases is not None: specifier._prereleases = other._prereleases elif self._prereleases is not None and other._prereleases is None: specifier._prereleases = self._prereleases elif self._prereleases == other._prereleases: specifier._prereleases = self._prereleases else: raise ValueError( "Cannot combine SpecifierSets with True and False prerelease " "overrides." ) return specifier def __eq__(self, other): if isinstance(other, string_types): other = SpecifierSet(other) elif isinstance(other, _IndividualSpecifier): other = SpecifierSet(str(other)) elif not isinstance(other, SpecifierSet): return NotImplemented return self._specs == other._specs def __ne__(self, other): if isinstance(other, string_types): other = SpecifierSet(other) elif isinstance(other, _IndividualSpecifier): other = SpecifierSet(str(other)) elif not isinstance(other, SpecifierSet): return NotImplemented return self._specs != other._specs def __len__(self): return len(self._specs) def __iter__(self): return iter(self._specs) @property def prereleases(self): # If we have been given an explicit prerelease modifier, then we'll # pass that through here. if self._prereleases is not None: return self._prereleases # If we don't have any specifiers, and we don't have a forced value, # then we'll just return None since we don't know if this should have # pre-releases or not. 
if not self._specs: return None # Otherwise we'll see if any of the given specifiers accept # prereleases, if any of them do we'll return True, otherwise False. return any(s.prereleases for s in self._specs) @prereleases.setter def prereleases(self, value): self._prereleases = value def __contains__(self, item): return self.contains(item) def contains(self, item, prereleases=None): # Ensure that our item is a Version or LegacyVersion instance. if not isinstance(item, (LegacyVersion, Version)): item = parse(item) # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. if prereleases is None: prereleases = self.prereleases # We can determine if we're going to allow pre-releases by looking to # see if any of the underlying items supports them. If none of them do # and this item is a pre-release then we do not allow it and we can # short circuit that here. # Note: This means that 1.0.dev1 would not be contained in something # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 if not prereleases and item.is_prerelease: return False # We simply dispatch to the underlying specs here to make sure that the # given version is contained within all of them. # Note: This use of all() here means that an empty set of specifiers # will always return True, this is an explicit design decision. return all( s.contains(item, prereleases=prereleases) for s in self._specs ) def filter(self, iterable, prereleases=None): # Determine if we're forcing a prerelease or not, if we're not forcing # one for this particular filter call, then we'll use whatever the # SpecifierSet thinks for whether or not we should support prereleases. 
if prereleases is None: prereleases = self.prereleases # If we have any specifiers, then we want to wrap our iterable in the # filter method for each one, this will act as a logical AND amongst # each specifier. if self._specs: for spec in self._specs: iterable = spec.filter(iterable, prereleases=bool(prereleases)) return iterable # If we do not have any specifiers, then we need to have a rough filter # which will filter out any pre-releases, unless there are no final # releases, and which will filter out LegacyVersion in general. else: filtered = [] found_prereleases = [] for item in iterable: # Ensure that we some kind of Version class for this item. if not isinstance(item, (LegacyVersion, Version)): parsed_version = parse(item) else: parsed_version = item # Filter out any item which is parsed as a LegacyVersion if isinstance(parsed_version, LegacyVersion): continue # Store any item which is a pre-release for later unless we've # already found a final version or we are accepting prereleases if parsed_version.is_prerelease and not prereleases: if not filtered: found_prereleases.append(item) else: filtered.append(item) # If we've found no items except for pre-releases, then we'll go # ahead and use the pre-releases if not filtered and found_prereleases and prereleases is None: return found_prereleases return filtered
dongguangming/python-twitter
refs/heads/master
tests/test_trend.py
16
import twitter import unittest import json class TrendTest(unittest.TestCase): SAMPLE_JSON = '''{"name": "Kesuke Miyagi", "query": "Kesuke Miyagi"}''' def _GetSampleTrend(self): return twitter.Trend(name='Kesuke Miyagi', query='Kesuke Miyagi', timestamp='Fri Jan 26 23:17:14 +0000 2007') def testInit(self): '''Test the twitter.Trend constructor''' trend = twitter.Trend(name='Kesuke Miyagi', query='Kesuke Miyagi', timestamp='Fri Jan 26 23:17:14 +0000 2007') def testProperties(self): '''Test all of the twitter.Trend properties''' trend = twitter.Trend() trend.name = 'Kesuke Miyagi' self.assertEqual('Kesuke Miyagi', trend.name) trend.query = 'Kesuke Miyagi' self.assertEqual('Kesuke Miyagi', trend.query) trend.timestamp = 'Fri Jan 26 23:17:14 +0000 2007' self.assertEqual('Fri Jan 26 23:17:14 +0000 2007', trend.timestamp) def testNewFromJsonDict(self): '''Test the twitter.Trend NewFromJsonDict method''' data = json.loads(TrendTest.SAMPLE_JSON) trend = twitter.Trend.NewFromJsonDict(data, timestamp='Fri Jan 26 23:17:14 +0000 2007') self.assertEqual(self._GetSampleTrend(), trend) def testEq(self): '''Test the twitter.Trend __eq__ method''' trend = twitter.Trend() trend.name = 'Kesuke Miyagi' trend.query = 'Kesuke Miyagi' trend.timestamp = 'Fri Jan 26 23:17:14 +0000 2007' self.assertEqual(trend, self._GetSampleTrend())
joshowen/django-allauth
refs/heads/master
allauth/socialaccount/providers/dropbox_oauth2/models.py
4209
# Create your models here.
B3AU/waveTree
refs/heads/waveTree
sklearn/manifold/isomap.py
12
"""Isomap for manifold learning""" # Author: Jake Vanderplas -- <vanderplas@astro.washington.edu> # License: BSD 3 clause (C) 2011 import numpy as np from ..base import BaseEstimator, TransformerMixin from ..neighbors import NearestNeighbors, kneighbors_graph from ..utils import check_arrays from ..utils.graph import graph_shortest_path from ..decomposition import KernelPCA from ..preprocessing import KernelCenterer class Isomap(BaseEstimator, TransformerMixin): """Isomap Embedding Non-linear dimensionality reduction through Isometric Mapping Parameters ---------- n_neighbors : integer number of neighbors to consider for each point. n_components : integer number of coordinates for the manifold eigen_solver : ['auto'|'arpack'|'dense'] 'auto' : Attempt to choose the most efficient solver for the given problem. 'arpack' : Use Arnoldi decomposition to find the eigenvalues and eigenvectors. 'dense' : Use a direct solver (i.e. LAPACK) for the eigenvalue decomposition. tol : float Convergence tolerance passed to arpack or lobpcg. not used if eigen_solver == 'dense'. max_iter : integer Maximum number of iterations for the arpack solver. not used if eigen_solver == 'dense'. path_method : string ['auto'|'FW'|'D'] Method to use in finding shortest path. 'auto' : attempt to choose the best algorithm automatically 'FW' : Floyd-Warshall algorithm 'D' : Dijkstra algorithm with Fibonacci Heaps neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree'] Algorithm to use for nearest neighbors search, passed to neighbors.NearestNeighbors instance. Attributes ---------- `embedding_` : array-like, shape (n_samples, n_components) Stores the embedding vectors. `kernel_pca_` : object `KernelPCA` object used to implement the embedding. `training_data_` : array-like, shape (n_samples, n_features) Stores the training data. `nbrs_` : sklearn.neighbors.NearestNeighbors instance Stores nearest neighbors instance, including BallTree or KDtree if applicable. 
`dist_matrix_` : array-like, shape (n_samples, n_samples) Stores the geodesic distance matrix of training data. References ---------- [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric framework for nonlinear dimensionality reduction. Science 290 (5500) """ def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto', tol=0, max_iter=None, path_method='auto', neighbors_algorithm='auto'): self.n_neighbors = n_neighbors self.n_components = n_components self.eigen_solver = eigen_solver self.tol = tol self.max_iter = max_iter self.path_method = path_method self.neighbors_algorithm = neighbors_algorithm self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors, algorithm=neighbors_algorithm) def _fit_transform(self, X): X, = check_arrays(X, sparse_format='dense') self.nbrs_.fit(X) self.training_data_ = self.nbrs_._fit_X self.kernel_pca_ = KernelPCA(n_components=self.n_components, kernel="precomputed", eigen_solver=self.eigen_solver, tol=self.tol, max_iter=self.max_iter) kng = kneighbors_graph(self.nbrs_, self.n_neighbors, mode='distance') self.dist_matrix_ = graph_shortest_path(kng, method=self.path_method, directed=False) G = self.dist_matrix_ ** 2 G *= -0.5 self.embedding_ = self.kernel_pca_.fit_transform(G) def reconstruction_error(self): """Compute the reconstruction error for the embedding. 
Returns ------- reconstruction_error : float Notes ------- The cost function of an isomap embedding is ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples`` Where D is the matrix of distances for the input data X, D_fit is the matrix of distances for the output embedding X_fit, and K is the isomap kernel: ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)`` """ G = -0.5 * self.dist_matrix_ ** 2 G_center = KernelCenterer().fit_transform(G) evals = self.kernel_pca_.lambdas_ return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0] def fit(self, X, y=None): """Compute the embedding vectors for data X Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array, precomputed tree, or NearestNeighbors object. Returns ------- self : returns an instance of self. """ self._fit_transform(X) return self def fit_transform(self, X, y=None): """Fit the model from data in X and transform X. Parameters ---------- X: {array-like, sparse matrix, BallTree, KDTree} Training vector, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new: array-like, shape (n_samples, n_components) """ self._fit_transform(X) return self.embedding_ def transform(self, X): """Transform X. This is implemented by linking the points X into the graph of geodesic distances of the training data. First the `n_neighbors` nearest neighbors of X are found in the training data, and from these the shortest geodesic distances from each point in X to each point in the training data are computed in order to construct the kernel. The embedding of X is the projection of this kernel onto the embedding vectors of the training set. 
Parameters ---------- X: array-like, shape (n_samples, n_features) Returns ------- X_new: array-like, shape (n_samples, n_components) """ distances, indices = self.nbrs_.kneighbors(X, return_distance=True) #Create the graph of shortest distances from X to self.training_data_ # via the nearest neighbors of X. #This can be done as a single array operation, but it potentially # takes a lot of memory. To avoid that, use a loop: G_X = np.zeros((X.shape[0], self.training_data_.shape[0])) for i in range(X.shape[0]): G_X[i] = np.min((self.dist_matrix_[indices[i]] + distances[i][:, None]), 0) G_X **= 2 G_X *= -0.5 return self.kernel_pca_.transform(G_X)
wuga214/Django-Wuga
refs/heads/master
env/lib/python2.7/site-packages/django/apps/registry.py
51
import sys import threading import warnings from collections import Counter, OrderedDict, defaultdict from functools import partial from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured from django.utils import lru_cache from .config import AppConfig class Apps(object): """ A registry that stores the configuration of installed applications. It also keeps track of models eg. to provide reverse-relations. """ def __init__(self, installed_apps=()): # installed_apps is set to None when creating the master registry # because it cannot be populated at that point. Other registries must # provide a list of installed apps and are populated immediately. if installed_apps is None and hasattr(sys.modules[__name__], 'apps'): raise RuntimeError("You must supply an installed_apps argument.") # Mapping of app labels => model names => model classes. Every time a # model is imported, ModelBase.__new__ calls apps.register_model which # creates an entry in all_models. All imported models are registered, # regardless of whether they're defined in an installed application # and whether the registry has been populated. Since it isn't possible # to reimport a module safely (it could reexecute initialization code) # all_models is never overridden or reset. self.all_models = defaultdict(OrderedDict) # Mapping of labels to AppConfig instances for installed apps. self.app_configs = OrderedDict() # Stack of app_configs. Used to store the current state in # set_available_apps and set_installed_apps. self.stored_app_configs = [] # Whether the registry is populated. self.apps_ready = self.models_ready = self.ready = False # Lock for thread-safe population. self._lock = threading.Lock() # Maps ("app_label", "modelname") tuples to lists of functions to be # called when the corresponding model is ready. Used by this class's # `lazy_model_operation()` and `do_pending_operations()` methods. 
self._pending_operations = defaultdict(list) # Populate apps and models, unless it's the master registry. if installed_apps is not None: self.populate(installed_apps) def populate(self, installed_apps=None): """ Loads application configurations and models. This method imports each application module and then each model module. It is thread safe and idempotent, but not reentrant. """ if self.ready: return # populate() might be called by two threads in parallel on servers # that create threads before initializing the WSGI callable. with self._lock: if self.ready: return # app_config should be pristine, otherwise the code below won't # guarantee that the order matches the order in INSTALLED_APPS. if self.app_configs: raise RuntimeError("populate() isn't reentrant") # Phase 1: initialize app configs and import app modules. for entry in installed_apps: if isinstance(entry, AppConfig): app_config = entry else: app_config = AppConfig.create(entry) if app_config.label in self.app_configs: raise ImproperlyConfigured( "Application labels aren't unique, " "duplicates: %s" % app_config.label) self.app_configs[app_config.label] = app_config app_config.apps = self # Check for duplicate app names. counts = Counter( app_config.name for app_config in self.app_configs.values()) duplicates = [ name for name, count in counts.most_common() if count > 1] if duplicates: raise ImproperlyConfigured( "Application names aren't unique, " "duplicates: %s" % ", ".join(duplicates)) self.apps_ready = True # Phase 2: import models modules. for app_config in self.app_configs.values(): app_config.import_models() self.clear_cache() self.models_ready = True # Phase 3: run ready() methods of app configs. for app_config in self.get_app_configs(): app_config.ready() self.ready = True def check_apps_ready(self): """ Raises an exception if all apps haven't been imported yet. 
""" if not self.apps_ready: raise AppRegistryNotReady("Apps aren't loaded yet.") def check_models_ready(self): """ Raises an exception if all models haven't been imported yet. """ if not self.models_ready: raise AppRegistryNotReady("Models aren't loaded yet.") def get_app_configs(self): """ Imports applications and returns an iterable of app configs. """ self.check_apps_ready() return self.app_configs.values() def get_app_config(self, app_label): """ Imports applications and returns an app config for the given label. Raises LookupError if no application exists with this label. """ self.check_apps_ready() try: return self.app_configs[app_label] except KeyError: message = "No installed app with label '%s'." % app_label for app_config in self.get_app_configs(): if app_config.name == app_label: message += " Did you mean '%s'?" % app_config.label break raise LookupError(message) # This method is performance-critical at least for Django's test suite. @lru_cache.lru_cache(maxsize=None) def get_models(self, include_auto_created=False, include_swapped=False): """ Returns a list of all installed models. By default, the following models aren't included: - auto-created models for many-to-many relations without an explicit intermediate table, - models that have been swapped out. Set the corresponding keyword argument to True to include such models. """ self.check_models_ready() result = [] for app_config in self.app_configs.values(): result.extend(list(app_config.get_models(include_auto_created, include_swapped))) return result def get_model(self, app_label, model_name=None, require_ready=True): """ Returns the model matching the given app_label and model_name. As a shortcut, this function also accepts a single argument in the form <app_label>.<model_name>. model_name is case-insensitive. Raises LookupError if no application exists with this label, or no model exists with this name in the application. 
Raises ValueError if called with a single argument that doesn't contain exactly one dot. """ if require_ready: self.check_models_ready() else: self.check_apps_ready() if model_name is None: app_label, model_name = app_label.split('.') app_config = self.get_app_config(app_label) if not require_ready and app_config.models is None: app_config.import_models() return app_config.get_model(model_name, require_ready=require_ready) def register_model(self, app_label, model): # Since this method is called when models are imported, it cannot # perform imports because of the risk of import loops. It mustn't # call get_app_config(). model_name = model._meta.model_name app_models = self.all_models[app_label] if model_name in app_models: if (model.__name__ == app_models[model_name].__name__ and model.__module__ == app_models[model_name].__module__): warnings.warn( "Model '%s.%s' was already registered. " "Reloading models is not advised as it can lead to inconsistencies, " "most notably with related models." % (app_label, model_name), RuntimeWarning, stacklevel=2) else: raise RuntimeError( "Conflicting '%s' models in application '%s': %s and %s." % (model_name, app_label, app_models[model_name], model)) app_models[model_name] = model self.do_pending_operations(model) self.clear_cache() def is_installed(self, app_name): """ Checks whether an application with this name exists in the registry. app_name is the full name of the app eg. 'django.contrib.admin'. """ self.check_apps_ready() return any(ac.name == app_name for ac in self.app_configs.values()) def get_containing_app_config(self, object_name): """ Look for an app config containing a given object. object_name is the dotted Python path to the object. Returns the app config for the inner application in case of nesting. Returns None if the object isn't in any registered app config. 
""" self.check_apps_ready() candidates = [] for app_config in self.app_configs.values(): if object_name.startswith(app_config.name): subpath = object_name[len(app_config.name):] if subpath == '' or subpath[0] == '.': candidates.append(app_config) if candidates: return sorted(candidates, key=lambda ac: -len(ac.name))[0] def get_registered_model(self, app_label, model_name): """ Similar to get_model(), but doesn't require that an app exists with the given app_label. It's safe to call this method at import time, even while the registry is being populated. """ model = self.all_models[app_label].get(model_name.lower()) if model is None: raise LookupError( "Model '%s.%s' not registered." % (app_label, model_name)) return model @lru_cache.lru_cache(maxsize=None) def get_swappable_settings_name(self, to_string): """ For a given model string (e.g. "auth.User"), return the name of the corresponding settings name if it refers to a swappable model. If the referred model is not swappable, return None. This method is decorated with lru_cache because it's performance critical when it comes to migrations. Since the swappable settings don't change after Django has loaded the settings, there is no reason to get the respective settings attribute over and over again. """ for model in self.get_models(include_swapped=True): swapped = model._meta.swapped # Is this model swapped out for the model given by to_string? if swapped and swapped == to_string: return model._meta.swappable # Is this model swappable and the one given by to_string? if model._meta.swappable and model._meta.label == to_string: return model._meta.swappable return None def set_available_apps(self, available): """ Restricts the set of installed apps used by get_app_config[s]. available must be an iterable of application names. set_available_apps() must be balanced with unset_available_apps(). Primarily used for performance optimization in TransactionTestCase. 
This method is safe is the sense that it doesn't trigger any imports. """ available = set(available) installed = set(app_config.name for app_config in self.get_app_configs()) if not available.issubset(installed): raise ValueError( "Available apps isn't a subset of installed apps, extra apps: %s" % ", ".join(available - installed) ) self.stored_app_configs.append(self.app_configs) self.app_configs = OrderedDict( (label, app_config) for label, app_config in self.app_configs.items() if app_config.name in available) self.clear_cache() def unset_available_apps(self): """ Cancels a previous call to set_available_apps(). """ self.app_configs = self.stored_app_configs.pop() self.clear_cache() def set_installed_apps(self, installed): """ Enables a different set of installed apps for get_app_config[s]. installed must be an iterable in the same format as INSTALLED_APPS. set_installed_apps() must be balanced with unset_installed_apps(), even if it exits with an exception. Primarily used as a receiver of the setting_changed signal in tests. This method may trigger new imports, which may add new models to the registry of all imported models. They will stay in the registry even after unset_installed_apps(). Since it isn't possible to replay imports safely (eg. that could lead to registering listeners twice), models are registered when they're imported and never removed. """ if not self.ready: raise AppRegistryNotReady("App registry isn't ready yet.") self.stored_app_configs.append(self.app_configs) self.app_configs = OrderedDict() self.apps_ready = self.models_ready = self.ready = False self.clear_cache() self.populate(installed) def unset_installed_apps(self): """ Cancels a previous call to set_installed_apps(). """ self.app_configs = self.stored_app_configs.pop() self.apps_ready = self.models_ready = self.ready = True self.clear_cache() def clear_cache(self): """ Clears all internal caches, for methods that alter the app registry. This is mostly used in tests. 
""" # Call expire cache on each model. This will purge # the relation tree and the fields cache. self.get_models.cache_clear() if self.ready: # Circumvent self.get_models() to prevent that the cache is refilled. # This particularly prevents that an empty value is cached while cloning. for app_config in self.app_configs.values(): for model in app_config.get_models(include_auto_created=True): model._meta._expire_cache() def lazy_model_operation(self, function, *model_keys): """ Take a function and a number of ("app_label", "modelname") tuples, and when all the corresponding models have been imported and registered, call the function with the model classes as its arguments. The function passed to this method must accept exactly n models as arguments, where n=len(model_keys). """ # Base case: no arguments, just execute the function. if not model_keys: function() # Recursive case: take the head of model_keys, wait for the # corresponding model class to be imported and registered, then apply # that argument to the supplied function. Pass the resulting partial # to lazy_model_operation() along with the remaining model args and # repeat until all models are loaded and all arguments are applied. else: next_model, more_models = model_keys[0], model_keys[1:] # This will be executed after the class corresponding to next_model # has been imported and registered. The `func` attribute provides # duck-type compatibility with partials. def apply_next_model(model): next_function = partial(apply_next_model.func, model) self.lazy_model_operation(next_function, *more_models) apply_next_model.func = function # If the model has already been imported and registered, partially # apply it to the function now. If not, add it to the list of # pending operations for the model, where it will be executed with # the model class as its sole argument once the model is ready. 
try: model_class = self.get_registered_model(*next_model) except LookupError: self._pending_operations[next_model].append(apply_next_model) else: apply_next_model(model_class) def do_pending_operations(self, model): """ Take a newly-prepared model and pass it to each function waiting for it. This is called at the very end of `Apps.register_model()`. """ key = model._meta.app_label, model._meta.model_name for function in self._pending_operations.pop(key, []): function(model) apps = Apps(installed_apps=None)
christophlsa/odoo
refs/heads/8.0
addons/l10n_eu_service/__init__.py
303
import wizard import models