repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
neumerance/cloudloon2
refs/heads/master
.venv/lib/python2.7/site-packages/django/core/management/commands/startapp.py
205
from django.core.management.base import CommandError from django.core.management.templates import TemplateCommand from django.utils.importlib import import_module class Command(TemplateCommand): help = ("Creates a Django app directory structure for the given app " "name in the current directory or optionally in the given " "directory.") def handle(self, app_name=None, target=None, **options): if app_name is None: raise CommandError("you must provide an app name") # Check that the app_name cannot be imported. try: import_module(app_name) except ImportError: pass else: raise CommandError("%r conflicts with the name of an existing " "Python module and cannot be used as an app " "name. Please try another name." % app_name) super(Command, self).handle('app', app_name, target, **options)
selwin/Django-facebook
refs/heads/master
docs/docs_env/Lib/site-packages/pip-1.0-py2.5.egg/pip/index.py
25
"""Routines related to PyPI, indexes""" import sys import os import re import mimetypes import threading import posixpath import pkg_resources import random import socket import string from pip.log import logger from pip.util import Inf from pip.util import normalize_name, splitext from pip.exceptions import DistributionNotFound from pip.backwardcompat import (WindowsError, Queue, httplib, urlparse, URLError, HTTPError, u, product, url2pathname) from pip.backwardcompat import Empty as QueueEmpty from pip.download import urlopen, path_to_url2, url_to_path, geturl, Urllib2HeadRequest __all__ = ['PackageFinder'] DEFAULT_MIRROR_URL = "last.pypi.python.org" class PackageFinder(object): """This finds packages. This is meant to match easy_install's technique for looking for packages, by reading pages and looking for appropriate links """ def __init__(self, find_links, index_urls, use_mirrors=False, mirrors=None, main_mirror_url=None): self.find_links = find_links self.index_urls = index_urls self.dependency_links = [] self.cache = PageCache() # These are boring links that have already been logged somehow: self.logged_links = set() if use_mirrors: self.mirror_urls = self._get_mirror_urls(mirrors, main_mirror_url) logger.info('Using PyPI mirrors: %s' % ', '.join(self.mirror_urls)) else: self.mirror_urls = [] def add_dependency_links(self, links): ## FIXME: this shouldn't be global list this, it should only ## apply to requirements of the package that specifies the ## dependency_links value ## FIXME: also, we should track comes_from (i.e., use Link) self.dependency_links.extend(links) @staticmethod def _sort_locations(locations): """ Sort locations into "files" (archives) and "urls", and return a pair of lists (files,urls) """ files = [] urls = [] # puts the url for the given file path into the appropriate # list def sort_path(path): url = path_to_url2(path) if mimetypes.guess_type(url, strict=False)[0] == 'text/html': urls.append(url) else: files.append(url) for url in 
locations: if url.startswith('file:'): path = url_to_path(url) if os.path.isdir(path): path = os.path.realpath(path) for item in os.listdir(path): sort_path(os.path.join(path, item)) elif os.path.isfile(path): sort_path(path) else: urls.append(url) return files, urls def find_requirement(self, req, upgrade): url_name = req.url_name # Only check main index if index URL is given: main_index_url = None if self.index_urls: # Check that we have the url_name correctly spelled: main_index_url = Link(posixpath.join(self.index_urls[0], url_name)) # This will also cache the page, so it's okay that we get it again later: page = self._get_page(main_index_url, req) if page is None: url_name = self._find_url_name(Link(self.index_urls[0]), url_name, req) or req.url_name # Combine index URLs with mirror URLs here to allow # adding more index URLs from requirements files all_index_urls = self.index_urls + self.mirror_urls def mkurl_pypi_url(url): loc = posixpath.join(url, url_name) # For maximum compatibility with easy_install, ensure the path # ends in a trailing slash. Although this isn't in the spec # (and PyPI can handle it without the slash) some other index # implementations might break if they relied on easy_install's behavior. 
if not loc.endswith('/'): loc = loc + '/' return loc if url_name is not None: locations = [ mkurl_pypi_url(url) for url in all_index_urls] + self.find_links else: locations = list(self.find_links) locations.extend(self.dependency_links) for version in req.absolute_versions: if url_name is not None and main_index_url is not None: locations = [ posixpath.join(main_index_url.url, version)] + locations file_locations, url_locations = self._sort_locations(locations) locations = [Link(url) for url in url_locations] logger.debug('URLs to search for versions for %s:' % req) for location in locations: logger.debug('* %s' % location) found_versions = [] found_versions.extend( self._package_versions( [Link(url, '-f') for url in self.find_links], req.name.lower())) page_versions = [] for page in self._get_pages(locations, req): logger.debug('Analyzing links from page %s' % page.url) logger.indent += 2 try: page_versions.extend(self._package_versions(page.links, req.name.lower())) finally: logger.indent -= 2 dependency_versions = list(self._package_versions( [Link(url) for url in self.dependency_links], req.name.lower())) if dependency_versions: logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions])) file_versions = list(self._package_versions( [Link(url) for url in file_locations], req.name.lower())) if not found_versions and not page_versions and not dependency_versions and not file_versions: logger.fatal('Could not find any downloads that satisfy the requirement %s' % req) raise DistributionNotFound('No distributions at all found for %s' % req) if req.satisfied_by is not None: found_versions.append((req.satisfied_by.parsed_version, Inf, req.satisfied_by.version)) if file_versions: file_versions.sort(reverse=True) logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions])) found_versions = file_versions + found_versions all_versions = found_versions + 
page_versions + dependency_versions applicable_versions = [] for (parsed_version, link, version) in all_versions: if version not in req.req: logger.info("Ignoring link %s, version %s doesn't match %s" % (link, version, ','.join([''.join(s) for s in req.req.specs]))) continue applicable_versions.append((link, version)) applicable_versions = sorted(applicable_versions, key=lambda v: pkg_resources.parse_version(v[1]), reverse=True) existing_applicable = bool([link for link, version in applicable_versions if link is Inf]) if not upgrade and existing_applicable: if applicable_versions[0][1] is Inf: logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement' % req.satisfied_by.version) else: logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)' % (req.satisfied_by.version, applicable_versions[0][1])) return None if not applicable_versions: logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)' % (req, ', '.join([version for parsed_version, link, version in found_versions]))) raise DistributionNotFound('No distributions matching the version for %s' % req) if applicable_versions[0][0] is Inf: # We have an existing version, and its the best version logger.info('Installed version (%s) is most up-to-date (past versions: %s)' % (req.satisfied_by.version, ', '.join([version for link, version in applicable_versions[1:]]) or 'none')) return None if len(applicable_versions) > 1: logger.info('Using version %s (newest of versions: %s)' % (applicable_versions[0][1], ', '.join([version for link, version in applicable_versions]))) return applicable_versions[0][0] def _find_url_name(self, index_url, url_name, req): """Finds the true URL name of a package, when the given name isn't quite correct. This is usually used to implement case-insensitivity.""" if not index_url.url.endswith('/'): # Vaguely part of the PyPI API... weird but true. ## FIXME: bad to modify this? 
index_url.url += '/' page = self._get_page(index_url, req) if page is None: logger.fatal('Cannot fetch index base URL %s' % index_url) return norm_name = normalize_name(req.url_name) for link in page.links: base = posixpath.basename(link.path.rstrip('/')) if norm_name == normalize_name(base): logger.notify('Real name of requirement %s is %s' % (url_name, base)) return base return None def _get_pages(self, locations, req): """Yields (page, page_url) from the given locations, skipping locations that have errors, and adding download/homepage links""" pending_queue = Queue() for location in locations: pending_queue.put(location) done = [] seen = set() threads = [] for i in range(min(10, len(locations))): t = threading.Thread(target=self._get_queued_page, args=(req, pending_queue, done, seen)) t.setDaemon(True) threads.append(t) t.start() for t in threads: t.join() return done _log_lock = threading.Lock() def _get_queued_page(self, req, pending_queue, done, seen): while 1: try: location = pending_queue.get(False) except QueueEmpty: return if location in seen: continue seen.add(location) page = self._get_page(location, req) if page is None: continue done.append(page) for link in page.rel_links(): pending_queue.put(link) _egg_fragment_re = re.compile(r'#egg=([^&]*)') _egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I) _py_version_re = re.compile(r'-py([123]\.[0-9])$') def _sort_links(self, links): "Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates" eggs, no_eggs = [], [] seen = set() for link in links: if link not in seen: seen.add(link) if link.egg_fragment: eggs.append(link) else: no_eggs.append(link) return no_eggs + eggs def _package_versions(self, links, search_name): for link in self._sort_links(links): for v in self._link_package_versions(link, search_name): yield v def _link_package_versions(self, link, search_name): """ Return an iterable of triples (pkg_resources_version_key, link, 
python_version) that can be extracted from the given link. Meant to be overridden by subclasses, not called by clients. """ if link.egg_fragment: egg_info = link.egg_fragment else: egg_info, ext = link.splitext() if not ext: if link not in self.logged_links: logger.debug('Skipping link %s; not a file' % link) self.logged_links.add(link) return [] if egg_info.endswith('.tar'): # Special double-extension case: egg_info = egg_info[:-4] ext = '.tar' + ext if ext not in ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip'): if link not in self.logged_links: logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext)) self.logged_links.add(link) return [] version = self._egg_info_matches(egg_info, search_name, link) if version is None: logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name)) return [] match = self._py_version_re.search(version) if match: version = version[:match.start()] py_version = match.group(1) if py_version != sys.version[:3]: logger.debug('Skipping %s because Python version is incorrect' % link) return [] logger.debug('Found link %s, version: %s' % (link, version)) return [(pkg_resources.parse_version(version), link, version)] def _egg_info_matches(self, egg_info, search_name, link): match = self._egg_info_re.search(egg_info) if not match: logger.debug('Could not parse version from link: %s' % link) return None name = match.group(0).lower() # To match the "safe" name that pkg_resources creates: name = name.replace('_', '-') if name.startswith(search_name.lower()): return match.group(0)[len(search_name):].lstrip('-') else: return None def _get_page(self, link, req): return HTMLPage.get_page(link, req, cache=self.cache) def _get_mirror_urls(self, mirrors=None, main_mirror_url=None): """Retrieves a list of URLs from the main mirror DNS entry unless a list of mirror URLs are passed. """ if not mirrors: mirrors = get_mirrors(main_mirror_url) # Should this be made "less random"? E.g. netselect like? 
random.shuffle(mirrors) mirror_urls = set() for mirror_url in mirrors: # Make sure we have a valid URL if not ("http://" or "https://" or "file://") in mirror_url: mirror_url = "http://%s" % mirror_url if not mirror_url.endswith("/simple"): mirror_url = "%s/simple/" % mirror_url mirror_urls.add(mirror_url) return list(mirror_urls) class PageCache(object): """Cache of HTML pages""" failure_limit = 3 def __init__(self): self._failures = {} self._pages = {} self._archives = {} def too_many_failures(self, url): return self._failures.get(url, 0) >= self.failure_limit def get_page(self, url): return self._pages.get(url) def is_archive(self, url): return self._archives.get(url, False) def set_is_archive(self, url, value=True): self._archives[url] = value def add_page_failure(self, url, level): self._failures[url] = self._failures.get(url, 0)+level def add_page(self, urls, page): for url in urls: self._pages[url] = page class HTMLPage(object): """Represents one page, along with its URL""" ## FIXME: these regexes are horrible hacks: _homepage_re = re.compile(r'<th>\s*home\s*page', re.I) _download_re = re.compile(r'<th>\s*download\s+url', re.I) ## These aren't so aweful: _rel_re = re.compile("""<[^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*>""", re.I) _href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S) _base_re = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I) def __init__(self, content, url, headers=None): self.content = content self.url = url self.headers = headers def __str__(self): return self.url @classmethod def get_page(cls, link, req, cache=None, skip_archives=True): url = link.url url = url.split('#', 1)[0] if cache.too_many_failures(url): return None # Check for VCS schemes that do not support lookup as web pages. 
from pip.vcs import VcsSupport for scheme in VcsSupport.schemes: if url.lower().startswith(scheme) and url[len(scheme)] in '+:': logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals()) return None if cache is not None: inst = cache.get_page(url) if inst is not None: return inst try: if skip_archives: if cache is not None: if cache.is_archive(url): return None filename = link.filename for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']: if filename.endswith(bad_ext): content_type = cls._get_content_type(url) if content_type.lower().startswith('text/html'): break else: logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type)) if cache is not None: cache.set_is_archive(url) return None logger.debug('Getting page %s' % url) # Tack index.html onto file:// URLs that point to directories (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url) if scheme == 'file' and os.path.isdir(url2pathname(path)): # add trailing slash if not present so urljoin doesn't trim final segment if not url.endswith('/'): url += '/' url = urlparse.urljoin(url, 'index.html') logger.debug(' file: URL is directory, getting %s' % url) resp = urlopen(url) real_url = geturl(resp) headers = resp.info() inst = cls(u(resp.read()), real_url, headers) except (HTTPError, URLError, socket.timeout, socket.error, OSError, WindowsError): e = sys.exc_info()[1] desc = str(e) if isinstance(e, socket.timeout): log_meth = logger.info level =1 desc = 'timed out' elif isinstance(e, URLError): log_meth = logger.info if hasattr(e, 'reason') and isinstance(e.reason, socket.timeout): desc = 'timed out' level = 1 else: level = 2 elif isinstance(e, HTTPError) and e.code == 404: ## FIXME: notify? 
log_meth = logger.info level = 2 else: log_meth = logger.info level = 1 log_meth('Could not fetch URL %s: %s' % (link, desc)) log_meth('Will skip URL %s when looking for download links for %s' % (link.url, req)) if cache is not None: cache.add_page_failure(url, level) return None if cache is not None: cache.add_page([url, real_url], inst) return inst @staticmethod def _get_content_type(url): """Get the Content-Type of the given url, using a HEAD request""" scheme, netloc, path, query, fragment = urlparse.urlsplit(url) if not scheme in ('http', 'https', 'ftp', 'ftps'): ## FIXME: some warning or something? ## assertion error? return '' req = Urllib2HeadRequest(url, headers={'Host': netloc}) resp = urlopen(req) try: if hasattr(resp, 'code') and resp.code != 200 and scheme not in ('ftp', 'ftps'): ## FIXME: doesn't handle redirects return '' return resp.info().get('content-type', '') finally: resp.close() @property def base_url(self): if not hasattr(self, "_base_url"): match = self._base_re.search(self.content) if match: self._base_url = match.group(1) else: self._base_url = self.url return self._base_url @property def links(self): """Yields all links in the page""" for match in self._href_re.finditer(self.content): url = match.group(1) or match.group(2) or match.group(3) url = self.clean_link(urlparse.urljoin(self.base_url, url)) yield Link(url, self) def rel_links(self): for url in self.explicit_rel_links(): yield url for url in self.scraped_rel_links(): yield url def explicit_rel_links(self, rels=('homepage', 'download')): """Yields all links with the given relations""" for match in self._rel_re.finditer(self.content): found_rels = match.group(1).lower().split() for rel in rels: if rel in found_rels: break else: continue match = self._href_re.search(match.group(0)) if not match: continue url = match.group(1) or match.group(2) or match.group(3) url = self.clean_link(urlparse.urljoin(self.base_url, url)) yield Link(url, self) def scraped_rel_links(self): for regex in 
(self._homepage_re, self._download_re): match = regex.search(self.content) if not match: continue href_match = self._href_re.search(self.content, pos=match.end()) if not href_match: continue url = match.group(1) or match.group(2) or match.group(3) if not url: continue url = self.clean_link(urlparse.urljoin(self.base_url, url)) yield Link(url, self) _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) def clean_link(self, url): """Makes sure a link is fully encoded. That is, if a ' ' shows up in the link, it will be rewritten to %20 (while not over-quoting % or other characters).""" return self._clean_re.sub( lambda match: '%%%2x' % ord(match.group(0)), url) class Link(object): def __init__(self, url, comes_from=None): self.url = url self.comes_from = comes_from def __str__(self): if self.comes_from: return '%s (from %s)' % (self.url, self.comes_from) else: return self.url def __repr__(self): return '<Link %s>' % self def __eq__(self, other): return self.url == other.url def __hash__(self): return hash(self.url) @property def filename(self): url = self.url url = url.split('#', 1)[0] url = url.split('?', 1)[0] url = url.rstrip('/') name = posixpath.basename(url) assert name, ( 'URL %r produced no filename' % url) return name @property def scheme(self): return urlparse.urlsplit(self.url)[0] @property def path(self): return urlparse.urlsplit(self.url)[2] def splitext(self): return splitext(posixpath.basename(self.path.rstrip('/'))) _egg_fragment_re = re.compile(r'#egg=([^&]*)') @property def egg_fragment(self): match = self._egg_fragment_re.search(self.url) if not match: return None return match.group(1) _md5_re = re.compile(r'md5=([a-f0-9]+)') @property def md5_hash(self): match = self._md5_re.search(self.url) if match: return match.group(1) return None @property def show_url(self): return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0]) def get_requirement_from_url(url): """Get a requirement from the URL, if possible. 
This looks for #egg in the URL""" link = Link(url) egg_info = link.egg_fragment if not egg_info: egg_info = splitext(link.filename)[0] return package_to_requirement(egg_info) def package_to_requirement(package_name): """Translate a name like Foo-1.2 to Foo==1.3""" match = re.search(r'^(.*?)-(dev|\d.*)', package_name) if match: name = match.group(1) version = match.group(2) else: name = package_name version = '' if version: return '%s==%s' % (name, version) else: return name def get_mirrors(hostname=None): """Return the list of mirrors from the last record found on the DNS entry:: >>> from pip.index import get_mirrors >>> get_mirrors() ['a.pypi.python.org', 'b.pypi.python.org', 'c.pypi.python.org', 'd.pypi.python.org'] Originally written for the distutils2 project by Alexis Metaireau. """ if hostname is None: hostname = DEFAULT_MIRROR_URL # return the last mirror registered on PyPI. try: hostname = socket.gethostbyname_ex(hostname)[0] except socket.gaierror: return [] end_letter = hostname.split(".", 1) # determine the list from the last one. return ["%s.%s" % (s, end_letter[1]) for s in string_range(end_letter[0])] def string_range(last): """Compute the range of string between "a" and last. This works for simple "a to z" lists, but also for "a to zz" lists. """ for k in range(len(last)): for x in product(string.ascii_lowercase, repeat=k+1): result = ''.join(x) yield result if result == last: return
ds-hwang/chromium-crosswalk
refs/heads/master
mojo/public/tools/bindings/pylib/mojom/generate/pack.py
23
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import module as mojom # This module provides a mechanism for determining the packed order and offsets # of a mojom.Struct. # # ps = pack.PackedStruct(struct) # ps.packed_fields will access a list of PackedField objects, each of which # will have an offset, a size and a bit (for mojom.BOOLs). # Size of struct header in bytes: num_bytes [4B] + version [4B]. HEADER_SIZE = 8 class PackedField(object): kind_to_size = { mojom.BOOL: 1, mojom.INT8: 1, mojom.UINT8: 1, mojom.INT16: 2, mojom.UINT16: 2, mojom.INT32: 4, mojom.UINT32: 4, mojom.FLOAT: 4, mojom.HANDLE: 4, mojom.MSGPIPE: 4, mojom.SHAREDBUFFER: 4, mojom.DCPIPE: 4, mojom.DPPIPE: 4, mojom.NULLABLE_HANDLE: 4, mojom.NULLABLE_MSGPIPE: 4, mojom.NULLABLE_SHAREDBUFFER: 4, mojom.NULLABLE_DCPIPE: 4, mojom.NULLABLE_DPPIPE: 4, mojom.INT64: 8, mojom.UINT64: 8, mojom.DOUBLE: 8, mojom.STRING: 8, mojom.NULLABLE_STRING: 8 } @classmethod def GetSizeForKind(cls, kind): if isinstance(kind, (mojom.Array, mojom.Map, mojom.Struct, mojom.Interface, mojom.AssociatedInterface)): return 8 if isinstance(kind, mojom.Union): return 16 if isinstance(kind, mojom.InterfaceRequest): kind = mojom.MSGPIPE if isinstance(kind, mojom.AssociatedInterfaceRequest): return 4 if isinstance(kind, mojom.Enum): # TODO(mpcomplete): what about big enums? return cls.kind_to_size[mojom.INT32] if not kind in cls.kind_to_size: raise Exception("Invalid kind: %s" % kind.spec) return cls.kind_to_size[kind] @classmethod def GetAlignmentForKind(cls, kind): if isinstance(kind, (mojom.Interface, mojom.AssociatedInterface)): return 4 if isinstance(kind, mojom.Union): return 8 return cls.GetSizeForKind(kind) def __init__(self, field, index, ordinal): """ Args: field: the original field. index: the position of the original field in the struct. ordinal: the ordinal of the field for serialization. 
""" self.field = field self.index = index self.ordinal = ordinal self.size = self.GetSizeForKind(field.kind) self.alignment = self.GetAlignmentForKind(field.kind) self.offset = None self.bit = None self.min_version = None def GetPad(offset, alignment): """Returns the pad necessary to reserve space so that |offset + pad| equals to some multiple of |alignment|.""" return (alignment - (offset % alignment)) % alignment def GetFieldOffset(field, last_field): """Returns a 2-tuple of the field offset and bit (for BOOLs).""" if (field.field.kind == mojom.BOOL and last_field.field.kind == mojom.BOOL and last_field.bit < 7): return (last_field.offset, last_field.bit + 1) offset = last_field.offset + last_field.size pad = GetPad(offset, field.alignment) return (offset + pad, 0) def GetPayloadSizeUpToField(field): """Returns the payload size (not including struct header) if |field| is the last field. """ if not field: return 0 offset = field.offset + field.size pad = GetPad(offset, 8) return offset + pad class PackedStruct(object): def __init__(self, struct): self.struct = struct # |packed_fields| contains all the fields, in increasing offset order. self.packed_fields = [] # |packed_fields_in_ordinal_order| refers to the same fields as # |packed_fields|, but in ordinal order. self.packed_fields_in_ordinal_order = [] # No fields. if (len(struct.fields) == 0): return # Start by sorting by ordinal. src_fields = self.packed_fields_in_ordinal_order ordinal = 0 for index, field in enumerate(struct.fields): if field.ordinal is not None: ordinal = field.ordinal src_fields.append(PackedField(field, index, ordinal)) ordinal += 1 src_fields.sort(key=lambda field: field.ordinal) # Set |min_version| for each field. 
next_min_version = 0 for packed_field in src_fields: if packed_field.field.min_version is None: assert next_min_version == 0 else: assert packed_field.field.min_version >= next_min_version next_min_version = packed_field.field.min_version packed_field.min_version = next_min_version if (packed_field.min_version != 0 and mojom.IsReferenceKind(packed_field.field.kind) and not packed_field.field.kind.is_nullable): raise Exception("Non-nullable fields are only allowed in version 0 of " "a struct. %s.%s is defined with [MinVersion=%d]." % (self.struct.name, packed_field.field.name, packed_field.min_version)) src_field = src_fields[0] src_field.offset = 0 src_field.bit = 0 dst_fields = self.packed_fields dst_fields.append(src_field) # Then find first slot that each field will fit. for src_field in src_fields[1:]: last_field = dst_fields[0] for i in xrange(1, len(dst_fields)): next_field = dst_fields[i] offset, bit = GetFieldOffset(src_field, last_field) if offset + src_field.size <= next_field.offset: # Found hole. 
src_field.offset = offset src_field.bit = bit dst_fields.insert(i, src_field) break last_field = next_field if src_field.offset is None: # Add to end src_field.offset, src_field.bit = GetFieldOffset(src_field, last_field) dst_fields.append(src_field) class ByteInfo(object): def __init__(self): self.is_padding = False self.packed_fields = [] def GetByteLayout(packed_struct): total_payload_size = GetPayloadSizeUpToField( packed_struct.packed_fields[-1] if packed_struct.packed_fields else None) bytes = [ByteInfo() for i in xrange(total_payload_size)] limit_of_previous_field = 0 for packed_field in packed_struct.packed_fields: for i in xrange(limit_of_previous_field, packed_field.offset): bytes[i].is_padding = True bytes[packed_field.offset].packed_fields.append(packed_field) limit_of_previous_field = packed_field.offset + packed_field.size for i in xrange(limit_of_previous_field, len(bytes)): bytes[i].is_padding = True for byte in bytes: # A given byte cannot both be padding and have a fields packed into it. assert not (byte.is_padding and byte.packed_fields) return bytes class VersionInfo(object): def __init__(self, version, num_fields, num_bytes): self.version = version self.num_fields = num_fields self.num_bytes = num_bytes def GetVersionInfo(packed_struct): """Get version information for a struct. Args: packed_struct: A PackedStruct instance. Returns: A non-empty list of VersionInfo instances, sorted by version in increasing order. Note: The version numbers may not be consecutive. """ versions = [] last_version = 0 last_num_fields = 0 last_payload_size = 0 for packed_field in packed_struct.packed_fields_in_ordinal_order: if packed_field.min_version != last_version: versions.append( VersionInfo(last_version, last_num_fields, last_payload_size + HEADER_SIZE)) last_version = packed_field.min_version last_num_fields += 1 # The fields are iterated in ordinal order here. 
However, the size of a # version is determined by the last field of that version in pack order, # instead of ordinal order. Therefore, we need to calculate the max value. last_payload_size = max(GetPayloadSizeUpToField(packed_field), last_payload_size) assert len(versions) == 0 or last_num_fields != versions[-1].num_fields versions.append(VersionInfo(last_version, last_num_fields, last_payload_size + HEADER_SIZE)) return versions
openstack/trove
refs/heads/master
trove/guestagent/utils/docker.py
1
# Copyright 2020 Catalyst Cloud # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import docker from oslo_log import log as logging from oslo_utils import encodeutils from trove.common import cfg LOG = logging.getLogger(__name__) CONF = cfg.CONF ANSI_ESCAPE = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]') def stop_container(client, name="database"): try: container = client.containers.get(name) except docker.errors.NotFound: LOG.warning("Failed to get container %s", name) return container.stop(timeout=CONF.state_change_wait_time) def start_container(client, image, name="database", restart_policy="unless-stopped", volumes={}, ports={}, user="", network_mode="host", environment={}, command=""): """Start a docker container. :param client: docker client obj. :param image: docker image. :param name: container name. :param restart_policy: restart policy. :param volumes: e.g. {"/host/trove": {"bind": "/container/trove", "mode": "rw"}} :param ports: ports is ignored when network_mode="host". e.g. {"3306/tcp": 3306} :param user: e.g. 
"1000.1001" :param network_mode: One of bridge, none, host :param environment: Environment variables :param command: :return: """ try: container = client.containers.get(name) LOG.info(f'Starting existing container {name}') container.start() except docker.errors.NotFound: LOG.info( f"Creating docker container, image: {image}, " f"volumes: {volumes}, ports: {ports}, user: {user}, " f"network_mode: {network_mode}, environment: {environment}, " f"command: {command}") container = client.containers.run( image, name=name, restart_policy={"Name": restart_policy}, privileged=False, network_mode=network_mode, detach=True, volumes=volumes, ports=ports, user=user, environment=environment, command=command ) return container def _decode_output(output): output = encodeutils.safe_decode(output) output = ANSI_ESCAPE.sub('', output.strip()) return output.split('\n') def run_container(client, image, name, network_mode="host", volumes={}, command="", user=""): """Run command in a container and return the string output list. :returns output: The log output. :returns ret: True if no error occurs, otherwise False. 
""" try: container = client.containers.get(name) LOG.debug(f'Removing existing container {name}') container.remove(force=True) except docker.errors.NotFound: pass try: LOG.info( f'Running container {name}, image: {image}, ' f'network_mode: {network_mode}, volumes: {volumes}, ' f'command: {command}') output = client.containers.run( image, name=name, network_mode=network_mode, volumes=volumes, remove=False, command=command, user=user, ) except docker.errors.ContainerError as err: output = err.container.logs() return _decode_output(output), False return _decode_output(output), True def get_container_status(client, name="database"): try: container = client.containers.get(name) # One of created, restarting, running, removing, paused, exited, or # dead return container.status except docker.errors.NotFound: return "not running" except Exception: return "unknown" def run_command(client, command, name="database"): container = client.containers.get(name) # output is Bytes type ret, output = container.exec_run(command) if ret == 1: raise Exception('Running command error: %s' % output) return output def restart_container(client, name="database"): container = client.containers.get(name) container.restart(timeout=CONF.state_change_wait_time) def remove_container(client, name="database"): try: container = client.containers.get(name) container.remove(force=True) except docker.errors.NotFound: pass def get_container_logs(client, name='database', tail=50): container = client.containers.get(name) output = container.logs(tail=tail) return _decode_output(output) def prune_images(client): """Remove unused images.""" try: client.images.prune(filters={'dangling': False}) except Exception as e: LOG.warning(f"Prune image failed, error: {str(e)}")
akash1808/nova
refs/heads/master
nova/tests/unit/virt/xenapi/test_volumeops.py
65
# Copyright (c) 2012 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Unit tests for nova.virt.xenapi.volumeops (volume attach/detach on XenAPI).

import mock

from nova import exception
from nova import test
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops


class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB):
    """Shared fixture: a VolumeOps wired to a fake XenAPI session."""

    def setUp(self):
        super(VolumeOpsTestBase, self).setUp()
        self._setup_mock_volumeops()

    def _setup_mock_volumeops(self):
        self.session = stubs.FakeSessionForVolumeTests('fake_uri')
        self.ops = volumeops.VolumeOps(self.session)


class VolumeDetachTestCase(VolumeOpsTestBase):
    """Tests for detach_volume / _detach_vbds_and_srs / detach_all."""

    def test_detach_volume_call(self):
        # Legacy mox-style test; also records call ordering via
        # WithSideEffects to assert find_sr_from_vbd runs BEFORE destroy_vbd.
        registered_calls = []

        def regcall(label):
            def side_effect(*args, **kwargs):
                registered_calls.append(label)
            return side_effect

        ops = volumeops.VolumeOps('session')
        self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
        self.mox.StubOutWithMock(volumeops.volume_utils, 'find_vbd_by_number')
        self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
        self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
        self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
        self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
        self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')

        volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
            'vmref')
        volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
            'devnumber')
        volumeops.volume_utils.find_vbd_by_number(
            'session', 'vmref', 'devnumber').AndReturn('vbdref')
        volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
            False)
        volumeops.vm_utils.unplug_vbd('session', 'vbdref', 'vmref')
        volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
            regcall('destroy_vbd'))
        volumeops.volume_utils.find_sr_from_vbd(
            'session', 'vbdref').WithSideEffects(
                regcall('find_sr_from_vbd')).AndReturn('srref')
        volumeops.volume_utils.purge_sr('session', 'srref')
        self.mox.ReplayAll()

        ops.detach_volume(
            dict(driver_volume_type='iscsi', data='conn_data'),
            'instance_1', 'mountpoint')

        # The SR must be looked up from the VBD before the VBD is destroyed.
        self.assertEqual(
            ['find_sr_from_vbd', 'destroy_vbd'], registered_calls)

    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
    @mock.patch.object(volume_utils, "find_vbd_by_number")
    @mock.patch.object(vm_utils, "vm_ref_or_raise")
    def test_detach_volume(self, mock_vm, mock_vbd, mock_detach):
        mock_vm.return_value = "vm_ref"
        mock_vbd.return_value = "vbd_ref"

        self.ops.detach_volume({}, "name", "/dev/xvdd")

        mock_vm.assert_called_once_with(self.session, "name")
        # /dev/xvdd maps to device number 3.
        mock_vbd.assert_called_once_with(self.session, "vm_ref", 3)
        mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"])

    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
    @mock.patch.object(volume_utils, "find_vbd_by_number")
    @mock.patch.object(vm_utils, "vm_ref_or_raise")
    def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd,
                                                   mock_detach):
        # A missing VBD (None) means the volume never attached: no detach.
        mock_vm.return_value = "vm_ref"
        mock_vbd.return_value = None

        self.ops.detach_volume({}, "name", "/dev/xvdd")

        self.assertFalse(mock_detach.called)

    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
    @mock.patch.object(volume_utils, "find_vbd_by_number")
    @mock.patch.object(vm_utils, "vm_ref_or_raise")
    def test_detach_volume_raises(self, mock_vm, mock_vbd, mock_detach):
        # Lookup failures propagate and nothing is detached.
        mock_vm.return_value = "vm_ref"
        mock_vbd.side_effect = test.TestingException

        self.assertRaises(test.TestingException,
                          self.ops.detach_volume, {}, "name", "/dev/xvdd")

        self.assertFalse(mock_detach.called)

    @mock.patch.object(volume_utils, "purge_sr")
    @mock.patch.object(vm_utils, "destroy_vbd")
    @mock.patch.object(volume_utils, "find_sr_from_vbd")
    @mock.patch.object(vm_utils, "unplug_vbd")
    @mock.patch.object(vm_utils, "is_vm_shutdown")
    def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown,
                                              mock_unplug, mock_find_sr,
                                              mock_destroy, mock_purge):
        # Running VM: the VBD must be unplugged before it is destroyed.
        mock_shutdown.return_value = False
        mock_find_sr.return_value = "sr_ref"

        self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"])

        mock_shutdown.assert_called_once_with(self.session, "vm_ref")
        mock_find_sr.assert_called_once_with(self.session, "vbd_ref")
        mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref")
        mock_destroy.assert_called_once_with(self.session, "vbd_ref")
        mock_purge.assert_called_once_with(self.session, "sr_ref")

    @mock.patch.object(volume_utils, "purge_sr")
    @mock.patch.object(vm_utils, "destroy_vbd")
    @mock.patch.object(volume_utils, "find_sr_from_vbd")
    @mock.patch.object(vm_utils, "unplug_vbd")
    @mock.patch.object(vm_utils, "is_vm_shutdown")
    def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug,
                                             mock_find_sr, mock_destroy,
                                             mock_purge):
        # Shut-down VM: no unplug needed, every VBD is destroyed directly.
        mock_shutdown.return_value = True
        mock_find_sr.return_value = "sr_ref"

        self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"])

        expected = [mock.call(self.session, "vbd_ref_1"),
                    mock.call(self.session, "vbd_ref_2")]
        self.assertEqual(expected, mock_destroy.call_args_list)
        mock_purge.assert_called_with(self.session, "sr_ref")
        self.assertFalse(mock_unplug.called)

    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
    def test_detach_all_no_volumes(self, mock_get_all, mock_detach):
        mock_get_all.return_value = []

        self.ops.detach_all("vm_ref")

        mock_get_all.assert_called_once_with("vm_ref")
        self.assertFalse(mock_detach.called)

    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
    def test_detach_all_volumes(self, mock_get_all, mock_detach):
        mock_get_all.return_value = ["1"]

        self.ops.detach_all("vm_ref")

        mock_get_all.assert_called_once_with("vm_ref")
        mock_detach.assert_called_once_with("vm_ref", ["1"])

    def test_get_all_volume_vbd_refs_no_vbds(self):
        with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
            with mock.patch.object(self.session.VBD,
                                   "get_other_config") as mock_conf:
                mock_get.return_value = []

                result = self.ops._get_all_volume_vbd_refs("vm_ref")

                self.assertEqual([], list(result))
                mock_get.assert_called_once_with("vm_ref")
                self.assertFalse(mock_conf.called)

    def test_get_all_volume_vbd_refs_no_volumes(self):
        # A VBD without the "osvol" marker in other_config is not a volume.
        with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
            with mock.patch.object(self.session.VBD,
                                   "get_other_config") as mock_conf:
                mock_get.return_value = ["1"]
                mock_conf.return_value = {}

                result = self.ops._get_all_volume_vbd_refs("vm_ref")

                self.assertEqual([], list(result))
                mock_get.assert_called_once_with("vm_ref")
                mock_conf.assert_called_once_with("1")

    def test_get_all_volume_vbd_refs_with_volumes(self):
        with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
            with mock.patch.object(self.session.VBD,
                                   "get_other_config") as mock_conf:
                mock_get.return_value = ["1", "2"]
                mock_conf.return_value = {"osvol": True}

                result = self.ops._get_all_volume_vbd_refs("vm_ref")

                self.assertEqual(["1", "2"], list(result))
                mock_get.assert_called_once_with("vm_ref")


class AttachVolumeTestCase(VolumeOpsTestBase):
    """Tests for attach_volume / connect_volume and their helpers."""

    @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
    @mock.patch.object(vm_utils, "vm_ref_or_raise")
    def test_attach_volume_default_hotplug(self, mock_get_vm, mock_attach):
        mock_get_vm.return_value = "vm_ref"

        self.ops.attach_volume({}, "instance_name", "/dev/xvda")

        # hotplug defaults to True; /dev/xvda is device number 0.
        mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
                                            True)

    @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
    @mock.patch.object(vm_utils, "vm_ref_or_raise")
    def test_attach_volume_hotplug(self, mock_get_vm, mock_attach):
        mock_get_vm.return_value = "vm_ref"

        self.ops.attach_volume({}, "instance_name", "/dev/xvda", False)

        mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
                                            False)

    @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
    def test_attach_volume_default_hotplug_connect_volume(self, mock_attach):
        # connect_volume only establishes the SR/VDI, no VM involved.
        self.ops.connect_volume({})
        mock_attach.assert_called_once_with({})

    @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
    @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
    @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
    @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
    def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor,
                                         mock_provider, mock_driver):
        # Without a vm_ref, _attach_volume stops after the hypervisor step.
        connection_info = {"data": {}}
        with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
            mock_provider.return_value = ("sr_ref", "sr_uuid")
            mock_vdi.return_value = "vdi_uuid"

            result = self.ops._attach_volume(connection_info)
            self.assertEqual(result, ("sr_uuid", "vdi_uuid"))

        mock_driver.assert_called_once_with(connection_info)
        mock_provider.assert_called_once_with({}, None)
        mock_hypervisor.assert_called_once_with("sr_ref", {})
        self.assertFalse(mock_attach.called)

    @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
    @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
    @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
    @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
    def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor,
                                           mock_provider, mock_driver):
        connection_info = {"data": {}}
        with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
            mock_provider.return_value = ("sr_ref", "sr_uuid")
            mock_hypervisor.return_value = "vdi_ref"
            mock_vdi.return_value = "vdi_uuid"

            result = self.ops._attach_volume(connection_info, "vm_ref",
                                             "name", 2, True)
            self.assertEqual(result, ("sr_uuid", "vdi_uuid"))

        mock_driver.assert_called_once_with(connection_info)
        mock_provider.assert_called_once_with({}, "name")
        mock_hypervisor.assert_called_once_with("sr_ref", {})
        mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2,
                                            True)

    @mock.patch.object(volume_utils, "forget_sr")
    @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
    @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
    @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
    @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
    def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor,
                                   mock_provider, mock_driver, mock_forget):
        # If the hypervisor step fails, the SR introduced earlier must be
        # forgotten again (no dangling SRs).
        connection_info = {"data": {}}
        mock_provider.return_value = ("sr_ref", "sr_uuid")
        mock_hypervisor.side_effect = test.TestingException

        self.assertRaises(test.TestingException,
                          self.ops._attach_volume, connection_info)

        mock_driver.assert_called_once_with(connection_info)
        mock_provider.assert_called_once_with({}, None)
        mock_hypervisor.assert_called_once_with("sr_ref", {})
        mock_forget.assert_called_once_with(self.session, "sr_ref")
        self.assertFalse(mock_attach.called)

    def test_check_is_supported_driver_type_pass_iscsi(self):
        conn_info = {"driver_volume_type": "iscsi"}
        self.ops._check_is_supported_driver_type(conn_info)

    def test_check_is_supported_driver_type_pass_xensm(self):
        conn_info = {"driver_volume_type": "xensm"}
        self.ops._check_is_supported_driver_type(conn_info)

    def test_check_is_supported_driver_type_pass_bad(self):
        conn_info = {"driver_volume_type": "bad"}
        self.assertRaises(exception.VolumeDriverNotFound,
                          self.ops._check_is_supported_driver_type, conn_info)

    @mock.patch.object(volume_utils, "introduce_sr")
    @mock.patch.object(volume_utils, "find_sr_by_uuid")
    @mock.patch.object(volume_utils, "parse_sr_info")
    def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr,
                                               mock_introduce_sr):
        # Unknown SR: it must be introduced to the pool.
        mock_parse.return_value = ("uuid", "label", "params")
        mock_find_sr.return_value = None
        mock_introduce_sr.return_value = "sr_ref"

        ref, uuid = self.ops._connect_to_volume_provider({}, "name")
        self.assertEqual("sr_ref", ref)
        self.assertEqual("uuid", uuid)
        mock_parse.assert_called_once_with({}, "Disk-for:name")
        mock_find_sr.assert_called_once_with(self.session, "uuid")
        mock_introduce_sr.assert_called_once_with(self.session, "uuid",
                                                  "label", "params")

    @mock.patch.object(volume_utils, "introduce_sr")
    @mock.patch.object(volume_utils, "find_sr_by_uuid")
    @mock.patch.object(volume_utils, "parse_sr_info")
    def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr,
                                               mock_introduce_sr):
        # Already-known SR: reused as-is, not re-introduced.
        mock_parse.return_value = ("uuid", "label", "params")
        mock_find_sr.return_value = "sr_ref"

        ref, uuid = self.ops._connect_to_volume_provider({}, "name")
        self.assertEqual("sr_ref", ref)
        self.assertEqual("uuid", uuid)
        mock_parse.assert_called_once_with({}, "Disk-for:name")
        mock_find_sr.assert_called_once_with(self.session, "uuid")
        self.assertFalse(mock_introduce_sr.called)

    @mock.patch.object(volume_utils, "introduce_vdi")
    def test_connect_hypervisor_to_volume_regular(self, mock_intro):
        mock_intro.return_value = "vdi"

        result = self.ops._connect_hypervisor_to_volume("sr", {})

        self.assertEqual("vdi", result)
        mock_intro.assert_called_once_with(self.session, "sr")

    @mock.patch.object(volume_utils, "introduce_vdi")
    def test_connect_hypervisor_to_volume_vdi(self, mock_intro):
        # Connection data may pin a specific VDI uuid.
        mock_intro.return_value = "vdi"

        conn = {"vdi_uuid": "id"}
        result = self.ops._connect_hypervisor_to_volume("sr", conn)

        self.assertEqual("vdi", result)
        mock_intro.assert_called_once_with(self.session, "sr",
                                           vdi_uuid="id")

    @mock.patch.object(volume_utils, "introduce_vdi")
    def test_connect_hypervisor_to_volume_lun(self, mock_intro):
        # ... or a specific target LUN.
        mock_intro.return_value = "vdi"

        conn = {"target_lun": "lun"}
        result = self.ops._connect_hypervisor_to_volume("sr", conn)

        self.assertEqual("vdi", result)
        mock_intro.assert_called_once_with(self.session, "sr",
                                           target_lun="lun")

    @mock.patch.object(vm_utils, "is_vm_shutdown")
    @mock.patch.object(vm_utils, "create_vbd")
    def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown):
        # Running VM + hotplug=True: the new VBD gets plugged.
        mock_vbd.return_value = "vbd"
        mock_shutdown.return_value = False

        with mock.patch.object(self.session.VBD, "plug") as mock_plug:
            self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
            mock_plug.assert_called_once_with("vbd", "vm")

        mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
                                         bootable=False, osvol=True)
        mock_shutdown.assert_called_once_with(self.session, "vm")

    @mock.patch.object(vm_utils, "is_vm_shutdown")
    @mock.patch.object(vm_utils, "create_vbd")
    def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown):
        # Shut-down VM: VBD is created but cannot be plugged.
        mock_vbd.return_value = "vbd"
        mock_shutdown.return_value = True

        with mock.patch.object(self.session.VBD, "plug") as mock_plug:
            self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
            self.assertFalse(mock_plug.called)

        mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
                                         bootable=False, osvol=True)
        mock_shutdown.assert_called_once_with(self.session, "vm")

    @mock.patch.object(vm_utils, "is_vm_shutdown")
    @mock.patch.object(vm_utils, "create_vbd")
    def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown):
        # hotplug=False skips the power-state check entirely.
        mock_vbd.return_value = "vbd"

        with mock.patch.object(self.session.VBD, "plug") as mock_plug:
            self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, False)
            self.assertFalse(mock_plug.called)

        mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
                                         bootable=False, osvol=True)
        self.assertFalse(mock_shutdown.called)


class FindBadVolumeTestCase(VolumeOpsTestBase):
    """Tests for find_bad_volumes (SR scan failure detection)."""

    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
    def test_find_bad_volumes_no_vbds(self, mock_get_all):
        mock_get_all.return_value = []

        result = self.ops.find_bad_volumes("vm_ref")

        mock_get_all.assert_called_once_with("vm_ref")
        self.assertEqual([], result)

    @mock.patch.object(volume_utils, "find_sr_from_vbd")
    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
    def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr):
        mock_get_all.return_value = ["1", "2"]
        mock_find_sr.return_value = "sr_ref"

        with mock.patch.object(self.session.SR, "scan") as mock_scan:
            result = self.ops.find_bad_volumes("vm_ref")

            mock_get_all.assert_called_once_with("vm_ref")
            expected_find = [mock.call(self.session, "1"),
                             mock.call(self.session, "2")]
            self.assertEqual(expected_find, mock_find_sr.call_args_list)
            expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")]
            self.assertEqual(expected_scan, mock_scan.call_args_list)
            self.assertEqual([], result)

    @mock.patch.object(volume_utils, "find_sr_from_vbd")
    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
    def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr):
        # SR_BACKEND_FAILURE_40 marks the attached device as bad.
        mock_get_all.return_value = ["vbd_ref"]
        mock_find_sr.return_value = "sr_ref"

        class FakeException(Exception):
            details = ['SR_BACKEND_FAILURE_40', "", "", ""]

        session = mock.Mock()
        session.XenAPI.Failure = FakeException
        self.ops._session = session

        with mock.patch.object(session.SR, "scan") as mock_scan:
            with mock.patch.object(session.VBD, "get_device") as mock_get:
                mock_scan.side_effect = FakeException
                mock_get.return_value = "xvdb"

                result = self.ops.find_bad_volumes("vm_ref")

                mock_get_all.assert_called_once_with("vm_ref")
                mock_scan.assert_called_once_with("sr_ref")
                mock_get.assert_called_once_with("vbd_ref")
                self.assertEqual(["/dev/xvdb"], result)

    @mock.patch.object(volume_utils, "find_sr_from_vbd")
    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
    def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr):
        # Any other XenAPI failure is re-raised, not swallowed.
        mock_get_all.return_value = ["vbd_ref"]
        mock_find_sr.return_value = "sr_ref"

        class FakeException(Exception):
            details = ['foo', "", "", ""]

        session = mock.Mock()
        session.XenAPI.Failure = FakeException
        self.ops._session = session

        with mock.patch.object(session.SR, "scan") as mock_scan:
            with mock.patch.object(session.VBD, "get_device") as mock_get:
                mock_scan.side_effect = FakeException
                mock_get.return_value = "xvdb"

                self.assertRaises(FakeException, self.ops.find_bad_volumes,
                                  "vm_ref")

                mock_scan.assert_called_once_with("sr_ref")


class CleanupFromVDIsTestCase(VolumeOpsTestBase):
    """Tests for safe_cleanup_from_vdis (best-effort SR purge)."""

    def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
                                sr_refs):
        # Helper: assert each VDI was resolved and each SR purged.
        find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref
                         in vdi_refs]
        find_sr_from_vdi.assert_has_calls(find_sr_calls)
        purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref
                          in sr_refs]
        purge_sr.assert_has_calls(purge_sr_calls)

    @mock.patch.object(volume_utils, 'find_sr_from_vdi')
    @mock.patch.object(volume_utils, 'purge_sr')
    def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
        vdi_refs = ['vdi_ref1', 'vdi_ref2']
        sr_refs = ['sr_ref1', 'sr_ref2']
        find_sr_from_vdi.side_effect = sr_refs
        self.ops.safe_cleanup_from_vdis(vdi_refs)

        self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
                                     sr_refs)

    # NOTE(review): the side_effect passed to the decorator below is
    # immediately overwritten by the in-body assignment -- one of the two
    # is redundant.
    @mock.patch.object(volume_utils, 'find_sr_from_vdi',
            side_effect=[exception.StorageError(reason=''), 'sr_ref2'])
    @mock.patch.object(volume_utils, 'purge_sr')
    def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
            find_sr_from_vdi):
        vdi_refs = ['vdi_ref1', 'vdi_ref2']
        sr_refs = ['sr_ref2']
        find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
                sr_refs[0]]
        self.ops.safe_cleanup_from_vdis(vdi_refs)

        self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
                                     sr_refs)

    @mock.patch.object(volume_utils, 'find_sr_from_vdi')
    @mock.patch.object(volume_utils, 'purge_sr')
    def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr,
            find_sr_from_vdi):
        vdi_refs = ['vdi_ref1', 'vdi_ref2']
        sr_refs = ['sr_ref1', 'sr_ref2']
        find_sr_from_vdi.side_effect = sr_refs
        # NOTE(review): 'side_effects' is not a Mock attribute (the real one
        # is 'side_effect'), so purge_sr never actually raises here and the
        # exception-handling path is NOT exercised by this test.
        purge_sr.side_effects = [test.TestingException, None]
        self.ops.safe_cleanup_from_vdis(vdi_refs)

        self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
                                     sr_refs)
joannne/aggregator
refs/heads/master
aggregator/users/admin.py
60
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm

from .models import User


class MyUserChangeForm(UserChangeForm):
    """Change form bound to the project's custom ``User`` model."""

    class Meta(UserChangeForm.Meta):
        model = User


class MyUserCreationForm(UserCreationForm):
    """Creation form bound to the custom ``User`` model.

    Re-implements the duplicate-username check against the custom model,
    since the stock form validates against ``auth.User``.
    """

    class Meta(UserCreationForm.Meta):
        model = User

    def clean_username(self):
        """Reject usernames that already exist on the custom model."""
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            # No clash -- the requested username is available.
            return username
        else:
            raise forms.ValidationError(
                self.error_messages['duplicate_username'])


@admin.register(User)
class UserAdmin(AuthUserAdmin):
    """Admin for the custom ``User`` model, using the forms above."""

    form = MyUserChangeForm
    add_form = MyUserCreationForm
huiyiqun/kernel_flo
refs/heads/working
tools/perf/scripts/python/sctop.py
11180
# system call top # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Periodically displays system-wide system call totals, broken down by # syscall. If a [comm] arg is specified, only syscalls called by # [comm] are displayed. If an [interval] arg is specified, the display # will be refreshed every [interval] seconds. The default interval is # 3 seconds. import os, sys, thread, time sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s sctop.py [comm] [interval]\n"; for_comm = None default_interval = 3 interval = default_interval if len(sys.argv) > 3: sys.exit(usage) if len(sys.argv) > 2: for_comm = sys.argv[1] interval = int(sys.argv[2]) elif len(sys.argv) > 1: try: interval = int(sys.argv[1]) except ValueError: for_comm = sys.argv[1] interval = default_interval syscalls = autodict() def trace_begin(): thread.start_new_thread(print_syscall_totals, (interval,)) pass def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(interval): while 1: clear_term() if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): try: print "%-40s %10d\n" % (syscall_name(id), val), except TypeError: pass syscalls.clear() time.sleep(interval)
ojii/sandlib
refs/heads/master
lib/lib-python/2.7/email/header.py
255
# Copyright (C) 2002-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org

"""Header encoding and decoding functionality."""

__all__ = [
    'Header',
    'decode_header',
    'make_header',
    ]

import re
import binascii

import email.quoprimime
import email.base64mime

from email.errors import HeaderParseError
from email.charset import Charset

NL = '\n'
SPACE = ' '
USPACE = u' '
SPACE8 = ' ' * 8
UEMPTYSTRING = u''

# RFC 2822 recommended maximum line length for folded headers.
MAXLINELEN = 76

USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')

# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
  =\?                   # literal =?
  (?P<charset>[^?]*?)   # non-greedy up to the next ? is the charset
  \?                    # literal ?
  (?P<encoding>[qb])    # either a "q" or a "b", case insensitive
  \?                    # literal ?
  (?P<encoded>.*?)      # non-greedy up to the next ?= is the encoded string
  \?=                   # literal ?=
  (?=[ \t]|$)           # whitespace or the end of the string
  ''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)

# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822.  Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')

# Find a header embedded in a putative header value.  Used to check for
# header injection attack.
_embeded_header = re.compile(r'\n[^ \t]+:')


# Helpers
_max_append = email.quoprimime._max_append


def decode_header(header):
    """Decode a message header value without converting charset.

    Returns a list of (decoded_string, charset) pairs containing each of the
    decoded parts of the header.  Charset is None for non-encoded parts of the
    header, otherwise a lower-case string containing the name of the character
    set specified in the encoded string.

    An email.errors.HeaderParseError may be raised when certain decoding error
    occurs (e.g. a base64 decoding exception).
    """
    # If no encoding, just return the header
    header = str(header)
    if not ecre.search(header):
        return [(header, None)]
    decoded = []
    dec = ''
    for line in header.splitlines():
        # This line might not have an encoding in it
        if not ecre.search(line):
            decoded.append((line, None))
            continue
        # ecre.split yields [unencoded, charset, encoding, encoded, ...]
        # groups in runs of four; consume them three at a time after
        # popping the leading unencoded part.
        parts = ecre.split(line)
        while parts:
            unenc = parts.pop(0).strip()
            if unenc:
                # Should we continue a long line?
                if decoded and decoded[-1][1] is None:
                    decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
                else:
                    decoded.append((unenc, None))
            if parts:
                charset, encoding = [s.lower() for s in parts[0:2]]
                encoded = parts[2]
                dec = None
                if encoding == 'q':
                    dec = email.quoprimime.header_decode(encoded)
                elif encoding == 'b':
                    paderr = len(encoded) % 4
                    # Postel's law: add missing padding
                    if paderr:
                        encoded += '==='[:4 - paderr]
                    try:
                        dec = email.base64mime.decode(encoded)
                    except binascii.Error:
                        # Turn this into a higher level exception.  BAW: Right
                        # now we throw the lower level exception away but
                        # when/if we get exception chaining, we'll preserve it.
                        raise HeaderParseError
                if dec is None:
                    dec = encoded

                # Merge consecutive fragments in the same charset.
                if decoded and decoded[-1][1] == charset:
                    decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
                else:
                    decoded.append((dec, charset))
            del parts[0:3]
    return decoded


def make_header(decoded_seq, maxlinelen=None, header_name=None,
                continuation_ws=' '):
    """Create a Header from a sequence of pairs as returned by decode_header()

    decode_header() takes a header value string and returns a sequence of
    pairs of the format (decoded_string, charset) where charset is the string
    name of the character set.

    This function takes one of those sequence of pairs and returns a Header
    instance.  Optional maxlinelen, header_name, and continuation_ws are as in
    the Header constructor.
""" h = Header(maxlinelen=maxlinelen, header_name=header_name, continuation_ws=continuation_ws) for s, charset in decoded_seq: # None means us-ascii but we can simply pass it on to h.append() if charset is not None and not isinstance(charset, Charset): charset = Charset(charset) h.append(s, charset) return h class Header: def __init__(self, s=None, charset=None, maxlinelen=None, header_name=None, continuation_ws=' ', errors='strict'): """Create a MIME-compliant header that can contain many character sets. Optional s is the initial header value. If None, the initial header value is not set. You can later append to the header with .append() method calls. s may be a byte string or a Unicode string, but see the .append() documentation for semantics. Optional charset serves two purposes: it has the same meaning as the charset argument to the .append() method. It also sets the default character set for all subsequent .append() calls that omit the charset argument. If charset is not provided in the constructor, the us-ascii charset is used both as s's initial charset and as the default for subsequent .append() calls. The maximum line length can be specified explicit via maxlinelen. For splitting the first line to a shorter value (to account for the field header which isn't included in s, e.g. `Subject') pass in the name of the field in header_name. The default maxlinelen is 76. continuation_ws must be RFC 2822 compliant folding whitespace (usually either a space or a hard tab) which will be prepended to continuation lines. errors is passed through to the .append() call. """ if charset is None: charset = USASCII if not isinstance(charset, Charset): charset = Charset(charset) self._charset = charset self._continuation_ws = continuation_ws cws_expanded_len = len(continuation_ws.replace('\t', SPACE8)) # BAW: I believe `chunks' and `maxlinelen' should be non-public. 
self._chunks = [] if s is not None: self.append(s, charset, errors) if maxlinelen is None: maxlinelen = MAXLINELEN if header_name is None: # We don't know anything about the field header so the first line # is the same length as subsequent lines. self._firstlinelen = maxlinelen else: # The first line should be shorter to take into account the field # header. Also subtract off 2 extra for the colon and space. self._firstlinelen = maxlinelen - len(header_name) - 2 # Second and subsequent lines should subtract off the length in # columns of the continuation whitespace prefix. self._maxlinelen = maxlinelen - cws_expanded_len def __str__(self): """A synonym for self.encode().""" return self.encode() def __unicode__(self): """Helper for the built-in unicode function.""" uchunks = [] lastcs = None for s, charset in self._chunks: # We must preserve spaces between encoded and non-encoded word # boundaries, which means for us we need to add a space when we go # from a charset to None/us-ascii, or from None/us-ascii to a # charset. Only do this for the second and subsequent chunks. nextcs = charset if uchunks: if lastcs not in (None, 'us-ascii'): if nextcs in (None, 'us-ascii'): uchunks.append(USPACE) nextcs = None elif nextcs not in (None, 'us-ascii'): uchunks.append(USPACE) lastcs = nextcs uchunks.append(unicode(s, str(charset))) return UEMPTYSTRING.join(uchunks) # Rich comparison operators for equality only. BAW: does it make sense to # have or explicitly disable <, <=, >, >= operators? def __eq__(self, other): # other may be a Header or a string. Both are fine so coerce # ourselves to a string, swap the args and do another comparison. return other == self.encode() def __ne__(self, other): return not self == other def append(self, s, charset=None, errors='strict'): """Append a string to the MIME header. Optional charset, if given, should be a Charset instance or the name of a character set (which will be converted to a Charset instance). 
A value of None (the default) means that the charset given in the constructor is used. s may be a byte string or a Unicode string. If it is a byte string (i.e. isinstance(s, str) is true), then charset is the encoding of that byte string, and a UnicodeError will be raised if the string cannot be decoded with that charset. If s is a Unicode string, then charset is a hint specifying the character set of the characters in the string. In this case, when producing an RFC 2822 compliant header using RFC 2047 rules, the Unicode string will be encoded using the following charsets in order: us-ascii, the charset hint, utf-8. The first character set not to provoke a UnicodeError is used. Optional `errors' is passed as the third argument to any unicode() or ustr.encode() call. """ if charset is None: charset = self._charset elif not isinstance(charset, Charset): charset = Charset(charset) # If the charset is our faux 8bit charset, leave the string unchanged if charset != '8bit': # We need to test that the string can be converted to unicode and # back to a byte string, given the input and output codecs of the # charset. if isinstance(s, str): # Possibly raise UnicodeError if the byte string can't be # converted to a unicode with the input codec of the charset. incodec = charset.input_codec or 'us-ascii' ustr = unicode(s, incodec, errors) # Now make sure that the unicode could be converted back to a # byte string with the output codec, which may be different # than the iput coded. Still, use the original byte string. outcodec = charset.output_codec or 'us-ascii' ustr.encode(outcodec, errors) elif isinstance(s, unicode): # Now we have to be sure the unicode string can be converted # to a byte string with a reasonable output codec. We want to # use the byte string in the chunk. 
for charset in USASCII, charset, UTF8: try: outcodec = charset.output_codec or 'us-ascii' s = s.encode(outcodec, errors) break except UnicodeError: pass else: assert False, 'utf-8 conversion failed' self._chunks.append((s, charset)) def _split(self, s, charset, maxlinelen, splitchars): # Split up a header safely for use with encode_chunks. splittable = charset.to_splittable(s) encoded = charset.from_splittable(splittable, True) elen = charset.encoded_header_len(encoded) # If the line's encoded length first, just return it if elen <= maxlinelen: return [(encoded, charset)] # If we have undetermined raw 8bit characters sitting in a byte # string, we really don't know what the right thing to do is. We # can't really split it because it might be multibyte data which we # could break if we split it between pairs. The least harm seems to # be to not split the header at all, but that means they could go out # longer than maxlinelen. if charset == '8bit': return [(s, charset)] # BAW: I'm not sure what the right test here is. What we're trying to # do is be faithful to RFC 2822's recommendation that ($2.2.3): # # "Note: Though structured field bodies are defined in such a way that # folding can take place between many of the lexical tokens (and even # within some of the lexical tokens), folding SHOULD be limited to # placing the CRLF at higher-level syntactic breaks." # # For now, I can only imagine doing this when the charset is us-ascii, # although it's possible that other charsets may also benefit from the # higher-level syntactic breaks. elif charset == 'us-ascii': return self._split_ascii(s, charset, maxlinelen, splitchars) # BAW: should we use encoded? 
elif elen == len(s): # We can split on _maxlinelen boundaries because we know that the # encoding won't change the size of the string splitpnt = maxlinelen first = charset.from_splittable(splittable[:splitpnt], False) last = charset.from_splittable(splittable[splitpnt:], False) else: # Binary search for split point first, last = _binsplit(splittable, charset, maxlinelen) # first is of the proper length so just wrap it in the appropriate # chrome. last must be recursively split. fsplittable = charset.to_splittable(first) fencoded = charset.from_splittable(fsplittable, True) chunk = [(fencoded, charset)] return chunk + self._split(last, charset, self._maxlinelen, splitchars) def _split_ascii(self, s, charset, firstlen, splitchars): chunks = _split_ascii(s, firstlen, self._maxlinelen, self._continuation_ws, splitchars) return zip(chunks, [charset]*len(chunks)) def _encode_chunks(self, newchunks, maxlinelen): # MIME-encode a header with many different charsets and/or encodings. # # Given a list of pairs (string, charset), return a MIME-encoded # string suitable for use in a header field. Each pair may have # different charsets and/or encodings, and the resulting header will # accurately reflect each setting. # # Each encoding can be email.utils.QP (quoted-printable, for # ASCII-like character sets like iso-8859-1), email.utils.BASE64 # (Base64, for non-ASCII like character sets like KOI8-R and # iso-2022-jp), or None (no encoding). 
# # Each pair will be represented on a separate line; the resulting # string will be in the format: # # =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n # =?charset2?b?SvxyZ2VuIEL2aW5n?=" chunks = [] for header, charset in newchunks: if not header: continue if charset is None or charset.header_encoding is None: s = header else: s = charset.header_encode(header) # Don't add more folding whitespace than necessary if chunks and chunks[-1].endswith(' '): extra = '' else: extra = ' ' _max_append(chunks, s, maxlinelen, extra) joiner = NL + self._continuation_ws return joiner.join(chunks) def encode(self, splitchars=';, '): """Encode a message header into an RFC-compliant format. There are many issues involved in converting a given string for use in an email header. Only certain character sets are readable in most email clients, and as header strings can only contain a subset of 7-bit ASCII, care must be taken to properly convert and encode (with Base64 or quoted-printable) header strings. In addition, there is a 75-character length limit on any given encoded header field, so line-wrapping must be performed, even with double-byte character sets. This method will do its best to convert the string to the correct character set used in email, and encode and line wrap it safely with the appropriate scheme for that character set. If the given charset is not known or an error occurs during conversion, this function will return the header untouched. Optional splitchars is a string containing characters to split long ASCII lines on, in rough support of RFC 2822's `highest level syntactic breaks'. This doesn't affect RFC 2047 encoded lines. """ newchunks = [] maxlinelen = self._firstlinelen lastlen = 0 for s, charset in self._chunks: # The first bit of the next chunk should be just long enough to # fill the next line. Don't forget the space separating the # encoded words. 
targetlen = maxlinelen - lastlen - 1 if targetlen < charset.encoded_header_len(''): # Stick it on the next line targetlen = maxlinelen newchunks += self._split(s, charset, targetlen, splitchars) lastchunk, lastcharset = newchunks[-1] lastlen = lastcharset.encoded_header_len(lastchunk) value = self._encode_chunks(newchunks, maxlinelen) if _embeded_header.search(value): raise HeaderParseError("header value appears to contain " "an embedded header: {!r}".format(value)) return value def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars): lines = [] maxlen = firstlen for line in s.splitlines(): # Ignore any leading whitespace (i.e. continuation whitespace) already # on the line, since we'll be adding our own. line = line.lstrip() if len(line) < maxlen: lines.append(line) maxlen = restlen continue # Attempt to split the line at the highest-level syntactic break # possible. Note that we don't have a lot of smarts about field # syntax; we just try to break on semi-colons, then commas, then # whitespace. for ch in splitchars: if ch in line: break else: # There's nothing useful to split the line on, not even spaces, so # just append this line unchanged lines.append(line) maxlen = restlen continue # Now split the line on the character plus trailing whitespace cre = re.compile(r'%s\s*' % ch) if ch in ';,': eol = ch else: eol = '' joiner = eol + ' ' joinlen = len(joiner) wslen = len(continuation_ws.replace('\t', SPACE8)) this = [] linelen = 0 for part in cre.split(line): curlen = linelen + max(0, len(this)-1) * joinlen partlen = len(part) onfirstline = not lines # We don't want to split after the field name, if we're on the # first line and the field name is present in the header string. 
if ch == ' ' and onfirstline and \ len(this) == 1 and fcre.match(this[0]): this.append(part) linelen += partlen elif curlen + partlen > maxlen: if this: lines.append(joiner.join(this) + eol) # If this part is longer than maxlen and we aren't already # splitting on whitespace, try to recursively split this line # on whitespace. if partlen > maxlen and ch != ' ': subl = _split_ascii(part, maxlen, restlen, continuation_ws, ' ') lines.extend(subl[:-1]) this = [subl[-1]] else: this = [part] linelen = wslen + len(this[-1]) maxlen = restlen else: this.append(part) linelen += partlen # Put any left over parts on a line by themselves if this: lines.append(joiner.join(this)) return lines def _binsplit(splittable, charset, maxlinelen): i = 0 j = len(splittable) while i < j: # Invariants: # 1. splittable[:k] fits for all k <= i (note that we *assume*, # at the start, that splittable[:0] fits). # 2. splittable[:k] does not fit for any k > j (at the start, # this means we shouldn't look at any k > len(splittable)). # 3. We don't know about splittable[:k] for k in i+1..j. # 4. We want to set i to the largest k that fits, with i <= k <= j. # m = (i+j+1) >> 1 # ceiling((i+j)/2); i < m <= j chunk = charset.from_splittable(splittable[:m], True) chunklen = charset.encoded_header_len(chunk) if chunklen <= maxlinelen: # m is acceptable, so is a new lower bound. i = m else: # m is not acceptable, so final i must be < m. j = m - 1 # i == j. Invariant #1 implies that splittable[:i] fits, and # invariant #2 implies that splittable[:i+1] does not fit, so i # is what we're looking for. first = charset.from_splittable(splittable[:i], False) last = charset.from_splittable(splittable[i:], False) return first, last
kisel/trex-core
refs/heads/master
scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py
5
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
The history module.

:authors: Josh Marshall, Thomas Calmant
:copyright: Copyright 2015, isandlaTech
:license: Apache License 2.0
:version: 0.2.5

..

    Copyright 2015 isandlaTech

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""

# Module version
__version_info__ = (0, 2, 5)
__version__ = ".".join(str(x) for x in __version_info__)

# Documentation strings format
__docformat__ = "restructuredtext en"

# ------------------------------------------------------------------------------


class History(object):
    """
    Keeps every JSON-RPC request and response seen during a session.

    A server using this should call :meth:`clear` after each request cycle
    so the stored lists do not grow without bound.
    """
    def __init__(self):
        """
        Sets up the request and response history lists
        """
        self.requests = []
        self.responses = []

    def add_request(self, request_obj):
        """
        Stores a request in the history

        :param request_obj: A request object
        """
        self.requests.append(request_obj)

    def add_response(self, response_obj):
        """
        Stores a response in the history

        :param response_obj: Response content
        """
        self.responses.append(response_obj)

    @property
    def request(self):
        """
        The most recently stored request, or None if none was stored
        """
        if self.requests:
            return self.requests[-1]
        return None

    @property
    def response(self):
        """
        The most recently stored response, or None if none was stored
        """
        if self.responses:
            return self.responses[-1]
        return None

    def clear(self):
        """
        Empties both history lists in place
        """
        del self.requests[:]
        del self.responses[:]
WindCanDie/spark
refs/heads/master
python/test_coverage/coverage_daemon.py
78
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import imp # This is a hack to always refer the main code rather than built zip. main_code_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) daemon = imp.load_source("daemon", "%s/pyspark/daemon.py" % main_code_dir) if "COVERAGE_PROCESS_START" in os.environ: worker = imp.load_source("worker", "%s/pyspark/worker.py" % main_code_dir) def _cov_wrapped(*args, **kwargs): import coverage cov = coverage.coverage( config_file=os.environ["COVERAGE_PROCESS_START"]) cov.start() try: worker.main(*args, **kwargs) finally: cov.stop() cov.save() daemon.worker_main = _cov_wrapped else: raise RuntimeError("COVERAGE_PROCESS_START environment variable is not set, exiting.") if __name__ == '__main__': daemon.manager()
fbalakirev/red-pitaya-notes
refs/heads/master
projects/pulsed_nmr/client/pulsed_nmr.py
1
#!/usr/bin/env python

# Control program for the Red Pitaya Pulsed NMR system
# Copyright (C) 2015  Pavel Demin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import sys
import struct

import numpy as np

import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure

from PyQt5.uic import loadUiType
from PyQt5.QtCore import QRegExp, QTimer
from PyQt5.QtGui import QRegExpValidator
from PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QVBoxLayout, QSizePolicy, QMessageBox, QWidget
from PyQt5.QtNetwork import QAbstractSocket, QTcpSocket

# Load the Qt Designer form; loadUiType also returns the base class.
Ui_PulsedNMR, QMainWindow = loadUiType('pulsed_nmr.ui')


class PulsedNMR(QMainWindow, Ui_PulsedNMR):
    """Main window: connects to the Red Pitaya over TCP, sends command
    words, and plots the envelope of the acquired signal."""

    # combo-box index -> sample rate in samples per second
    rates = {0:25.0e3, 1:50.0e3, 2:250.0e3, 3:500.0e3, 4:2500.0e3}

    def __init__(self):
        super(PulsedNMR, self).__init__()
        self.setupUi(self)
        self.rateValue.addItems(['25', '50', '250', '500', '2500'])
        # IP address validator (dotted quad, each octet 0-255)
        rx = QRegExp('^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$')
        self.addrValue.setValidator(QRegExpValidator(rx, self.addrValue))
        # state variable: True while disconnected/stopped
        self.idle = True
        # number of samples to show on the plot
        self.size = 50000
        # buffer and offset for the incoming samples
        # 16 bytes per sample, i.e. the buffer views as 2*size complex64
        # values; the plot uses every other one (see read_data).
        self.buffer = bytearray(16 * self.size)
        self.offset = 0
        self.data = np.frombuffer(self.buffer, np.complex64)
        # create figure
        figure = Figure()
        figure.set_facecolor('none')
        self.axes = figure.add_subplot(111)
        self.canvas = FigureCanvas(figure)
        self.plotLayout.addWidget(self.canvas)
        # create navigation toolbar
        self.toolbar = NavigationToolbar(self.canvas, self.plotWidget, False)
        # remove subplots action (its position in the action list moved
        # between matplotlib 1.x and 2.x)
        actions = self.toolbar.actions()
        if int(matplotlib.__version__[0]) < 2:
            self.toolbar.removeAction(actions[7])
        else:
            self.toolbar.removeAction(actions[6])
        self.plotLayout.addWidget(self.toolbar)
        # create TCP socket
        self.socket = QTcpSocket(self)
        self.socket.connected.connect(self.connected)
        self.socket.readyRead.connect(self.read_data)
        self.socket.error.connect(self.display_error)
        # connect signals from buttons and boxes
        self.startButton.clicked.connect(self.start)
        self.freqValue.valueChanged.connect(self.set_freq)
        self.awidthValue.valueChanged.connect(self.set_awidth)
        self.deltaValue.valueChanged.connect(self.set_delta)
        self.rateValue.currentIndexChanged.connect(self.set_rate)
        # set rate (index 2 -> 250 kS/s)
        self.rateValue.setCurrentIndex(2)
        # create timer for the repetitions
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.fire)

    def start(self):
        # Toggle: connect when idle, otherwise stop and disconnect.
        if self.idle:
            self.startButton.setEnabled(False)
            self.socket.connectToHost(self.addrValue.text(), 1001)
        else:
            self.idle = True
            self.timer.stop()
            self.socket.close()
            self.offset = 0
            self.startButton.setText('Start')
            self.startButton.setEnabled(True)

    def connected(self):
        # Push all current settings to the device, fire the first pulse,
        # then start repeating at the configured interval (ms).
        self.idle = False
        self.set_freq(self.freqValue.value())
        self.set_rate(self.rateValue.currentIndex())
        self.set_awidth(self.awidthValue.value())
        self.fire()
        self.timer.start(self.deltaValue.value())
        self.startButton.setText('Stop')
        self.startButton.setEnabled(True)

    def read_data(self):
        # Accumulate incoming bytes until the acquisition buffer is full,
        # then redraw and start refilling from the beginning.
        size = self.socket.bytesAvailable()
        if self.offset + size < 16 * self.size:
            self.buffer[self.offset:self.offset + size] = self.socket.read(size)
            self.offset += size
        else:
            self.buffer[self.offset:16 * self.size] = self.socket.read(16 * self.size - self.offset)
            self.offset = 0
            # plot the signal envelope (magnitude of every other complex
            # sample)
            self.curve.set_ydata(np.abs(self.data[0::2]))
            self.canvas.draw()

    def display_error(self, socketError):
        # A remote close is the normal stop path; anything else is shown
        # to the user.
        if socketError == QAbstractSocket.RemoteHostClosedError:
            pass
        else:
            QMessageBox.information(self, 'PulsedNMR', 'Error: %s.' % self.socket.errorString())
        self.startButton.setText('Start')
        self.startButton.setEnabled(True)

    def set_freq(self, value):
        if self.idle:
            return
        # command 0: frequency; value appears to be in MHz, scaled to Hz
        # before packing -- TODO confirm against the server firmware
        self.socket.write(struct.pack('<I', 0<<28 | int(1.0e6 * value)))

    def set_rate(self, index):
        # time axis in milliseconds for the selected sample rate
        rate = float(PulsedNMR.rates[index])
        time = np.linspace(0.0, (self.size - 1) * 1000.0 / rate, self.size)
        # reset toolbar (NOTE(review): _views/_positions are private
        # matplotlib toolbar attributes and may break on newer versions)
        self.toolbar.home()
        self.toolbar._views.clear()
        self.toolbar._positions.clear()
        # reset plot
        self.axes.clear()
        self.axes.grid()
        # plot zeros and get store the returned Line2D object
        self.curve, = self.axes.plot(time, np.zeros(self.size))
        x1, x2, y1, y2 = self.axes.axis()
        # set y axis limits
        self.axes.axis((x1, x2, -0.1, 0.4))
        self.axes.set_xlabel('time, ms')
        self.canvas.draw()
        # set repetition time: at least twice the acquisition window, but
        # never below 100 ms
        minimum = self.size / rate * 2000.0
        if minimum < 100.0:
            minimum = 100.0
        self.deltaValue.setMinimum(minimum)
        self.deltaValue.setValue(minimum)
        if self.idle:
            return
        # command 1: sample-rate index
        self.socket.write(struct.pack('<I', 1<<28 | index))

    def set_awidth(self, value):
        if self.idle:
            return
        # command 2: pulse width, scaled by 10 before packing
        self.socket.write(struct.pack('<I', 2<<28 | int(1.0e1 * value)))

    def set_delta(self, value):
        if self.idle:
            return
        # restart the repetition timer with the new interval (ms)
        self.timer.stop()
        self.timer.start(value)

    def fire(self):
        if self.idle:
            return
        # command 3: trigger one pulse sequence
        self.socket.write(struct.pack('<I', 3<<28))


app = QApplication(sys.argv)
window = PulsedNMR()
window.show()
sys.exit(app.exec_())
schleichdi2/OPENNFR-6.0-CORE
refs/heads/master
opennfr-openembedded-core/meta/lib/oeqa/selftest/bblayers.py
1
import unittest
import os
import logging
import re
import shutil
import oeqa.utils.ftools as ftools

from oeqa.selftest.base import oeSelfTest
from oeqa.utils.commands import runCmd, get_bb_var
from oeqa.utils.decorators import testcase


class BitbakeLayers(oeSelfTest):
    """Self-tests for the bitbake-layers command-line tool.

    Each test shells out to ``bitbake-layers`` via runCmd and asserts on
    the command's textual output; they assume the meta-selftest layer is
    present in the build's bblayers.conf.
    """

    @testcase(756)
    def test_bitbakelayers_showcrossdepends(self):
        # 'aspell' is a known cross-layer dependency in the test setup.
        result = runCmd('bitbake-layers show-cross-depends')
        self.assertTrue('aspell' in result.output, msg = "No dependencies were shown. bitbake-layers show-cross-depends output: %s" % result.output)

    @testcase(83)
    def test_bitbakelayers_showlayers(self):
        result = runCmd('bitbake-layers show-layers')
        self.assertTrue('meta-selftest' in result.output, msg = "No layers were shown. bitbake-layers show-layers output: %s" % result.output)

    @testcase(93)
    def test_bitbakelayers_showappends(self):
        # The meta-selftest layer carries a bbappend for this recipe.
        recipe = "xcursor-transparent-theme"
        bb_file = self.get_recipe_basename(recipe)
        result = runCmd('bitbake-layers show-appends')
        self.assertTrue(bb_file in result.output, msg="%s file was not recognised. bitbake-layers show-appends output: %s" % (bb_file, result.output))

    @testcase(90)
    def test_bitbakelayers_showoverlayed(self):
        result = runCmd('bitbake-layers show-overlayed')
        self.assertTrue('aspell' in result.output, msg="aspell overlayed recipe was not recognised bitbake-layers show-overlayed %s" % result.output)

    @testcase(95)
    def test_bitbakelayers_flatten(self):
        # Flatten all layers into a scratch directory and verify that the
        # bbappend content was merged into the flattened recipe file.
        recipe = "xcursor-transparent-theme"
        recipe_path = "recipes-graphics/xcursor-transparent-theme"
        recipe_file = self.get_recipe_basename(recipe)
        testoutdir = os.path.join(self.builddir, 'test_bitbakelayers_flatten')
        self.assertFalse(os.path.isdir(testoutdir), msg = "test_bitbakelayers_flatten should not exist at this point in time")
        self.track_for_cleanup(testoutdir)
        result = runCmd('bitbake-layers flatten %s' % testoutdir)
        bb_file = os.path.join(testoutdir, recipe_path, recipe_file)
        self.assertTrue(os.path.isfile(bb_file), msg = "Cannot find xcursor-transparent-theme_0.1.1.bb in the test_bitbakelayers_flatten local dir.")
        contents = ftools.read_file(bb_file)
        find_in_contents = re.search("##### bbappended from meta-selftest #####\n(.*\n)*include test_recipe.inc", contents)
        self.assertTrue(find_in_contents, msg = "Flattening layers did not work. bitbake-layers flatten output: %s" % result.output)

    @testcase(1195)
    def test_bitbakelayers_add_remove(self):
        # Round-trip: add meta-skeleton, remove by path, re-add, then
        # remove again using a glob pattern.
        test_layer = os.path.join(get_bb_var('COREBASE'), 'meta-skeleton')
        result = runCmd('bitbake-layers show-layers')
        self.assertNotIn('meta-skeleton', result.output, "This test cannot run with meta-skeleton in bblayers.conf. bitbake-layers show-layers output: %s" % result.output)
        result = runCmd('bitbake-layers add-layer %s' % test_layer)
        result = runCmd('bitbake-layers show-layers')
        self.assertIn('meta-skeleton', result.output, msg = "Something wrong happened. meta-skeleton layer was not added to conf/bblayers.conf.  bitbake-layers show-layers output: %s" % result.output)
        result = runCmd('bitbake-layers remove-layer %s' % test_layer)
        result = runCmd('bitbake-layers show-layers')
        self.assertNotIn('meta-skeleton', result.output, msg = "meta-skeleton should have been removed at this step.  bitbake-layers show-layers output: %s" % result.output)
        result = runCmd('bitbake-layers add-layer %s' % test_layer)
        result = runCmd('bitbake-layers show-layers')
        self.assertIn('meta-skeleton', result.output, msg = "Something wrong happened. meta-skeleton layer was not added to conf/bblayers.conf.  bitbake-layers show-layers output: %s" % result.output)
        result = runCmd('bitbake-layers remove-layer */meta-skeleton')
        result = runCmd('bitbake-layers show-layers')
        self.assertNotIn('meta-skeleton', result.output, msg = "meta-skeleton should have been removed at this step.  bitbake-layers show-layers output: %s" % result.output)

    @testcase(1384)
    def test_bitbakelayers_showrecipes(self):
        # NOTE(review): 'distro' is assigned but never used below.
        distro = get_bb_var('DISTRO')
        result = runCmd('bitbake-layers show-recipes')
        self.assertIn('aspell:', result.output)
        self.assertIn('mtd-utils:', result.output)
        self.assertIn('core-image-minimal:', result.output)
        result = runCmd('bitbake-layers show-recipes mtd-utils')
        self.assertIn('mtd-utils:', result.output)
        self.assertNotIn('aspell:', result.output)
        result = runCmd('bitbake-layers show-recipes -i image')
        self.assertIn('core-image-minimal', result.output)
        self.assertNotIn('mtd-utils:', result.output)
        result = runCmd('bitbake-layers show-recipes -i cmake,pkgconfig')
        self.assertIn('libproxy:', result.output)
        self.assertNotIn('mtd-utils:', result.output)
        # doesn't inherit either
        self.assertNotIn('wget:', result.output)
        # doesn't inherit cmake
        self.assertNotIn('waffle:', result.output)
        # doesn't inherit pkgconfig
        result = runCmd('bitbake-layers show-recipes -i nonexistentclass', ignore_status=True)
        self.assertNotEqual(result.status, 0, 'bitbake-layers show-recipes -i nonexistentclass should have failed')
        self.assertIn('ERROR:', result.output)

    def get_recipe_basename(self, recipe):
        # Helper: resolve a recipe name to the basename of its .bb file
        # via 'bitbake-layers show-recipes -f'; fails the test if the
        # file cannot be found on disk.
        recipe_file = ""
        result = runCmd("bitbake-layers show-recipes -f %s" % recipe)
        for line in result.output.splitlines():
            if recipe in line:
                recipe_file = line
                break

        self.assertTrue(os.path.isfile(recipe_file), msg = "Can't find recipe file for %s" % recipe)
        return os.path.basename(recipe_file)
jianlirong/incubator-hawq
refs/heads/master
tools/bin/pythonSrc/unittest2-0.5.1/unittest2/suite.py
166
"""TestSuite"""

import sys
import unittest

from unittest2 import case, util

__unittest = True


class BaseTestSuite(unittest.TestSuite):
    """A simple test suite that doesn't provide class or module shared fixtures.
    """
    def __init__(self, tests=()):
        self._tests = []
        self.addTests(tests)

    def __repr__(self):
        return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return list(self) == list(other)

    def __ne__(self, other):
        return not self == other

    # Can't guarantee hash invariant, so flag as unhashable
    __hash__ = None

    def __iter__(self):
        return iter(self._tests)

    def countTestCases(self):
        # Sum over children; suites count recursively, cases count as 1.
        cases = 0
        for test in self:
            cases += test.countTestCases()
        return cases

    def addTest(self, test):
        # sanity checks
        if not hasattr(test, '__call__'):
            raise TypeError("%r is not callable" % (repr(test),))
        if isinstance(test, type) and issubclass(test,
                                                 (case.TestCase, TestSuite)):
            raise TypeError("TestCases and TestSuites must be instantiated "
                            "before passing them to addTest()")
        self._tests.append(test)

    def addTests(self, tests):
        # A string is iterable but is never a collection of tests.
        if isinstance(tests, basestring):
            raise TypeError("tests must be an iterable of tests, not a string")
        for test in tests:
            self.addTest(test)

    def run(self, result):
        for test in self:
            if result.shouldStop:
                break
            test(result)
        return result

    def __call__(self, *args, **kwds):
        return self.run(*args, **kwds)

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        for test in self:
            test.debug()


class TestSuite(BaseTestSuite):
    """A test suite is a composite test consisting of a number of TestCases.

    For use, create an instance of TestSuite, then add test case instances.
    When all tests have been added, the suite can be passed to a test
    runner, such as TextTestRunner. It will run the individual test cases
    in the order in which they were added, aggregating the results. When
    subclassing, do not forget to call the base class constructor.
    """

    def run(self, result):
        self._wrapped_run(result)
        # Passing None flushes any pending class/module teardown.
        self._tearDownPreviousClass(None, result)
        self._handleModuleTearDown(result)
        return result

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        debug = _DebugResult()
        self._wrapped_run(debug, True)
        self._tearDownPreviousClass(None, debug)
        self._handleModuleTearDown(debug)

    ################################
    # private methods
    def _wrapped_run(self, result, debug=False):
        # Run children while managing class- and module-level fixtures:
        # whenever the test class (or module) changes between consecutive
        # tests, tear down the previous one and set up the next.
        for test in self:
            if result.shouldStop:
                break

            if _isnotsuite(test):
                self._tearDownPreviousClass(test, result)
                self._handleModuleFixture(test, result)
                self._handleClassSetUp(test, result)
                result._previousTestClass = test.__class__

                # Skip tests whose class/module setup already failed.
                if (getattr(test.__class__, '_classSetupFailed', False) or
                    getattr(result, '_moduleSetUpFailed', False)):
                    continue

            if hasattr(test, '_wrapped_run'):
                test._wrapped_run(result, debug)
            elif not debug:
                test(result)
            else:
                test.debug()

    def _handleClassSetUp(self, test, result):
        # Call setUpClass once per new test class; record failures so the
        # class's tests are skipped and an error is attached to result.
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if result._moduleSetUpFailed:
            return
        if getattr(currentClass, "__unittest_skip__", False):
            return

        try:
            currentClass._classSetupFailed = False
        except TypeError:
            # test may actually be a function
            # so its class will be a builtin-type
            pass

        setUpClass = getattr(currentClass, 'setUpClass', None)
        if setUpClass is not None:
            try:
                setUpClass()
            except Exception, e:
                if isinstance(result, _DebugResult):
                    raise
                currentClass._classSetupFailed = True
                className = util.strclass(currentClass)
                errorName = 'setUpClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)

    def _get_previous_module(self, result):
        # Module of the previously-run test class, or None on first run.
        previousModule = None
        previousClass = getattr(result, '_previousTestClass', None)
        if previousClass is not None:
            previousModule = previousClass.__module__
        return previousModule

    def _handleModuleFixture(self, test, result):
        # When the module changes, tear down the old one and run the new
        # module's setUpModule (if any), recording failures on result.
        previousModule = self._get_previous_module(result)
        currentModule = test.__class__.__module__
        if currentModule == previousModule:
            return

        self._handleModuleTearDown(result)

        result._moduleSetUpFailed = False
        try:
            module = sys.modules[currentModule]
        except KeyError:
            return
        setUpModule = getattr(module, 'setUpModule', None)
        if setUpModule is not None:
            try:
                setUpModule()
            except Exception, e:
                if isinstance(result, _DebugResult):
                    raise
                result._moduleSetUpFailed = True
                errorName = 'setUpModule (%s)' % currentModule
                self._addClassOrModuleLevelException(result, e, errorName)

    def _addClassOrModuleLevelException(self, result, exception, errorName):
        # Attach a fixture-level failure to the result via a placeholder
        # "test"; SkipTest exceptions become skips when supported.
        error = _ErrorHolder(errorName)
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None and isinstance(exception, case.SkipTest):
            addSkip(error, str(exception))
        else:
            result.addError(error, sys.exc_info())

    def _handleModuleTearDown(self, result):
        # Run tearDownModule for the previous module, unless its setup
        # already failed; failures are recorded on result.
        previousModule = self._get_previous_module(result)
        if previousModule is None:
            return
        if result._moduleSetUpFailed:
            return

        try:
            module = sys.modules[previousModule]
        except KeyError:
            return

        tearDownModule = getattr(module, 'tearDownModule', None)
        if tearDownModule is not None:
            try:
                tearDownModule()
            except Exception, e:
                if isinstance(result, _DebugResult):
                    raise
                errorName = 'tearDownModule (%s)' % previousModule
                self._addClassOrModuleLevelException(result, e, errorName)

    def _tearDownPreviousClass(self, test, result):
        # Run tearDownClass for the previous class when the class changes
        # (test=None forces it); skipped if setup never ran successfully.
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if getattr(previousClass, '_classSetupFailed', False):
            return
        if getattr(result, '_moduleSetUpFailed', False):
            return
        if getattr(previousClass, "__unittest_skip__", False):
            return

        tearDownClass = getattr(previousClass, 'tearDownClass', None)
        if tearDownClass is not None:
            try:
                tearDownClass()
            except Exception, e:
                if isinstance(result, _DebugResult):
                    raise
                className = util.strclass(previousClass)
                errorName = 'tearDownClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)


class _ErrorHolder(object):
    """
    Placeholder for a TestCase inside a result. As far as a TestResult
    is concerned, this looks exactly like a unit test. Used to insert
    arbitrary errors into a test suite run.
    """
    # Inspired by the ErrorHolder from Twisted:
    # http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py

    # attribute used by TestResult._exc_info_to_string
    failureException = None

    def __init__(self, description):
        self.description = description

    def id(self):
        return self.description

    def shortDescription(self):
        return None

    def __repr__(self):
        return "<ErrorHolder description=%r>" % (self.description,)

    def __str__(self):
        return self.id()

    def run(self, result):
        # could call result.addError(...) - but this test-like object
        # shouldn't be run anyway
        pass

    def __call__(self, result):
        return self.run(result)

    def countTestCases(self):
        return 0


def _isnotsuite(test):
    "A crude way to tell apart testcases and suites with duck-typing"
    try:
        iter(test)
    except TypeError:
        return True
    return False


class _DebugResult(object):
    "Used by the TestSuite to hold previous class when running in debug."
    # Mirrors the attributes TestSuite reads off a real TestResult.
    _previousTestClass = None
    _moduleSetUpFailed = False
    shouldStop = False
MalloyPower/parsing-python
refs/heads/master
front-end/testsuite-python-lib/Python-2.2/Lib/test/test_cfgparser.py
2
# Regression tests for the Python 2 ConfigParser module.  Each helper below
# exercises one area of the parser; the module-level calls at the bottom feed
# them inline .ini-style data.  Failures are reported through
# test_support.verify()/TestFailed (pre-unittest style test script).
import ConfigParser
import StringIO
from test_support import TestFailed, verify


def basic(src):
    """Exercise sections(), get(), options(), has_option(), remove_option()."""
    print "Testing basic accessors..."
    cf = ConfigParser.ConfigParser()
    sio = StringIO.StringIO(src)
    cf.readfp(sio)
    L = cf.sections()
    L.sort()
    # Section order from the parser is arbitrary, hence the sort above.
    verify(L == [r'Commented Bar',
                 r'Foo Bar',
                 r'Internationalized Stuff',
                 r'Section\with$weird%characters[' '\t',
                 r'Spacey Bar',
                 ],
           "unexpected list of section names")
    # The use of spaces in the section names serves as a regression test for
    # SourceForge bug #115357.
    # http://sourceforge.net/bugs/?func=detailbug&group_id=5470&bug_id=115357
    verify(cf.get('Foo Bar', 'foo', raw=1) == 'bar')
    verify(cf.get('Spacey Bar', 'foo', raw=1) == 'bar')
    verify(cf.get('Commented Bar', 'foo', raw=1) == 'bar')
    # __name__ is stored internally per section but must stay hidden.
    verify('__name__' not in cf.options("Foo Bar"),
           '__name__ "option" should not be exposed by the API!')

    # Make sure the right things happen for remove_option();
    # added to include check for SourceForge bug #123324:
    verify(cf.remove_option('Foo Bar', 'foo'),
           "remove_option() failed to report existance of option")
    verify(not cf.has_option('Foo Bar', 'foo'),
           "remove_option() failed to remove option")
    verify(not cf.remove_option('Foo Bar', 'foo'),
           "remove_option() failed to report non-existance of option"
           " that was removed")
    # Removing from a missing section must raise, not silently succeed.
    try:
        cf.remove_option('No Such Section', 'foo')
    except ConfigParser.NoSectionError:
        pass
    else:
        raise TestFailed(
            "remove_option() failed to report non-existance of option"
            " that never existed")


def case_sensitivity():
    """Check that section names are case-sensitive but option names are not."""
    print "Testing case sensitivity..."
    cf = ConfigParser.ConfigParser()
    cf.add_section("A")
    cf.add_section("a")
    L = cf.sections()
    L.sort()
    # "A" and "a" are distinct sections.
    verify(L == ["A", "a"])
    cf.set("a", "B", "value")
    # Option names are normalized to lower case.
    verify(cf.options("a") == ["b"])
    verify(cf.get("a", "b", raw=1) == "value",
           "could not locate option, expecting case-insensitive option names")
    verify(cf.has_option("a", "b"))
    cf.set("A", "A-B", "A-B value")
    for opt in ("a-b", "A-b", "a-B", "A-B"):
        verify(cf.has_option("A", opt),
               "has_option() returned false for option which should exist")
    verify(cf.options("A") == ["a-b"])
    verify(cf.options("a") == ["b"])
    cf.remove_option("a", "B")
    verify(cf.options("a") == [])

    # SF bug #432369: continuation lines must be joined with "\n" and the
    # option name reported in lower case.
    cf = ConfigParser.ConfigParser()
    sio = StringIO.StringIO("[MySection]\nOption: first line\n\tsecond line\n")
    cf.readfp(sio)
    verify(cf.options("MySection") == ["option"])
    verify(cf.get("MySection", "Option") == "first line\nsecond line")


def boolean(src):
    """Check getboolean() for true, false and invalid option values."""
    print "Testing interpretation of boolean Values..."
    cf = ConfigParser.ConfigParser()
    sio = StringIO.StringIO(src)
    cf.readfp(sio)
    # t1..t4 are spellings of "true", f1..f4 spellings of "false";
    # e1..e4 are invalid and must raise ValueError.
    for x in range(1, 5):
        verify(cf.getboolean('BOOLTEST', 't%d' % (x)) == 1)
    for x in range(1, 5):
        verify(cf.getboolean('BOOLTEST', 'f%d' % (x)) == 0)
    for x in range(1, 5):
        try:
            cf.getboolean('BOOLTEST', 'e%d' % (x))
        except ValueError:
            pass
        else:
            raise TestFailed(
                "getboolean() failed to report a non boolean value")


def interpolation(src):
    """Check %(name)s value interpolation, including the depth limit."""
    print "Testing value interpolation..."
    # The defaults dict makes "%(__name__)s" resolve to the section name.
    cf = ConfigParser.ConfigParser({"getname": "%(__name__)s"})
    sio = StringIO.StringIO(src)
    cf.readfp(sio)
    verify(cf.get("Foo", "getname") == "Foo")
    verify(cf.get("Foo", "bar") == "something with interpolation (1 step)")
    verify(cf.get("Foo", "bar9")
           == "something with lots of interpolation (9 steps)")
    verify(cf.get("Foo", "bar10")
           == "something with lots of interpolation (10 steps)")
    # Eleven levels of indirection exceeds MAX_INTERPOLATION_DEPTH.
    expect_get_error(cf, ConfigParser.InterpolationDepthError, "Foo", "bar11")


def parse_errors():
    """Check that malformed input raises the right parsing exceptions."""
    print "Testing parse errors..."
    # Leading whitespace before an option name is a parse error.
    expect_parse_error(ConfigParser.ParsingError,
                       """[Foo]\n extra-spaces: splat\n""")
    expect_parse_error(ConfigParser.ParsingError,
                       """[Foo]\n extra-spaces= splat\n""")
    # An option must have a ':' or '=' delimiter and a name.
    expect_parse_error(ConfigParser.ParsingError,
                       """[Foo]\noption-without-value\n""")
    expect_parse_error(ConfigParser.ParsingError,
                       """[Foo]\n:value-without-option-name\n""")
    expect_parse_error(ConfigParser.ParsingError,
                       """[Foo]\n=value-without-option-name\n""")
    # Content before the first section header is an error.
    expect_parse_error(ConfigParser.MissingSectionHeaderError,
                       """No Section!\n""")


def query_errors():
    """Check query methods on an empty parser and missing sections/options."""
    print "Testing query interface..."
    cf = ConfigParser.ConfigParser()
    verify(cf.sections() == [],
           "new ConfigParser should have no defined sections")
    verify(not cf.has_section("Foo"),
           "new ConfigParser should have no acknowledged sections")
    try:
        cf.options("Foo")
    except ConfigParser.NoSectionError, e:
        pass
    else:
        raise TestFailed(
            "Failed to catch expected NoSectionError from options()")
    try:
        cf.set("foo", "bar", "value")
    except ConfigParser.NoSectionError, e:
        pass
    else:
        raise TestFailed("Failed to catch expected NoSectionError from set()")
    expect_get_error(cf, ConfigParser.NoSectionError, "foo", "bar")
    cf.add_section("foo")
    # Section now exists, so a missing option is the remaining failure mode.
    expect_get_error(cf, ConfigParser.NoOptionError, "foo", "bar")


def weird_errors():
    """Check miscellaneous error conditions (duplicate sections)."""
    print "Testing miscellaneous error conditions..."
    cf = ConfigParser.ConfigParser()
    cf.add_section("Foo")
    try:
        cf.add_section("Foo")
    except ConfigParser.DuplicateSectionError, e:
        pass
    else:
        raise TestFailed("Failed to catch expected DuplicateSectionError")


def expect_get_error(cf, exctype, section, option, raw=0):
    # Helper: assert that cf.get(section, option) raises `exctype`.
    try:
        cf.get(section, option, raw=raw)
    except exctype, e:
        pass
    else:
        raise TestFailed("Failed to catch expected " + exctype.__name__)


def expect_parse_error(exctype, src):
    # Helper: assert that parsing `src` raises `exctype`.
    cf = ConfigParser.ConfigParser()
    sio = StringIO.StringIO(src)
    try:
        cf.readfp(sio)
    except exctype, e:
        pass
    else:
        raise TestFailed("Failed to catch expected " + exctype.__name__)


# Run the test battery with inline configuration data.  The string pasting
# in the "Section\with$weird%characters[" header deliberately embeds a tab.
basic(r"""
[Foo Bar]
foo=bar
[Spacey Bar]
foo = bar
[Commented Bar]
foo: bar ; comment
[Section\with$weird%characters[""" '\t' r"""]
[Internationalized Stuff]
foo[bg]: Bulgarian
foo=Default
foo[en]=English
foo[de]=Deutsch
""")
case_sensitivity()
boolean(r"""
[BOOLTEST]
T1=1
T2=TRUE
T3=True
T4=oN
T5=yes
F1=0
F2=FALSE
F3=False
F4=oFF
F5=nO
E1=2
E2=foo
E3=-1
E4=0.1
E5=FALSE AND MORE
""")
interpolation(r"""
[Foo]
bar=something %(with1)s interpolation (1 step)
bar9=something %(with9)s lots of interpolation (9 steps)
bar10=something %(with10)s lots of interpolation (10 steps)
bar11=something %(with11)s lots of interpolation (11 steps)
with11=%(with10)s
with10=%(with9)s
with9=%(with8)s
with8=%(with7)s
with7=%(with6)s
with6=%(with5)s
with5=%(with4)s
with4=%(with3)s
with3=%(with2)s
with2=%(with1)s
with1=with

[Mutual Recursion]
foo=%(bar)s
bar=%(foo)s
""")
parse_errors()
query_errors()
weird_errors()
giuliov/ansible
refs/heads/devel
lib/ansible/parsing/yaml/__init__.py
7690
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type
nugget/home-assistant
refs/heads/dev
homeassistant/components/sensor/__init__.py
4
""" Component to interface with various sensors that can be monitored. For more details about this component, please refer to the documentation at https://home-assistant.io/components/sensor/ """ from datetime import timedelta import logging import voluptuous as vol from homeassistant.helpers.entity_component import EntityComponent from homeassistant.helpers.config_validation import ( # noqa PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE) from homeassistant.const import ( DEVICE_CLASS_BATTERY, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_ILLUMINANCE, DEVICE_CLASS_TEMPERATURE, DEVICE_CLASS_TIMESTAMP, DEVICE_CLASS_PRESSURE) _LOGGER = logging.getLogger(__name__) DOMAIN = 'sensor' ENTITY_ID_FORMAT = DOMAIN + '.{}' SCAN_INTERVAL = timedelta(seconds=30) DEVICE_CLASSES = [ DEVICE_CLASS_BATTERY, # % of battery that is left DEVICE_CLASS_HUMIDITY, # % of humidity in the air DEVICE_CLASS_ILLUMINANCE, # current light level (lx/lm) DEVICE_CLASS_TEMPERATURE, # temperature (C/F) DEVICE_CLASS_TIMESTAMP, # timestamp (ISO8601) DEVICE_CLASS_PRESSURE, # pressure (hPa/mbar) ] DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES)) async def async_setup(hass, config): """Track states and offer events for sensors.""" component = hass.data[DOMAIN] = EntityComponent( _LOGGER, DOMAIN, hass, SCAN_INTERVAL) await component.async_setup(config) return True async def async_setup_entry(hass, entry): """Set up a config entry.""" return await hass.data[DOMAIN].async_setup_entry(entry) async def async_unload_entry(hass, entry): """Unload a config entry.""" return await hass.data[DOMAIN].async_unload_entry(entry)
cgstudiomap/cgstudiomap
refs/heads/develop
main/eggs/geojson-1.3.1-py2.7.egg/geojson/utils.py
1
"""Coordinate utility functions.""" def coords(obj): """ Yields the coordinates from a Feature or Geometry. :param obj: A geometry or feature to extract the coordinates from." :type obj: Feature, Geometry :return: A generator with coordinate tuples from the geometry or feature. :rtype: generator """ if isinstance(obj, (tuple, list)): coordinates = obj elif 'geometry' in obj: coordinates = obj['geometry']['coordinates'] else: coordinates = obj.get('coordinates', obj) for e in coordinates: if isinstance(e, (float, int)): yield tuple(coordinates) break for f in coords(e): yield f def map_coords(func, obj): """ Returns the coordinates from a Geometry after applying the provided function to the tuples. :param obj: A geometry or feature to extract the coordinates from. :type obj: Point, LineString, MultiPoint, MultiLineString, Polygon, MultiPolygon :return: The result of applying the function to each coordinate array. :rtype: list :raises ValueError: if the provided object is not a Geometry. """ if obj['type'] == 'Point': coordinates = tuple(map(func, obj['coordinates'])) elif obj['type'] in ['LineString', 'MultiPoint']: coordinates = [tuple(map(func, c)) for c in obj['coordinates']] elif obj['type'] in ['MultiLineString', 'Polygon']: coordinates = [[ tuple(map(func, c)) for c in curve] for curve in obj['coordinates']] elif obj['type'] == 'MultiPolygon': coordinates = [[[ tuple(map(func, c)) for c in curve] for curve in part] for part in obj['coordinates']] else: raise ValueError("Invalid geometry object %s" % repr(obj)) return {'type': obj['type'], 'coordinates': coordinates} def generate_random(featureType, numberVertices=3, boundingBox=[-180.0, -90.0, 180.0, 90.0]): """ Generates random geojson features depending on the parameters passed through. 
:param featureType: A geometry type :type string: Point, LineString, Polygon :param numberVertices: The number vertices that a linestring or polygon will have :type int: defaults to 3 :param boundingBox: A bounding box in which features will be restricted to :type list: defaults to the world - [-180.0, -90.0, 180.0, 90.0] :return: The resulting random geojson object or geometry collection. :rtype: object :raises ValueError: if there is no featureType provided. """ from geojson import Point, LineString, Polygon import random import math lonMin = boundingBox[0] lonMax = boundingBox[2] def randomLon(): return random.uniform(lonMin, lonMax) latMin = boundingBox[1] latMax = boundingBox[3] def randomLat(): return random.uniform(latMin, latMax) def createPoint(): return Point((randomLon(), randomLat())) def createLine(): coords = [] for i in range(0, numberVertices): coords.append((randomLon(), randomLat())) return LineString(coords) def createPoly(): aveRadius = 60 ctrX = 0.1 ctrY = 0.2 irregularity = clip(0.1, 0, 1) * 2 * math.pi / numberVertices spikeyness = clip(0.5, 0, 1) * aveRadius angleSteps = [] lower = (2 * math.pi / numberVertices) - irregularity upper = (2 * math.pi / numberVertices) + irregularity sum = 0 for i in range(numberVertices): tmp = random.uniform(lower, upper) angleSteps.append(tmp) sum = sum + tmp k = sum / (2 * math.pi) for i in range(numberVertices): angleSteps[i] = angleSteps[i] / k points = [] angle = random.uniform(0, 2 * math.pi) for i in range(numberVertices): r_i = clip(random.gauss(aveRadius, spikeyness), 0, 2 * aveRadius) x = ctrX + r_i * math.cos(angle) y = ctrY + r_i * math.sin(angle) points.append((int(x), int(y))) angle = angle + angleSteps[i] firstVal = points[0] points.append(firstVal) return Polygon([points]) def clip(x, min, max): if(min > max): return x elif(x < min): return min elif(x > max): return max else: return x if featureType == 'Point': return createPoint() if featureType == 'LineString': return createLine() if 
featureType == 'Polygon': return createPoly()
GeyerA/android_external_chromium_org
refs/heads/master
chrome/test/functional/stress.py
65
#!/usr/bin/env python # Copyright (c) 2011 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Stess Tests for Google Chrome. This script runs 4 different stress tests: 1. Plugin stress. 2. Back and forward stress. 3. Download stress. 4. Preference stress. After every cycle (running all 4 stress tests) it checks for crashes. If there are any crashes, the script generates a report, uploads it to a server and mails about the crash and the link to the report on the server. Apart from this whenever the test stops on mac it looks for and reports zombies. Prerequisites: Test needs the following files/folders in the Data dir. 1. A crash_report tool in "pyauto_private/stress/mac" folder for use on Mac. 2. A "downloads" folder containing stress_downloads and all the files referenced in it. 3. A pref_dict file in "pyauto_private/stress/mac" folder. 4. A "plugin" folder containing doubleAnimation.xaml, flash.swf, FlashSpin.swf, generic.html, get_flash_player.gif, js-invoker.swf, mediaplayer.wmv, NavigatorTicker11.class, Plugins_page.html, sample5.mov, silverlight.xaml, silverlight.js, embed.pdf, plugins_page.html and test6.swf. 5. A stress_pref file in "pyauto_private/stress". 
""" import commands import glob import logging import os import random import re import shutil import sys import time import urllib import test_utils import subprocess import pyauto_functional import pyauto import pyauto_utils CRASHES = 'crashes' # Name of the folder to store crashes class StressTest(pyauto.PyUITest): """Run all the stress tests.""" flash_url1 = pyauto.PyUITest.GetFileURLForPath( os.path.join(pyauto.PyUITest.DataDir(), 'plugin', 'flash.swf')) flash_url2 = pyauto.PyUITest.GetFileURLForPath( os.path.join(pyauto.PyUITest.DataDir(),'plugin', 'js-invoker.swf')) flash_url3 = pyauto.PyUITest.GetFileURLForPath( os.path.join(pyauto.PyUITest.DataDir(), 'plugin', 'generic.html')) plugin_url = pyauto.PyUITest.GetFileURLForPath( os.path.join(pyauto.PyUITest.DataDir(), 'plugin', 'plugins_page.html')) empty_url = pyauto.PyUITest.GetFileURLForPath( os.path.join(pyauto.PyUITest.DataDir(), 'empty.html')) download_url1 = pyauto.PyUITest.GetFileURLForPath( os.path.join(pyauto.PyUITest.DataDir(), 'downloads', 'a_zip_file.zip')) download_url2 = pyauto.PyUITest.GetFileURLForPath( os.path.join(pyauto.PyUITest.DataDir(),'zip', 'test.zip')) file_list = pyauto.PyUITest.EvalDataFrom( os.path.join(pyauto.PyUITest.DataDir(), 'downloads', 'stress_downloads')) symbols_dir = os.path.join(os.getcwd(), 'Build_Symbols') stress_pref = pyauto.PyUITest.EvalDataFrom( os.path.join(pyauto.PyUITest.DataDir(), 'pyauto_private', 'stress', 'stress_pref')) breakpad_dir = None chrome_version = None bookmarks_list = [] def _FuncDir(self): """Returns the path to the functional dir chrome/test/functional.""" return os.path.dirname(__file__) def _DownloadSymbols(self): """Downloads the symbols for the build being tested.""" download_location = os.path.join(os.getcwd(), 'Build_Symbols') if os.path.exists(download_location): shutil.rmtree(download_location) os.makedirs(download_location) url = self.stress_pref['symbols_dir'] + self.chrome_version # TODO: Add linux symbol_files if self.IsWin(): url = 
url + '/win/' symbol_files = ['chrome.dll.pdb', 'chrome.exe.pdb'] elif self.IsMac(): url = url + '/mac/' symbol_files = map(urllib.quote, ['Google Chrome Framework.framework', 'Google Chrome Helper.app', 'Google Chrome.app', 'crash_inspector', 'crash_report_sender', 'ffmpegsumo.so', 'libplugin_carbon_interpose.dylib']) index = 0 symbol_files = ['%s-%s-i386.breakpad' % (sym_file, self.chrome_version) \ for sym_file in symbol_files] logging.info(symbol_files) for sym_file in symbol_files: sym_url = url + sym_file logging.info(sym_url) download_sym_file = os.path.join(download_location, sym_file) logging.info(download_sym_file) urllib.urlretrieve(sym_url, download_sym_file) def setUp(self): pyauto.PyUITest.setUp(self) self.breakpad_dir = self._CrashDumpFolder() self.chrome_version = self.GetBrowserInfo()['properties']['ChromeVersion'] # Plugin stress functions def _CheckForPluginProcess(self, plugin_name): """Checks if a particular plugin process exists. Args: plugin_name : plugin process which should be running. """ process = self.GetBrowserInfo()['child_processes'] self.assertTrue([x for x in process if x['type'] == 'Plug-in' and x['name'] == plugin_name]) def _GetPluginProcessId(self, plugin_name): """Get Plugin process id. Args: plugin_name: Plugin whose pid is expected. Eg: "Shockwave Flash" Returns: Process id if the plugin process is running. None otherwise. 
""" for process in self.GetBrowserInfo()['child_processes']: if process['type'] == 'Plug-in' and \ re.search(plugin_name, process['name']): return process['pid'] return None def _CloseAllTabs(self): """Close all but one tab in first window.""" tab_count = self.GetTabCount(0) for tab_index in xrange(tab_count - 1, 0, -1): self.CloseTab(tab_index) def _CloseAllWindows(self): """Close all windows except one.""" win_count = self.GetBrowserWindowCount() for windex in xrange(win_count - 1, 0, -1): self.RunCommand(pyauto.IDC_CLOSE_WINDOW, windex) def _ReloadAllTabs(self): """Reload all the tabs in first window.""" for tab_index in range(self.GetTabCount()): self.ReloadTab(tab_index) def _LoadFlashInMultipleTabs(self): """Load Flash in multiple tabs in first window.""" self.NavigateToURL(self.empty_url) # Open 18 tabs with flash for _ in range(9): self.AppendTab(pyauto.GURL(self.flash_url1)) self.AppendTab(pyauto.GURL(self.flash_url2)) def _OpenAndCloseMultipleTabsWithFlash(self): """Stress test for flash in multiple tabs.""" logging.info("In _OpenAndCloseMultipleWindowsWithFlash.") self._LoadFlashInMultipleTabs() self._CheckForPluginProcess('Shockwave Flash') self._CloseAllTabs() def _OpenAndCloseMultipleWindowsWithFlash(self): """Stress test for flash in multiple windows.""" logging.info('In _OpenAndCloseMultipleWindowsWithFlash.') # Open 5 Normal and 4 Incognito windows for tab_index in range(1, 10): if tab_index < 6: self.OpenNewBrowserWindow(True) else: self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW) self.NavigateToURL(self.flash_url2, tab_index, 0) self.AppendTab(pyauto.GURL(self.flash_url2), tab_index) self._CloseAllWindows() def _OpenAndCloseMultipleTabsWithMultiplePlugins(self): """Stress test using multiple plugins in multiple tabs.""" logging.info('In _OpenAndCloseMultipleTabsWithMultiplePlugins.') # Append 4 tabs with URL for _ in range(5): self.AppendTab(pyauto.GURL(self.plugin_url)) self._CloseAllTabs() def 
_OpenAndCloseMultipleWindowsWithMultiplePlugins(self): """Stress test using multiple plugins in multiple windows.""" logging.info('In _OpenAndCloseMultipleWindowsWithMultiplePlugins.') # Open 4 windows with URL for tab_index in range(1, 5): if tab_index < 6: self.OpenNewBrowserWindow(True) else: self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW) self.NavigateToURL(self.plugin_url, tab_index, 0) self._CloseAllWindows() def _KillAndReloadFlash(self): """Stress test by killing flash process and reloading tabs.""" self._LoadFlashInMultipleTabs() flash_process_id1 = self._GetPluginProcessId('Shockwave Flash') self.Kill(flash_process_id1) self._ReloadAllTabs() self._CloseAllTabs() def _KillAndReloadRenderersWithFlash(self): """Stress test by killing renderer processes and reloading tabs.""" logging.info('In _KillAndReloadRenderersWithFlash') self._LoadFlashInMultipleTabs() info = self.GetBrowserInfo() # Kill all renderer processes for tab_index in range(self.GetTabCount(0)): self.KillRendererProcess( info['windows'][0]['tabs'][tab_index]['renderer_pid']) self._ReloadAllTabs() self._CloseAllTabs() def _TogglePlugin(self, plugin_name): """Toggle plugin status. Args: plugin_name: Name of the plugin to toggle. 
""" plugins = self.GetPluginsInfo().Plugins() for item in range(len(plugins)): if re.search(plugin_name, plugins[item]['name']): if plugins[item]['enabled']: self.DisablePlugin(plugins[item]['path']) else: self.EnablePlugin(plugins[item]['path']) def _ToggleAndReloadFlashPlugin(self): """Toggle flash and reload all tabs.""" logging.info('In _ToggleAndReloadFlashPlugin') for _ in range(10): self.AppendTab(pyauto.GURL(self.flash_url3)) # Disable Flash Plugin self._TogglePlugin('Shockwave Flash') self._ReloadAllTabs() # Enable Flash Plugin self._TogglePlugin('Shockwave Flash') self._ReloadAllTabs() self._CloseAllTabs() # Downloads stress functions def _LoadDownloadsInMultipleTabs(self): """Load Downloads in multiple tabs in the same window.""" # Open 15 tabs with downloads logging.info('In _LoadDownloadsInMultipleTabs') for tab_index in range(15): # We open an empty tab and then downlad a file from it. self.AppendTab(pyauto.GURL(self.empty_url)) self.NavigateToURL(self.download_url1, 0, tab_index + 1) self.AppendTab(pyauto.GURL(self.empty_url)) self.NavigateToURL(self.download_url2, 0, tab_index + 2) def _OpenAndCloseMultipleTabsWithDownloads(self): """Download items in multiple tabs.""" logging.info('In _OpenAndCloseMultipleTabsWithDownloads') self._LoadDownloadsInMultipleTabs() self._CloseAllTabs() def _OpenAndCloseMultipleWindowsWithDownloads(self): """Randomly have downloads in multiple windows.""" logging.info('In _OpenAndCloseMultipleWindowsWithDownloads') # Open 15 Windows randomly on both regular and incognito with downloads for window_index in range(15): tick = round(random.random() * 100) if tick % 2 != 0: self.NavigateToURL(self.download_url2, 0, 0) else: self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW) self.AppendTab(pyauto.GURL(self.empty_url), 1) self.NavigateToURL(self.download_url2, 1, 1) self._CloseAllWindows() def _OpenAndCloseMultipleTabsWithMultipleDownloads(self): """Download multiple items in multiple tabs.""" logging.info('In 
_OpenAndCloseMultipleTabsWithMultipleDownloads') self.NavigateToURL(self.empty_url) for _ in range(15): for file in self.file_list: count = 1 url = self.GetFileURLForPath( os.path.join(self.DataDir(), 'downloads', file)) self.AppendTab(pyauto.GURL(self.empty_url)) self.NavigateToURL(url, 0, count) count = count + 1 self._CloseAllTabs() def _OpenAndCloseMultipleWindowsWithMultipleDownloads(self): """Randomly multiple downloads in multiple windows.""" logging.info('In _OpenAndCloseMultipleWindowsWithMultipleDownloads') for _ in range(15): for file in self.file_list: tick = round(random.random() * 100) url = self.GetFileURLForPath( os.path.join(self.DataDir(), 'downloads', file)) if tick % 2!= 0: self.NavigateToURL(url, 0, 0) else: self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW) self.AppendTab(pyauto.GURL(self.empty_url), 1) self.NavigateToURL(url, 1, 1) self._CloseAllWindows() # Back and Forward stress functions def _BrowserGoBack(self, window_index): """Go back in the browser history. Chrome has limitation on going back and can only go back 49 pages. Args: window_index: the index of the browser window to work on. """ for nback in range(48): # Go back 48 times. if nback % 4 == 0: # Bookmark every 5th url when going back. self._BookMarkEvery5thURL(window_index) self.TabGoBack(tab_index=0, windex=window_index) def _BrowserGoForward(self, window_index): """Go Forward in the browser history. Chrome has limitation on going back and can only go back 49 pages. Args: window_index: the index of the browser window to work on. """ for nforward in range(48): # Go back 48 times. if nforward % 4 == 0: # Bookmark every 5th url when going Forward self._BookMarkEvery5thURL(window_index) self.TabGoForward(tab_index=0, windex=window_index) def _AddToListAndBookmark(self, newname, url): """Bookmark the url to bookmarkbar and to he list of bookmarks. Args: newname: the name of the bookmark. url: the url to bookmark. 
""" bookmarks = self.GetBookmarkModel() bar_id = bookmarks.BookmarkBar()['id'] self.AddBookmarkURL(bar_id, 0, newname, url) self.bookmarks_list.append(newname) def _RemoveFromListAndBookmarkBar(self, name): """Remove the bookmark bor and bookmarks list. Args: name: the name of bookmark to remove. """ bookmarks = self.GetBookmarkModel() node = bookmarks.FindByTitle(name) self.RemoveBookmark(node[0]['id']) self.bookmarks_list.remove(name) def _DuplicateBookmarks(self, name): """Find duplicate bookmark in the bookmarks list. Args: name: name of the bookmark. Returns: True if it's a duplicate. """ for index in (self.bookmarks_list): if index == name: return True return False def _BookMarkEvery5thURL(self, window_index): """Check for duplicate in list and bookmark current url. If its the first time and list is empty add the bookmark. If its a duplicate remove the bookmark. If its new tab page move over. Args: window_index: the index of the browser window to work on. """ tab_title = self.GetActiveTabTitle(window_index) # get the page title url = self.GetActiveTabURL(window_index).spec() # get the page url if not self.bookmarks_list: self._AddToListAndBookmark(tab_title, url) # first run bookmark the url return elif self._DuplicateBookmarks(tab_title): self._RemoveFromListAndBookmarkBar(tab_title) return elif tab_title == 'New Tab': # new tab page pass over return else: # new bookmark add it to bookmarkbar self._AddToListAndBookmark(tab_title, url) return def _ReadFileAndLoadInNormalAndIncognito(self): """Read urls and load them in normal and incognito window. We load 96 urls only as we can go back and forth 48 times. Uses time to get different urls in normal and incognito window The source file is taken from stress folder in /data folder. 
""" # URL source from stress folder in data folder data_file = os.path.join(self.DataDir(), 'pyauto_private', 'stress', 'urls_and_titles') url_data = self.EvalDataFrom(data_file) urls = url_data.keys() i = 0 ticks = int(time.time()) # get the latest time. for url in urls: if i <= 96 : # load only 96 urls. if ticks % 2 == 0: # loading in Incognito and Normal window. self.NavigateToURL(url) else: self.NavigateToURL(url, 1, 0) else: break ticks = ticks - 1 i += 1 return def _StressTestNavigation(self): """ This is the method from where various navigations are called. First we load the urls then call navigete back and forth in incognito window then in normal window. """ self._ReadFileAndLoadInNormalAndIncognito() # Load the urls. self._BrowserGoBack(1) # Navigate back in incognito window. self._BrowserGoForward(1) # Navigate forward in incognito window self._BrowserGoBack(0) # Navigate back in normal window self._BrowserGoForward(0) # Navigate forward in normal window # Preference stress functions def _RandomBool(self): """For any preferences bool value, it takes True or False value. We are generating random True or False value. """ return random.randint(0, 1) == 1 def _RandomURL(self): """Some of preferences take string url, so generating random url here.""" # Site list site_list = ['test1.html', 'test2.html','test3.html','test4.html', 'test5.html', 'test7.html', 'test6.html'] random_site = random.choice(site_list) # Returning a url of random site return self.GetFileURLForPath(os.path.join(self.DataDir(), random_site)) def _RandomURLArray(self): """Returns a list of 10 random URLs.""" return [self._RandomURL() for _ in range(10)] def _RandomInt(self, max_number): """Some of the preferences takes integer value. Eg: If there are three options, we generate random value for any option. Arg: max_number: The number of options that a preference has. 
""" return random.randrange(1, max_number) def _RandomDownloadDir(self): """Returns a random download directory.""" return random.choice(['dl_dir1', 'dl_dir2', 'dl_dir3', 'dl_dir4', 'dl_dir5']) def _SetPref(self): """Reads the preferences from file and sets the preferences to Chrome. """ raw_dictionary = self.EvalDataFrom(os.path.join(self.DataDir(), 'pyauto_private', 'stress', 'pref_dict')) value_dictionary = {} for key, value in raw_dictionary.iteritems(): if value == 'BOOL': value_dictionary[key] = self._RandomBool() elif value == 'STRING_URL': value_dictionary[key] = self._RandomURL() elif value == 'ARRAY_URL': value_dictionary[key] = self._RandomURLArray() elif value == 'STRING_PATH': value_dictionary[key] = self._RandomDownloadDir() elif value[0:3] == 'INT': # Normally we difine INT datatype with number of options, # so parsing number of options and selecting any of them # randomly. value_dictionary[key] = 1 max_number = raw_dictionary[key][3:4] if not max_number == 1: value_dictionary[key]= self._RandomInt(int(max_number)) self.SetPrefs(getattr(pyauto,key), value_dictionary[key]) return value_dictionary # Crash reporting functions def _CrashDumpFolder(self): """Get the breakpad folder. Returns: The full path of the Crash Reports folder. 
""" breakpad_folder = self.GetBrowserInfo()['properties']['DIR_CRASH_DUMPS'] self.assertTrue(breakpad_folder, 'Cannot figure crash dir') return breakpad_folder def _DeleteDumps(self): """Delete all the dump files in teh Crash Reports folder.""" # should be called at the start of stress run if os.path.exists(self.breakpad_dir): logging.info('xxxxxxxxxxxxxxxINSIDE DELETE DUMPSxxxxxxxxxxxxxxxxx') if self.IsMac(): shutil.rmtree(self.breakpad_dir) elif self.IsWin(): files = os.listdir(self.breakpad_dir) for file in files: os.remove(file) first_crash = os.path.join(os.getcwd(), '1stcrash') crashes_dir = os.path.join(os.getcwd(), 'crashes') if (os.path.exists(crashes_dir)): shutil.rmtree(crashes_dir) shutil.rmtree(first_crash) def _SymbolicateCrashDmp(self, dmp_file, symbols_dir, output_file): """Generate symbolicated crash report. Args: dmp_file: the dmp file to symbolicate. symbols_dir: the directory containing the symbols. output_file: the output file. Returns: Crash report text. """ report = '' if self.IsWin(): windbg_cmd = [ os.path.join('C:', 'Program Files', 'Debugging Tools for Windows', 'windbg.exe'), '-Q', '-y', '\"', symbols_dir, '\"', '-c', '\".ecxr;k50;.logclose;q\"', '-logo', output_file, '-z', '\"', dmp_file, '\"'] subprocess.call(windbg_cmd) # Since we are directly writing the info into output_file, # we just need to copy that in to report report = open(output_file, 'r').read() elif self.IsMac(): crash_report = os.path.join(self.DataDir(), 'pyauto_private', 'stress', 'mac', 'crash_report') for i in range(5): # crash_report doesn't work sometimes. So we retry report = test_utils.Shell2( '%s -S "%s" "%s"' % (crash_report, symbols_dir, dmp_file))[0] if len(report) < 200: try_again = 'Try %d. crash_report didn\'t work out. Trying again', i logging.info(try_again) else: break open(output_file, 'w').write(report) return report def _SaveSymbols(self, symbols_dir, dump_dir=' ', multiple_dumps=True): """Save the symbolicated files for all crash dumps. 
Args: symbols_dir: the directory containing the symbols. dump_dir: Path to the directory holding the crash dump files. multiple_dumps: True if we are processing multiple dump files, False if we are processing only the first crash. """ if multiple_dumps: dump_dir = self.breakpad_dir if not os.path.isdir(CRASHES): os.makedirs(CRASHES) # This will be sent to the method by the caller. dmp_files = glob.glob(os.path.join(dump_dir, '*.dmp')) for dmp_file in dmp_files: dmp_id = os.path.splitext(os.path.basename(dmp_file))[0] if multiple_dumps: report_folder = CRASHES else: report_folder = dump_dir report_fname = os.path.join(report_folder, '%s.txt' % (dmp_id)) report = self._SymbolicateCrashDmp(dmp_file, symbols_dir, report_fname) if report == '': logging.info('Crash report is empty.') # This is for copying the original dumps. if multiple_dumps: shutil.copy2(dmp_file, CRASHES) def _GetFirstCrashDir(self): """Get first crash file in the crash folder. Here we create the 1stcrash directory which holds the first crash report, which will be attached to the mail. 
""" breakpad_folder = self.breakpad_dir dump_list = glob.glob1(breakpad_folder,'*.dmp') dump_list.sort(key=lambda s: os.path.getmtime(os.path.join( breakpad_folder, s))) first_crash_file = os.path.join(breakpad_folder, dump_list[0]) if not os.path.isdir('1stcrash'): os.makedirs('1stcrash') shutil.copy2(first_crash_file, '1stcrash') first_crash_dir = os.path.join(os.getcwd(), '1stcrash') return first_crash_dir def _GetFirstCrashFile(self): """Get first crash file in the crash folder.""" first_crash_dir = os.path.join(os.getcwd(), '1stcrash') for each in os.listdir(first_crash_dir): if each.endswith('.txt'): first_crash_file = each return os.path.join(first_crash_dir, first_crash_file) def _ProcessOnlyFirstCrash(self): """ Process only the first crash report for email.""" first_dir = self._GetFirstCrashDir() self._SaveSymbols(self.symbols_dir, first_dir, False) def _GetOSName(self): """Returns the OS type we are running this script on.""" os_name = '' if self.IsMac(): os_number = commands.getoutput('sw_vers -productVersion | cut -c 1-4') if os_number == '10.6': os_name = 'Snow_Leopard' elif os_number == '10.5': os_name = 'Leopard' elif self.IsWin(): # TODO: Windows team need to find the way to get OS name os_name = 'Windows' if platform.version()[0] == '5': os_name = os_name + '_XP' else: os_name = os_name + '_Vista/Win7' return os_name def _ProcessUploadAndEmailCrashes(self): """Upload the crashes found and email the team about this.""" logging.info('#########INSIDE _ProcessUploadAndEmailCrashes#########') try: build_version = self.chrome_version self._SaveSymbols(self.symbols_dir) self._ProcessOnlyFirstCrash() file_to_attach = self._GetFirstCrashFile() # removing the crash_txt for now, # since we are getting UnicodeDecodeError # crash_txt = open(file_to_attach).read() except ValueError: test_utils.SendMail(self.stress_pref['mailing_address'], self.stress_pref['mailing_address'], "We don't have build version", "BROWSER CRASHED, PLEASE CHECK", 
self.stress_pref['smtp']) # Move crash reports and dumps to server os_name = self._GetOSName() dest_dir = build_version + '_' + os_name if (test_utils.Shell2(self.stress_pref['script'] % (CRASHES, dest_dir))): logging.info('Copy Complete') upload_dir= self.stress_pref['upload_dir'] + dest_dir num_crashes = '\n \n Number of Crashes :' + \ str(len(glob.glob1(self.breakpad_dir, '*.dmp'))) mail_content = '\n\n Crash Report URL :' + upload_dir + '\n' + \ num_crashes + '\n\n' # + crash_txt mail_subject = 'Stress Results :' + os_name + '_' + build_version # Sending mail with first crash report, # of crashes, location of upload test_utils.SendMail(self.stress_pref['mailing_address'], self.stress_pref['mailing_address'], mail_subject, mail_content, self.stress_pref['smtp'], file_to_attach) def _ReportCrashIfAny(self): """Check for browser crashes and report.""" if os.path.isdir(self.breakpad_dir): listOfDumps = glob.glob(os.path.join(self.breakpad_dir, '*.dmp')) if len(listOfDumps) > 0: logging.info('========== INSIDE REPORT CRASH++++++++++++++') # inform a method to process the dumps self._ProcessUploadAndEmailCrashes() # Test functions def _PrefStress(self): """Stress preferences.""" default_prefs = self.GetPrefsInfo() pref_dictionary = self._SetPref() for key, value in pref_dictionary.iteritems(): self.assertEqual(value, self.GetPrefsInfo().Prefs( getattr(pyauto, key))) for key, value in pref_dictionary.iteritems(): self.SetPrefs(getattr(pyauto, key), default_prefs.Prefs(getattr(pyauto, key))) def _NavigationStress(self): """Run back and forward stress in normal and incognito window.""" self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW) self._StressTestNavigation() def _DownloadStress(self): """Run all the Download stress test.""" org_download_dir = self.GetDownloadDirectory().value() new_dl_dir = os.path.join(org_download_dir, 'My+Downloads Folder') os.path.exists(new_dl_dir) and shutil.rmtree(new_dl_dir) os.makedirs(new_dl_dir) 
self.SetPrefs(pyauto.kDownloadDefaultDirectory, new_dl_dir) self._OpenAndCloseMultipleTabsWithDownloads() self._OpenAndCloseMultipleWindowsWithDownloads() self._OpenAndCloseMultipleTabsWithMultipleDownloads() self._OpenAndCloseMultipleWindowsWithMultipleDownloads() pyauto_utils.RemovePath(new_dl_dir) # cleanup self.SetPrefs(pyauto.kDownloadDefaultDirectory, org_download_dir) def _PluginStress(self): """Run all the plugin stress tests.""" self._OpenAndCloseMultipleTabsWithFlash() self._OpenAndCloseMultipleWindowsWithFlash() self._OpenAndCloseMultipleTabsWithMultiplePlugins() self._OpenAndCloseMultipleWindowsWithMultiplePlugins() self._KillAndReloadRenderersWithFlash() self._ToggleAndReloadFlashPlugin() def testStress(self): """Run all the stress tests for 24 hrs.""" if self.GetBrowserInfo()['properties']['branding'] != 'Google Chrome': logging.info('This is not a branded build, so stopping the stress') return 1 self._DownloadSymbols() run_number = 1 start_time = time.time() while True: logging.info('run %d...' % run_number) run_number = run_number + 1 if (time.time() - start_time) >= 24*60*60: logging.info('Its been 24hrs, so we break now.') break try: methods = [self._NavigationStress, self._DownloadStress, self._PluginStress, self._PrefStress] random.shuffle(methods) for method in methods: method() logging.info('Method %s done' % method) except KeyboardInterrupt: logging.info('----------We got a KeyboardInterrupt-----------') except Exception, error: logging.info('-------------There was an ERROR---------------') logging.info(error) # Crash Reporting self._ReportCrashIfAny() self._DeleteDumps() if self.IsMac(): zombie = 'ps -el | grep Chrom | grep -v grep | grep Z | wc -l' zombie_count = int(commands.getoutput(zombie)) if zombie_count > 0: logging.info('WE HAVE ZOMBIES = %d' % zombie_count) if __name__ == '__main__': pyauto_functional.Main()
osrf/opensplice
refs/heads/osrf-6.9.0
build/docs/DDSMATLABGuide/source/conf.py
2
# -*- coding: utf-8 -*- # # OpenSplice_Deployment_Guide build configuration file, created by # ReST Editor on 06-Jul-2015 # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os import time # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. #extensions = ['sphinx.ext.todo'] extensions = ['sphinx.ext.todo', 'sphinx.ext.ifconfig'] def setup(app): app.add_config_value('rmi_languages', '', True) #rmi_languages = 'C++ and Java' rmi_languages = 'C++' #rmi_languages = 'Java' rst_prolog = """ .. |rmi_langs| replace:: C++ .. |product_name| replace:: OpenSplice """ #.. |rmi_langs| replace:: C++ and Java # Add any paths that contain templates here, relative to this directory. templates_path = [u'_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. source_encoding = u'utf-8-sig' # The master toctree document. master_doc = u'index' # General information about the project. 
project = u'Vortex OpenSplice MATLAB Guide' this_year = time.strftime( '%Y' ) copyright = u'{y}, ADLINK Technology Limited'.format( y = this_year ) print 'Copyright string is:', copyright # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'6.x' # The full version, including alpha/beta/rc tags. release = version #release = u'.0' print 'Short version string is:', version print 'Full version string is:', release # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. language = u'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: today = ' ' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
# html_theme = u'sphinxdoc' html_theme = u'vortextheme' # LINUX PATH: html_theme_path = ['../../.'] # WINDOWS PATH: # html_theme_path = ['..\..\.'] #build theme directory in lite using environment variable, so shared amongst books # insight team can delete, #html_theme_path = [os.environ['VL_HOME'] + '/build/docs'] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None html_title = 'The Vortex OpenSplice MATLAB Guide' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None #html_short_title = 'HTML short Title conf.py' html_short_title = 'OpenSplice MATLAB Guide' # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = './images/Vortex_logo_2014.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [u'_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
#html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True html_show_sphinx = False # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'The Vortex OpenSplice MATLAB Guide' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). latex_paper_size = u'a4' # The font size ('10pt', '11pt' or '12pt'). latex_font_size = u'10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [('index', 'OpenSplice_DDSMATLABGuide.tex', u'DDS MATLAB Guide', u'', 'manual', True)] # Note 'author' field empty # Added 'True' to end of generated line to suppress 'Index & Tables' # A dictionary that contains LaTeX snippets that override those Sphinx usually # puts into the generated .tex files. latex_elements = { 'babel': '\\usepackage[english]{babel}' } # The name of an image file (relative to this directory) to place at the top of # the title page. 
latex_logo = './images/Vortex-Cover.png' # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. latex_preamble = r""" \setcounter {tocdepth} {5} \setcounter{secnumdepth}{5} """ # * THIS GETS RID OF BLANK PAGES AT ENDS OF CHAPTERS & ToC latex_elements = { 'classoptions': ',openany, oneside', 'babel': '\\usepackage[english]{babel}' } # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [('index', 'OpenSplice_DDSMATLABGuide', u'OpenSplice_DDSMATLAB Documentation', [u'ADLINK Technology Limited'], 1)] # -- Additional options -------------------------------------------------------- todo_include_todos = True
public-ink/public-ink
refs/heads/master
server/appengine-staging/lib/unidecode/x030.py
248
# Unidecode transliteration table for the Unicode page U+3000..U+30FF
# (CJK symbols/punctuation, hiragana, katakana) -- presumably; inferred
# from the file path (unidecode/x030.py) and the romaji values below.
# data[n] is the ASCII replacement for code point U+30<nn>; '[?]' marks
# code points with no transliteration.  Machine-generated: do not edit
# individual entries by hand.
data = (
    # CJK symbols and punctuation (U+3000..U+303F).
    ' ',  # 0x00
    ', ',  # 0x01
    '. ',  # 0x02
    '"',  # 0x03
    '[JIS]',  # 0x04
    '"',  # 0x05
    '/',  # 0x06
    '0',  # 0x07
    '<',  # 0x08
    '> ',  # 0x09
    '<<',  # 0x0a
    '>> ',  # 0x0b
    '[',  # 0x0c
    '] ',  # 0x0d
    '{',  # 0x0e
    '} ',  # 0x0f
    '[(',  # 0x10
    ')] ',  # 0x11
    '@',  # 0x12
    'X ',  # 0x13
    '[',  # 0x14
    '] ',  # 0x15
    '[[',  # 0x16
    ']] ',  # 0x17
    '((',  # 0x18
    ')) ',  # 0x19
    '[[',  # 0x1a
    ']] ',  # 0x1b
    '~ ',  # 0x1c
    '``',  # 0x1d
    '\'\'',  # 0x1e
    ',,',  # 0x1f
    '@',  # 0x20
    '1',  # 0x21
    '2',  # 0x22
    '3',  # 0x23
    '4',  # 0x24
    '5',  # 0x25
    '6',  # 0x26
    '7',  # 0x27
    '8',  # 0x28
    '9',  # 0x29
    '',  # 0x2a
    '',  # 0x2b
    '',  # 0x2c
    '',  # 0x2d
    '',  # 0x2e
    '',  # 0x2f
    '~',  # 0x30
    '+',  # 0x31
    '+',  # 0x32
    '+',  # 0x33
    '+',  # 0x34
    '',  # 0x35
    '@',  # 0x36
    ' // ',  # 0x37
    '+10+',  # 0x38
    '+20+',  # 0x39
    '+30+',  # 0x3a
    '[?]',  # 0x3b
    '[?]',  # 0x3c
    '[?]',  # 0x3d
    '',  # 0x3e
    '',  # 0x3f
    # Hiragana (U+3040..U+309F).
    '[?]',  # 0x40
    'a',  # 0x41
    'a',  # 0x42
    'i',  # 0x43
    'i',  # 0x44
    'u',  # 0x45
    'u',  # 0x46
    'e',  # 0x47
    'e',  # 0x48
    'o',  # 0x49
    'o',  # 0x4a
    'ka',  # 0x4b
    'ga',  # 0x4c
    'ki',  # 0x4d
    'gi',  # 0x4e
    'ku',  # 0x4f
    'gu',  # 0x50
    'ke',  # 0x51
    'ge',  # 0x52
    'ko',  # 0x53
    'go',  # 0x54
    'sa',  # 0x55
    'za',  # 0x56
    'shi',  # 0x57
    'zi',  # 0x58
    'su',  # 0x59
    'zu',  # 0x5a
    'se',  # 0x5b
    'ze',  # 0x5c
    'so',  # 0x5d
    'zo',  # 0x5e
    'ta',  # 0x5f
    'da',  # 0x60
    'chi',  # 0x61
    'di',  # 0x62
    'tsu',  # 0x63
    'tsu',  # 0x64
    'du',  # 0x65
    'te',  # 0x66
    'de',  # 0x67
    'to',  # 0x68
    'do',  # 0x69
    'na',  # 0x6a
    'ni',  # 0x6b
    'nu',  # 0x6c
    'ne',  # 0x6d
    'no',  # 0x6e
    'ha',  # 0x6f
    'ba',  # 0x70
    'pa',  # 0x71
    'hi',  # 0x72
    'bi',  # 0x73
    'pi',  # 0x74
    'hu',  # 0x75
    'bu',  # 0x76
    'pu',  # 0x77
    'he',  # 0x78
    'be',  # 0x79
    'pe',  # 0x7a
    'ho',  # 0x7b
    'bo',  # 0x7c
    'po',  # 0x7d
    'ma',  # 0x7e
    'mi',  # 0x7f
    'mu',  # 0x80
    'me',  # 0x81
    'mo',  # 0x82
    'ya',  # 0x83
    'ya',  # 0x84
    'yu',  # 0x85
    'yu',  # 0x86
    'yo',  # 0x87
    'yo',  # 0x88
    'ra',  # 0x89
    'ri',  # 0x8a
    'ru',  # 0x8b
    're',  # 0x8c
    'ro',  # 0x8d
    'wa',  # 0x8e
    'wa',  # 0x8f
    'wi',  # 0x90
    'we',  # 0x91
    'wo',  # 0x92
    'n',  # 0x93
    'vu',  # 0x94
    '[?]',  # 0x95
    '[?]',  # 0x96
    '[?]',  # 0x97
    '[?]',  # 0x98
    '',  # 0x99
    '',  # 0x9a
    '',  # 0x9b
    '',  # 0x9c
    '"',  # 0x9d
    '"',  # 0x9e
    '[?]',  # 0x9f
    # Katakana (U+30A0..U+30FF).
    '[?]',  # 0xa0
    'a',  # 0xa1
    'a',  # 0xa2
    'i',  # 0xa3
    'i',  # 0xa4
    'u',  # 0xa5
    'u',  # 0xa6
    'e',  # 0xa7
    'e',  # 0xa8
    'o',  # 0xa9
    'o',  # 0xaa
    'ka',  # 0xab
    'ga',  # 0xac
    'ki',  # 0xad
    'gi',  # 0xae
    'ku',  # 0xaf
    'gu',  # 0xb0
    'ke',  # 0xb1
    'ge',  # 0xb2
    'ko',  # 0xb3
    'go',  # 0xb4
    'sa',  # 0xb5
    'za',  # 0xb6
    'shi',  # 0xb7
    'zi',  # 0xb8
    'su',  # 0xb9
    'zu',  # 0xba
    'se',  # 0xbb
    'ze',  # 0xbc
    'so',  # 0xbd
    'zo',  # 0xbe
    'ta',  # 0xbf
    'da',  # 0xc0
    'chi',  # 0xc1
    'di',  # 0xc2
    'tsu',  # 0xc3
    'tsu',  # 0xc4
    'du',  # 0xc5
    'te',  # 0xc6
    'de',  # 0xc7
    'to',  # 0xc8
    'do',  # 0xc9
    'na',  # 0xca
    'ni',  # 0xcb
    'nu',  # 0xcc
    'ne',  # 0xcd
    'no',  # 0xce
    'ha',  # 0xcf
    'ba',  # 0xd0
    'pa',  # 0xd1
    'hi',  # 0xd2
    'bi',  # 0xd3
    'pi',  # 0xd4
    'hu',  # 0xd5
    'bu',  # 0xd6
    'pu',  # 0xd7
    'he',  # 0xd8
    'be',  # 0xd9
    'pe',  # 0xda
    'ho',  # 0xdb
    'bo',  # 0xdc
    'po',  # 0xdd
    'ma',  # 0xde
    'mi',  # 0xdf
    'mu',  # 0xe0
    'me',  # 0xe1
    'mo',  # 0xe2
    'ya',  # 0xe3
    'ya',  # 0xe4
    'yu',  # 0xe5
    'yu',  # 0xe6
    'yo',  # 0xe7
    'yo',  # 0xe8
    'ra',  # 0xe9
    'ri',  # 0xea
    'ru',  # 0xeb
    're',  # 0xec
    'ro',  # 0xed
    'wa',  # 0xee
    'wa',  # 0xef
    'wi',  # 0xf0
    'we',  # 0xf1
    'wo',  # 0xf2
    'n',  # 0xf3
    'vu',  # 0xf4
    'ka',  # 0xf5
    'ke',  # 0xf6
    'va',  # 0xf7
    'vi',  # 0xf8
    've',  # 0xf9
    'vo',  # 0xfa
    '',  # 0xfb
    '',  # 0xfc
    '"',  # 0xfd
    '"',  # 0xfe
)
nesdis/djongo
refs/heads/master
tests/django_tests/tests/v21/tests/auth_tests/test_management.py
3
import builtins import getpass import sys from datetime import date from io import StringIO from unittest import mock from django.apps import apps from django.contrib.auth import management from django.contrib.auth.management import ( create_permissions, get_default_username, ) from django.contrib.auth.management.commands import ( changepassword, createsuperuser, ) from django.contrib.auth.models import Group, Permission, User from django.contrib.contenttypes.models import ContentType from django.core.management import call_command from django.core.management.base import CommandError from django.db import migrations from django.test import TestCase, override_settings from django.utils.translation import gettext_lazy as _ from .models import ( CustomUser, CustomUserNonUniqueUsername, CustomUserWithFK, Email, ) MOCK_INPUT_KEY_TO_PROMPTS = { # @mock_inputs dict key: [expected prompt messages], 'bypass': ['Bypass password validation and create user anyway? [y/N]: '], 'email': ['Email address: '], 'username': ['Username: ', lambda: "Username (leave blank to use '%s'): " % get_default_username()], } def mock_inputs(inputs): """ Decorator to temporarily replace input/getpass to allow interactive createsuperuser. """ def inner(test_func): def wrapped(*args): class mock_getpass: @staticmethod def getpass(prompt=b'Password: ', stream=None): if callable(inputs['password']): return inputs['password']() return inputs['password'] def mock_input(prompt): assert '__proxy__' not in prompt response = None for key, val in inputs.items(): # get() fallback because sometimes 'key' is the actual # prompt rather than a shortcut name. prompt_msgs = MOCK_INPUT_KEY_TO_PROMPTS.get(key, key) if isinstance(prompt_msgs, list): prompt_msgs = [msg() if callable(msg) else msg for msg in prompt_msgs] if prompt in prompt_msgs: if callable(val): response = val() else: response = val break if response is None: raise ValueError('Mock input for %r not found.' 
% prompt) return response old_getpass = createsuperuser.getpass old_input = builtins.input createsuperuser.getpass = mock_getpass builtins.input = mock_input try: test_func(*args) finally: createsuperuser.getpass = old_getpass builtins.input = old_input return wrapped return inner class MockTTY: """ A fake stdin object that pretends to be a TTY to be used in conjunction with mock_inputs. """ def isatty(self): return True class MockInputTests(TestCase): @mock_inputs({'username': 'alice'}) def test_input_not_found(self): with self.assertRaisesMessage(ValueError, "Mock input for 'Email address: ' not found."): call_command('createsuperuser', stdin=MockTTY()) class GetDefaultUsernameTestCase(TestCase): def setUp(self): self.old_get_system_username = management.get_system_username def tearDown(self): management.get_system_username = self.old_get_system_username def test_actual_implementation(self): self.assertIsInstance(management.get_system_username(), str) def test_simple(self): management.get_system_username = lambda: 'joe' self.assertEqual(management.get_default_username(), 'joe') def test_existing(self): User.objects.create(username='joe') management.get_system_username = lambda: 'joe' self.assertEqual(management.get_default_username(), '') self.assertEqual( management.get_default_username(check_db=False), 'joe') def test_i18n(self): # 'Julia' with accented 'u': management.get_system_username = lambda: 'J\xfalia' self.assertEqual(management.get_default_username(), 'julia') @override_settings(AUTH_PASSWORD_VALIDATORS=[ {'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}, ]) class ChangepasswordManagementCommandTestCase(TestCase): def setUp(self): self.user = User.objects.create_user(username='joe', password='qwerty') self.stdout = StringIO() self.stderr = StringIO() def tearDown(self): self.stdout.close() self.stderr.close() @mock.patch.object(getpass, 'getpass', return_value='password') def test_get_pass(self, mock_get_pass): 
call_command('changepassword', username='joe', stdout=self.stdout) self.assertIs(User.objects.get(username='joe').check_password('password'), True) @mock.patch.object(getpass, 'getpass', return_value='') def test_get_pass_no_input(self, mock_get_pass): with self.assertRaisesMessage(CommandError, 'aborted'): call_command('changepassword', username='joe', stdout=self.stdout) @mock.patch.object(changepassword.Command, '_get_pass', return_value='new_password') def test_system_username(self, mock_get_pass): """The system username is used if --username isn't provided.""" username = getpass.getuser() User.objects.create_user(username=username, password='qwerty') call_command('changepassword', stdout=self.stdout) self.assertIs(User.objects.get(username=username).check_password('new_password'), True) def test_nonexistent_username(self): with self.assertRaisesMessage(CommandError, "user 'test' does not exist"): call_command('changepassword', username='test', stdout=self.stdout) @mock.patch.object(changepassword.Command, '_get_pass', return_value='not qwerty') def test_that_changepassword_command_changes_joes_password(self, mock_get_pass): "Executing the changepassword management command should change joe's password" self.assertTrue(self.user.check_password('qwerty')) call_command('changepassword', username='joe', stdout=self.stdout) command_output = self.stdout.getvalue().strip() self.assertEqual( command_output, "Changing password for user 'joe'\nPassword changed successfully for user 'joe'" ) self.assertTrue(User.objects.get(username="joe").check_password("not qwerty")) @mock.patch.object(changepassword.Command, '_get_pass', side_effect=lambda *args: str(args)) def test_that_max_tries_exits_1(self, mock_get_pass): """ A CommandError should be thrown by handle() if the user enters in mismatched passwords three times. 
""" msg = "Aborting password change for user 'joe' after 3 attempts" with self.assertRaisesMessage(CommandError, msg): call_command('changepassword', username='joe', stdout=self.stdout, stderr=self.stderr) @mock.patch.object(changepassword.Command, '_get_pass', return_value='1234567890') def test_password_validation(self, mock_get_pass): """ A CommandError should be raised if the user enters in passwords which fail validation three times. """ abort_msg = "Aborting password change for user 'joe' after 3 attempts" with self.assertRaisesMessage(CommandError, abort_msg): call_command('changepassword', username='joe', stdout=self.stdout, stderr=self.stderr) self.assertIn('This password is entirely numeric.', self.stderr.getvalue()) @mock.patch.object(changepassword.Command, '_get_pass', return_value='not qwerty') def test_that_changepassword_command_works_with_nonascii_output(self, mock_get_pass): """ #21627 -- Executing the changepassword management command should allow non-ASCII characters from the User object representation. """ # 'Julia' with accented 'u': User.objects.create_user(username='J\xfalia', password='qwerty') call_command('changepassword', username='J\xfalia', stdout=self.stdout) class MultiDBChangepasswordManagementCommandTestCase(TestCase): multi_db = True @mock.patch.object(changepassword.Command, '_get_pass', return_value='not qwerty') def test_that_changepassword_command_with_database_option_uses_given_db(self, mock_get_pass): """ changepassword --database should operate on the specified DB. 
""" user = User.objects.db_manager('other').create_user(username='joe', password='qwerty') self.assertTrue(user.check_password('qwerty')) out = StringIO() call_command('changepassword', username='joe', database='other', stdout=out) command_output = out.getvalue().strip() self.assertEqual( command_output, "Changing password for user 'joe'\nPassword changed successfully for user 'joe'" ) self.assertTrue(User.objects.using('other').get(username="joe").check_password('not qwerty')) @override_settings( SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True) AUTH_PASSWORD_VALIDATORS=[{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}], ) class CreatesuperuserManagementCommandTestCase(TestCase): def test_no_email_argument(self): new_io = StringIO() with self.assertRaisesMessage(CommandError, 'You must use --email with --noinput.'): call_command('createsuperuser', interactive=False, username='joe', stdout=new_io) def test_basic_usage(self): "Check the operation of the createsuperuser management command" # We can use the management command to create a superuser new_io = StringIO() call_command( "createsuperuser", interactive=False, username="joe", email="joe@somewhere.org", stdout=new_io ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') u = User.objects.get(username="joe") self.assertEqual(u.email, 'joe@somewhere.org') # created password should be unusable self.assertFalse(u.has_usable_password()) def test_non_ascii_verbose_name(self): @mock_inputs({ 'password': "nopasswd", "Uživatel (leave blank to use '%s'): " % get_default_username(): 'foo', # username (cz) 'email': 'nolocale@somewhere.org', }) def test(self): username_field = User._meta.get_field('username') old_verbose_name = username_field.verbose_name username_field.verbose_name = _('u\u017eivatel') new_io = StringIO() try: call_command( "createsuperuser", interactive=True, stdout=new_io, stdin=MockTTY(), ) finally: 
username_field.verbose_name = old_verbose_name command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') test(self) def test_verbosity_zero(self): # We can suppress output on the management command new_io = StringIO() call_command( "createsuperuser", interactive=False, username="joe2", email="joe2@somewhere.org", verbosity=0, stdout=new_io ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, '') u = User.objects.get(username="joe2") self.assertEqual(u.email, 'joe2@somewhere.org') self.assertFalse(u.has_usable_password()) def test_email_in_username(self): new_io = StringIO() call_command( "createsuperuser", interactive=False, username="joe+admin@somewhere.org", email="joe@somewhere.org", stdout=new_io ) u = User._default_manager.get(username="joe+admin@somewhere.org") self.assertEqual(u.email, 'joe@somewhere.org') self.assertFalse(u.has_usable_password()) @override_settings(AUTH_USER_MODEL='auth_tests.CustomUser') def test_swappable_user(self): "A superuser can be created when a custom user model is in use" # We can use the management command to create a superuser # We skip validation because the temporary substitution of the # swappable User model messes with validation. 
new_io = StringIO() call_command( "createsuperuser", interactive=False, email="joe@somewhere.org", date_of_birth="1976-04-01", stdout=new_io, ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') u = CustomUser._default_manager.get(email="joe@somewhere.org") self.assertEqual(u.date_of_birth, date(1976, 4, 1)) # created password should be unusable self.assertFalse(u.has_usable_password()) @override_settings(AUTH_USER_MODEL='auth_tests.CustomUser') def test_swappable_user_missing_required_field(self): "A Custom superuser won't be created when a required field isn't provided" # We can use the management command to create a superuser # We skip validation because the temporary substitution of the # swappable User model messes with validation. new_io = StringIO() with self.assertRaisesMessage(CommandError, 'You must use --email with --noinput.'): call_command( "createsuperuser", interactive=False, stdout=new_io, stderr=new_io, ) self.assertEqual(CustomUser._default_manager.count(), 0) @override_settings( AUTH_USER_MODEL='auth_tests.CustomUserNonUniqueUsername', AUTHENTICATION_BACKENDS=['my.custom.backend'], ) def test_swappable_user_username_non_unique(self): @mock_inputs({ 'username': 'joe', 'password': 'nopasswd', }) def createsuperuser(): new_io = StringIO() call_command( "createsuperuser", interactive=True, email="joe@somewhere.org", stdout=new_io, stdin=MockTTY(), ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') for i in range(2): createsuperuser() users = CustomUserNonUniqueUsername.objects.filter(username="joe") self.assertEqual(users.count(), 2) def test_skip_if_not_in_TTY(self): """ If the command is not called from a TTY, it should be skipped and a message should be displayed (#7423). 
""" class FakeStdin: """A fake stdin object that has isatty() return False.""" def isatty(self): return False out = StringIO() call_command( "createsuperuser", stdin=FakeStdin(), stdout=out, interactive=True, ) self.assertEqual(User._default_manager.count(), 0) self.assertIn("Superuser creation skipped", out.getvalue()) def test_passing_stdin(self): """ You can pass a stdin object as an option and it should be available on self.stdin. If no such option is passed, it defaults to sys.stdin. """ sentinel = object() command = createsuperuser.Command() call_command( command, stdin=sentinel, stdout=StringIO(), stderr=StringIO(), interactive=False, verbosity=0, username='janet', email='janet@example.com', ) self.assertIs(command.stdin, sentinel) command = createsuperuser.Command() call_command( command, stdout=StringIO(), stderr=StringIO(), interactive=False, verbosity=0, username='joe', email='joe@example.com', ) self.assertIs(command.stdin, sys.stdin) @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserWithFK') def test_fields_with_fk(self): new_io = StringIO() group = Group.objects.create(name='mygroup') email = Email.objects.create(email='mymail@gmail.com') call_command( 'createsuperuser', interactive=False, username=email.pk, email=email.email, group=group.pk, stdout=new_io, ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') u = CustomUserWithFK._default_manager.get(email=email) self.assertEqual(u.username, email) self.assertEqual(u.group, group) non_existent_email = 'mymail2@gmail.com' msg = 'email instance with email %r does not exist.' 
% non_existent_email with self.assertRaisesMessage(CommandError, msg): call_command( 'createsuperuser', interactive=False, username=email.pk, email=non_existent_email, stdout=new_io, ) @override_settings(AUTH_USER_MODEL='auth_tests.CustomUserWithFK') def test_fields_with_fk_interactive(self): new_io = StringIO() group = Group.objects.create(name='mygroup') email = Email.objects.create(email='mymail@gmail.com') @mock_inputs({ 'password': 'nopasswd', 'Username (Email.id): ': email.pk, 'Email (Email.email): ': email.email, 'Group (Group.id): ': group.pk, }) def test(self): call_command( 'createsuperuser', interactive=True, stdout=new_io, stdin=MockTTY(), ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') u = CustomUserWithFK._default_manager.get(email=email) self.assertEqual(u.username, email) self.assertEqual(u.group, group) test(self) def test_default_username(self): """createsuperuser uses a default username when one isn't provided.""" # Get the default username before creating a user. default_username = get_default_username() new_io = StringIO() entered_passwords = ['password', 'password'] def return_passwords(): return entered_passwords.pop(0) @mock_inputs({'password': return_passwords, 'username': '', 'email': ''}) def test(self): call_command( 'createsuperuser', interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual(new_io.getvalue().strip(), 'Superuser created successfully.') self.assertTrue(User.objects.filter(username=default_username).exists()) test(self) def test_password_validation(self): """ Creation should fail if the password fails validation. """ new_io = StringIO() # Returns '1234567890' the first two times it is called, then # 'password' subsequently. 
def bad_then_good_password(index=[0]): index[0] += 1 if index[0] <= 2: return '1234567890' return 'password' @mock_inputs({ 'password': bad_then_good_password, 'username': 'joe1234567890', 'email': '', 'bypass': 'n', }) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "This password is entirely numeric.\n" "Superuser created successfully." ) test(self) def test_blank_username(self): """Creation fails if --username is blank.""" new_io = StringIO() def test(self): with self.assertRaisesMessage(CommandError, 'Username cannot be blank.'): call_command( 'createsuperuser', username='', stdin=MockTTY(), stdout=new_io, stderr=new_io, ) test(self) def test_password_validation_bypass(self): """ Password validation can be bypassed by entering 'y' at the prompt. """ new_io = StringIO() @mock_inputs({ 'password': '1234567890', 'username': 'joe1234567890', 'email': '', 'bypass': 'y', }) def test(self): call_command( 'createsuperuser', interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), 'This password is entirely numeric.\n' 'Superuser created successfully.' ) test(self) def test_invalid_username(self): """Creation fails if the username fails validation.""" user_field = User._meta.get_field(User.USERNAME_FIELD) new_io = StringIO() entered_passwords = ['password', 'password'] # Enter an invalid (too long) username first and then a valid one. 
invalid_username = ('x' * user_field.max_length) + 'y' entered_usernames = [invalid_username, 'janet'] def return_passwords(): return entered_passwords.pop(0) def return_usernames(): return entered_usernames.pop(0) @mock_inputs({'password': return_passwords, 'username': return_usernames, 'email': ''}) def test(self): call_command( 'createsuperuser', interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), 'Error: Ensure this value has at most %s characters (it has %s).\n' 'Superuser created successfully.' % (user_field.max_length, len(invalid_username)) ) test(self) def test_existing_username(self): """Creation fails if the username already exists.""" user = User.objects.create(username='janet') new_io = StringIO() entered_passwords = ['password', 'password'] # Enter the existing username first and then a new one. entered_usernames = [user.username, 'joe'] def return_passwords(): return entered_passwords.pop(0) def return_usernames(): return entered_usernames.pop(0) @mock_inputs({'password': return_passwords, 'username': return_usernames, 'email': ''}) def test(self): call_command( 'createsuperuser', interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), 'Error: That username is already taken.\n' 'Superuser created successfully.' ) test(self) def test_validation_mismatched_passwords(self): """ Creation should fail if the user enters mismatched passwords. """ new_io = StringIO() # The first two passwords do not match, but the second two do match and # are valid. 
entered_passwords = ["password", "not password", "password2", "password2"] def mismatched_passwords_then_matched(): return entered_passwords.pop(0) @mock_inputs({ 'password': mismatched_passwords_then_matched, 'username': 'joe1234567890', 'email': '', }) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "Error: Your passwords didn't match.\n" "Superuser created successfully." ) test(self) def test_validation_blank_password_entered(self): """ Creation should fail if the user enters blank passwords. """ new_io = StringIO() # The first two passwords are empty strings, but the second two are # valid. entered_passwords = ["", "", "password2", "password2"] def blank_passwords_then_valid(): return entered_passwords.pop(0) @mock_inputs({ 'password': blank_passwords_then_valid, 'username': 'joe1234567890', 'email': '', }) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "Error: Blank passwords aren't allowed.\n" "Superuser created successfully." ) test(self) class MultiDBCreatesuperuserTestCase(TestCase): multi_db = True def test_createsuperuser_command_with_database_option(self): """ changepassword --database should operate on the specified DB. 
""" new_io = StringIO() call_command( 'createsuperuser', interactive=False, username='joe', email='joe@somewhere.org', database='other', stdout=new_io, ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, 'Superuser created successfully.') user = User.objects.using('other').get(username='joe') self.assertEqual(user.email, 'joe@somewhere.org') class CreatePermissionsTests(TestCase): def setUp(self): self._original_permissions = Permission._meta.permissions[:] self._original_default_permissions = Permission._meta.default_permissions self.app_config = apps.get_app_config('auth') def tearDown(self): Permission._meta.permissions = self._original_permissions Permission._meta.default_permissions = self._original_default_permissions ContentType.objects.clear_cache() def test_default_permissions(self): permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission') Permission._meta.permissions = [ ('my_custom_permission', 'Some permission'), ] create_permissions(self.app_config, verbosity=0) # view/add/change/delete permission by default + custom permission self.assertEqual(Permission.objects.filter( content_type=permission_content_type, ).count(), 5) Permission.objects.filter(content_type=permission_content_type).delete() Permission._meta.default_permissions = [] create_permissions(self.app_config, verbosity=0) # custom permission only since default permissions is empty self.assertEqual(Permission.objects.filter( content_type=permission_content_type, ).count(), 1) def test_unavailable_models(self): """ #24075 - Permissions shouldn't be created or deleted if the ContentType or Permission models aren't available. 
""" state = migrations.state.ProjectState() # Unavailable contenttypes.ContentType with self.assertNumQueries(0): create_permissions(self.app_config, verbosity=0, apps=state.apps) # Unavailable auth.Permission state = migrations.state.ProjectState(real_apps=['contenttypes']) with self.assertNumQueries(0): create_permissions(self.app_config, verbosity=0, apps=state.apps)
jrrickerson/pyweek23
refs/heads/master
src/main.py
1
import sge from sge.gfx import Background, Color from .game import Game from .rooms import MainMenu from .objects.testing import TestPlayer, SmallEnemy def main(): sge.game = Game(width=640, height=480, fps=120, window_text='SGE Test') background = sge.gfx.Background([], sge.gfx.Color("black")) player = TestPlayer(sge.game.width / 2, sge.game.height / 2) enemy1 = SmallEnemy('a201', sge.game.width / 200, sge.game.height / 1.2, 50) enemy2 = SmallEnemy('b201', sge.game.width / 4, sge.game.height / 1.2, 50) sge.game.start_room = MainMenu( objects=[player, enemy1, enemy2], background=background) sge.game.mouse_visible = False sge.game.start()
adityacs/ansible
refs/heads/devel
lib/ansible/modules/network/nxos/nxos_vrf_interface.py
11
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = ''' --- module: nxos_vrf_interface version_added: "2.1" short_description: Manages interface specific VRF configuration. description: - Manages interface specific VRF configuration. author: - Jason Edelman (@jedelman8) - Gabriele Gerbino (@GGabriele) notes: - VRF needs to be added globally with M(nxos_vrf) before adding a VRF to an interface. - Remove a VRF from an interface will still remove all L3 attributes just as it does from CLI. - VRF is not read from an interface until IP address is configured on that interface. options: vrf: description: - Name of VRF to be managed. required: true interface: description: - Full name of interface to be managed, i.e. Ethernet1/1. required: true state: description: - Manages desired state of the resource. 
required: false default: present choices: ['present','absent'] ''' EXAMPLES = ''' - name: Ensure vrf ntc exists on Eth1/1 nxos_vrf_interface: vrf: ntc interface: Ethernet1/1 host: 68.170.147.165 state: present - name: Ensure ntc VRF does not exist on Eth1/1 nxos_vrf_interface: vrf: ntc interface: Ethernet1/1 host: 68.170.147.165 state: absent ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: always type: dict sample: {"interface": "loopback16", "vrf": "ntc"} existing: description: k/v pairs of existing vrf on the interface type: dict sample: {"interface": "loopback16", "vrf": ""} end_state: description: k/v pairs of vrf after module execution returned: always type: dict sample: {"interface": "loopback16", "vrf": "ntc"} updates: description: commands sent to the device returned: always type: list sample: ["interface loopback16", "vrf member ntc"] changed: description: check to see if a change was made on the device returned: always type: boolean sample: true ''' import re from ansible.module_utils.nxos import get_config, load_config, run_commands from ansible.module_utils.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.netcfg import CustomNetworkConfig WARNINGS = [] def execute_show_command(command, module, command_type='cli_show'): if module.params['transport'] == 'cli': if 'show run' not in command: command += ' | json' cmds = [command] body = run_commands(module, cmds) elif module.params['transport'] == 'nxapi': cmds = [command] body = run_commands(module, cmds) return body def get_interface_type(interface): if interface.upper().startswith('ET'): return 'ethernet' elif interface.upper().startswith('VL'): return 'svi' elif interface.upper().startswith('LO'): return 'loopback' elif interface.upper().startswith('MG'): return 'management' elif interface.upper().startswith('MA'): return 'management' elif interface.upper().startswith('PO'): return 
'portchannel' else: return 'unknown' def get_interface_mode(interface, intf_type, module): command = 'show interface {0}'.format(interface) interface = {} mode = 'unknown' if intf_type in ['ethernet', 'portchannel']: body = execute_show_command(command, module)[0] interface_table = body['TABLE_interface']['ROW_interface'] mode = str(interface_table.get('eth_mode', 'layer3')) if mode == 'access' or mode == 'trunk': mode = 'layer2' elif intf_type == 'loopback' or intf_type == 'svi': mode = 'layer3' return mode def get_vrf_list(module): command = 'show vrf all' vrf_list = [] body = execute_show_command(command, module)[0] try: vrf_table = body['TABLE_vrf']['ROW_vrf'] except (KeyError, AttributeError): return vrf_list for each in vrf_table: vrf_list.append(str(each['vrf_name'])) return vrf_list def get_interface_info(interface, module): if not interface.startswith('loopback'): interface = interface.capitalize() command = 'show run | section interface.{0}'.format(interface) vrf_regex = ".*vrf\s+member\s+(?P<vrf>\S+).*" try: body = execute_show_command(command, module, command_type='cli_show_ascii')[0] match_vrf = re.match(vrf_regex, body, re.DOTALL) group_vrf = match_vrf.groupdict() vrf = group_vrf["vrf"] except (AttributeError, TypeError): return "" return vrf def is_default(interface, module): command = 'show run interface {0}'.format(interface) try: body = execute_show_command(command, module, command_type='cli_show_ascii')[0] raw_list = body.split('\n') if raw_list[-1].startswith('interface'): return True else: return False except (KeyError, IndexError): return 'DNE' def main(): argument_spec = dict( vrf=dict(required=True), interface=dict(type='str', required=True), state=dict(default='present', choices=['present', 'absent'], required=False), include_defaults=dict(default=False), config=dict(), save=dict(type='bool', default=False) ) argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = 
list() check_args(module, warnings) vrf = module.params['vrf'] interface = module.params['interface'].lower() state = module.params['state'] current_vrfs = get_vrf_list(module) if vrf not in current_vrfs: WARNINGS.append("The VRF is not present/active on the device. " "Use nxos_vrf to fix this.") intf_type = get_interface_type(interface) if (intf_type != 'ethernet' and module.params['transport'] == 'cli'): if is_default(interface, module) == 'DNE': module.fail_json(msg="interface does not exist on switch. Verify " "switch platform or create it first with " "nxos_interface if it's a logical interface") mode = get_interface_mode(interface, intf_type, module) if mode == 'layer2': module.fail_json(msg='Ensure interface is a Layer 3 port before ' 'configuring a VRF on an interface. You can ' 'use nxos_interface') proposed = dict(interface=interface, vrf=vrf) current_vrf = get_interface_info(interface, module) existing = dict(interface=interface, vrf=current_vrf) changed = False end_state = existing if vrf != existing['vrf'] and state == 'absent': module.fail_json(msg='The VRF you are trying to remove ' 'from the interface does not exist ' 'on that interface.', interface=interface, proposed_vrf=vrf, existing_vrf=existing['vrf']) commands = [] if existing: if state == 'absent': if existing and vrf == existing['vrf']: command = 'no vrf member {0}'.format(vrf) commands.append(command) elif state == 'present': if existing['vrf'] != vrf: command = 'vrf member {0}'.format(vrf) commands.append(command) if commands: commands.insert(0, 'interface {0}'.format(interface)) if commands: if module.check_mode: module.exit_json(changed=True, commands=commands) else: load_config(module, commands) changed = True changed_vrf = get_interface_info(interface, module) end_state = dict(interface=interface, vrf=changed_vrf) if 'configure' in commands: commands.pop(0) results = {} results['proposed'] = proposed results['existing'] = existing results['end_state'] = end_state results['updates'] = 
commands results['changed'] = changed if WARNINGS: results['warnings'] = WARNINGS module.exit_json(**results) if __name__ == '__main__': main()
ugaliguy/HackerRank
refs/heads/master
Algorithms/Implementation/cut-the-sticks.py
1
# Python 2 #!/bin/python import sys n = int(raw_input()) arr = map(int,raw_input().split()) arr.sort() count = len(arr) while(count >= 1): print count min = arr[0] for i in range(count): if arr[0] == min: arr.pop(0) count = len(arr)
clownhood/lol
refs/heads/master
contrib/wallettools/walletunlock.py
2299
from jsonrpc import ServiceProxy access = ServiceProxy("http://127.0.0.1:8332") pwd = raw_input("Enter wallet passphrase: ") access.walletpassphrase(pwd, 60)
pniebla/test-repo-console
refs/heads/master
svn/git-1.8.3.3.tar/git-1.8.3.3/git-1.8.3.3/git_remote_helpers/git/git.py
44
#!/usr/bin/env python """Functionality for interacting with Git repositories. This module provides classes for interfacing with a Git repository. """ import os import re import time from binascii import hexlify from cStringIO import StringIO import unittest from git_remote_helpers.util import debug, error, die, start_command, run_command def get_git_dir (): """Return the path to the GIT_DIR for this repo.""" args = ("git", "rev-parse", "--git-dir") exit_code, output, errors = run_command(args) if exit_code: die("Failed to retrieve git dir") assert not errors return output.strip() def parse_git_config (): """Return a dict containing the parsed version of 'git config -l'.""" exit_code, output, errors = run_command(("git", "config", "-z", "-l")) if exit_code: die("Failed to retrieve git configuration") assert not errors return dict([e.split('\n', 1) for e in output.split("\0") if e]) def git_config_bool (value): """Convert the given git config string value to True or False. Raise ValueError if the given string was not recognized as a boolean value. """ norm_value = str(value).strip().lower() if norm_value in ("true", "1", "yes", "on", ""): return True if norm_value in ("false", "0", "no", "off", "none"): return False raise ValueError("Failed to parse '%s' into a boolean value" % (value)) def valid_git_ref (ref_name): """Return True iff the given ref name is a valid git ref name.""" # The following is a reimplementation of the git check-ref-format # command. The rules were derived from the git check-ref-format(1) # manual page. This code should be replaced by a call to # check_refname_format() in the git library, when such is available. if ref_name.endswith('/') or \ ref_name.startswith('.') or \ ref_name.count('/.') or \ ref_name.count('..') or \ ref_name.endswith('.lock'): return False for c in ref_name: if ord(c) < 0x20 or ord(c) == 0x7f or c in " ~^:?*[": return False return True class GitObjectFetcher(object): """Provide parsed access to 'git cat-file --batch'. 
This provides a read-only interface to the Git object database. """ def __init__ (self): """Initiate a 'git cat-file --batch' session.""" self.queue = [] # List of object names to be submitted self.in_transit = None # Object name currently in transit # 'git cat-file --batch' produces binary output which is likely # to be corrupted by the default "rU"-mode pipe opened by # start_command. (Mode == "rU" does universal new-line # conversion, which mangles carriage returns.) Therefore, we # open an explicitly binary-safe pipe for transferring the # output from 'git cat-file --batch'. pipe_r_fd, pipe_w_fd = os.pipe() pipe_r = os.fdopen(pipe_r_fd, "rb") pipe_w = os.fdopen(pipe_w_fd, "wb") self.proc = start_command(("git", "cat-file", "--batch"), stdout = pipe_w) self.f = pipe_r def __del__ (self): """Verify completed communication with 'git cat-file --batch'.""" assert not self.queue assert self.in_transit is None self.proc.stdin.close() assert self.proc.wait() == 0 # Zero exit code assert self.f.read() == "" # No remaining output def _submit_next_object (self): """Submit queue items to the 'git cat-file --batch' process. If there are items in the queue, and there is currently no item currently in 'transit', then pop the first item off the queue, and submit it. """ if self.queue and self.in_transit is None: self.in_transit = self.queue.pop(0) print >> self.proc.stdin, self.in_transit[0] def push (self, obj, callback): """Push the given object name onto the queue. 
The given callback function will at some point in the future be called exactly once with the following arguments: - self - this GitObjectFetcher instance - obj - the object name provided to push() - sha1 - the SHA1 of the object, if 'None' obj is missing - t - the type of the object (tag/commit/tree/blob) - size - the size of the object in bytes - data - the object contents """ self.queue.append((obj, callback)) self._submit_next_object() # (Re)start queue processing def process_next_entry (self): """Read the next entry off the queue and invoke callback.""" obj, cb = self.in_transit self.in_transit = None header = self.f.readline() if header == "%s missing\n" % (obj): cb(self, obj, None, None, None, None) return sha1, t, size = header.split(" ") assert len(sha1) == 40 assert t in ("tag", "commit", "tree", "blob") assert size.endswith("\n") size = int(size.strip()) data = self.f.read(size) assert self.f.read(1) == "\n" cb(self, obj, sha1, t, size, data) self._submit_next_object() def process (self): """Process the current queue until empty.""" while self.in_transit is not None: self.process_next_entry() # High-level convenience methods: def get_sha1 (self, objspec): """Return the SHA1 of the object specified by 'objspec'. Return None if 'objspec' does not specify an existing object. """ class _ObjHandler(object): """Helper class for getting the returned SHA1.""" def __init__ (self, parser): self.parser = parser self.sha1 = None def __call__ (self, parser, obj, sha1, t, size, data): # FIXME: Many unused arguments. Could this be cheaper? assert parser == self.parser self.sha1 = sha1 handler = _ObjHandler(self) self.push(objspec, handler) self.process() return handler.sha1 def open_obj (self, objspec): """Return a file object wrapping the contents of a named object. The caller is responsible for calling .close() on the returned file object. Raise KeyError if 'objspec' does not exist in the repo. 
""" class _ObjHandler(object): """Helper class for parsing the returned git object.""" def __init__ (self, parser): """Set up helper.""" self.parser = parser self.contents = StringIO() self.err = None def __call__ (self, parser, obj, sha1, t, size, data): """Git object callback (see GitObjectFetcher documentation).""" assert parser == self.parser if not sha1: # Missing object self.err = "Missing object '%s'" % obj else: assert size == len(data) self.contents.write(data) handler = _ObjHandler(self) self.push(objspec, handler) self.process() if handler.err: raise KeyError(handler.err) handler.contents.seek(0) return handler.contents def walk_tree (self, tree_objspec, callback, prefix = ""): """Recursively walk the given Git tree object. Recursively walk all subtrees of the given tree object, and invoke the given callback passing three arguments: (path, mode, data) with the path, permission bits, and contents of all the blobs found in the entire tree structure. """ class _ObjHandler(object): """Helper class for walking a git tree structure.""" def __init__ (self, parser, cb, path, mode = None): """Set up helper.""" self.parser = parser self.cb = cb self.path = path self.mode = mode self.err = None def parse_tree (self, treedata): """Parse tree object data, yield tree entries. Each tree entry is a 3-tuple (mode, sha1, path) self.path is prepended to all paths yielded from this method. 
""" while treedata: mode = int(treedata[:6], 10) # Turn 100xxx into xxx if mode > 100000: mode -= 100000 assert treedata[6] == " " i = treedata.find("\0", 7) assert i > 0 path = treedata[7:i] sha1 = hexlify(treedata[i + 1: i + 21]) yield (mode, sha1, self.path + path) treedata = treedata[i + 21:] def __call__ (self, parser, obj, sha1, t, size, data): """Git object callback (see GitObjectFetcher documentation).""" assert parser == self.parser if not sha1: # Missing object self.err = "Missing object '%s'" % (obj) return assert size == len(data) if t == "tree": if self.path: self.path += "/" # Recurse into all blobs and subtrees for m, s, p in self.parse_tree(data): parser.push(s, self.__class__(self.parser, self.cb, p, m)) elif t == "blob": self.cb(self.path, self.mode, data) else: raise ValueError("Unknown object type '%s'" % (t)) self.push(tree_objspec, _ObjHandler(self, callback, prefix)) self.process() class GitRefMap(object): """Map Git ref names to the Git object names they currently point to. Behaves like a dictionary of Git ref names -> Git object names. """ def __init__ (self, obj_fetcher): """Create a new Git ref -> object map.""" self.obj_fetcher = obj_fetcher self._cache = {} # dict: refname -> objname def _load (self, ref): """Retrieve the object currently bound to the given ref. The name of the object pointed to by the given ref is stored into this mapping, and also returned. 
""" if ref not in self._cache: self._cache[ref] = self.obj_fetcher.get_sha1(ref) return self._cache[ref] def __contains__ (self, refname): """Return True if the given refname is present in this cache.""" return bool(self._load(refname)) def __getitem__ (self, refname): """Return the git object name pointed to by the given refname.""" commit = self._load(refname) if commit is None: raise KeyError("Unknown ref '%s'" % (refname)) return commit def get (self, refname, default = None): """Return the git object name pointed to by the given refname.""" commit = self._load(refname) if commit is None: return default return commit class GitFICommit(object): """Encapsulate the data in a Git fast-import commit command.""" SHA1RE = re.compile(r'^[0-9a-f]{40}$') @classmethod def parse_mode (cls, mode): """Verify the given git file mode, and return it as a string.""" assert mode in (644, 755, 100644, 100755, 120000) return "%i" % (mode) @classmethod def parse_objname (cls, objname): """Return the given object name (or mark number) as a string.""" if isinstance(objname, int): # Object name is a mark number assert objname > 0 return ":%i" % (objname) # No existence check is done, only checks for valid format assert cls.SHA1RE.match(objname) # Object name is valid SHA1 return objname @classmethod def quote_path (cls, path): """Return a quoted version of the given path.""" path = path.replace("\\", "\\\\") path = path.replace("\n", "\\n") path = path.replace('"', '\\"') return '"%s"' % (path) @classmethod def parse_path (cls, path): """Verify that the given path is valid, and quote it, if needed.""" assert not isinstance(path, int) # Cannot be a mark number # These checks verify the rules on the fast-import man page assert not path.count("//") assert not path.endswith("/") assert not path.startswith("/") assert not path.count("/./") assert not path.count("/../") assert not path.endswith("/.") assert not path.endswith("/..") assert not path.startswith("./") assert not 
path.startswith("../") if path.count('"') + path.count('\n') + path.count('\\'): return cls.quote_path(path) return path def __init__ (self, name, email, timestamp, timezone, message): """Create a new Git fast-import commit, with the given metadata.""" self.name = name self.email = email self.timestamp = timestamp self.timezone = timezone self.message = message self.pathops = [] # List of path operations in this commit def modify (self, mode, blobname, path): """Add a file modification to this Git fast-import commit.""" self.pathops.append(("M", self.parse_mode(mode), self.parse_objname(blobname), self.parse_path(path))) def delete (self, path): """Add a file deletion to this Git fast-import commit.""" self.pathops.append(("D", self.parse_path(path))) def copy (self, path, newpath): """Add a file copy to this Git fast-import commit.""" self.pathops.append(("C", self.parse_path(path), self.parse_path(newpath))) def rename (self, path, newpath): """Add a file rename to this Git fast-import commit.""" self.pathops.append(("R", self.parse_path(path), self.parse_path(newpath))) def note (self, blobname, commit): """Add a note object to this Git fast-import commit.""" self.pathops.append(("N", self.parse_objname(blobname), self.parse_objname(commit))) def deleteall (self): """Delete all files in this Git fast-import commit.""" self.pathops.append("deleteall") class TestGitFICommit(unittest.TestCase): """GitFICommit selftests.""" def test_basic (self): """GitFICommit basic selftests.""" def expect_fail (method, data): """Verify that the method(data) raises an AssertionError.""" try: method(data) except AssertionError: return raise AssertionError("Failed test for invalid data '%s(%s)'" % (method.__name__, repr(data))) def test_parse_mode (self): """GitFICommit.parse_mode() selftests.""" self.assertEqual(GitFICommit.parse_mode(644), "644") self.assertEqual(GitFICommit.parse_mode(755), "755") self.assertEqual(GitFICommit.parse_mode(100644), "100644") 
self.assertEqual(GitFICommit.parse_mode(100755), "100755") self.assertEqual(GitFICommit.parse_mode(120000), "120000") self.assertRaises(AssertionError, GitFICommit.parse_mode, 0) self.assertRaises(AssertionError, GitFICommit.parse_mode, 123) self.assertRaises(AssertionError, GitFICommit.parse_mode, 600) self.assertRaises(AssertionError, GitFICommit.parse_mode, "644") self.assertRaises(AssertionError, GitFICommit.parse_mode, "abc") def test_parse_objname (self): """GitFICommit.parse_objname() selftests.""" self.assertEqual(GitFICommit.parse_objname(1), ":1") self.assertRaises(AssertionError, GitFICommit.parse_objname, 0) self.assertRaises(AssertionError, GitFICommit.parse_objname, -1) self.assertEqual(GitFICommit.parse_objname("0123456789" * 4), "0123456789" * 4) self.assertEqual(GitFICommit.parse_objname("2468abcdef" * 4), "2468abcdef" * 4) self.assertRaises(AssertionError, GitFICommit.parse_objname, "abcdefghij" * 4) def test_parse_path (self): """GitFICommit.parse_path() selftests.""" self.assertEqual(GitFICommit.parse_path("foo/bar"), "foo/bar") self.assertEqual(GitFICommit.parse_path("path/with\n and \" in it"), '"path/with\\n and \\" in it"') self.assertRaises(AssertionError, GitFICommit.parse_path, 1) self.assertRaises(AssertionError, GitFICommit.parse_path, 0) self.assertRaises(AssertionError, GitFICommit.parse_path, -1) self.assertRaises(AssertionError, GitFICommit.parse_path, "foo//bar") self.assertRaises(AssertionError, GitFICommit.parse_path, "foo/bar/") self.assertRaises(AssertionError, GitFICommit.parse_path, "/foo/bar") self.assertRaises(AssertionError, GitFICommit.parse_path, "foo/./bar") self.assertRaises(AssertionError, GitFICommit.parse_path, "foo/../bar") self.assertRaises(AssertionError, GitFICommit.parse_path, "foo/bar/.") self.assertRaises(AssertionError, GitFICommit.parse_path, "foo/bar/..") self.assertRaises(AssertionError, GitFICommit.parse_path, "./foo/bar") self.assertRaises(AssertionError, GitFICommit.parse_path, "../foo/bar") class 
GitFastImport(object):
    """Encapsulate communication with git fast-import.

    Writes a git fast-import command stream to a file object and hands
    out mark numbers (:1, :2, ...) identifying the objects it creates.
    """

    def __init__ (self, f, obj_fetcher, last_mark = 0):
        """Set up self to communicate with a fast-import process through f."""
        self.f = f  # File object where fast-import stream is written
        self.obj_fetcher = obj_fetcher  # GitObjectFetcher instance
        self.next_mark = last_mark + 1  # Next mark number
        self.refs = set()  # Keep track of the refnames we've seen

    def comment (self, s):
        """Write the given comment in the fast-import stream."""
        # fast-import comments are single-line; embedded newlines would
        # corrupt the command stream.
        assert "\n" not in s, "Malformed comment: '%s'" % (s)
        self.f.write("# %s\n" % (s))

    def commit (self, ref, commitdata):
        """Make a commit on the given ref, with the given GitFICommit.

        Return the mark number identifying this commit.

        """
        # The 'data <length>' header counts the bytes of the commit
        # message that follows it, per the fast-import stream format.
        self.f.write("""\
commit %(ref)s
mark :%(mark)i
committer %(name)s <%(email)s> %(timestamp)i %(timezone)s
data %(msgLength)i
%(msg)s
""" % {
            'ref': ref,
            'mark': self.next_mark,
            'name': commitdata.name,
            'email': commitdata.email,
            'timestamp': commitdata.timestamp,
            'timezone': commitdata.timezone,
            'msgLength': len(commitdata.message),
            'msg': commitdata.message,
        })
        # First commit we make on this ref in this stream: if the ref
        # already exists in the repo, chain onto its current tip with a
        # 'from' line so history is preserved.
        if ref not in self.refs:
            self.refs.add(ref)
            parent = ref + "^0"
            if self.obj_fetcher.get_sha1(parent):
                self.f.write("from %s\n" % (parent))
        # Emit the file operations ('M'/'D'/'N' etc.) for this commit.
        for op in commitdata.pathops:
            self.f.write(" ".join(op))
            self.f.write("\n")
        self.f.write("\n")
        retval = self.next_mark
        self.next_mark += 1
        return retval

    def blob (self, data):
        """Import the given blob.

        Return the mark number identifying this blob.

        """
        # NOTE(review): len(data) must be the byte length of data for the
        # 'data <length>' header to be correct — assumes str/bytes, not
        # unicode with multi-byte characters; confirm at call sites.
        self.f.write("blob\nmark :%i\ndata %i\n%s\n" % (self.next_mark, len(data), data))
        retval = self.next_mark
        self.next_mark += 1
        return retval

    def reset (self, ref, objname):
        """Reset the given ref to point at the given Git object."""
        self.f.write("reset %s\nfrom %s\n\n" % (ref, GitFICommit.parse_objname(objname)))
        # Remember we've touched this ref so a later commit() won't try
        # to chain onto its pre-stream tip.
        if ref not in self.refs:
            self.refs.add(ref)


class GitNotes(object):
    """Encapsulate access to Git notes.

    Simulates a dictionary of object name (SHA1) -> Git note mappings.

    """

    def __init__ (self, notes_ref, obj_fetcher):
        """Create a new Git notes interface, bound to the given notes ref."""
        self.notes_ref = notes_ref
        self.obj_fetcher = obj_fetcher  # Used to get objects from repo
        self.imports = []  # list: (objname, note data blob name) tuples

    def __del__ (self):
        """Verify that self.commit_notes() was called before destruction."""
        # Notes submitted via import_note() are lost unless commit_notes()
        # produced the note commit referring to them; warn loudly.
        if self.imports:
            error("Missing call to self.commit_notes().")
            error("%i notes are not committed!", len(self.imports))

    def _load (self, objname):
        """Return the note data associated with the given git object.

        The note data is returned in string form. If no note is found
        for the given object, None is returned.

        """
        try:
            # '<notes_ref>:<objname>' resolves to the note blob attached
            # to objname, if any.
            f = self.obj_fetcher.open_obj("%s:%s" % (self.notes_ref, objname))
            ret = f.read()
            f.close()
        except KeyError:
            # No note attached to this object.
            ret = None
        return ret

    def __getitem__ (self, objname):
        """Return the note contents associated with the given object.

        Raise KeyError if given object has no associated note.

        """
        blobdata = self._load(objname)
        if blobdata is None:
            raise KeyError("Object '%s' has no note" % (objname))
        return blobdata

    def get (self, objname, default = None):
        """Return the note contents associated with the given object.

        Return given default if given object has no associated note.

        """
        blobdata = self._load(objname)
        if blobdata is None:
            return default
        return blobdata

    def import_note (self, objname, data, gfi):
        """Tell git fast-import to store data as a note for objname.

        This method uses the given GitFastImport object to create a blob
        containing the given note data. Also an entry mapping the given
        object name to the created blob is stored until commit_notes()
        is called.

        Note that this method only works if it is later followed by a
        call to self.commit_notes() (which produces the note commit that
        refers to the blob produced here).

        """
        # Notes are conventionally newline-terminated.
        if not data.endswith("\n"):
            data += "\n"
        gfi.comment("Importing note for object %s" % (objname))
        mark = gfi.blob(data)
        self.imports.append((objname, mark))

    def commit_notes (self, gfi, author, message):
        """Produce a git fast-import note commit for the imported notes.

        This method uses the given GitFastImport object to create a
        commit on the notes ref, introducing the notes previously
        submitted to import_note().

        """
        if not self.imports:
            return
        commitdata = GitFICommit(author[0], author[1], time.time(), "0000", message)
        for objname, blobname in self.imports:
            # NOTE(review): despite the name, objname is asserted to be a
            # positive int here — i.e. a fast-import mark number, not a
            # SHA1 string; callers of import_note() apparently pass marks.
            # Confirm against the call sites.
            assert isinstance(objname, int) and objname > 0
            assert isinstance(blobname, int) and blobname > 0
            commitdata.note(blobname, objname)
        gfi.commit(self.notes_ref, commitdata)
        self.imports = []


class GitCachedNotes(GitNotes):
    """Encapsulate access to Git notes (cached version).

    Only use this class if no caching is done at a higher level.

    Simulates a dictionary of object name (SHA1) -> Git note mappings.

    """

    def __init__ (self, notes_ref, obj_fetcher):
        """Set up a caching wrapper around GitNotes."""
        GitNotes.__init__(self, notes_ref, obj_fetcher)
        self._cache = {}  # Cache: object name -> note data

    def __del__ (self):
        """Verify that GitNotes' destructor is called."""
        GitNotes.__del__(self)

    def _load (self, objname):
        """Extend GitNotes._load() with a local objname -> note cache."""
        # Cache both hits and misses (None) to avoid repeated repo lookups.
        if objname not in self._cache:
            self._cache[objname] = GitNotes._load(self, objname)
        return self._cache[objname]

    def import_note (self, objname, data, gfi):
        """Extend GitNotes.import_note() with a local objname -> note cache."""
        # Mirror the newline normalization done by the base class so the
        # cached value matches what will actually be stored.
        if not data.endswith("\n"):
            data += "\n"
        assert objname not in self._cache
        self._cache[objname] = data
        GitNotes.import_note(self, objname, data, gfi)


if __name__ == '__main__':
    unittest.main()
dav1x/ansible
refs/heads/devel
contrib/inventory/ec2.py
26
#!/usr/bin/env python

'''
EC2 external inventory script
=================================

Generates inventory that Ansible can understand by making API requests to
AWS EC2 using the Boto library.

NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:

    export AWS_ACCESS_KEY_ID='AK123'
    export AWS_SECRET_ACCESS_KEY='abc123'

Optional region environment variable if region is 'auto'

This script also assumes there is an ec2.ini file alongside it.  To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:

    export EC2_INI_PATH=/path/to/my_ec2.ini

If you're using eucalyptus you need to set the above variables and
you need to define:

    export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus

If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod)
or using the AWS_PROFILE variable:

    AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml

For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html

When run against a specific host, this script returns the following variables:
 - ec2_ami_launch_index
 - ec2_architecture
 - ec2_association
 - ec2_attachTime
 - ec2_attachment
 - ec2_attachmentId
 - ec2_block_devices
 - ec2_client_token
 - ec2_deleteOnTermination
 - ec2_description
 - ec2_deviceIndex
 - ec2_dns_name
 - ec2_eventsSet
 - ec2_group_name
 - ec2_hypervisor
 - ec2_id
 - ec2_image_id
 - ec2_instanceState
 - ec2_instance_type
 - ec2_ipOwnerId
 - ec2_ip_address
 - ec2_item
 - ec2_kernel
 - ec2_key_name
 - ec2_launch_time
 - ec2_monitored
 - ec2_monitoring
 - ec2_networkInterfaceId
 - ec2_ownerId
 - ec2_persistent
 - ec2_placement
 - ec2_platform
 - ec2_previous_state
 - ec2_private_dns_name
 - ec2_private_ip_address
 - ec2_publicIp
 - ec2_public_dns_name
 - ec2_ramdisk
 - ec2_reason
 - ec2_region
 - ec2_requester_id
 - ec2_root_device_name
 - ec2_root_device_type
 - ec2_security_group_ids
 - ec2_security_group_names
 - ec2_shutdown_state
 - ec2_sourceDestCheck
 - ec2_spot_instance_request_id
 - ec2_state
 - ec2_state_code
 - ec2_state_reason
 - ec2_status
 - ec2_subnet_id
 - ec2_tenancy
 - ec2_virtualization_type
 - ec2_vpc_id

These variables are pulled out of a boto.ec2.instance object. There is a lack
of consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.

In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
 - ec2_tag_[Key] = [Value]

Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''

# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
###################################################################### import sys import os import argparse import re from time import time import boto from boto import ec2 from boto import rds from boto import elasticache from boto import route53 from boto import sts import six from ansible.module_utils import ec2 as ec2_utils HAS_BOTO3 = False try: import boto3 HAS_BOTO3 = True except ImportError: pass from six.moves import configparser from collections import defaultdict try: import json except ImportError: import simplejson as json class Ec2Inventory(object): def _empty_inventory(self): return {"_meta" : {"hostvars" : {}}} def __init__(self): ''' Main execution path ''' # Inventory grouped by instance IDs, tags, security groups, regions, # and availability zones self.inventory = self._empty_inventory() self.aws_account_id = None # Index of hostname (address) to instance ID self.index = {} # Boto profile to use (if any) self.boto_profile = None # AWS credentials. self.credentials = {} # Read settings and parse CLI arguments self.parse_cli_args() self.read_settings() # Make sure that profile_name is not passed at all if not set # as pre 2.24 boto will fall over otherwise if self.boto_profile: if not hasattr(boto.ec2.EC2Connection, 'profile_name'): self.fail_with_error("boto version must be >= 2.24 to use profile") # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of instances for inventory if self.inventory == self._empty_inventory(): data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) print(data_to_print) def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = 
time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): ''' Reads the settings from the ec2.ini file ''' scriptbasename = __file__ scriptbasename = os.path.basename(scriptbasename) scriptbasename = scriptbasename.replace('.py', '') defaults = {'ec2': { 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename) } } if six.PY3: config = configparser.ConfigParser() else: config = configparser.SafeConfigParser() ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path']) ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path)) config.read(ec2_ini_path) # is eucalyptus? self.eucalyptus_host = None self.eucalyptus = False if config.has_option('ec2', 'eucalyptus'): self.eucalyptus = config.getboolean('ec2', 'eucalyptus') if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') # Regions self.regions = [] configRegions = config.get('ec2', 'regions') if (configRegions == 'all'): if self.eucalyptus_host: self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name, **self.credentials) else: configRegions_exclude = config.get('ec2', 'regions_exclude') for regionInfo in ec2.regions(): if regionInfo.name not in configRegions_exclude: self.regions.append(regionInfo.name) else: self.regions = configRegions.split(",") if 'auto' in self.regions: env_region = os.environ.get('AWS_REGION') if env_region is None: env_region = os.environ.get('AWS_DEFAULT_REGION') self.regions = [ env_region ] # Destination addresses self.destination_variable = config.get('ec2', 'destination_variable') self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') if config.has_option('ec2', 'hostname_variable'): self.hostname_variable = config.get('ec2', 'hostname_variable') else: self.hostname_variable = None if config.has_option('ec2', 'destination_format') and \ 
config.has_option('ec2', 'destination_format_tags'): self.destination_format = config.get('ec2', 'destination_format') self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') else: self.destination_format = None self.destination_format_tags = None # Route53 self.route53_enabled = config.getboolean('ec2', 'route53') if config.has_option('ec2', 'route53_hostnames'): self.route53_hostnames = config.get('ec2', 'route53_hostnames') else: self.route53_hostnames = None self.route53_excluded_zones = [] if config.has_option('ec2', 'route53_excluded_zones'): self.route53_excluded_zones.extend( config.get('ec2', 'route53_excluded_zones', '').split(',')) # Include RDS instances? self.rds_enabled = True if config.has_option('ec2', 'rds'): self.rds_enabled = config.getboolean('ec2', 'rds') # Include RDS cluster instances? if config.has_option('ec2', 'include_rds_clusters'): self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters') else: self.include_rds_clusters = False # Include ElastiCache instances? self.elasticache_enabled = True if config.has_option('ec2', 'elasticache'): self.elasticache_enabled = config.getboolean('ec2', 'elasticache') # Return all EC2 instances? if config.has_option('ec2', 'all_instances'): self.all_instances = config.getboolean('ec2', 'all_instances') else: self.all_instances = False # Instance states to be gathered in inventory. Default is 'running'. # Setting 'all_instances' to 'yes' overrides this option. 
ec2_valid_instance_states = [ 'pending', 'running', 'shutting-down', 'terminated', 'stopping', 'stopped' ] self.ec2_instance_states = [] if self.all_instances: self.ec2_instance_states = ec2_valid_instance_states elif config.has_option('ec2', 'instance_states'): for instance_state in config.get('ec2', 'instance_states').split(','): instance_state = instance_state.strip() if instance_state not in ec2_valid_instance_states: continue self.ec2_instance_states.append(instance_state) else: self.ec2_instance_states = ['running'] # Return all RDS instances? (if RDS is enabled) if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') else: self.all_rds_instances = False # Return all ElastiCache replication groups? (if ElastiCache is enabled) if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') else: self.all_elasticache_replication_groups = False # Return all ElastiCache clusters? (if ElastiCache is enabled) if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') else: self.all_elasticache_clusters = False # Return all ElastiCache nodes? 
(if ElastiCache is enabled) if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') else: self.all_elasticache_nodes = False # boto configuration profile (prefer CLI argument then environment variables then config file) self.boto_profile = self.args.boto_profile or os.environ.get('AWS_PROFILE') if config.has_option('ec2', 'boto_profile') and not self.boto_profile: self.boto_profile = config.get('ec2', 'boto_profile') # AWS credentials (prefer environment variables) if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or os.environ.get('AWS_PROFILE')): if config.has_option('credentials', 'aws_access_key_id'): aws_access_key_id = config.get('credentials', 'aws_access_key_id') else: aws_access_key_id = None if config.has_option('credentials', 'aws_secret_access_key'): aws_secret_access_key = config.get('credentials', 'aws_secret_access_key') else: aws_secret_access_key = None if config.has_option('credentials', 'aws_security_token'): aws_security_token = config.get('credentials', 'aws_security_token') else: aws_security_token = None if aws_access_key_id: self.credentials = { 'aws_access_key_id': aws_access_key_id, 'aws_secret_access_key': aws_secret_access_key } if aws_security_token: self.credentials['security_token'] = aws_security_token # Cache related cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) if self.boto_profile: cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) if not os.path.exists(cache_dir): os.makedirs(cache_dir) cache_name = 'ansible-ec2' cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id')) if cache_id: cache_name = '%s-%s' % (cache_name, cache_id) self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name) self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name) self.cache_max_age = config.getint('ec2', 
'cache_max_age') if config.has_option('ec2', 'expand_csv_tags'): self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags') else: self.expand_csv_tags = False # Configure nested groups instead of flat namespace. if config.has_option('ec2', 'nested_groups'): self.nested_groups = config.getboolean('ec2', 'nested_groups') else: self.nested_groups = False # Replace dash or not in group names if config.has_option('ec2', 'replace_dash_in_groups'): self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') else: self.replace_dash_in_groups = True # IAM role to assume for connection if config.has_option('ec2', 'iam_role'): self.iam_role = config.get('ec2', 'iam_role') else: self.iam_role = None # Configure which groups should be created. group_by_options = [ 'group_by_instance_id', 'group_by_region', 'group_by_availability_zone', 'group_by_ami_id', 'group_by_instance_type', 'group_by_instance_state', 'group_by_key_pair', 'group_by_vpc_id', 'group_by_security_group', 'group_by_tag_keys', 'group_by_tag_none', 'group_by_route53_names', 'group_by_rds_engine', 'group_by_rds_parameter_group', 'group_by_elasticache_engine', 'group_by_elasticache_cluster', 'group_by_elasticache_parameter_group', 'group_by_elasticache_replication_group', 'group_by_aws_account', ] for option in group_by_options: if config.has_option('ec2', option): setattr(self, option, config.getboolean('ec2', option)) else: setattr(self, option, True) # Do we need to just include hosts that match a pattern? try: pattern_include = config.get('ec2', 'pattern_include') if pattern_include and len(pattern_include) > 0: self.pattern_include = re.compile(pattern_include) else: self.pattern_include = None except configparser.NoOptionError: self.pattern_include = None # Do we need to exclude hosts that match a pattern? 
try: pattern_exclude = config.get('ec2', 'pattern_exclude') if pattern_exclude and len(pattern_exclude) > 0: self.pattern_exclude = re.compile(pattern_exclude) else: self.pattern_exclude = None except configparser.NoOptionError: self.pattern_exclude = None # Do we want to stack multiple filters? if config.has_option('ec2', 'stack_filters'): self.stack_filters = config.getboolean('ec2', 'stack_filters') else: self.stack_filters = False # Instance filters (see boto and EC2 API docs). Ignore invalid filters. self.ec2_instance_filters = defaultdict(list) if config.has_option('ec2', 'instance_filters'): filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f] for instance_filter in filters: instance_filter = instance_filter.strip() if not instance_filter or '=' not in instance_filter: continue filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] if not filter_key: continue self.ec2_instance_filters[filter_key].append(filter_value) def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile', help='Use boto profile for connections to EC2') self.args = parser.parse_args() def do_api_calls_update_cache(self): ''' Do API calls to each region, and save data in cache files ''' if self.route53_enabled: self.get_route53_records() for region in self.regions: self.get_instances_by_region(region) if self.rds_enabled: self.get_rds_instances_by_region(region) if 
self.elasticache_enabled: self.get_elasticache_clusters_by_region(region) self.get_elasticache_replication_groups_by_region(region) if self.include_rds_clusters: self.include_rds_clusters_by_region(region) self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def connect(self, region): ''' create connection to api server''' if self.eucalyptus: conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials) conn.APIVersion = '2010-08-31' else: conn = self.connect_to_aws(ec2, region) return conn def boto_fix_security_token_in_profile(self, connect_args): ''' monkey patch for boto issue boto/boto#2100 ''' profile = 'profile ' + self.boto_profile if boto.config.has_option(profile, 'aws_security_token'): connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') return connect_args def connect_to_aws(self, module, region): connect_args = self.credentials # only pass the profile name if it's set (as it is not supported by older boto versions) if self.boto_profile: connect_args['profile_name'] = self.boto_profile self.boto_fix_security_token_in_profile(connect_args) if self.iam_role: sts_conn = sts.connect_to_region(region, **connect_args) role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory') connect_args['aws_access_key_id'] = role.credentials.access_key connect_args['aws_secret_access_key'] = role.credentials.secret_key connect_args['security_token'] = role.credentials.session_token conn = module.connect_to_region(region, **connect_args) # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported if conn is None: self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) return conn def get_instances_by_region(self, region): ''' Makes an AWS EC2 API call to the list of instances in a particular region ''' try: conn = self.connect(region) reservations = [] if self.ec2_instance_filters: if self.stack_filters: filters_dict = {} for filter_key, filter_values in self.ec2_instance_filters.items(): filters_dict[filter_key] = filter_values reservations.extend(conn.get_all_instances(filters = filters_dict)) else: for filter_key, filter_values in self.ec2_instance_filters.items(): reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) else: reservations = conn.get_all_instances() # Pull the tags back in a second step # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags` instance_ids = [] for reservation in reservations: instance_ids.extend([instance.id for instance in reservation.instances]) max_filter_value = 199 tags = [] for i in range(0, len(instance_ids), max_filter_value): tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]})) tags_by_instance_id = defaultdict(dict) for tag in tags: tags_by_instance_id[tag.res_id][tag.name] = tag.value if (not self.aws_account_id) and reservations: self.aws_account_id = reservations[0].owner_id for reservation in reservations: for instance in reservation.instances: instance.tags = tags_by_instance_id[instance.id] self.add_instance(instance, region) except boto.exception.BotoServerError as e: if e.error_code == 'AuthFailure': error = self.get_auth_error_message() else: backend = 'Eucalyptus' if self.eucalyptus else 'AWS' error = "Error connecting to %s backend.\n%s" % (backend, e.message) self.fail_with_error(error, 'getting EC2 instances') def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to the list of RDS instances in a 
particular region ''' try: conn = self.connect_to_aws(rds, region) if conn: marker = None while True: instances = conn.get_all_dbinstances(marker=marker) marker = instances.marker for instance in instances: self.add_rds_instance(instance, region) if not marker: break except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS RDS is down:\n%s" % e.message self.fail_with_error(error, 'getting RDS instances') def include_rds_clusters_by_region(self, region): if not HAS_BOTO3: self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again", "getting RDS clusters") client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) marker, clusters = '', [] while marker is not None: resp = client.describe_db_clusters(Marker=marker) clusters.extend(resp["DBClusters"]) marker = resp.get('Marker', None) account_id = boto.connect_iam().get_user().arn.split(':')[4] c_dict = {} for c in clusters: # remove these datetime objects as there is no serialisation to json # currently in place and we don't need the data yet if 'EarliestRestorableTime' in c: del c['EarliestRestorableTime'] if 'LatestRestorableTime' in c: del c['LatestRestorableTime'] if self.ec2_instance_filters == {}: matches_filter = True else: matches_filter = False try: # arn:aws:rds:<region>:<account number>:<resourcetype>:<name> tags = client.list_tags_for_resource( ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier']) c['Tags'] = tags['TagList'] if self.ec2_instance_filters: for filter_key, filter_values in self.ec2_instance_filters.items(): # get AWS tag key e.g. 
tag:env will be 'env' tag_name = filter_key.split(":", 1)[1] # Filter values is a list (if you put multiple values for the same tag name) matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags']) if matches_filter: # it matches a filter, so stop looking for further matches break except Exception as e: if e.message.find('DBInstanceNotFound') >= 0: # AWS RDS bug (2016-01-06) means deletion does not fully complete and leave an 'empty' cluster. # Ignore errors when trying to find tags for these pass # ignore empty clusters caused by AWS bug if len(c['DBClusterMembers']) == 0: continue elif matches_filter: c_dict[c['DBClusterIdentifier']] = c self.inventory['db_clusters'] = c_dict def get_elasticache_clusters_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache clusters (with nodes' info) in a particular region.''' # ElastiCache boto module doesn't provide a get_all_intances method, # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: conn = self.connect_to_aws(elasticache, region) if conn: # show_cache_node_info = True # because we also want nodes' information response = conn.describe_cache_clusters(None, None, None, True) except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS ElastiCache is down:\n%s" % e.message self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes to CacheClusters or # CacheNodes. Because of that we can't make use of the get_list # method in the AWSQueryConnection. Let's do the work manually clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] except KeyError as e: error = "ElastiCache query to AWS failed (unexpected format)." 
self.fail_with_error(error, 'getting ElastiCache clusters') for cluster in clusters: self.add_elasticache_cluster(cluster, region) def get_elasticache_replication_groups_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache replication groups in a particular region.''' # ElastiCache boto module doesn't provide a get_all_intances method, # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: conn = self.connect_to_aws(elasticache, region) if conn: response = conn.describe_replication_groups() except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes to ReplicationGroups # Because of that we can't make use of the get_list method in the # AWSQueryConnection. Let's do the work manually replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] except KeyError as e: error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." 
self.fail_with_error(error, 'getting ElastiCache clusters') for replication_group in replication_groups: self.add_elasticache_replication_group(replication_group, region) def get_auth_error_message(self): ''' create an informative error message if there is an issue authenticating''' errors = ["Authentication error retrieving ec2 inventory."] if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') else: errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) if len(boto_config_found) > 0: errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) else: errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) return '\n'.join(errors) def fail_with_error(self, err_msg, err_operation=None): '''log an error to std err for ansible-playbook to consume and exit''' if err_operation: err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( err_msg=err_msg, err_operation=err_operation) sys.stderr.write(err_msg) sys.exit(1) def get_instance(self, region, instance_id): conn = self.connect(region) reservations = conn.get_all_instances([instance_id]) for reservation in reservations: for instance in reservation.instances: return instance def add_instance(self, instance, region): ''' Adds an instance to the inventory and index, as long as it is addressable ''' # Only return instances with desired instance states if instance.state not in self.ec2_instance_states: return # Select the best destination address if self.destination_format and self.destination_format_tags: dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') 
for tag in self.destination_format_tags ]) elif instance.subnet_id: dest = getattr(instance, self.vpc_destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) else: dest = getattr(instance, self.destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.destination_variable, None) if not dest: # Skip instances we cannot address (e.g. private VPC subnet) return # Set the inventory name hostname = None if self.hostname_variable: if self.hostname_variable.startswith('tag_'): hostname = instance.tags.get(self.hostname_variable[4:], None) else: hostname = getattr(instance, self.hostname_variable) # set the hostname from route53 if self.route53_enabled and self.route53_hostnames: route53_names = self.get_instance_route53_names(instance) for name in route53_names: if name.endswith(self.route53_hostnames): hostname = name # If we can't get a nice hostname, use the destination address if not hostname: hostname = dest # to_safe strips hostname characters like dots, so don't strip route53 hostnames elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames): hostname = hostname.lower() else: hostname = self.to_safe(hostname).lower() # if we only want to include hosts that match a pattern, skip those that don't if self.pattern_include and not self.pattern_include.match(hostname): return # if we need to exclude hosts that match a pattern, skip those if self.pattern_exclude and self.pattern_exclude.match(hostname): return # Add to index self.index[hostname] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[instance.id] = [hostname] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, hostname) if self.nested_groups: self.push_group(self.inventory, 'regions', 
region)  # NOTE(review): closes a push_group() call whose opening line is
        # before this excerpt — the enclosing EC2 add-instance method starts
        # outside the visible range.

        # Inventory: Group by availability zone
        if self.group_by_availability_zone:
            self.push(self.inventory, instance.placement, hostname)
            if self.nested_groups:
                if self.group_by_region:
                    self.push_group(self.inventory, region, instance.placement)
                self.push_group(self.inventory, 'zones', instance.placement)

        # Inventory: Group by Amazon Machine Image (AMI) ID
        if self.group_by_ami_id:
            ami_id = self.to_safe(instance.image_id)
            self.push(self.inventory, ami_id, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'images', ami_id)

        # Inventory: Group by instance type
        if self.group_by_instance_type:
            type_name = self.to_safe('type_' + instance.instance_type)
            self.push(self.inventory, type_name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)

        # Inventory: Group by instance state
        if self.group_by_instance_state:
            state_name = self.to_safe('instance_state_' + instance.state)
            self.push(self.inventory, state_name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'instance_states', state_name)

        # Inventory: Group by key pair
        if self.group_by_key_pair and instance.key_name:
            key_name = self.to_safe('key_' + instance.key_name)
            self.push(self.inventory, key_name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'keys', key_name)

        # Inventory: Group by VPC
        if self.group_by_vpc_id and instance.vpc_id:
            vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
            self.push(self.inventory, vpc_id_name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'vpcs', vpc_id_name)

        # Inventory: Group by security group
        if self.group_by_security_group:
            try:
                for group in instance.groups:
                    key = self.to_safe("security_group_" + group.name)
                    self.push(self.inventory, key, hostname)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'security_groups', key)
            except AttributeError:
                # instance.groups only exists in boto >= 2.3.0
                self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                                'Please upgrade boto >= 2.3.0.']))

        # Inventory: Group by AWS account ID
        if self.group_by_aws_account:
            # NOTE(review): this branch groups by `dest` while every other
            # branch above groups by `hostname` — confirm the asymmetry is
            # intentional before changing it.
            self.push(self.inventory, self.aws_account_id, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'accounts', self.aws_account_id)

        # Inventory: Group by tag keys
        if self.group_by_tag_keys:
            for k, v in instance.tags.items():
                if self.expand_csv_tags and v and ',' in v:
                    values = map(lambda x: x.strip(), v.split(','))
                else:
                    values = [v]
                for v in values:
                    if v:
                        key = self.to_safe("tag_" + k + "=" + v)
                    else:
                        key = self.to_safe("tag_" + k)
                    self.push(self.inventory, key, hostname)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
                        if v:
                            self.push_group(self.inventory, self.to_safe("tag_" + k), key)

        # Inventory: Group by Route53 domain names if enabled
        if self.route53_enabled and self.group_by_route53_names:
            route53_names = self.get_instance_route53_names(instance)
            for name in route53_names:
                self.push(self.inventory, name, hostname)
                if self.nested_groups:
                    self.push_group(self.inventory, 'route53', name)

        # Global Tag: instances without tags
        if self.group_by_tag_none and len(instance.tags) == 0:
            self.push(self.inventory, 'tag_none', hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'tags', 'tag_none')

        # Global Tag: tag all EC2 instances
        self.push(self.inventory, 'ec2', hostname)

        self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
        self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest

    def add_rds_instance(self, instance, region):
        ''' Adds an RDS instance to the inventory and index, as long as it is
        addressable '''

        # Only want available instances unless all_rds_instances is True
        if not self.all_rds_instances and instance.status != 'available':
            return

        # Select the best destination address
        dest = instance.endpoint[0]

        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return

        # Set the inventory name
        hostname = None
        if self.hostname_variable:
            if self.hostname_variable.startswith('tag_'):
                hostname = instance.tags.get(self.hostname_variable[4:], None)
            else:
                hostname = getattr(instance, self.hostname_variable)

        # If we can't get a nice hostname, use the destination address
        if not hostname:
            hostname = dest

        hostname = self.to_safe(hostname).lower()

        # Add to index
        self.index[hostname] = [region, instance.id]

        # Inventory: Group by instance ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[instance.id] = [hostname]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', instance.id)

        # Inventory: Group by region
        if self.group_by_region:
            self.push(self.inventory, region, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)

        # Inventory: Group by availability zone
        if self.group_by_availability_zone:
            self.push(self.inventory, instance.availability_zone, hostname)
            if self.nested_groups:
                if self.group_by_region:
                    self.push_group(self.inventory, region, instance.availability_zone)
                self.push_group(self.inventory, 'zones', instance.availability_zone)

        # Inventory: Group by instance type
        if self.group_by_instance_type:
            type_name = self.to_safe('type_' + instance.instance_class)
            self.push(self.inventory, type_name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)

        # Inventory: Group by VPC
        if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
            vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
            self.push(self.inventory, vpc_id_name, hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'vpcs', vpc_id_name)

        # Inventory: Group by security group
        if self.group_by_security_group:
            try:
                if instance.security_group:
                    key = self.to_safe("security_group_" + instance.security_group.name)
                    self.push(self.inventory, key, hostname)
                    if self.nested_groups:
                        self.push_group(self.inventory,
                                        'security_groups', key)
            except AttributeError:
                # instance.security_group only exists in boto >= 2.3.0
                self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                                'Please upgrade boto >= 2.3.0.']))

        # Inventory: Group by engine
        if self.group_by_rds_engine:
            self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))

        # Inventory: Group by parameter group
        if self.group_by_rds_parameter_group:
            self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
            if self.nested_groups:
                self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))

        # Global Tag: all RDS instances
        self.push(self.inventory, 'rds', hostname)

        self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
        self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest

    def add_elasticache_cluster(self, cluster, region):
        ''' Adds an ElastiCache cluster to the inventory and index, as long as
        it's nodes are addressable '''

        # Only want available clusters unless all_elasticache_clusters is True
        if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
            return

        # Select the best destination address
        if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
            # Memcached cluster
            dest = cluster['ConfigurationEndpoint']['Address']
            is_redis = False
        else:
            # Redis single node cluster
            # Because all Redis clusters are single nodes, we'll merge the
            # info from the cluster with info about the node
            dest = cluster['CacheNodes'][0]['Endpoint']['Address']
            is_redis = True

        if not dest:
            # Skip clusters we cannot address (e.g.
            # private VPC subnet)  -- continuation of the comment above
            return

        # Add to index
        self.index[dest] = [region, cluster['CacheClusterId']]

        # Inventory: Group by instance ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[cluster['CacheClusterId']] = [dest]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])

        # Inventory: Group by region
        if self.group_by_region and not is_redis:
            self.push(self.inventory, region, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)

        # Inventory: Group by availability zone
        if self.group_by_availability_zone and not is_redis:
            self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
            if self.nested_groups:
                if self.group_by_region:
                    self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
                self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])

        # Inventory: Group by node type
        if self.group_by_instance_type and not is_redis:
            type_name = self.to_safe('type_' + cluster['CacheNodeType'])
            self.push(self.inventory, type_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)

        # Inventory: Group by VPC (information not available in the current
        # AWS API version for ElastiCache)

        # Inventory: Group by security group
        if self.group_by_security_group and not is_redis:

            # Check for the existence of the 'SecurityGroups' key and also if
            # this key has some value. When the cluster is not placed in a SG
            # the query can return None here and cause an error.
            if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
                for security_group in cluster['SecurityGroups']:
                    key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
                    self.push(self.inventory, key, dest)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'security_groups', key)

        # Inventory: Group by engine
        if self.group_by_elasticache_engine and not is_redis:
            self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
            if self.nested_groups:
                # NOTE(review): unlike the node-level variant below, the
                # nested child group name here omits the "elasticache_"
                # prefix — confirm this inconsistency is intentional.
                self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))

        # Inventory: Group by parameter group
        if self.group_by_elasticache_parameter_group:
            self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))

        # Inventory: Group by replication group
        if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
            self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))

        # Global Tag: all ElastiCache clusters
        self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])

        host_info = self.get_host_info_dict_from_describe_dict(cluster)

        self.inventory["_meta"]["hostvars"][dest] = host_info

        # Add the nodes
        for node in cluster['CacheNodes']:
            self.add_elasticache_node(node, cluster, region)

    def add_elasticache_node(self, node, cluster, region):
        ''' Adds an ElastiCache node to the inventory and index, as long as
        it is addressable '''

        # Only want available nodes unless all_elasticache_nodes is True
        if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
            return

        # Select the best destination address
        dest = node['Endpoint']['Address']

        if not dest:
            # Skip nodes we cannot address (e.g. private VPC subnet)
            return

        node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])

        # Add to index
        self.index[dest] = [region, node_id]

        # Inventory: Group by node ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[node_id] = [dest]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', node_id)

        # Inventory: Group by region
        if self.group_by_region:
            self.push(self.inventory, region, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)

        # Inventory: Group by availability zone
        if self.group_by_availability_zone:
            self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
            if self.nested_groups:
                if self.group_by_region:
                    self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
                self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])

        # Inventory: Group by node type
        if self.group_by_instance_type:
            type_name = self.to_safe('type_' + cluster['CacheNodeType'])
            self.push(self.inventory, type_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)

        # Inventory: Group by VPC (information not available in the current
        # AWS API version for ElastiCache)

        # Inventory: Group by security group
        if self.group_by_security_group:

            # Check for the existence of the 'SecurityGroups' key and also if
            # this key has some value. When the cluster is not placed in a SG
            # the query can return None here and cause an error.
            if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
                for security_group in cluster['SecurityGroups']:
                    key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
                    self.push(self.inventory, key, dest)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'security_groups', key)

        # Inventory: Group by engine
        if self.group_by_elasticache_engine:
            self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))

        # Inventory: Group by parameter group (done at cluster level)

        # Inventory: Group by replication group (done at cluster level)

        # Inventory: Group by ElastiCache Cluster
        if self.group_by_elasticache_cluster:
            self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)

        # Global Tag: all ElastiCache nodes
        self.push(self.inventory, 'elasticache_nodes', dest)

        host_info = self.get_host_info_dict_from_describe_dict(node)

        # A node shares its address with its (single-node Redis) cluster, so
        # merge rather than overwrite any hostvars already recorded for dest.
        if dest in self.inventory["_meta"]["hostvars"]:
            self.inventory["_meta"]["hostvars"][dest].update(host_info)
        else:
            self.inventory["_meta"]["hostvars"][dest] = host_info

    def add_elasticache_replication_group(self, replication_group, region):
        ''' Adds an ElastiCache replication group to the inventory and index '''

        # Only want available clusters unless all_elasticache_replication_groups is True
        if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
            return

        # Skip clusters we cannot address (e.g.
        # private VPC subnet or clustered redis)
        if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \
           replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None:
            return

        # Select the best destination address (PrimaryEndpoint)
        dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']

        # Add to index
        self.index[dest] = [region, replication_group['ReplicationGroupId']]

        # Inventory: Group by ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[replication_group['ReplicationGroupId']] = [dest]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])

        # Inventory: Group by region
        if self.group_by_region:
            self.push(self.inventory, region, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)

        # Inventory: Group by availability zone (doesn't apply to replication groups)

        # Inventory: Group by node type (doesn't apply to replication groups)

        # Inventory: Group by VPC (information not available in the current
        # AWS API version for replication groups

        # Inventory: Group by security group (doesn't apply to replication groups)
        # Check this value in cluster level

        # Inventory: Group by engine (replication groups are always Redis)
        if self.group_by_elasticache_engine:
            self.push(self.inventory, 'elasticache_redis', dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'elasticache_engines', 'redis')

        # Global Tag: all ElastiCache clusters
        self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])

        host_info = self.get_host_info_dict_from_describe_dict(replication_group)

        self.inventory["_meta"]["hostvars"][dest] = host_info

    def get_route53_records(self):
        ''' Get and store the map of resource records to domain names that
        point to them. '''

        if self.boto_profile:
            r53_conn = route53.Route53Connection(profile_name=self.boto_profile)
        else:
            r53_conn = route53.Route53Connection()
        all_zones = r53_conn.get_zones()

        route53_zones = [zone for zone in all_zones if zone.name[:-1]
                         not in self.route53_excluded_zones]

        self.route53_records = {}

        for zone in route53_zones:
            rrsets = r53_conn.get_all_rrsets(zone.id)

            for record_set in rrsets:
                record_name = record_set.name

                # Route53 names are fully qualified (trailing dot) — strip it
                if record_name.endswith('.'):
                    record_name = record_name[:-1]

                for resource in record_set.resource_records:
                    self.route53_records.setdefault(resource, set())
                    self.route53_records[resource].add(record_name)

    def get_instance_route53_names(self, instance):
        ''' Check if an instance is referenced in the records we have from
        Route53. If it is, return the list of domain names pointing to said
        instance. If nothing points to it, return an empty list. '''

        instance_attributes = ['public_dns_name', 'private_dns_name',
                               'ip_address', 'private_ip_address']

        name_list = set()

        for attrib in instance_attributes:
            try:
                value = getattr(instance, attrib)
            except AttributeError:
                continue

            if value in self.route53_records:
                name_list.update(self.route53_records[value])

        return list(name_list)

    def get_host_info_dict_from_instance(self, instance):
        ''' Flattens a boto EC2/RDS instance object into a dict of
        ec2_-prefixed hostvars. '''
        instance_vars = {}
        for key in vars(instance):
            value = getattr(instance, key)
            key = self.to_safe('ec2_' + key)

            # Handle complex types
            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
            if key == 'ec2__state':
                instance_vars['ec2_state'] = instance.state or ''
                instance_vars['ec2_state_code'] = instance.state_code
            elif key == 'ec2__previous_state':
                instance_vars['ec2_previous_state'] = instance.previous_state or ''
                instance_vars['ec2_previous_state_code'] = instance.previous_state_code
            elif isinstance(value, (int, bool)):
                instance_vars[key] = value
            elif isinstance(value, six.string_types):
                instance_vars[key] = value.strip()
            elif value is None:
                instance_vars[key] = ''
            elif key == 'ec2_region':
                instance_vars[key] = value.name
            elif key == 'ec2__placement':
                instance_vars['ec2_placement'] = value.zone
            elif key == 'ec2_tags':
                for k, v in value.items():
                    if self.expand_csv_tags and ',' in v:
                        v = list(map(lambda x: x.strip(), v.split(',')))
                    key = self.to_safe('ec2_tag_' + k)
                    instance_vars[key] = v
            elif key == 'ec2_groups':
                group_ids = []
                group_names = []
                for group in value:
                    group_ids.append(group.id)
                    group_names.append(group.name)
                instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
                instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
            elif key == 'ec2_block_device_mapping':
                instance_vars["ec2_block_devices"] = {}
                for k, v in value.items():
                    instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id
            else:
                pass
                # TODO Product codes if someone finds them useful
                #print key
                #print type(value)
                #print value

        instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id

        return instance_vars

    def get_host_info_dict_from_describe_dict(self, describe_dict):
        ''' Parses the dictionary returned by the API call into a flat list
            of parameters. This method should be used only when 'describe' is
            used directly because Boto doesn't provide specific classes. '''

        # I really don't agree with prefixing everything with 'ec2'
        # because EC2, RDS and ElastiCache are different services.
        # I'm just following the pattern used until now to not break any
        # compatibility.

        host_info = {}
        for key in describe_dict:
            value = describe_dict[key]
            key = self.to_safe('ec2_' + self.uncammelize(key))

            # Handle complex types

            # Target: Memcached Cache Clusters
            if key == 'ec2_configuration_endpoint' and value:
                host_info['ec2_configuration_endpoint_address'] = value['Address']
                host_info['ec2_configuration_endpoint_port'] = value['Port']

            # Target: Cache Nodes and Redis Cache Clusters (single node)
            if key == 'ec2_endpoint' and value:
                host_info['ec2_endpoint_address'] = value['Address']
                host_info['ec2_endpoint_port'] = value['Port']

            # Target: Redis Replication Groups
            if key == 'ec2_node_groups' and value:
                host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
                host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
                replica_count = 0
                for node in value[0]['NodeGroupMembers']:
                    if node['CurrentRole'] == 'primary':
                        host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
                        host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
                        host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
                    elif node['CurrentRole'] == 'replica':
                        host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address']
                        host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port']
                        host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId']
                        replica_count += 1

            # Target: Redis Replication Groups
            if key == 'ec2_member_clusters' and value:
                host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])

            # Target: All Cache Clusters
            elif key == 'ec2_cache_parameter_group':
                host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
                host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
                host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']

            # Target: Almost everything
            elif key == 'ec2_security_groups':

                # Skip if SecurityGroups is None
                # (it is possible to have the key defined but no value in it).
                if value is not None:
                    sg_ids = []
                    for sg in value:
                        sg_ids.append(sg['SecurityGroupId'])
                    host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])

            # Target: Everything
            # Preserve booleans and integers
            elif isinstance(value, (int, bool)):
                host_info[key] = value

            # Target: Everything
            # Sanitize string values
            elif isinstance(value, six.string_types):
                host_info[key] = value.strip()

            # Target: Everything
            # Replace None by an empty string
            elif value is None:
                host_info[key] = ''

            else:
                # Remove non-processed complex types
                pass

        return host_info

    def get_host_info(self):
        ''' Get variables about a specific host '''

        if len(self.index) == 0:
            # Need to load index from cache
            self.load_index_from_cache()

        if not self.args.host in self.index:
            # try updating the cache
            self.do_api_calls_update_cache()
            if not self.args.host in self.index:
                # host might not exist anymore
                return self.json_format_dict({}, True)

        (region, instance_id) = self.index[self.args.host]

        instance = self.get_instance(region, instance_id)
        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)

    def push(self, my_dict, key, element):
        ''' Push an element onto an array that may not have been defined in
        the dict '''
        group_info = my_dict.setdefault(key, [])
        if isinstance(group_info, dict):
            host_list = group_info.setdefault('hosts', [])
            host_list.append(element)
        else:
            group_info.append(element)

    def push_group(self, my_dict, key, element):
        ''' Push a group as a child of another group. '''
        parent_group = my_dict.setdefault(key, {})
        if not isinstance(parent_group, dict):
            # Promote a plain host list to the {'hosts': [...]} dict form
            parent_group = my_dict[key] = {'hosts': parent_group}
        child_groups = parent_group.setdefault('children', [])
        if element not in child_groups:
            child_groups.append(element)

    def get_inventory_from_cache(self):
        ''' Reads the inventory from the cache file and returns it as a JSON
        object '''

        with open(self.cache_path_cache, 'r') as f:
            json_inventory = f.read()
        return json_inventory

    def load_index_from_cache(self):
        ''' Reads the index from the cache file sets self.index '''

        with open(self.cache_path_index, 'rb') as f:
            self.index = json.load(f)

    def write_to_cache(self, data, filename):
        ''' Writes data in JSON format to a file '''
        json_data = self.json_format_dict(data, True)
        with open(filename, 'w') as f:
            f.write(json_data)

    def uncammelize(self, key):
        ''' Converts a CamelCase key to snake_case (e.g. CacheNodeId ->
        cache_node_id). '''
        temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()

    def to_safe(self, word):
        ''' Converts 'bad' characters in a string to underscores
        so they can be used as Ansible groups '''
        regex = "[^A-Za-z0-9\_"
        if not self.replace_dash_in_groups:
            regex += "\-"
        return re.sub(regex + "]", "_", word)

    def json_format_dict(self, data, pretty=False):
        ''' Converts a dict to a JSON object and dumps it as a formatted
        string '''

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)


if __name__ == '__main__':
    # Run the script
    Ec2Inventory()
JioCloud/swift
refs/heads/master
test/unit/common/ring/test_utils.py
2
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from swift.common.ring.utils import (build_tier_tree, tiers_for_dev,
                                     parse_search_value, parse_args,
                                     build_dev_from_opts)


class TestUtils(unittest.TestCase):
    """Tests for swift.common.ring.utils helpers."""

    def setUp(self):
        self.test_dev = {'region': 1, 'zone': 1, 'ip': '192.168.1.1',
                         'port': '6000', 'id': 0}

        def get_test_devs():
            # Two zones, two IPs per zone, three devices per IP (ids 0-11):
            # zone 1: 192.168.1.1 (0-2), 192.168.1.2 (3-5)
            # zone 2: 192.168.2.1 (6-8), 192.168.2.2 (9-11)
            ips = ['192.168.1.1', '192.168.1.2', '192.168.2.1', '192.168.2.2']
            return [{'region': 1,
                     'zone': 1 if dev_id < 6 else 2,
                     'ip': ips[dev_id // 3],
                     'port': '6000',
                     'id': dev_id}
                    for dev_id in range(12)]

        self.test_devs = get_test_devs()

    def test_tiers_for_dev(self):
        """A device maps to its (region, zone, ip:port, id) tier chain."""
        self.assertEqual(
            tiers_for_dev(self.test_dev),
            ((1,),
             (1, 1),
             (1, 1, '192.168.1.1:6000'),
             (1, 1, '192.168.1.1:6000', 0)))

    def test_build_tier_tree(self):
        """The tier tree nests region -> zone -> ip:port -> device id."""
        ret = build_tier_tree(self.test_devs)
        self.assertEqual(len(ret), 8)
        self.assertEqual(ret[()], set([(1,)]))
        self.assertEqual(ret[(1,)], set([(1, 1), (1, 2)]))
        self.assertEqual(ret[(1, 1)], set([(1, 1, '192.168.1.2:6000'),
                                           (1, 1, '192.168.1.1:6000')]))
        self.assertEqual(ret[(1, 2)], set([(1, 2, '192.168.2.2:6000'),
                                           (1, 2, '192.168.2.1:6000')]))
        self.assertEqual(ret[(1, 1, '192.168.1.1:6000')],
                         set([(1, 1, '192.168.1.1:6000', 0),
                              (1, 1, '192.168.1.1:6000', 1),
                              (1, 1, '192.168.1.1:6000', 2)]))
        self.assertEqual(ret[(1, 1, '192.168.1.2:6000')],
                         set([(1, 1, '192.168.1.2:6000', 3),
                              (1, 1, '192.168.1.2:6000', 4),
                              (1, 1, '192.168.1.2:6000', 5)]))
        self.assertEqual(ret[(1, 2, '192.168.2.1:6000')],
                         set([(1, 2, '192.168.2.1:6000', 6),
                              (1, 2, '192.168.2.1:6000', 7),
                              (1, 2, '192.168.2.1:6000', 8)]))
        self.assertEqual(ret[(1, 2, '192.168.2.2:6000')],
                         set([(1, 2, '192.168.2.2:6000', 9),
                              (1, 2, '192.168.2.2:6000', 10),
                              (1, 2, '192.168.2.2:6000', 11)]))

    def test_parse_search_value(self):
        """Each search-value prefix maps to the expected filter key."""
        res = parse_search_value('r0')
        self.assertEqual(res, {'region': 0})
        res = parse_search_value('r1')
        self.assertEqual(res, {'region': 1})
        res = parse_search_value('r1z2')
        self.assertEqual(res, {'region': 1, 'zone': 2})
        res = parse_search_value('d1')
        self.assertEqual(res, {'id': 1})
        res = parse_search_value('z1')
        self.assertEqual(res, {'zone': 1})
        res = parse_search_value('-127.0.0.1')
        self.assertEqual(res, {'ip': '127.0.0.1'})
        res = parse_search_value('-[127.0.0.1]:10001')
        self.assertEqual(res, {'ip': '127.0.0.1', 'port': 10001})
        res = parse_search_value(':10001')
        self.assertEqual(res, {'port': 10001})
        res = parse_search_value('R127.0.0.10')
        self.assertEqual(res, {'replication_ip': '127.0.0.10'})
        res = parse_search_value('R[127.0.0.10]:20000')
        self.assertEqual(res, {'replication_ip': '127.0.0.10',
                               'replication_port': 20000})
        res = parse_search_value('R:20000')
        self.assertEqual(res, {'replication_port': 20000})
        res = parse_search_value('/sdb1')
        self.assertEqual(res, {'device': 'sdb1'})
        res = parse_search_value('_meta1')
        self.assertEqual(res, {'meta': 'meta1'})
        self.assertRaises(ValueError, parse_search_value, 'OMGPONIES')

    def test_replication_defaults(self):
        """replication_ip/port default to the device's ip/port."""
        args = '-r 1 -z 1 -i 127.0.0.1 -p 6010 -d d1 -w 100'.split()
        opts, _ = parse_args(args)
        device = build_dev_from_opts(opts)
        expected = {
            'device': 'd1',
            'ip': '127.0.0.1',
            'meta': '',
            'port': 6010,
            'region': 1,
            'replication_ip': '127.0.0.1',
            'replication_port': 6010,
            'weight': 100.0,
            'zone': 1,
        }
        # assertEquals is a deprecated alias; use assertEqual
        self.assertEqual(device, expected)


if __name__ == '__main__':
    unittest.main()
99cloud/keystone_register
refs/heads/master
openstack_dashboard/dashboards/admin/instances/views.py
7
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Openstack, LLC
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon import tables

from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.instances.tables import \
    AdminInstancesTable
from openstack_dashboard.dashboards.project.instances.views import \
    console, DetailView, vnc, spice, UpdateView
from openstack_dashboard.dashboards.project.instances.workflows.\
    update_instance import AdminUpdateInstance


LOG = logging.getLogger(__name__)


class AdminUpdateView(UpdateView):
    """Admin variant of the instance update view."""
    workflow_class = AdminUpdateInstance


class AdminIndexView(tables.DataTableView):
    """Admin panel listing instances across all tenants."""
    table_class = AdminInstancesTable
    template_name = 'admin/instances/index.html'

    def get_data(self):
        """Return all instances (all tenants), annotated with full flavor
        objects and tenant names.

        API failures are reported through ``exceptions.handle`` and degrade
        gracefully (empty lists / missing annotations) instead of raising.
        """
        instances = []
        # FIX: the original used bare ``except:`` clauses, which also swallow
        # SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
        try:
            instances = api.nova.server_list(self.request, all_tenants=True)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve instance list.'))
        if instances:
            # Gather our flavors to correlate against IDs
            try:
                flavors = api.nova.flavor_list(self.request)
            except Exception:
                # If fails to retrieve flavor list, creates an empty list.
                flavors = []

            # Gather our tenants to correlate against IDs
            try:
                tenants = api.keystone.tenant_list(self.request, admin=True)
            except Exception:
                tenants = []
                msg = _('Unable to retrieve instance tenant information.')
                exceptions.handle(self.request, msg)

            full_flavors = SortedDict([(f.id, f) for f in flavors])
            tenant_dict = SortedDict([(t.id, t) for t in tenants])

            # Loop through instances to get flavor and tenant info.
            for inst in instances:
                flavor_id = inst.flavor["id"]
                try:
                    if flavor_id in full_flavors:
                        inst.full_flavor = full_flavors[flavor_id]
                    else:
                        # If the flavor_id is not in full_flavors list,
                        # gets it via nova api.
                        inst.full_flavor = api.nova.flavor_get(
                            self.request, flavor_id)
                except Exception:
                    msg = _('Unable to retrieve instance size information.')
                    exceptions.handle(self.request, msg)
                tenant = tenant_dict.get(inst.tenant_id, None)
                inst.tenant_name = getattr(tenant, "name", None)
        return instances
dati91/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/third_party/pytest/src/_pytest/resultlog.py
32
""" log machine-parseable test session result information in a plain text file. """ from __future__ import absolute_import, division, print_function import py import os def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "resultlog plugin options") group.addoption( "--resultlog", "--result-log", action="store", metavar="path", default=None, help="DEPRECATED path for machine-readable result log.", ) def pytest_configure(config): resultlog = config.option.resultlog # prevent opening resultlog on slave nodes (xdist) if resultlog and not hasattr(config, "slaveinput"): dirname = os.path.dirname(os.path.abspath(resultlog)) if not os.path.isdir(dirname): os.makedirs(dirname) logfile = open(resultlog, "w", 1) # line buffered config._resultlog = ResultLog(config, logfile) config.pluginmanager.register(config._resultlog) from _pytest.deprecated import RESULT_LOG config.warn("C1", RESULT_LOG) def pytest_unconfigure(config): resultlog = getattr(config, "_resultlog", None) if resultlog: resultlog.logfile.close() del config._resultlog config.pluginmanager.unregister(resultlog) def generic_path(item): chain = item.listchain() gpath = [chain[0].name] fspath = chain[0].fspath fspart = False for node in chain[1:]: newfspath = node.fspath if newfspath == fspath: if fspart: gpath.append(":") fspart = False else: gpath.append(".") else: gpath.append("/") fspart = True name = node.name if name[0] in "([": gpath.pop() gpath.append(name) fspath = newfspath return "".join(gpath) class ResultLog(object): def __init__(self, config, logfile): self.config = config self.logfile = logfile # preferably line buffered def write_log_entry(self, testpath, lettercode, longrepr): print("%s %s" % (lettercode, testpath), file=self.logfile) for line in longrepr.splitlines(): print(" %s" % line, file=self.logfile) def log_outcome(self, report, lettercode, longrepr): testpath = getattr(report, "nodeid", None) if testpath is None: testpath = report.fspath 
self.write_log_entry(testpath, lettercode, longrepr) def pytest_runtest_logreport(self, report): if report.when != "call" and report.passed: return res = self.config.hook.pytest_report_teststatus(report=report) code = res[1] if code == "x": longrepr = str(report.longrepr) elif code == "X": longrepr = "" elif report.passed: longrepr = "" elif report.failed: longrepr = str(report.longrepr) elif report.skipped: longrepr = str(report.longrepr[2]) self.log_outcome(report, code, longrepr) def pytest_collectreport(self, report): if not report.passed: if report.failed: code = "F" longrepr = str(report.longrepr) else: assert report.skipped code = "S" longrepr = "%s:%d: %s" % report.longrepr self.log_outcome(report, code, longrepr) def pytest_internalerror(self, excrepr): reprcrash = getattr(excrepr, "reprcrash", None) path = getattr(reprcrash, "path", None) if path is None: path = "cwd:%s" % py.path.local() self.write_log_entry(path, "!", str(excrepr))
sprinkler/rainmachine-developer-resources
refs/heads/master
sdk-parsers/RMParserFramework/parsers/met-no-parser.py
1
# Copyright (c) 2014-2015 RainMachine, Green Electronics LLC # All rights reserved. # Authors: Nicu Pavel <npavel@mini-box.com> from RMParserFramework.rmParser import RMParser from RMUtilsFramework.rmLogging import log from RMDataFramework.rmWeatherData import RMWeatherConditions from RMUtilsFramework.rmTimeUtils import * from RMUtilsFramework.rmTypeUtils import * import datetime, time, os import random from xml.etree import ElementTree as e from datetime import timedelta from collections import OrderedDict from RMDataFramework.rmUserSettings import globalSettings class METNO(RMParser): parserName = "METNO Parser" parserDescription = "Global weather service from Norwegian Meteorological Institute http://met.no" parserForecast = True parserHistorical = False parserEnabled = True parserDebug = False parserInterval = 6 * 3600 params = {} def isEnabledForLocation(self, timezone, lat, long): if METNO.parserEnabled and timezone: return timezone.startswith("Europe") return False def perform(self): s = self.settings headers = [{ "User-Agent": "RainMachine.com v2" }, { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36"}] URL = "https://api.met.no/weatherapi/locationforecast/2.0/classic" URLParams = [("lat", s.location.latitude), ("lon", s.location.longitude), ("altitude", int(round(s.location.elevation)))] #----------------------------------------------------------------------------------------------- # # Get hourly data. 
# d = self.openURL(URL, URLParams, headers=headers[random.randint(0, 1)]) if d is None: return tree = e.parse(d) #tree = e.parse("/tmp/MET.NO/forecast.xml") if tree.getroot().tag == 'error': log.error("*** No hourly information found in response!") self.lastKnownError = "Error: No hourly information found" tree.getroot().clear() del tree tree = None else: data = self.__parseXMLData(tree) # Free memory tree.getroot().clear() del tree tree = None temp = self.__extractTagData(data, 'temperature') mintemp = self.__extractTagData(data, 'minTemperature') maxtemp = self.__extractTagData(data, 'maxTemperature') dewpoint = self.__extractTagData(data, 'dewpointTemperature') wind = self.__extractTagData(data, 'windSpeed') humidity = self.__extractTagData(data, 'humidity') pressure = self.__extractTagData(data, 'pressure') qpf = self.__extractTagData(data, 'precipitation') condition = self.__extractTagData(data, 'symbol') self.addValues(RMParser.dataType.TEMPERATURE, temp) self.addValues(RMParser.dataType.MINTEMP, mintemp) self.addValues(RMParser.dataType.MAXTEMP, maxtemp) self.addValues(RMParser.dataType.DEWPOINT, dewpoint) self.addValues(RMParser.dataType.WIND, wind) self.addValues(RMParser.dataType.RH, humidity) self.addValues(RMParser.dataType.QPF, qpf) self.addValues(RMParser.dataType.PRESSURE, pressure) self.addValues(RMParser.dataType.CONDITION, condition) # Build a python dictionary from the XML data def __parseXMLData(self, tree): dateFormat = "%Y-%m-%dT%H:%M:%SZ" data = {} todayTimestamp = rmCurrentDayTimestamp() for w in tree.getroot().find('product'): for wd in w.find('location'): tag = wd.tag # Reduce memory size by skipping tags that we don't need if (tag == 'cloudiness' or tag == 'fog' or tag == 'lowClouds' or tag == 'mediumClouds' or tag == 'highClouds' or tag == 'windDirection'): continue from_time = w.get('from') to_time = w.get('to') # New function call in next branch intervalStartTimestamp = rmTimestampFromUTCDateAsString(from_time, dateFormat) 
intervalEndTimestamp = rmTimestampFromUTCDateAsString(to_time, dateFormat) if intervalStartTimestamp < todayTimestamp: self.logtrace("From: %s To: %s" % (from_time, to_time)) self.logtrace("*** Local conversion of start timestamp (%s) in the past skipping ..." % from_time) continue localStartDate = rmTimestampToDate(intervalStartTimestamp) localEndDate = rmTimestampToDate(intervalEndTimestamp) day = localStartDate.strftime('%Y-%m-%d') if tag not in data: data[tag] = OrderedDict() if day not in data[tag]: data[tag][day] = [] startHour = localStartDate.hour endHour = localEndDate.hour try: val = None if (tag == 'windSpeed'): val = toFloat(wd.get('mps')) elif (tag == 'symbol'): condition = toInt(wd.get('number')) val = self.conditionConvert(condition) elif (tag == 'cloudiness' or tag == 'fog' or tag == 'lowClouds' or tag == 'mediumClouds' or tag == 'highClouds'): # UNUSED val = toFloat(wd.get('percent')) elif (tag == 'windDirection'): # UNUSED val = toFloat(wd.get('deg')) else: val = toFloat(wd.get('value')) # hPa -> kPa if (tag == 'pressure'): val = val / 10 except Exception, e: val = None log.debug(e) data[tag][day].append({ "startHour": startHour, "endHour": endHour, "start": intervalStartTimestamp, "end": intervalEndTimestamp, "value": val }) return data # Extract a tag data with timestamps from our previously built python dictionary # Some data is available hourly but precipitation and min/max temperatures are available on 6 hours interval # We average the hourly available data to 6 hours interval def __extractTagData(self, data, tag): result = [] hoursInterval = 6 if tag not in data: return [] for day in data[tag].keys(): self.logtrace("Day: %s - %s" % (day, tag)) daySum = 0 # For debugging partSum = None # For averaging hourly data to 6 hours interval hourlyCounter = 0 intervalCounter = 1 usedIntervals = [] # Keep track of intervals so we can eliminate intersecting ones for interval in data[tag][day]: value = interval["value"] intervalTime = interval["start"] 
sH = interval["startHour"] eH = interval["endHour"] shouldSkip = False # Ends in next day if eH < sH: log.debug("\t (SKIP) Interval %s %s ends in next day" % (sH, eH)) continue # Does this new interval intersects with an interval already in our data for si in usedIntervals: # if tag == "precipitation": # log.info("Checking %s-%s against %s-%s" % (sH, eH, si[0], si[1])) if sH < si[1]: if tag == "precipitation": log.debug("%s %s" % (day, tag)) log.debug("\t (SKIP i) Interval %s-%s intersects with interval %s-%s" % (sH, eH, si[0], si[1])) shouldSkip = True break if shouldSkip: continue duration = eH - sH # We found a "hourly" entry that contains temperature, windSpeed, pressure, dew if duration == 0 and value is not None: self.logtrace("\t (%s) %s-%s: %s %s" % (duration, sH, eH, tag, value)) if partSum is None: partSum = value else: partSum += value hourlyCounter += 1 if sH > 0 and sH % hoursInterval == 0 or hourlyCounter > hoursInterval: partSum /= hourlyCounter daySum += partSum self.logtrace("%s Hour average %s to %s (%s values)" % (hoursInterval, tag, partSum, hourlyCounter)) usedIntervals.append((sH, eH)) result.append((intervalTime, partSum)) # Add 6 hours interval value hourlyCounter = 0 partSum = 0 intervalCounter += 1 elif duration == hoursInterval: usedIntervals.append((sH, eH)) result.append((intervalTime, value)) # Add 6 hours interval value daySum += value log.debug("\t ADDED (%s) %s-%s: %s %s" % (duration, sH, eH, tag, value)) else: log.debug("\t SKIP (%s) %s-%s: %s %s" % (duration, sH, eH, tag, value)) if tag != "precipitation": # Any remaining partials for 6h averaging for a day if partSum is not None and hourlyCounter > 0: partSum /= hourlyCounter daySum += partSum self.logtrace("(Partial) %s Hour Average hourly %s to %s (%s values)" % (hoursInterval, tag, partSum, hourlyCounter)) usedIntervals.append((sH, eH)) result.append((intervalTime, partSum)) # Add 6 hours interval value daySum = daySum/intervalCounter self.logtrace("Day %s Total/AVG (%s/%s) %s 
%s (%s)\n\n" % (day, intervalCounter, hourlyCounter, tag, partSum, daySum)) return result def conditionConvert(self, symbol): if symbol in [999]: return RMParser.conditionType.MostlyCloudy elif symbol in [1, 101]: return RMParser.conditionType.Fair elif symbol in [2, 102]: return RMParser.conditionType.FewClouds elif symbol in [3, 103]: return RMParser.conditionType.PartlyCloudy elif symbol in [4, 104]: return RMParser.conditionType.Overcast elif symbol in [15, 115]: return RMParser.conditionType.Fog elif symbol in [999]: return RMParser.conditionType.Smoke elif symbol in [999]: return RMParser.conditionType.HeavyFreezingRain elif symbol in [999]: return RMParser.conditionType.IcePellets elif symbol in [999]: return RMParser.conditionType.FreezingRain elif symbol in [48, 148]: return RMParser.conditionType.RainIce elif symbol in [7, 12, 20, 21, 23, 26, 27, 31, 32, 42, 43, 47, 107, 112, 120, 121, 123, 126, 127, 131, 132, 142, 143, 147]: return RMParser.conditionType.RainSnow elif symbol in [10, 25, 41, 110, 125, 141]: return RMParser.conditionType.RainShowers elif symbol in [11, 14, 111, 114]: return RMParser.conditionType.Thunderstorm elif symbol in [8, 13, 21, 28, 29, 33, 34, 44, 45, 49, 50, 108, 113, 121, 128, 129, 133, 134, 144, 145, 149, 150]: return RMParser.conditionType.Snow elif symbol in [999]: return RMParser.conditionType.Windy elif symbol in [999]: return RMParser.conditionType.ShowersInVicinity elif symbol in [999]: return RMParser.conditionType.HeavyFreezingRain elif symbol in [999]: return RMParser.conditionType.ThunderstormInVicinity elif symbol in [5, 6, 9, 22, 24, 30, 40, 46, 109, 105, 106, 122, 124, 130, 140, 146]: return RMParser.conditionType.LightRain elif symbol in [999]: return RMParser.conditionType.HeavyRain elif symbol in [999]: return RMParser.conditionType.FunnelCloud elif symbol in [999]: return RMParser.conditionType.Dust elif symbol in [999]: return RMParser.conditionType.Haze elif symbol in [999]: return RMParser.conditionType.Hot 
elif symbol in [999]: return RMParser.conditionType.Cold else: return RMParser.conditionType.Unknown def logtrace(self, msg, *args, **kwargs): if self.parserDebug: log.info(msg, *args, **kwargs) if __name__ == "__main__": parser = METNO() parser.perform()
ctu-osgeorel/gdal-vfr
refs/heads/master
vfr2pg.py
1
#!/usr/bin/env python

###############################################################################
#
# VFR importer based on GDAL library
#
# Author: Martin Landa <landa.martin gmail.com>
#
# Licence: MIT/X
#
###############################################################################

"""
Imports VFR data to PostGIS database

Requires GDAL library version 1.11 or later.
"""

import sys
import atexit
import argparse

from vfr4ogr import VfrPg
from vfr4ogr.parse import parse_cmd
from vfr4ogr.logger import check_log, VfrLogger
from vfr4ogr.exception import VfrError, VfrErrorCmd

def parse_args():
    """Define and parse the vfr2pg command line.

    Returns:
        tuple: (parsed argparse namespace, usage-printing callable)
    """
    parser = argparse.ArgumentParser(prog="vfr2pg",
                                     description="Imports VFR data to PostGIS database. "
                                     "Requires GDAL library version 1.11 or later.")

    parser.add_argument("-e", "--extended",
                        action='store_true',
                        help="Extended layer list statistics")
    parser.add_argument("-d", "--download",
                        action='store_true',
                        help="Download VFR data to the currect directory (--type required) and exit")
    parser.add_argument("-s", "--fileschema",
                        action='store_true',
                        help="Create new schema for each VFR file")
    parser.add_argument("-g", "--nogeomskip",
                        action='store_true',
                        help="Skip features without geometry")
    parser.add_argument("-l", "--list",
                        action='store_true',
                        help="List existing layers in output database and exit")
    parser.add_argument("--file",
                        help="Path to xml.gz|zip or URL list file")
    parser.add_argument("--date",
                        help="Date in format 'YYYYMMDD'")
    parser.add_argument("--type",
                        help="Type of request in format XY_ABCD, eg. 'ST_UKSH' or 'OB_000000_ABCD'")
    parser.add_argument("--layer",
                        help="Import only selected layers separated by comma (if not given all layers are processed)")
    parser.add_argument("--geom",
                        help="Preferred geometry 'OriginalniHranice' or 'GeneralizovaneHranice' (if not found or not given than first geometry is used)")
    parser.add_argument("--dbname",
                        help="Output PostGIS database")
    parser.add_argument("--schema",
                        help="Schema name (default: public)")
    parser.add_argument("--user",
                        help="User name")
    parser.add_argument("--passwd",
                        help="Password")
    parser.add_argument("--host",
                        help="Host name")
    parser.add_argument("--port",
                        help="Port")
    parser.add_argument("-o", "--overwrite",
                        action='store_true',
                        help="Overwrite existing PostGIS tables")
    parser.add_argument("-a", "--append",
                        action='store_true',
                        help="Append to existing PostGIS tables")

    return parser.parse_args(), parser.print_help

def _build_dsn(options):
    """Build an OGR PostgreSQL datasource name from parsed options.

    Only connection parameters actually supplied on the command line are
    appended to the datasource string.

    Args:
        options: argparse namespace carrying dbname/user/passwd/host/port

    Returns:
        str or None: OGR datasource name, or None when no dbname was given
    """
    if not options.dbname:
        return None

    odsn = "PG:dbname=%s" % options.dbname
    # map cmdline attribute name -> PostgreSQL connection keyword
    for attr, key in (('user', 'user'), ('passwd', 'password'),
                      ('host', 'host'), ('port', 'port')):
        value = getattr(options, attr)
        if value:
            odsn += " %s=%s" % (key, value)

    return odsn

def main():
    """Entry point: parse the command line and run the VFR import.

    Returns:
        int: 0 on success (error paths terminate via sys.exit)
    """
    # parse cmdline arguments
    options, usage = parse_args()
    options.format = 'PostgreSQL'

    try:
        file_list = parse_cmd(options)
    except VfrErrorCmd as e:
        usage()
        sys.exit('ERROR: {}'.format(e))

    # build datasource name from connection options
    odsn = _build_dsn(options)

    # create convertor
    try:
        pg = VfrPg(schema=options.schema, schema_per_file=options.fileschema,
                   dsn=odsn, geom_name=options.geom, layers=options.layer,
                   nogeomskip=options.nogeomskip, overwrite=options.overwrite)
    except VfrError as e:
        sys.exit('ERROR: {}'.format(e))

    # write log process header
    pg.cmd_log(sys.argv)

    if options.list:
        # list output database and exit
        pg.print_summary()
        return 0

    # read file list and download VFR files if needed
    try:
        pg.download(file_list, options.date)
    except VfrError as e:
        VfrLogger.error(str(e))

    if options.download:
        # download only requested, exiting
        return 0

    # import input VFR files to PostGIS
    ipass = pg.run(options.append, options.extended)

    # create indices for output tables
    pg.create_indices()

    # print final summary
    if (ipass > 1 and options.fileschema is False) \
            or options.append:
        pg.print_summary()

    return 0

if __name__ == "__main__":
    atexit.register(check_log)
    sys.exit(main())
handroissuazo/tensorflow
refs/heads/master
tensorflow/contrib/specs/python/summaries.py
164
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions for summarizing and describing TensorFlow graphs. This contains functions that generate string descriptions from TensorFlow graphs, for debugging, testing, and model size estimation. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from tensorflow.contrib.specs.python import specs from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops # These are short abbreviations for common TensorFlow operations used # in test cases with tf_structure to verify that specs_lib generates a # graph structure with the right operations. Operations outside the # scope of specs (e.g., Const and Placeholder) are just assigned "_" # since they are not relevant to testing. SHORT_NAMES_SRC = """ BiasAdd biasadd Const _ Conv2D conv MatMul dot Placeholder _ Sigmoid sig Variable var """.split() SHORT_NAMES = { x: y for x, y in zip(SHORT_NAMES_SRC[::2], SHORT_NAMES_SRC[1::2]) } def _truncate_structure(x): """A helper function that disables recursion in tf_structure. Some constructs (e.g., HorizontalLstm) are complex unrolled structures and don't need to be represented in the output of tf_structure or tf_print. 
This helper function defines which tree branches should be pruned. This is a very imperfect way of dealing with unrolled LSTM's (since it truncates useful information as well), but it's not worth doing something better until the new fused and unrolled ops are ready. Args: x: a Tensor or Op Returns: A bool indicating whether the subtree should be pruned. """ if "/HorizontalLstm/" in x.name: return True return False def tf_structure(x, include_shapes=False, finished=None): """A postfix expression summarizing the TF graph. This is intended to be used as part of test cases to check for gross differences in the structure of the graph. The resulting string is not invertible or unabiguous and cannot be used to reconstruct the graph accurately. Args: x: a tf.Tensor or tf.Operation include_shapes: include shapes in the output string finished: a set of ops that have already been output Returns: A string representing the structure as a string of postfix operations. """ if finished is None: finished = set() if isinstance(x, ops.Tensor): shape = x.get_shape().as_list() x = x.op else: shape = [] if x in finished: return " <>" finished |= {x} result = "" if not _truncate_structure(x): for y in x.inputs: result += tf_structure(y, include_shapes, finished) if include_shapes: result += " %s" % (shape,) if x.type != "Identity": name = SHORT_NAMES.get(x.type, x.type.lower()) result += " " + name return result def tf_print(x, depth=0, finished=None, printer=print): """A simple print function for a TensorFlow graph. Args: x: a tf.Tensor or tf.Operation depth: current printing depth finished: set of nodes already output printer: print function to use Returns: Total number of parameters found in the subtree. 
""" if finished is None: finished = set() if isinstance(x, ops.Tensor): shape = x.get_shape().as_list() x = x.op else: shape = "" if x.type == "Identity": x = x.inputs[0].op if x in finished: printer("%s<%s> %s %s" % (" " * depth, x.name, x.type, shape)) return finished |= {x} printer("%s%s %s %s" % (" " * depth, x.name, x.type, shape)) if not _truncate_structure(x): for y in x.inputs: tf_print(y, depth + 1, finished, printer=printer) def tf_num_params(x): """Number of parameters in a TensorFlow subgraph. Args: x: root of the subgraph (Tensor, Operation) Returns: Total number of elements found in all Variables in the subgraph. """ if isinstance(x, ops.Tensor): shape = x.get_shape() x = x.op if x.type in ["Variable", "VariableV2"]: return shape.num_elements() totals = [tf_num_params(y) for y in x.inputs] return sum(totals) def tf_left_split(op): """Split the parameters of op for left recursion. Args: op: tf.Operation Returns: A tuple of the leftmost input tensor and a list of the remaining arguments. """ if len(op.inputs) < 1: return None, [] if op.type == "Concat": return op.inputs[1], op.inputs[2:] return op.inputs[0], op.inputs[1:] def tf_parameter_iter(x): """Iterate over the left branches of a graph and yield sizes. Args: x: root of the subgraph (Tensor, Operation) Yields: A triple of name, number of params, and shape. 
""" while 1: if isinstance(x, ops.Tensor): shape = x.get_shape().as_list() x = x.op else: shape = "" left, right = tf_left_split(x) totals = [tf_num_params(y) for y in right] total = sum(totals) yield x.name, total, shape if left is None: break x = left def _combine_filter(x): """A filter for combining successive layers with similar names.""" last_name = None last_total = 0 last_shape = None for name, total, shape in x: name = re.sub("/.*", "", name) if name == last_name: last_total += total continue if last_name is not None: yield last_name, last_total, last_shape last_name = name last_total = total last_shape = shape if last_name is not None: yield last_name, last_total, last_shape def tf_parameter_summary(x, printer=print, combine=True): """Summarize parameters by depth. Args: x: root of the subgraph (Tensor, Operation) printer: print function for output combine: combine layers by top-level scope """ seq = tf_parameter_iter(x) if combine: seq = _combine_filter(seq) seq = reversed(list(seq)) for name, total, shape in seq: printer("%10d %-20s %s" % (total, name, shape)) def tf_spec_structure(spec, inputs=None, input_shape=None, input_type=dtypes.float32): """Return a postfix representation of the specification. This is intended to be used as part of test cases to check for gross differences in the structure of the graph. The resulting string is not invertible or unabiguous and cannot be used to reconstruct the graph accurately. Args: spec: specification inputs: input to the spec construction (usually a Tensor) input_shape: tensor shape (in lieu of inputs) input_type: type of the input tensor Returns: A string with a postfix representation of the specification. """ if inputs is None: inputs = array_ops.placeholder(input_type, input_shape) outputs = specs.create_net(spec, inputs) return str(tf_structure(outputs).strip()) def tf_spec_summary(spec, inputs=None, input_shape=None, input_type=dtypes.float32): """Output a summary of the specification. 
This prints a list of left-most tensor operations and summarized the variables found in the right branches. This kind of representation is particularly useful for networks that are generally structured like pipelines. Args: spec: specification inputs: input to the spec construction (usually a Tensor) input_shape: optional shape of input input_type: type of the input tensor """ if inputs is None: inputs = array_ops.placeholder(input_type, input_shape) outputs = specs.create_net(spec, inputs) tf_parameter_summary(outputs) def tf_spec_print(spec, inputs=None, input_shape=None, input_type=dtypes.float32): """Print a tree representing the spec. Args: spec: specification inputs: input to the spec construction (usually a Tensor) input_shape: optional shape of input input_type: type of the input tensor """ if inputs is None: inputs = array_ops.placeholder(input_type, input_shape) outputs = specs.create_net(spec, inputs) tf_print(outputs)
makinacorpus/django-ckeditor
refs/heads/master
setup.py
2
import os.path

from setuptools import setup, find_packages


def get_source_files():
    """Yield CKEditor ``_source`` file paths relative to the ``ckeditor`` dir."""
    # NOTE(review): not referenced by setup() below; kept for backward
    # compatibility with any external importers of this module.
    for dirname, _, files in os.walk('ckeditor/static/ckeditor/ckeditor/_source'):
        for filename in files:
            yield os.path.join('/'.join(dirname.split('/')[1:]), filename)


def _read(path):
    """Return the contents of *path*, closing the file promptly.

    The previous inline ``open(...).read()`` calls leaked file handles
    (never closed, relying on GC).
    """
    with open(path, 'r') as fobj:
        return fobj.read()


setup(
    name='django-ckeditor-updated',
    version='4.2.6',
    description='Django admin CKEditor integration.',
    long_description=_read('README.rst') +
                     _read('AUTHORS.rst') +
                     _read('CHANGELOG.rst'),
    author='Piotr Malinski',
    author_email='riklaunim@gmail.com',
    url='https://github.com/riklaunim/django-ckeditor',
    packages=find_packages(exclude=["*.demo"]),
    install_requires=[
        'Django',
    ],
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.3",
        "License :: OSI Approved :: BSD License",
        "Development Status :: 4 - Beta",
        "Operating System :: OS Independent",
        "Framework :: Django",
        "Intended Audience :: Developers",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
    ],
)
anbasile/pmu_simulator
refs/heads/master
simulator/main.py
2
from __future__ import print_function import matplotlib.pyplot as plt import random import math import folium import pandas as pd import numpy as np #select a point to generate points around pt = [46.086262,11.128824] #load TT stops stops = pd.read_csv("stops.txt") #generate N points around pt. temp = [] def generate_random_data(lon,lat, n_points): for _ in xrange(n_points): dec_lat = random.random()/10 # <-modify the denominator in order to change the radius dec_lon = random.random()/10 # <- temp.append((lon+dec_lon, lat+dec_lat)) return pd.DataFrame(temp) df = generate_random_data(pt[0],pt[1],20) # <- call the function and generate 20 points map_1 = folium.Map(location=pt,zoom_start=12) # <- initialize a map #populate the map with the random points for i,row in df.iterrows(): map_1.simple_marker((row[0],row[1])) #create N tuples of the form (driver,rider,end) i = 0 while i<10: # <- create 10 tuples #get three point point1 = df.ix[i,0],df.ix[i,1] point2 = df.ix[i+1,0],df.ix[i+1,1] point3 = stops['stop_lat'][random.randrange(len(stops))],stops['stop_lon'][random.randrange(len(stops))] #draw lines from driver->end,rider->end map_1.line((point1,point3)) map_1.line((point2,point3)) #build vectors and calculate angle in between v0 = np.array(point1)-np.array(point3) v1 = np.array(point2)-np.array(point3) angle = np.math.atan2(np.linalg.det([v0,v1]),np.dot(v0,v1)) #name the end point. In this case I show the angle map_1.simple_marker(point3,popup=str(math.degrees(angle))) i += 2 map_1.create_map(path='simulation.html')
aterrel/dynd-python
refs/heads/master
dynd/tests/test_ckernel.py
1
"""Unit tests for dynd's low-level ckernel builder interface.

Exercises the allocation/reallocation behaviour of CKernelBuilder and the
construction of assignment ckernels via ``_lowlevel``.
"""
from __future__ import print_function, absolute_import
import sys
import ctypes
import unittest
from dynd import nd, ndt, _lowlevel
import numpy as np

# ctypes.c_ssize_t/c_size_t was introduced in python 2.7
# On older interpreters, fall back to the integer type matching pointer width.
if sys.version_info >= (2, 7):
    c_ssize_t = ctypes.c_ssize_t
    c_size_t = ctypes.c_size_t
else:
    if ctypes.sizeof(ctypes.c_void_p) == 4:
        c_ssize_t = ctypes.c_int32
        c_size_t = ctypes.c_uint32
    else:
        c_ssize_t = ctypes.c_int64
        c_size_t = ctypes.c_uint64


class TestCKernelBuilder(unittest.TestCase):
    """Tests for ``_lowlevel.ckernel.CKernelBuilder`` memory management."""

    def test_creation(self):
        """Builder can be created and torn down via the context manager."""
        with _lowlevel.ckernel.CKernelBuilder() as ckb:
            pass

    def test_allocation(self):
        """ensure_capacity only reallocates when more space is needed."""
        with _lowlevel.ckernel.CKernelBuilder() as ckb:
            # Initially, the memory within the structure
            # is being used
            self.assertEqual(ckb.ckb.data,
                             ctypes.addressof(ckb.ckb.static_data))
            # The capacity is 16 pointer-sized objects
            initial_capacity = 16 * ctypes.sizeof(ctypes.c_void_p)
            self.assertEqual(ckb.ckb.capacity, initial_capacity)
            # Requesting exactly the space already there should do nothing
            # (the subtraction accounts for the trailing kernel prefix that
            # ensure_capacity reserves — presumably; TODO confirm in dynd docs)
            ckb.ensure_capacity(initial_capacity -
                                ctypes.sizeof(_lowlevel.CKernelPrefixStruct))
            self.assertEqual(ckb.ckb.data,
                             ctypes.addressof(ckb.ckb.static_data))
            self.assertEqual(ckb.ckb.capacity, initial_capacity)
            # Requesting more space should reallocate
            ckb.ensure_capacity(initial_capacity)
            self.assertTrue(ckb.ckb.data !=
                            ctypes.addressof(ckb.ckb.static_data))
            self.assertTrue(ckb.ckb.capacity >=
                            initial_capacity +
                            ctypes.sizeof(_lowlevel.CKernelPrefixStruct))

    def test_allocation_leaf(self):
        """ensure_capacity_leaf does not reserve space for a kernel prefix."""
        with _lowlevel.ckernel.CKernelBuilder() as ckb:
            # Initially, the memory within the structure
            # is being used
            self.assertEqual(ckb.ckb.data,
                             ctypes.addressof(ckb.ckb.static_data))
            # The capacity is 16 pointer-sized objects
            initial_capacity = 16 * ctypes.sizeof(ctypes.c_void_p)
            self.assertEqual(ckb.ckb.capacity, initial_capacity)
            # Requesting exactly the space already there should do nothing
            ckb.ensure_capacity_leaf(initial_capacity)
            self.assertEqual(ckb.ckb.data,
                             ctypes.addressof(ckb.ckb.static_data))
            self.assertEqual(ckb.ckb.capacity, initial_capacity)
            # Requesting more space should reallocate
            ckb.ensure_capacity(initial_capacity +
                                ctypes.sizeof(ctypes.c_void_p))
            self.assertTrue(ckb.ckb.data !=
                            ctypes.addressof(ckb.ckb.static_data))
            self.assertTrue(ckb.ckb.capacity >=
                            initial_capacity +
                            ctypes.sizeof(_lowlevel.CKernelPrefixStruct))

    def test_reset(self):
        """reset() returns a grown builder to its initial static buffer."""
        with _lowlevel.ckernel.CKernelBuilder() as ckb:
            initial_capacity = 16 * ctypes.sizeof(ctypes.c_void_p)
            # put the ckernel builder in a non-initial state
            ckb.ensure_capacity_leaf(initial_capacity + 16)
            self.assertTrue(ckb.ckb.data !=
                            ctypes.addressof(ckb.ckb.static_data))
            # verify that reset puts it back in an initial state
            ckb.reset()
            self.assertEqual(ckb.ckb.data,
                             ctypes.addressof(ckb.ckb.static_data))
            self.assertEqual(ckb.ckb.capacity, initial_capacity)

    def test_assignment_ckernel_single(self):
        """A single-value assignment kernel copies int64 -> float32."""
        with _lowlevel.ckernel.CKernelBuilder() as ckb:
            _lowlevel.make_assignment_ckernel(
                ckb, 0, ndt.float32, None, ndt.int64, None,
                "single")
            ck = ckb.ckernel(_lowlevel.ExprSingleOperation)
            # Do an assignment using ctypes
            i64 = ctypes.c_int64(1234)
            pi64 = ctypes.pointer(i64)
            f32 = ctypes.c_float(1)
            # call signature: (dst address, address of the src pointer)
            ck(ctypes.addressof(f32), ctypes.addressof(pi64))
            self.assertEqual(f32.value, 1234.0)

    def test_assignment_ckernel_strided(self):
        """A strided assignment kernel parses fixed strings into float32."""
        with _lowlevel.ckernel.CKernelBuilder() as ckb:
            _lowlevel.make_assignment_ckernel(
                ckb, 0, ndt.float32, None, ndt.type('string[15,"A"]'), None,
                'strided')
            ck = ckb.ckernel(_lowlevel.ExprStridedOperation)
            # Do an assignment using a numpy array
            src = np.array(['3.25', '-1000', '1e5'], dtype='S15')
            src_ptr = ctypes.c_void_p(src.ctypes.data)
            src_stride = c_ssize_t(15)
            dst = np.arange(3, dtype=np.float32)
            # call signature: (dst ptr, dst stride in bytes — 4 for float32,
            # src ptr array, src stride array, count)
            ck(dst.ctypes.data, 4, ctypes.pointer(src_ptr),
               ctypes.pointer(src_stride), 3)
            self.assertEqual(dst.tolist(), [3.25, -1000, 1e5])

if __name__ == '__main__':
    unittest.main()
MontrealCorpusTools/PolyglotDB
refs/heads/main
tests/test_subannotations.py
4
"""Tests for subannotation queries (e.g. voicing during closure) in PolyglotDB.

All tests rely on the ``subannotation_config`` fixture, which is expected to
provide a corpus whose phones carry subannotations — TODO confirm against
the project's conftest.
"""
import pytest

from polyglotdb import CorpusContext

from polyglotdb.query import Sum
from polyglotdb.exceptions import AnnotationAttributeError


def test_basic(subannotation_config):
    """Query subannotation durations per phone, then their Sum aggregate."""
    with CorpusContext(subannotation_config) as c:
        # Per-token list of voicing_during_closure durations for 'g' phones.
        q = c.query_graph(c.phone).filter(c.phone.label == 'g')
        q = q.columns(c.phone.label.column_name('label'))
        q = q.columns(c.phone.voicing_during_closure.duration.column_name('voicing_during_closure'))
        q = q.order_by(c.phone.begin)
        res = q.all()
        assert (res[0]['label'] == 'g')
        assert ([round(x, 2) for x in res[0]['voicing_during_closure']] == [0.03, 0.01])

        # Same query but aggregated with Sum (0.03 + 0.01 == 0.04).
        q = c.query_graph(c.phone).filter(c.phone.label == 'g')
        q = q.order_by(c.phone.begin)
        q = q.columns(c.phone.label.column_name('label'),
                      Sum(c.phone.voicing_during_closure.duration).column_name('voicing_during_closure'))
        print(q.cypher())
        res = q.all()
        print(res)
        print(res[0])
        assert (res[0]['label'] == 'g')
        assert (round(res[0]['voicing_during_closure'], 2) == 0.04)


def test_filter(subannotation_config):
    """Filter phones by a subannotation property (burst.begin)."""
    with CorpusContext(subannotation_config) as c:
        q = c.query_graph(c.phone)
        q = q.filter(c.phone.burst.begin == 0)
        print(q.cypher())
        assert (q.all()[0]['label'] == 'k')


def test_add_token_label(subannotation_config):
    """Set, query and unset token properties; set a type property via lexicon."""
    with CorpusContext(subannotation_config) as c:
        # Tag all 'ae' tokens with a new property and subset.
        q = c.query_graph(c.phone).filter(c.phone.label == 'ae')
        q.set_properties(aeness='such ae')
        q.create_subset('ae')
        c.encode_hierarchy()

        # The property is now queryable on tokens.
        q = c.query_graph(c.phone).filter(c.phone.aeness == 'such ae')
        results = q.all()
        assert (len(results) > 0)
        assert (results[0]['label'] == 'ae')

        # Tokens without the property match an equality test against None.
        q = c.query_graph(c.phone).filter(c.phone.aeness == None)
        print(q.cypher())
        results = q.all()
        assert (len(results) > 0)
        assert (all(x['label'] != 'ae' for x in results))

        # Unsetting the property removes it from the hierarchy ...
        q = c.query_graph(c.phone).filter(c.phone.aeness == 'such ae')
        q.set_properties(aeness=None)
        c.encode_hierarchy()

        # ... so querying it afterwards raises.
        with pytest.raises(AnnotationAttributeError):
            q = c.query_graph(c.phone).filter(c.phone.aeness == 'such ae')
            results = q.all()

        # Type-level property set through the lexicon instead of tokens.
        q = c.query_lexicon(c.lexicon_phone).filter(c.lexicon_phone.label == 't')
        q.set_properties(tness='such t')
        q.create_subset('t')
        c.encode_hierarchy()

        q = c.query_graph(c.phone.filter_by_subset('t'))
        results = q.all()
        assert (len(results) > 0)
        assert (results[0]['label'] == 't')


def test_delete(subannotation_config):
    """Deleted annotations no longer appear in subsequent queries."""
    with CorpusContext(subannotation_config) as c:
        q = c.query_graph(c.phone).filter(c.phone.label == 'ae')
        q.delete()

        q = c.query_graph(c.phone).filter(c.phone.label == 'ae')
        results = q.all()
        assert (len(results) == 0)
maarteninja/ml2014
refs/heads/master
three/minimize.py
1
#This program is distributed WITHOUT ANY WARRANTY; without even the implied #warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # #This file contains a Python version of Carl Rasmussen's Matlab-function #minimize.m # #minimize.m is copyright (C) 1999 - 2006, Carl Edward Rasmussen. #Python adaptation by Roland Memisevic 2008. # # #The following is the original copyright notice that comes with the #function minimize.m #(from http://www.kyb.tuebingen.mpg.de/bs/people/carl/code/minimize/Copyright): # # #"(C) Copyright 1999 - 2006, Carl Edward Rasmussen # #Permission is granted for anyone to copy, use, or modify these #programs and accompanying documents for purposes of research or #education, provided this copyright notice is retained, and note is #made of any changes that have been made. # #These programs and documents are distributed without any warranty, #express or implied. As the programs were written for research #purposes only, they have not been tested to the degree that would be #advisable in any important application. All use of these programs is #entirely at the user's own risk." """minimize.py This module contains a function 'minimize' that performs unconstrained gradient based optimization using nonlinear conjugate gradients. The function is a straightforward Python-translation of Carl Rasmussen's Matlab-function minimize.m """ from numpy import dot, isinf, isnan, any, sqrt, isreal, real, nan, inf def minimize(X, f, grad, args, maxnumlinesearch=None, maxnumfuneval=None, red=1.0, verbose=True): INT = 0.1;# don't reevaluate within 0.1 of the limit of the current bracket EXT = 3.0; # extrapolate maximum 3 times the current step-size MAX = 20; # max 20 function evaluations per line search RATIO = 10; # maximum allowed slope ratio SIG = 0.1;RHO = SIG/2;# SIG and RHO are the constants controlling the Wolfe- #Powell conditions. 
SIG is the maximum allowed absolute ratio between #previous and new slopes (derivatives in the search direction), thus setting #SIG to low (positive) values forces higher precision in the line-searches. #RHO is the minimum allowed fraction of the expected (from the slope at the #initial point in the linesearch). Constants must satisfy 0 < RHO < SIG < 1. #Tuning of SIG (depending on the nature of the function to be optimized) may #speed up the minimization; it is probably not worth playing much with RHO. SMALL = 10.**-16 #minimize.m uses matlab's realmin if maxnumlinesearch == None: if maxnumfuneval == None: raise "Specify maxnumlinesearch or maxnumfuneval" else: S = 'Function evaluation' length = maxnumfuneval else: if maxnumfuneval != None: raise "Specify either maxnumlinesearch or maxnumfuneval (not both)" else: S = 'Linesearch' length = maxnumlinesearch i = 0 # zero the run length counter ls_failed = 0 # no previous line search has failed f0 = f(X, *args) # get function value and gradient df0 = grad(X, *args) fX = [f0] i = i + (length<0) # count epochs?! s = -df0; d0 = -dot(s,s) # initial search direction (steepest) and slope x3 = red/(1.0-d0) # initial step is red/(|s|+1) while i < abs(length): # while not finished i = i + (length>0) # count iterations?! X0 = X; F0 = f0; dF0 = df0 # make a copy of current values if length>0: M = MAX else: M = min(MAX, -length-i) while 1: # keep extrapolating as long as necessary x2 = 0; f2 = f0; d2 = d0; f3 = f0; df3 = df0 success = 0 while (not success) and (M > 0): try: M = M - 1; i = i + (length<0) # count epochs?! 
f3 = f(X+x3*s, *args) df3 = grad(X+x3*s, *args) if isnan(f3) or isinf(f3) or any(isnan(df3)+isinf(df3)): print "an error in minimize error" print "f3 = ", f3 print "df3 = ", df3 return success = 1 except: # catch any error which occured in f x3 = (x2+x3)/2 # bisect and try again if f3 < F0: X0 = X+x3*s; F0 = f3; dF0 = df3 # keep best values d3 = dot(df3,s) # new slope if d3 > SIG*d0 or f3 > f0+x3*RHO*d0 or M == 0: # are we done extrapolating? break x1 = x2; f1 = f2; d1 = d2 # move point 2 to point 1 x2 = x3; f2 = f3; d2 = d3 # move point 3 to point 2 A = 6*(f1-f2)+3*(d2+d1)*(x2-x1) # make cubic extrapolation B = 3*(f2-f1)-(2*d1+d2)*(x2-x1) Z = B+sqrt(complex(B*B-A*d1*(x2-x1))) if Z != 0.0: x3 = x1-d1*(x2-x1)**2/Z # num. error possible, ok! else: x3 = inf if (not isreal(x3)) or isnan(x3) or isinf(x3) or (x3 < 0): # num prob | wrong sign? x3 = x2*EXT # extrapolate maximum amount elif x3 > x2*EXT: # new point beyond extrapolation limit? x3 = x2*EXT # extrapolate maximum amount elif x3 < x2+INT*(x2-x1): # new point too close to previous point? x3 = x2+INT*(x2-x1) x3 = real(x3) while (abs(d3) > -SIG*d0 or f3 > f0+x3*RHO*d0) and M > 0: # keep interpolating if (d3 > 0) or (f3 > f0+x3*RHO*d0): # choose subinterval x4 = x3; f4 = f3; d4 = d3 # move point 3 to point 4 else: x2 = x3; f2 = f3; d2 = d3 # move point 3 to point 2 if f4 > f0: x3 = x2-(0.5*d2*(x4-x2)**2)/(f4-f2-d2*(x4-x2)) # quadratic interpolation else: A = 6*(f2-f4)/(x4-x2)+3*(d4+d2) # cubic interpolation B = 3*(f4-f2)-(2*d2+d4)*(x4-x2) if A != 0: x3=x2+(sqrt(B*B-A*d2*(x4-x2)**2)-B)/A # num. error possible, ok! else: x3 = inf if isnan(x3) or isinf(x3): x3 = (x2+x4)/2 # if we had a numerical problem then bisect x3 = max(min(x3, x4-INT*(x4-x2)),x2+INT*(x4-x2)) # don't accept too close f3 = f(X+x3*s, *args) df3 = grad(X+x3*s, *args) if f3 < F0: X0 = X+x3*s; F0 = f3; dF0 = df3 # keep best values M = M - 1; i = i + (length<0) # count epochs?! 
d3 = dot(df3,s) # new slope if abs(d3) < -SIG*d0 and f3 < f0+x3*RHO*d0: # if line search succeeded X = X+x3*s; f0 = f3; fX.append(f0) # update variables if verbose: print '%s %6i; Value %4.6e\r' % (S, i, f0) s = (dot(df3,df3)-dot(df0,df3))/dot(df0,df0)*s - df3 # Polack-Ribiere CG direction df0 = df3 # swap derivatives d3 = d0; d0 = dot(df0,s) if d0 > 0: # new slope must be negative s = -df0; d0 = -dot(s,s) # otherwise use steepest direction x3 = x3 * min(RATIO, d3/(d0-SMALL)) # slope ratio but max RATIO ls_failed = 0 # this line search did not fail else: X = X0; f0 = F0; df0 = dF0 # restore best point so far if ls_failed or (i>abs(length)):# line search failed twice in a row break # or we ran out of time, so we give up s = -df0; d0 = -dot(s,s) # try steepest x3 = 1/(1-d0) ls_failed = 1 # this line search failed if verbose: print "\n" return X, fX, i
edwardzhou1980/bite-project
refs/heads/master
server/root.py
17
# Copyright 2012 Google Inc. All Rights Reserved. """Server root.""" __author__ = 'jason.stredwick@gmail.com (Jason Stredwick)' import os ROOT = os.path.dirname(__file__)
s3nk4s/flaskTutorials
refs/heads/master
FlaskApp/FlaskApp/venv/local/lib/python2.7/site-packages/setuptools/svn_utils.py
98
import os import re import sys from distutils import log import xml.dom.pulldom import shlex import locale import codecs import unicodedata import warnings from setuptools.compat import unicode from setuptools.py31compat import TemporaryDirectory from xml.sax.saxutils import unescape try: import urlparse except ImportError: import urllib.parse as urlparse from subprocess import Popen as _Popen, PIPE as _PIPE #NOTE: Use of the command line options require SVN 1.3 or newer (December 2005) # and SVN 1.3 hasn't been supported by the developers since mid 2008. #subprocess is called several times with shell=(sys.platform=='win32') #see the follow for more information: # http://bugs.python.org/issue8557 # http://stackoverflow.com/questions/5658622/ # python-subprocess-popen-environment-path def _run_command(args, stdout=_PIPE, stderr=_PIPE, encoding=None, stream=0): #regarding the shell argument, see: http://bugs.python.org/issue8557 try: proc = _Popen(args, stdout=stdout, stderr=stderr, shell=(sys.platform == 'win32')) data = proc.communicate()[stream] except OSError: return 1, '' #doubled checked and data = decode_as_string(data, encoding) #communciate calls wait() return proc.returncode, data def _get_entry_schedule(entry): schedule = entry.getElementsByTagName('schedule')[0] return "".join([t.nodeValue for t in schedule.childNodes if t.nodeType == t.TEXT_NODE]) def _get_target_property(target): property_text = target.getElementsByTagName('property')[0] return "".join([t.nodeValue for t in property_text.childNodes if t.nodeType == t.TEXT_NODE]) def _get_xml_data(decoded_str): if sys.version_info < (3, 0): #old versions want an encoded string data = decoded_str.encode('utf-8') else: data = decoded_str return data def joinpath(prefix, *suffix): if not prefix or prefix == '.': return os.path.join(*suffix) return os.path.join(prefix, *suffix) def determine_console_encoding(): try: #try for the preferred encoding encoding = locale.getpreferredencoding() #see if the 
locale.getdefaultlocale returns null #some versions of python\platforms return US-ASCII #when it cannot determine an encoding if not encoding or encoding == "US-ASCII": encoding = locale.getdefaultlocale()[1] if encoding: codecs.lookup(encoding) # make sure a lookup error is not made except (locale.Error, LookupError): encoding = None is_osx = sys.platform == "darwin" if not encoding: return ["US-ASCII", "utf-8"][is_osx] elif encoding.startswith("mac-") and is_osx: #certain versions of python would return mac-roman as default #OSX as a left over of earlier mac versions. return "utf-8" else: return encoding _console_encoding = determine_console_encoding() def decode_as_string(text, encoding=None): """ Decode the console or file output explicitly using getpreferredencoding. The text paraemeter should be a encoded string, if not no decode occurs If no encoding is given, getpreferredencoding is used. If encoding is specified, that is used instead. This would be needed for SVN --xml output. Unicode is explicitly put in composed NFC form. --xml should be UTF-8 (SVN Issue 2938) the discussion on the Subversion DEV List from 2007 seems to indicate the same. 
""" #text should be a byte string if encoding is None: encoding = _console_encoding if not isinstance(text, unicode): text = text.decode(encoding) text = unicodedata.normalize('NFC', text) return text def parse_dir_entries(decoded_str): '''Parse the entries from a recursive info xml''' doc = xml.dom.pulldom.parseString(_get_xml_data(decoded_str)) entries = list() for event, node in doc: if event == 'START_ELEMENT' and node.nodeName == 'entry': doc.expandNode(node) if not _get_entry_schedule(node).startswith('delete'): entries.append((node.getAttribute('path'), node.getAttribute('kind'))) return entries[1:] # do not want the root directory def parse_externals_xml(decoded_str, prefix=''): '''Parse a propget svn:externals xml''' prefix = os.path.normpath(prefix) prefix = os.path.normcase(prefix) doc = xml.dom.pulldom.parseString(_get_xml_data(decoded_str)) externals = list() for event, node in doc: if event == 'START_ELEMENT' and node.nodeName == 'target': doc.expandNode(node) path = os.path.normpath(node.getAttribute('path')) if os.path.normcase(path).startswith(prefix): path = path[len(prefix)+1:] data = _get_target_property(node) #data should be decoded already for external in parse_external_prop(data): externals.append(joinpath(path, external)) return externals # do not want the root directory def parse_external_prop(lines): """ Parse the value of a retrieved svn:externals entry. 
possible token setups (with quotng and backscaping in laters versions) URL[@#] EXT_FOLDERNAME [-r#] URL EXT_FOLDERNAME EXT_FOLDERNAME [-r#] URL """ externals = [] for line in lines.splitlines(): line = line.lstrip() # there might be a "\ " if not line: continue if sys.version_info < (3, 0): #shlex handles NULLs just fine and shlex in 2.7 tries to encode #as ascii automatiically line = line.encode('utf-8') line = shlex.split(line) if sys.version_info < (3, 0): line = [x.decode('utf-8') for x in line] #EXT_FOLDERNAME is either the first or last depending on where #the URL falls if urlparse.urlsplit(line[-1])[0]: external = line[0] else: external = line[-1] external = decode_as_string(external, encoding="utf-8") externals.append(os.path.normpath(external)) return externals def parse_prop_file(filename, key): found = False f = open(filename, 'rt') data = '' try: for line in iter(f.readline, ''): # can't use direct iter! parts = line.split() if len(parts) == 2: kind, length = parts data = f.read(int(length)) if kind == 'K' and data == key: found = True elif kind == 'V' and found: break finally: f.close() return data class SvnInfo(object): ''' Generic svn_info object. No has little knowledge of how to extract information. Use cls.load to instatiate according svn version. Paths are not filesystem encoded. ''' @staticmethod def get_svn_version(): # Temp config directory should be enough to check for repository # This is needed because .svn always creates .subversion and # some operating systems do not handle dot directory correctly. 
# Real queries in real svn repos with be concerned with it creation with TemporaryDirectory() as tempdir: code, data = _run_command(['svn', '--config-dir', tempdir, '--version', '--quiet']) if code == 0 and data: return data.strip() else: return '' #svnversion return values (previous implementations return max revision) # 4123:4168 mixed revision working copy # 4168M modified working copy # 4123S switched working copy # 4123:4168MS mixed revision, modified, switched working copy revision_re = re.compile(r'(?:([\-0-9]+):)?(\d+)([a-z]*)\s*$', re.I) @classmethod def load(cls, dirname=''): normdir = os.path.normpath(dirname) # Temp config directory should be enough to check for repository # This is needed because .svn always creates .subversion and # some operating systems do not handle dot directory correctly. # Real queries in real svn repos with be concerned with it creation with TemporaryDirectory() as tempdir: code, data = _run_command(['svn', '--config-dir', tempdir, 'info', normdir]) # Must check for some contents, as some use empty directories # in testcases, however only enteries is needed also the info # command above MUST have worked svn_dir = os.path.join(normdir, '.svn') is_svn_wd = (not code or os.path.isfile(os.path.join(svn_dir, 'entries'))) svn_version = tuple(cls.get_svn_version().split('.')) try: base_svn_version = tuple(int(x) for x in svn_version[:2]) except ValueError: base_svn_version = tuple() if not is_svn_wd: #return an instance of this NO-OP class return SvnInfo(dirname) if code or not base_svn_version or base_svn_version < (1, 3): warnings.warn(("No SVN 1.3+ command found: falling back " "on pre 1.7 .svn parsing"), DeprecationWarning) return SvnFileInfo(dirname) if base_svn_version < (1, 5): return Svn13Info(dirname) return Svn15Info(dirname) def __init__(self, path=''): self.path = path self._entries = None self._externals = None def get_revision(self): 'Retrieve the directory revision informatino using svnversion' code, data = 
_run_command(['svnversion', '-c', self.path]) if code: log.warn("svnversion failed") return 0 parsed = self.revision_re.match(data) if parsed: return int(parsed.group(2)) else: return 0 @property def entries(self): if self._entries is None: self._entries = self.get_entries() return self._entries @property def externals(self): if self._externals is None: self._externals = self.get_externals() return self._externals def iter_externals(self): ''' Iterate over the svn:external references in the repository path. ''' for item in self.externals: yield item def iter_files(self): ''' Iterate over the non-deleted file entries in the repository path ''' for item, kind in self.entries: if kind.lower() == 'file': yield item def iter_dirs(self, include_root=True): ''' Iterate over the non-deleted file entries in the repository path ''' if include_root: yield self.path for item, kind in self.entries: if kind.lower() == 'dir': yield item def get_entries(self): return [] def get_externals(self): return [] class Svn13Info(SvnInfo): def get_entries(self): code, data = _run_command(['svn', 'info', '-R', '--xml', self.path], encoding="utf-8") if code: log.debug("svn info failed") return [] return parse_dir_entries(data) def get_externals(self): #Previous to 1.5 --xml was not supported for svn propget and the -R #output format breaks the shlex compatible semantics. 
cmd = ['svn', 'propget', 'svn:externals'] result = [] for folder in self.iter_dirs(): code, lines = _run_command(cmd + [folder], encoding="utf-8") if code != 0: log.warn("svn propget failed") return [] #lines should a str for external in parse_external_prop(lines): if folder: external = os.path.join(folder, external) result.append(os.path.normpath(external)) return result class Svn15Info(Svn13Info): def get_externals(self): cmd = ['svn', 'propget', 'svn:externals', self.path, '-R', '--xml'] code, lines = _run_command(cmd, encoding="utf-8") if code: log.debug("svn propget failed") return [] return parse_externals_xml(lines, prefix=os.path.abspath(self.path)) class SvnFileInfo(SvnInfo): def __init__(self, path=''): super(SvnFileInfo, self).__init__(path) self._directories = None self._revision = None def _walk_svn(self, base): entry_file = joinpath(base, '.svn', 'entries') if os.path.isfile(entry_file): entries = SVNEntriesFile.load(base) yield (base, False, entries.parse_revision()) for path in entries.get_undeleted_records(): path = decode_as_string(path) path = joinpath(base, path) if os.path.isfile(path): yield (path, True, None) elif os.path.isdir(path): for item in self._walk_svn(path): yield item def _build_entries(self): entries = list() rev = 0 for path, isfile, dir_rev in self._walk_svn(self.path): if isfile: entries.append((path, 'file')) else: entries.append((path, 'dir')) rev = max(rev, dir_rev) self._entries = entries self._revision = rev def get_entries(self): if self._entries is None: self._build_entries() return self._entries def get_revision(self): if self._revision is None: self._build_entries() return self._revision def get_externals(self): prop_files = [['.svn', 'dir-prop-base'], ['.svn', 'dir-props']] externals = [] for dirname in self.iter_dirs(): prop_file = None for rel_parts in prop_files: filename = joinpath(dirname, *rel_parts) if os.path.isfile(filename): prop_file = filename if prop_file is not None: ext_prop = parse_prop_file(prop_file, 
'svn:externals') #ext_prop should be utf-8 coming from svn:externals ext_prop = decode_as_string(ext_prop, encoding="utf-8") externals.extend(parse_external_prop(ext_prop)) return externals def svn_finder(dirname=''): #combined externals due to common interface #combined externals and entries due to lack of dir_props in 1.7 info = SvnInfo.load(dirname) for path in info.iter_files(): yield path for path in info.iter_externals(): sub_info = SvnInfo.load(path) for sub_path in sub_info.iter_files(): yield sub_path class SVNEntriesFile(object): def __init__(self, data): self.data = data @classmethod def load(class_, base): filename = os.path.join(base, '.svn', 'entries') f = open(filename) try: result = SVNEntriesFile.read(f) finally: f.close() return result @classmethod def read(class_, fileobj): data = fileobj.read() is_xml = data.startswith('<?xml') class_ = [SVNEntriesFileText, SVNEntriesFileXML][is_xml] return class_(data) def parse_revision(self): all_revs = self.parse_revision_numbers() + [0] return max(all_revs) class SVNEntriesFileText(SVNEntriesFile): known_svn_versions = { '1.4.x': 8, '1.5.x': 9, '1.6.x': 10, } def __get_cached_sections(self): return self.sections def get_sections(self): SECTION_DIVIDER = '\f\n' sections = self.data.split(SECTION_DIVIDER) sections = [x for x in map(str.splitlines, sections)] try: # remove the SVN version number from the first line svn_version = int(sections[0].pop(0)) if not svn_version in self.known_svn_versions.values(): log.warn("Unknown subversion verson %d", svn_version) except ValueError: return self.sections = sections self.get_sections = self.__get_cached_sections return self.sections def is_valid(self): return bool(self.get_sections()) def get_url(self): return self.get_sections()[0][4] def parse_revision_numbers(self): revision_line_number = 9 rev_numbers = [ int(section[revision_line_number]) for section in self.get_sections() if (len(section) > revision_line_number and section[revision_line_number]) ] return 
rev_numbers def get_undeleted_records(self): undeleted = lambda s: s and s[0] and (len(s) < 6 or s[5] != 'delete') result = [ section[0] for section in self.get_sections() if undeleted(section) ] return result class SVNEntriesFileXML(SVNEntriesFile): def is_valid(self): return True def get_url(self): "Get repository URL" urlre = re.compile('url="([^"]+)"') return urlre.search(self.data).group(1) def parse_revision_numbers(self): revre = re.compile(r'committed-rev="(\d+)"') return [ int(m.group(1)) for m in revre.finditer(self.data) ] def get_undeleted_records(self): entries_pattern = \ re.compile(r'name="([^"]+)"(?![^>]+deleted="true")', re.I) results = [ unescape(match.group(1)) for match in entries_pattern.finditer(self.data) ] return results if __name__ == '__main__': for name in svn_finder(sys.argv[1]): print(name)
mozilla/inventory
refs/heads/master
vendor-local/src/django-multidb-router/docs/conf.py
3
import sys import os sys.path.append(os.path.abspath('..')) # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' extensions = ['sphinx.ext.autodoc'] # General information about the project. project = u'multidb-router' copyright = u'2010, The Zamboni Collective' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.2' # The full version, including alpha/beta/rc tags. release = '0.2' # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The name of the Pygments (syntax highlighting) style to use. #pygments_style = 'sphinx'
gabrielfalcao/lettuce
refs/heads/master
tests/integration/lib/Django-1.2.5/django/conf/locale/pt/formats.py
80
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # DATE_FORMAT = r'j \de F \de Y' TIME_FORMAT = 'H:i:s' DATETIME_FORMAT = r'j \de F \de Y à\s H:i' YEAR_MONTH_FORMAT = r'F \de Y' MONTH_DAY_FORMAT = r'j \de F' SHORT_DATE_FORMAT = 'd/m/Y' SHORT_DATETIME_FORMAT = 'd/m/Y H:i' FIRST_DAY_OF_WEEK = 0 # Sunday DATE_INPUT_FORMATS = ( '%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06' # '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006' # '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006' ) TIME_INPUT_FORMATS = ( '%H:%M:%S', # '14:30:59' '%H:%M', # '14:30' ) DATETIME_INPUT_FORMATS = ( '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59' '%d/%m/%Y %H:%M', # '25/10/2006 14:30' '%d/%m/%Y', # '25/10/2006' '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59' '%d/%m/%y %H:%M', # '25/10/06 14:30' '%d/%m/%y', # '25/10/06' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
leilihh/cinder
refs/heads/master
tools/colorizer.py
36
#!/usr/bin/env python # Copyright (c) 2013, Nebula, Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Colorizer Code is borrowed from Twisted: # Copyright (c) 2001-2010 Twisted Matrix Laboratories. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
"""Display a subunit stream through a colorized unittest test runner.""" import heapq import subunit import sys import unittest import testtools class _AnsiColorizer(object): """ANSI colorizer that wraps a stream object. colorizer is an object that loosely wraps around a stream, allowing callers to write text to the stream in a particular color. Colorizer classes must implement C{supported()} and C{write(text, color)}. """ _colors = dict(black=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37) def __init__(self, stream): self.stream = stream def supported(cls, stream=sys.stdout): """Check if platform is supported. A class method that returns True if the current platform supports coloring terminal output using this method. Returns False otherwise. """ if not stream.isatty(): return False # auto color only on TTYs try: import curses except ImportError: return False else: try: try: return curses.tigetnum("colors") > 2 except curses.error: curses.setupterm() return curses.tigetnum("colors") > 2 except Exception: # guess false in case of error return False supported = classmethod(supported) def write(self, text, color): """Write the given text to the stream in the given color. @param text: Text to be written to the stream. @param color: A string label for a color. e.g. 'red', 'white'. 
""" color = self._colors[color] self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) class _Win32Colorizer(object): """See _AnsiColorizer docstring.""" def __init__(self, stream): import win32console red, green, blue, bold = (win32console.FOREGROUND_RED, win32console.FOREGROUND_GREEN, win32console.FOREGROUND_BLUE, win32console.FOREGROUND_INTENSITY) self.stream = stream self.screenBuffer = win32console.GetStdHandle( win32console.STD_OUT_HANDLE) self._colors = { 'normal': red | green | blue, 'red': red | bold, 'green': green | bold, 'blue': blue | bold, 'yellow': red | green | bold, 'magenta': red | blue | bold, 'cyan': green | blue | bold, 'white': red | green | blue | bold } def supported(cls, stream=sys.stdout): try: import win32console screenBuffer = win32console.GetStdHandle( win32console.STD_OUT_HANDLE) except ImportError: return False import pywintypes try: screenBuffer.SetConsoleTextAttribute( win32console.FOREGROUND_RED | win32console.FOREGROUND_GREEN | win32console.FOREGROUND_BLUE) except pywintypes.error: return False else: return True supported = classmethod(supported) def write(self, text, color): color = self._colors[color] self.screenBuffer.SetConsoleTextAttribute(color) self.stream.write(text) self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) class _NullColorizer(object): """See _AnsiColorizer docstring.""" def __init__(self, stream): self.stream = stream def supported(cls, stream=sys.stdout): return True supported = classmethod(supported) def write(self, text, color): self.stream.write(text) def get_elapsed_time_color(elapsed_time): if elapsed_time > 1.0: return 'red' elif elapsed_time > 0.25: return 'yellow' else: return 'green' class NovaTestResult(testtools.TestResult): def __init__(self, stream, descriptions, verbosity): super(NovaTestResult, self).__init__() self.stream = stream self.showAll = verbosity > 1 self.num_slow_tests = 10 self.slow_tests = [] # this is a fixed-sized heap self.colorizer = None # NOTE(vish): reset 
stdout for the terminal check stdout = sys.stdout sys.stdout = sys.__stdout__ for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: if colorizer.supported(): self.colorizer = colorizer(self.stream) break sys.stdout = stdout self.start_time = None self.last_time = {} self.results = {} self.last_written = None def _writeElapsedTime(self, elapsed): color = get_elapsed_time_color(elapsed) self.colorizer.write(" %.2f" % elapsed, color) def _addResult(self, test, *args): try: name = test.id() except AttributeError: name = 'Unknown.unknown' test_class, test_name = name.rsplit('.', 1) elapsed = (self._now() - self.start_time).total_seconds() item = (elapsed, test_class, test_name) if len(self.slow_tests) >= self.num_slow_tests: heapq.heappushpop(self.slow_tests, item) else: heapq.heappush(self.slow_tests, item) self.results.setdefault(test_class, []) self.results[test_class].append((test_name, elapsed) + args) self.last_time[test_class] = self._now() self.writeTests() def _writeResult(self, test_name, elapsed, long_result, color, short_result, success): if self.showAll: self.stream.write(' %s' % str(test_name).ljust(66)) self.colorizer.write(long_result, color) if success: self._writeElapsedTime(elapsed) self.stream.writeln() else: self.colorizer.write(short_result, color) def addSuccess(self, test): super(NovaTestResult, self).addSuccess(test) self._addResult(test, 'OK', 'green', '.', True) def addFailure(self, test, err): if test.id() == 'process-returncode': return super(NovaTestResult, self).addFailure(test, err) self._addResult(test, 'FAIL', 'red', 'F', False) def addError(self, test, err): super(NovaTestResult, self).addFailure(test, err) self._addResult(test, 'ERROR', 'red', 'E', False) def addSkip(self, test, reason=None, details=None): super(NovaTestResult, self).addSkip(test, reason, details) self._addResult(test, 'SKIP', 'blue', 'S', True) def startTest(self, test): self.start_time = self._now() super(NovaTestResult, self).startTest(test) def 
writeTestCase(self, cls): if not self.results.get(cls): return if cls != self.last_written: self.colorizer.write(cls, 'white') self.stream.writeln() for result in self.results[cls]: self._writeResult(*result) del self.results[cls] self.stream.flush() self.last_written = cls def writeTests(self): time = self.last_time.get(self.last_written, self._now()) if not self.last_written or (self._now() - time).total_seconds() > 2.0: diff = 3.0 while diff > 2.0: classes = self.results.keys() oldest = min(classes, key=lambda x: self.last_time[x]) diff = (self._now() - self.last_time[oldest]).total_seconds() self.writeTestCase(oldest) else: self.writeTestCase(self.last_written) def done(self): self.stopTestRun() def stopTestRun(self): for cls in list(self.results): self.writeTestCase(cls) self.stream.writeln() self.writeSlowTests() def writeSlowTests(self): # Pare out 'fast' tests slow_tests = [item for item in self.slow_tests if get_elapsed_time_color(item[0]) != 'green'] if slow_tests: slow_total_time = sum(item[0] for item in slow_tests) slow = ("Slowest %i tests took %.2f secs:" % (len(slow_tests), slow_total_time)) self.colorizer.write(slow, 'yellow') self.stream.writeln() last_cls = None # sort by name for elapsed, cls, name in sorted(slow_tests, key=lambda x: x[1] + x[2]): if cls != last_cls: self.colorizer.write(cls, 'white') self.stream.writeln() last_cls = cls self.stream.write(' %s' % str(name).ljust(68)) self._writeElapsedTime(elapsed) self.stream.writeln() def printErrors(self): if self.showAll: self.stream.writeln() self.printErrorList('ERROR', self.errors) self.printErrorList('FAIL', self.failures) def printErrorList(self, flavor, errors): for test, err in errors: self.colorizer.write("=" * 70, 'red') self.stream.writeln() self.colorizer.write(flavor, 'red') self.stream.writeln(": %s" % test.id()) self.colorizer.write("-" * 70, 'red') self.stream.writeln() self.stream.writeln("%s" % err) test = subunit.ProtocolTestCase(sys.stdin, passthrough=None) if 
sys.version_info[0:2] <= (2, 6): runner = unittest.TextTestRunner(verbosity=2) else: runner = unittest.TextTestRunner(verbosity=2, resultclass=NovaTestResult) if runner.run(test).wasSuccessful(): exit_code = 0 else: exit_code = 1 sys.exit(exit_code)
mateor/betarepo
refs/heads/master
src/python/betarepo/datastructures/linear/concrete/stacks/dynamic_array_stack.py
1
# coding=utf-8

# The MIT License (MIT)
# Copyright (c) 2016 mateor

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

from betarepo.datastructures.linear.concrete.stacks.fixed_array_stack import FixedArrayStack
from betarepo.datastructures.linear.abstract.stack import Stack, StackException

# Factor by which the backing array's capacity grows on overflow.
DEFAULT_GROWTH_FACTOR = 1.5


class DynamicArrayStack(Stack):
    """A stack backed by a FixedArrayStack that grows on demand.

    When a push overflows the backing array, a new array is allocated whose
    size is the old capacity times ``growth_factor``, and the elements are
    copied over.  Inserting n elements therefore takes O(n) time overall, so
    each insertion is constant amortized time.
    """

    # TODO(mateor): Refactor in a major way. This is too much code for this idea.
    # The issue is that this models best if the DynamicArray is a wrapper around
    # the FixedArray, but then it requires all these method wrappers - there is
    # a better way. Maybe a decorator - although I don't want it adding noise to
    # perf analysis.
    # TODO(mateor): Deallocate on shrink - the threshold must be less than
    # 1/growth_factor to provide hysteresis.

    def __init__(self, capacity=None, growth_factor=None):
        # growth_factor defaults to DEFAULT_GROWTH_FACTOR when falsy/omitted.
        self.growth_factor = growth_factor or DEFAULT_GROWTH_FACTOR
        self.stack = FixedArrayStack(capacity)

    def __len__(self):
        return len(self.stack)

    def __repr__(self):
        return repr(self.stack)

    def __getitem__(self, index):
        # Delegate indexing straight to the backing stack.
        return self.stack[index]

    def __iter__(self):
        # Iterate by index so only FixedArrayStack.__getitem__ is required.
        for index in range(len(self.stack)):
            yield self.stack[index]

    @property
    def size(self):
        return self.stack.size

    @property
    def capacity(self):
        return self.stack.capacity

    def push(self, value):
        """Push value, growing the backing array if it is full."""
        try:
            self.stack.push(value)
        except StackException:
            # resize() guarantees strictly more room, so a single retry
            # suffices (no recursion needed).
            self.resize()
            self.stack.push(value)

    def pop(self):
        return self.stack.pop()

    def is_empty(self):
        return self.stack.is_empty()

    def make_empty(self):
        return self.stack.make_empty()

    def resize(self):
        """Replace the backing stack with a strictly larger copy.

        Bug fix: ``int(capacity * growth_factor)`` can equal the old capacity
        when capacity <= 1 (e.g. int(1 * 1.5) == 1), which previously made
        push() recurse forever on a full stack.  Guarantee at least one extra
        slot so the retried push always succeeds.
        """
        old_capacity = self.stack.capacity
        new_size = max(old_capacity + 1, int(old_capacity * self.growth_factor))
        resized_stack = FixedArrayStack(capacity=new_size)
        for item in self.stack:
            resized_stack.push(item)
        self.stack = resized_stack
hachard/Cra-Magnet
refs/heads/master
flask/lib/python3.5/site-packages/bs4/tests/test_docs.py
607
"Test harness for doctests." # pylint: disable-msg=E0611,W0142 __metaclass__ = type __all__ = [ 'additional_tests', ] import atexit import doctest import os #from pkg_resources import ( # resource_filename, resource_exists, resource_listdir, cleanup_resources) import unittest DOCTEST_FLAGS = ( doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF) # def additional_tests(): # "Run the doc tests (README.txt and docs/*, if any exist)" # doctest_files = [ # os.path.abspath(resource_filename('bs4', 'README.txt'))] # if resource_exists('bs4', 'docs'): # for name in resource_listdir('bs4', 'docs'): # if name.endswith('.txt'): # doctest_files.append( # os.path.abspath( # resource_filename('bs4', 'docs/%s' % name))) # kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS) # atexit.register(cleanup_resources) # return unittest.TestSuite(( # doctest.DocFileSuite(*doctest_files, **kwargs)))
VictoriousCoin/Victorious-Coin
refs/heads/master
share/qt/extract_strings_qt.py
1294
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob

# Generated C++ file that embeds the translatable strings.
OUT_CPP = "src/qt/bitcoinstrings.cpp"
# A msgid that is a single empty string is the PO metadata header; skip it.
EMPTY = ['""']


def parse_po(text):
    """
    Parse 'po' format produced by xgettext.

    Return a list of (msgid, msgstr) tuples, where each element of the tuple
    is the list of quoted source lines exactly as they appeared after the
    'msgid '/'msgstr ' keyword (continuation lines included).
    """
    messages = []
    msgid = []
    msgstr = []
    in_msgid = False
    in_msgstr = False

    for line in text.split('\n'):
        line = line.rstrip('\r')
        if line.startswith('msgid '):
            if in_msgstr:
                # Previous entry is complete; store it before starting anew.
                messages.append((msgid, msgstr))
                in_msgstr = False
            # message start
            in_msgid = True
            msgid = [line[6:]]
        elif line.startswith('msgstr '):
            in_msgid = False
            in_msgstr = True
            msgstr = [line[7:]]
        elif line.startswith('"'):
            # Continuation line belongs to whichever section is open.
            if in_msgid:
                msgid.append(line)
            if in_msgstr:
                msgstr.append(line)

    if in_msgstr:
        # Flush the trailing entry (input need not end with another 'msgid').
        messages.append((msgid, msgstr))
    return messages


def main():
    """Run xgettext over the C++ sources and write OUT_CPP."""
    files = glob.glob('src/*.cpp') + glob.glob('src/*.h')

    # xgettext -n --keyword=_ $FILES
    child = Popen(['xgettext', '--output=-', '-n', '--keyword=_'] + files, stdout=PIPE)
    (out, err) = child.communicate()

    # communicate() yields bytes on Python 3; decode so parse_po sees text.
    # (On Python 2, str.decode returns unicode, which works the same here.)
    messages = parse_po(out.decode('utf-8'))

    # 'with' guarantees the output file is closed even if a write fails.
    with open(OUT_CPP, 'w') as f:
        f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
        f.write('static const char UNUSED *bitcoin_strings[] = {')
        for (msgid, msgstr) in messages:
            if msgid != EMPTY:
                f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
        f.write('};')


if __name__ == '__main__':
    main()
dongjiaqiang/cassandra
refs/heads/trunk
pylib/cqlshlib/helptopics.py
10
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .cql3handling import simple_cql_types class CQLHelpTopics(object): def get_help_topics(self): return [t[5:] for t in dir(self) if t.startswith('help_')] def print_help_topic(self, topic): getattr(self, 'help_' + topic.lower())() def help_types(self): print "\n CQL types recognized by this version of cqlsh:\n" for t in simple_cql_types: print ' ' + t print """ For information on the various recognizable input formats for these types, or on controlling the formatting of cqlsh query output, see one of the following topics: HELP TIMESTAMP_INPUT HELP DATE_INPUT HELP TIME_INPUT HELP BLOB_INPUT HELP UUID_INPUT HELP BOOLEAN_INPUT HELP INT_INPUT HELP TEXT_OUTPUT HELP TIMESTAMP_OUTPUT """ def help_timestamp_input(self): print """ Timestamp input CQL supports any of the following ISO 8601 formats for timestamp specification: yyyy-mm-dd HH:mm yyyy-mm-dd HH:mm:ss yyyy-mm-dd HH:mmZ yyyy-mm-dd HH:mm:ssZ yyyy-mm-dd'T'HH:mm yyyy-mm-dd'T'HH:mmZ yyyy-mm-dd'T'HH:mm:ss yyyy-mm-dd'T'HH:mm:ssZ yyyy-mm-dd yyyy-mm-ddZ The Z in these formats refers to an RFC-822 4-digit time zone, expressing the time zone's difference from UTC. 
For example, a timestamp in Pacific Standard Time might be given thus: 2012-01-20 16:14:12-0800 If no time zone is supplied, the current time zone for the Cassandra server node will be used. """ def help_date_input(self): print """ Date input CQL supports the following format for date specification: yyyy-mm-dd """ def help_time_input(self): print """ Time input CQL supports the following format for time specification: HH:MM:SS HH:MM:SS.mmm HH:MM:SS.mmmuuu HH:MM:SS.mmmuuunnn """ def help_blob_input(self): print """ Blob input CQL blob data must be specified in a string literal as hexidecimal data. Example: to store the ASCII values for the characters in the string "CQL", use '43514c'. """ def help_uuid_input(self): print """ UUID input UUIDs may be specified in CQL using 32 hexidecimal characters, split up using dashes in the standard UUID format: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX """ def help_boolean_input(self): print """ Boolean input CQL accepts the strings 'true' and 'false' (case insensitive) as input for boolean types. """ def help_int_input(self): print """ Integer input CQL accepts the following integer types: tinyint - 1-byte signed integer smallint - 2-byte signed integer int - 4-byte signed integer bigint - 8-byte signed integer """ def help_timestamp_output(self): print """ Timestamp output Cqlsh will display timestamps in the following format by default: yyyy-mm-dd HH:mm:ssZ which is a format acceptable as CQL timestamp input as well. The output format can be changed by setting 'time_format' property in the [ui] section of .cqlshrc file. """ def help_text_output(self): print """ Textual output When control characters, or other characters which can't be encoded in your current locale, are found in values of 'text' or 'ascii' types, it will be shown as a backslash escape. If color is enabled, any such backslash escapes will be shown in a different color from the surrounding text. 
Unicode code points in your data will be output intact, if the encoding for your locale is capable of decoding them. If you prefer that non-ascii characters be shown with Python-style "\\uABCD" escape sequences, invoke cqlsh with an ASCII locale (for example, by setting the $LANG environment variable to "C"). """ help_ascii_output = help_text_output def help_create_index(self): print """ CREATE INDEX [<indexname>] ON <cfname> ( <colname> ); A CREATE INDEX statement is used to create a new, automatic secondary index on the given CQL table, for the named column. A name for the index itself can be specified before the ON keyword, if desired. A single column name must be specified inside the parentheses. It is not necessary for the column to exist on any current rows (Cassandra is schema-optional), but the column must already have a type (specified during the CREATE TABLE, or added afterwards with ALTER TABLE). """ def help_drop(self): print """ There are different variants of DROP. For more information, see one of the following: HELP DROP_KEYSPACE; HELP DROP_TABLE; HELP DROP_INDEX; HELP DROP_FUNCTION; HELP DROP_AGGREGATE; """ def help_drop_keyspace(self): print """ DROP KEYSPACE <keyspacename>; A DROP KEYSPACE statement results in the immediate, irreversible removal of a keyspace, including all column families in it, and all data contained in those column families. """ def help_drop_table(self): print """ DROP TABLE <tablename>; A DROP TABLE statement results in the immediate, irreversible removal of a CQL table and the underlying column family, including all data contained in it. """ help_drop_columnfamily = help_drop_table def help_drop_index(self): print """ DROP INDEX <indexname>; A DROP INDEX statement is used to drop an existing secondary index. """ def help_drop_function(self): print """ DROP FUNCTION ( IF EXISTS )? ( <keyspace> '.' )? <function-name> ( '(' <arg-type> ( ',' <arg-type> )* ')' )? 
DROP FUNCTION statement removes a function created using CREATE FUNCTION. You must specify the argument types (signature) of the function to drop if there are multiple functions with the same name but a different signature (overloaded functions). DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists. """ def help_drop_aggregate(self): print """ DROP AGGREGATE ( IF EXISTS )? ( <keyspace> '.' )? <aggregate-name> ( '(' <arg-type> ( ',' <arg-type> )* ')' )? The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE. You must specify the argument types of the aggregate to drop if there are multiple aggregates with the same name but a different signature (overloaded aggregates). DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists, and does nothing if a function with the signature does not exist. Signatures for user-defined aggregates follow the same rules as for user-defined functions. """ def help_truncate(self): print """ TRUNCATE <tablename>; TRUNCATE accepts a single argument for the table name, and permanently removes all data from it. """ def help_create(self): print """ There are different variants of CREATE. For more information, see one of the following: HELP CREATE_KEYSPACE; HELP CREATE_TABLE; HELP CREATE_INDEX; HELP CREATE_FUNCTION; HELP CREATE_AGGREGATE; """ def help_use(self): print """ USE <keyspacename>; Tells cqlsh and the connected Cassandra instance that you will be working in the given keyspace. All subsequent operations on tables or indexes will be in the context of this keyspace, unless otherwise specified, until another USE command is issued or the connection terminates. As always, when a keyspace name does not work as a normal identifier or number, it can be quoted using double quotes. """ def help_create_aggregate(self): print """ CREATE ( OR REPLACE )? AGGREGATE ( IF NOT EXISTS )? ( <keyspace> '.' )? 
<aggregate-name> '(' <arg-type> ( ',' <arg-type> )* ')' SFUNC ( <keyspace> '.' )? <state-functionname> STYPE <state-type> ( FINALFUNC ( <keyspace> '.' )? <final-functionname> )? ( INITCOND <init-cond> )? CREATE AGGREGATE creates or replaces a user-defined aggregate. CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate or replaces an existing one with the same signature. A CREATE AGGREGATE without OR REPLACE fails if an aggregate with the same signature already exists. CREATE AGGREGATE with the optional IF NOT EXISTS keywords either creates an aggregate if it does not already exist. OR REPLACE and IF NOT EXIST cannot be used together. Aggregates belong to a keyspace. If no keyspace is specified in <aggregate-name>, the current keyspace is used (i.e. the keyspace specified using the USE statement). It is not possible to create a user-defined aggregate in one of the system keyspaces. Signatures for user-defined aggregates follow the same rules as for user-defined functions. STYPE defines the type of the state value and must be specified. The optional INITCOND defines the initial state value for the aggregate. It defaults to null. A non-null INITCOND must be specified for state functions that are declared with RETURNS NULL ON NULL INPUT. SFUNC references an existing function to be used as the state modifying function. The type of first argument of the state function must match STYPE. The remaining argument types of the state function must match the argument types of the aggregate function. State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and called with null. The optional FINALFUNC is called just before the aggregate result is returned. It must take only one argument with type STYPE. The return type of the FINALFUNC may be a different type. A final function declared with RETURNS NULL ON NULL INPUT means that the aggregate's return value will be null, if the last state is null. 
If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE. If a FINALFUNC is defined, it is the return type of that function. """ def help_create_function(self): print """ CREATE ( OR REPLACE )? FUNCTION ( IF NOT EXISTS )? ( <keyspace> '.' )? <function-name> '(' <arg-name> <arg-type> ( ',' <arg-name> <arg-type> )* ')' ( CALLED | RETURNS NULL ) ON NULL INPUT RETURNS <type> LANGUAGE <language> AS <body> CREATE FUNCTION creates or replaces a user-defined function. Signatures are used to distinguish individual functions. The signature consists of: The fully qualified function name - i.e keyspace plus function-name The concatenated list of all argument types Note that keyspace names, function names and argument types are subject to the default naming conventions and case-sensitivity rules. CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or replaces an existing one with the same signature. A CREATE FUNCTION without OR REPLACE fails if a function with the same signature already exists. Behavior on invocation with null values must be defined for each function. There are two options: RETURNS NULL ON NULL INPUT declares that the function will always return null if any of the input arguments is null. CALLED ON NULL INPUT declares that the function will always be executed. If the optional IF NOT EXISTS keywords are used, the function will only be created if another function with the same signature does not exist. OR REPLACE and IF NOT EXIST cannot be used together. Functions belong to a keyspace. If no keyspace is specified in <function-name>, the current keyspace is used (i.e. the keyspace specified using the USE statement). It is not possible to create a user-defined function in one of the system keyspaces. 
""" def help_create_table(self): print """ CREATE TABLE <cfname> ( <colname> <type> PRIMARY KEY [, <colname> <type> [, ...]] ) [WITH <optionname> = <val> [AND <optionname> = <val> [...]]]; CREATE TABLE statements create a new CQL table under the current keyspace. Valid table names are strings of alphanumeric characters and underscores, which begin with a letter. Each table requires a primary key, which will correspond to the underlying columnfamily key and key validator. It's important to note that the key type you use must be compatible with the partitioner in use. For example, OrderPreservingPartitioner and CollatingOrderPreservingPartitioner both require UTF-8 keys. In cql3 mode, a table can have multiple columns composing the primary key (see HELP COMPOUND_PRIMARY_KEYS). For more information, see one of the following: HELP CREATE_TABLE_TYPES; HELP CREATE_TABLE_OPTIONS; """ help_create_columnfamily = help_create_table def help_compound_primary_keys(self): print """ CREATE TABLE <cfname> ( <partition_key> <type>, <clustering_key1> type, <clustering_key2> type, [, ...]], PRIMARY KEY (<partition_key>, <clustering_key1>, <clustering_key2>); CREATE TABLE allows a primary key composed of multiple columns. When this is the case, specify the columns that take part in the compound key after all columns have been specified. , PRIMARY KEY( <key1>, <key2>, ... ) The partitioning key itself can be a compound key, in which case the first element of the PRIMARY KEY phrase should be parenthesized, as PRIMARY KEY ((<partition_key_part1>, <partition_key_part2>), <clustering_key>) """ def help_create_table_types(self): print """ CREATE TABLE: Specifying column types CREATE ... (KEY <type> PRIMARY KEY, othercol <type>) ... It is possible to assign columns a type during table creation. Columns configured with a type are validated accordingly when a write occurs, and intelligent CQL drivers and interfaces will be able to decode the column values correctly when receiving them. 
Column types are specified as a parenthesized, comma-separated list of column term and type pairs. See HELP TYPES; for the list of recognized types. """ help_create_columnfamily_types = help_create_table_types def help_create_table_options(self): print """ CREATE TABLE: Specifying columnfamily options CREATE TABLE blah (...) WITH optionname = val AND otheroption = val2; A number of optional keyword arguments can be supplied to control the configuration of a new CQL table, such as the size of the associated row and key caches for the underlying Cassandra columnfamily. Consult your CQL reference for the complete list of options and possible values. """ help_create_columnfamily_options = help_create_table_options def help_alter_alter(self): print """ ALTER TABLE: altering existing typed columns ALTER TABLE addamsFamily ALTER lastKnownLocation TYPE uuid; ALTER TABLE ... ALTER changes the expected storage type for a column. The column must already have a type in the column family metadata. The column may or may not already exist in current rows-- but be aware that no validation of existing data is done. The bytes stored in values for that column will remain unchanged, and if existing data is not deserializable according to the new type, this may cause your CQL driver or interface to report errors. """ def help_alter_add(self): print """ ALTER TABLE: adding a typed column ALTER TABLE addamsFamily ADD gravesite varchar; The ALTER TABLE ... ADD variant adds a typed column to a column family. The column must not already have a type in the column family metadata. See the warnings on HELP ALTER_ALTER regarding the lack of validation of existing data; they apply here as well. """ def help_alter_drop(self): print """ ALTER TABLE: dropping a typed column ALTER TABLE addamsFamily DROP gender; An ALTER TABLE ... DROP statement removes the type of a column from the column family metadata. 
Note that this does _not_ remove the column from current rows; it just removes the metadata saying that the bytes stored under that column are expected to be deserializable according to a certain type. """ def help_alter_with(self): print """ ALTER TABLE: changing column family properties ALTER TABLE addamsFamily WITH comment = 'Glad to be here!' AND read_repair_chance = 0.2; An ALTER TABLE ... WITH statement makes adjustments to the table properties, as defined when the table was created (see HELP CREATE_TABLE_OPTIONS and your Cassandra documentation for information about the supported parameter names and values). """ def help_delete_columns(self): print """ DELETE: specifying columns DELETE col1, col2, col3 FROM ... Following the DELETE keyword is an optional comma-delimited list of column name terms. When no column names are given, the remove applies to the entire row(s) matched by the WHERE clause. When column names do not parse as valid CQL identifiers, they can be quoted in single quotes (CQL 2) or double quotes (CQL 3). """ def help_delete_where(self): print """ DELETE: specifying rows DELETE ... WHERE keycol = 'some_key_value'; DELETE ... WHERE keycol1 = 'val1' AND keycol2 = 'val2'; DELETE ... WHERE keycol IN (key1, key2); The WHERE clause is used to determine to which row(s) a DELETE applies. The first form allows the specification of a precise row by specifying a particular primary key value (if the primary key has multiple columns, values for each must be given). The second form allows a list of key values to be specified using the IN operator and a parenthesized list of comma-delimited key values. """ def help_update_set(self): print """ UPDATE: Specifying Columns and Row UPDATE ... SET name1 = value1, name2 = value2 WHERE <key> = keyname; UPDATE ... SET name1 = value1, name2 = value2 WHERE <key> IN ('<key1>', '<key2>', ...) Rows are created or updated by supplying column names and values in term assignment format. 
Multiple columns can be set by separating the name/value pairs using commas. """ def help_update_counters(self): print """ UPDATE: Updating Counter Columns UPDATE ... SET name1 = name1 + <value> ... UPDATE ... SET name1 = name1 - <value> ... Counter columns can be incremented or decremented by an arbitrary numeric value though the assignment of an expression that adds or subtracts the value. """ def help_update_where(self): print """ UPDATE: Selecting rows to update UPDATE ... WHERE <keyname> = <keyval>; UPDATE ... WHERE <keyname> IN (<keyval1>, <keyval2>, ...); UPDATE ... WHERE <keycol1> = <keyval1> AND <keycol2> = <keyval2>; Each update statement requires a precise set of keys to be specified using a WHERE clause. If the table's primary key consists of multiple columns, an explicit value must be given for each for the UPDATE statement to make sense. """ def help_select_table(self): print """ SELECT: Specifying Table SELECT ... FROM [<keyspace>.]<tablename> ... The FROM clause is used to specify the CQL table applicable to a SELECT query. The keyspace in which the table exists can optionally be specified along with the table name, separated by a dot (.). This will not change the current keyspace of the session (see HELP USE). """ help_select_columnfamily = help_select_table def help_select_where(self): print """ SELECT: Filtering rows SELECT ... WHERE <key> = keyname AND name1 = value1 SELECT ... WHERE <key> >= startkey and <key> =< endkey AND name1 = value1 SELECT ... WHERE <key> IN ('<key>', '<key>', '<key>', ...) The WHERE clause provides for filtering the rows that appear in results. The clause can filter on a key name, or range of keys, and in the case of indexed columns, on column values. Key filters are specified using the KEY keyword or key alias name, a relational operator (one of =, >, >=, <, and <=), and a term value. When terms appear on both sides of a relational operator it is assumed the filter applies to an indexed column. 
With column index filters, the term on the left of the operator is the name, the term on the right is the value to filter _on_. Note: The greater-than and less-than operators (> and <) result in key ranges that are inclusive of the terms. There is no supported notion of "strictly" greater-than or less-than; these operators are merely supported as aliases to >= and <=. """ def help_select_limit(self): print """ SELECT: Limiting results SELECT ... WHERE <clause> [LIMIT n] ... Limiting the number of rows returned can be achieved by adding the LIMIT option to a SELECT expression. LIMIT defaults to 10,000 when left unset. """ class CQL3HelpTopics(CQLHelpTopics): def help_create_keyspace(self): print """ CREATE KEYSPACE <ksname> WITH replication = {'class':'<strategy>' [,'<option>':<val>]}; The CREATE KEYSPACE statement creates a new top-level namespace (aka "keyspace"). Valid names are any string constructed of alphanumeric characters and underscores. Names which do not work as valid identifiers or integers should be quoted as string literals. Properties such as replication strategy and count are specified during creation as key-value pairs in the 'replication' map: class [required]: The name of the replication strategy class which should be used for the new keyspace. Some often-used classes are SimpleStrategy and NetworkTopologyStrategy. other options [optional]: Most strategies require additional arguments which can be supplied as key-value pairs in the 'replication' map. 
Examples: To create a keyspace with NetworkTopologyStrategy and strategy option of "DC1" with a value of "1" and "DC2" with a value of "2" you would use the following statement: CREATE KEYSPACE <ksname> WITH replication = {'class':'NetworkTopologyStrategy', 'DC1':1, 'DC2':2}; To create a keyspace with SimpleStrategy and "replication_factor" option with a value of "3" you would use this statement: CREATE KEYSPACE <ksname> WITH replication = {'class':'SimpleStrategy', 'replication_factor':3}; """ def help_begin(self): print """ BEGIN [UNLOGGED|COUNTER] BATCH [USING TIMESTAMP <timestamp>] <insert or update or delete statement> ; [ <another insert or update or delete statement ; [...]] APPLY BATCH; BATCH supports setting a client-supplied optional global timestamp which will be used for each of the operations included in the batch. Only data modification statements (specifically, UPDATE, INSERT, and DELETE) are allowed in a BATCH statement. BATCH is _not_ an analogue for SQL transactions. _NOTE: Counter mutations are allowed only within COUNTER batches._ _NOTE: While there are no isolation guarantees, UPDATE queries are atomic within a given record._ """ help_apply = help_begin def help_select(self): print """ SELECT <selectExpr> FROM [<keyspace>.]<table> [WHERE <clause>] [ORDER BY <colname> [DESC]] [LIMIT m]; SELECT is used to read one or more records from a CQL table. It returns a set of rows matching the selection criteria specified. For more information, see one of the following: HELP SELECT_EXPR HELP SELECT_TABLE HELP SELECT_WHERE HELP SELECT_LIMIT """ def help_delete(self): print """ DELETE [<col1> [, <col2>, ...] FROM [<keyspace>.]<tablename> [USING TIMESTAMP <timestamp>] WHERE <keyname> = <keyvalue>; A DELETE is used to perform the removal of one or more columns from one or more rows. Each DELETE statement requires a precise set of row keys to be specified using a WHERE clause and the KEY keyword or key alias. 
For more information, see one of the following: HELP DELETE_USING HELP DELETE_COLUMNS HELP DELETE_WHERE """ def help_delete_using(self): print """ DELETE: the USING clause DELETE ... USING TIMESTAMP <timestamp>; <timestamp> defines the optional timestamp for the new tombstone record. It must be an integer. Cassandra timestamps are generally specified using milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC). """ def help_update(self): print """ UPDATE [<keyspace>.]<columnFamily> [USING [TIMESTAMP <timestamp>] [AND TTL <timeToLive>]] SET name1 = value1, name2 = value2 WHERE <keycol> = keyval [IF EXISTS]; An UPDATE is used to write one or more columns to a record in a table. No results are returned. The record's primary key must be completely and uniquely specified; that is, if the primary key includes multiple columns, all must be explicitly given in the WHERE clause. Statements begin with the UPDATE keyword followed by the name of the table to be updated. For more information, see one of the following: HELP UPDATE_USING HELP UPDATE_SET HELP UPDATE_COUNTERS HELP UPDATE_WHERE """ def help_update_using(self): print """ UPDATE: the USING clause UPDATE ... USING TIMESTAMP <timestamp>; UPDATE ... USING TTL <timeToLive>; The USING clause allows setting of certain query and data parameters. If multiple parameters need to be set, these may be joined using AND. Example: UPDATE ... USING TTL 43200 AND TIMESTAMP 1351620509603 <timestamp> defines the optional timestamp for the new column value(s). It must be an integer. Cassandra timestamps are generally specified using milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC). <timeToLive> defines the optional time to live (TTL) in seconds for the new column value(s). It must be an integer. 
""" def help_insert(self): print """ INSERT INTO [<keyspace>.]<tablename> ( <colname1>, <colname2> [, <colname3> [, ...]] ) VALUES ( <colval1>, <colval2> [, <colval3> [, ...]] ) [USING TIMESTAMP <timestamp>] [AND TTL <timeToLive>]; An INSERT is used to write one or more columns to a record in a CQL table. No results are returned. Values for all component columns in the table's primary key must be given. Also, there must be at least one non-primary-key column specified (Cassandra rows are not considered to exist with only a key and no associated columns). Unlike in SQL, the semantics of INSERT and UPDATE are identical. In either case a record is created if none existed before, and udpated when it does. For more information, see one of the following: HELP UPDATE HELP UPDATE_USING """ def help_select_expr(self): print """ SELECT: Specifying Columns SELECT name1, name2, name3 FROM ... SELECT COUNT(*) FROM ... The SELECT expression determines which columns will appear in the results and takes the form of a comma separated list of names. It is worth noting that unlike the projection in a SQL SELECT, there is no guarantee that the results will contain all of the columns specified. This is because Cassandra is schema-less and there are no guarantees that a given column exists. When the COUNT aggregate function is specified as a column to fetch, a single row will be returned, with a single column named "count" whose value is the number of rows from the pre-aggregation resultset. Currently, COUNT is the only function supported by CQL. """ def help_alter_drop(self): print """ ALTER TABLE: dropping a typed column ALTER TABLE addamsFamily DROP gender; An ALTER TABLE ... DROP statement removes the type of a column from the column family metadata. Dropped columns will immediately become unavailable in the queries and will not be included in compacted sstables in the future. If a column is readded, queries won't return values written before the column was last dropped. 
It is assumed that timestamps represent actual time, so if this is not your case, you should NOT readd previously dropped columns. Columns can't be dropped from tables defined with COMPACT STORAGE. """ def help_create(self): super(CQL3HelpTopics, self).help_create() print """ HELP CREATE_USER; HELP CREATE_ROLE; """ def help_alter(self): print """ ALTER TABLE <tablename> ALTER <columnname> TYPE <type>; ALTER TABLE <tablename> ADD <columnname> <type>; ALTER TABLE <tablename> RENAME <columnname> TO <columnname> [AND <columnname> TO <columnname>] ALTER TABLE <tablename> WITH <optionname> = <val> [AND <optionname> = <val> [...]]; An ALTER statement is used to manipulate table metadata. It allows you to add new typed columns, drop existing columns, change the data storage type of existing columns, or change table properties. No results are returned. See one of the following for more information: HELP ALTER_ALTER; HELP ALTER_ADD; HELP ALTER_DROP; HELP ALTER_RENAME; HELP ALTER_WITH; """ def help_alter_rename(self): print """ ALTER TABLE: renaming a column ALTER TABLE <tablename> RENAME <columnname> TO <columnname> [AND <columnname> TO <columnname>] The ALTER TABLE ... RENAME variant renames a typed column in a column family. """ def help_drop(self): super(CQL3HelpTopics, self).help_create() print """ HELP DROP_USER; HELP DROP_ROLE; """ def help_list(self): print """ There are different variants of LIST. For more information, see one of the following: HELP LIST_USERS; HELP LIST_PERMISSIONS; """ def help_create_user(self): print """ CREATE USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER]; CREATE USER creates a new Cassandra user account. Only superusers can issue CREATE USER requests. To create a superuser account use SUPERUSER option (NOSUPERUSER is the default). WITH PASSWORD clause should only be used with password-based authenticators, e.g. PasswordAuthenticator, SimpleAuthenticator. 
""" def help_alter_user(self): print """ ALTER USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER]; Use ALTER USER to change a user's superuser status and/or password (only with password-based authenticators). Superusers can change a user's password or superuser status (except their own). Users cannot change their own superuser status. Ordinary users can only change their password (if the configured authenticator is password-based). """ def help_drop_user(self): print """ DROP USER <username>; DROP USER removes an existing user. You have to be logged in as a superuser to issue a DROP USER statement. A user cannot drop themselves. """ def help_list_users(self): print """ LIST USERS; List existing users and their superuser status. """ def help_grant(self): print """ GRANT (<permission> [PERMISSION] | ALL [PERMISSIONS]) ON ALL KEYSPACES | KEYSPACE <keyspace> | [TABLE] [<keyspace>.]<table> TO [ROLE <rolename> | USER <username>] Grant the specified permission (or all permissions) on a resource to a role or user. To be able to grant a permission on some resource you have to have that permission yourself and also AUTHORIZE permission on it, or on one of its parent resources. See HELP PERMISSIONS for more info on the available permissions. """ def help_revoke(self): print """ REVOKE (<permission> [PERMISSION] | ALL [PERMISSIONS]) ON ALL KEYSPACES | KEYSPACE <keyspace> | [TABLE] [<keyspace>.]<table> FROM [ROLE <rolename> | USER <username>] Revokes the specified permission (or all permissions) on a resource from a role or user. To be able to revoke a permission on some resource you have to have that permission yourself and also AUTHORIZE permission on it, or on one of its parent resources. See HELP PERMISSIONS for more info on the available permissions. 
""" def help_list_permissions(self): print """ LIST (<permission> [PERMISSION] | ALL [PERMISSIONS]) [ON ALL KEYSPACES | KEYSPACE <keyspace> | [TABLE] [<keyspace>.]<table>] [OF [ROLE <rolename> | USER <username>] [NORECURSIVE] Omitting ON <resource> part will list permissions on ALL KEYSPACES, every keyspace and table. Omitting OF [ROLE <rolename> | USER <username>] part will list permissions of all roles and users. Omitting NORECURSIVE specifier will list permissions of the resource and all its parents (table, table's keyspace and ALL KEYSPACES). See HELP PERMISSIONS for more info on the available permissions. """ def help_permissions(self): print """ PERMISSIONS Cassandra has 6 permissions: ALTER: required for ALTER KEYSPCE, ALTER TABLE, CREATE INDEX, DROP INDEX AUTHORIZE: required for GRANT, REVOKE CREATE: required for CREATE KEYSPACE, CREATE TABLE DROP: required for DROP KEYSPACE, DROP TABLE MODIFY: required for INSERT, DELETE, UPDATE, TRUNCATE SELECT: required for SELECT """ def help_create_role(self): print """ CREATE ROLE <rolename>; CREATE ROLE creates a new Cassandra role. Only superusers can issue CREATE ROLE requests. To create a superuser account use SUPERUSER option (NOSUPERUSER is the default). """ def help_drop_role(self): print """ DROP ROLE <rolename>; DROP ROLE removes an existing role. You have to be logged in as a superuser to issue a DROP ROLE statement. """ def help_list_roles(self): print """ LIST ROLES [OF [ROLE <rolename> | USER <username>] [NORECURSIVE]]; Only superusers can use the OF clause to list the roles granted to a role or user. If a superuser omits the OF clause then all the created roles will be listed. If a non-superuser calls LIST ROLES then the roles granted to that user are listed. If NORECURSIVE is provided then only directly granted roles are listed. """ def help_grant_role(self): print """ GRANT ROLE <rolename> TO [ROLE <rolename> | USER <username>] Grant the specified role to another role or user. 
You have to be logged in as superuser to issue a GRANT ROLE statement. """ def help_revoke_role(self): print """ REVOKE ROLE <rolename> FROM [ROLE <rolename> | USER <username>] Revoke the specified role from another role or user. You have to be logged in as superuser to issue a REVOKE ROLE statement. """
burzillibus/RobHome
refs/heads/master
venv/lib/python2.7/site-packages/click/parser.py
199
# -*- coding: utf-8 -*- """ click.parser ~~~~~~~~~~~~ This module started out as largely a copy paste from the stdlib's optparse module with the features removed that we do not need from optparse because we implement them in Click on a higher level (for instance type handling, help formatting and a lot more). The plan is to remove more and more from here over time. The reason this is a different module and not optparse from the stdlib is that there are differences in 2.x and 3.x about the error messages generated and optparse in the stdlib uses gettext for no good reason and might cause us issues. """ import re from collections import deque from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \ BadArgumentUsage def _unpack_args(args, nargs_spec): """Given an iterable of arguments and an iterable of nargs specifications, it returns a tuple with all the unpacked arguments at the first index and all remaining arguments as the second. The nargs specification is the number of arguments that should be consumed or `-1` to indicate that this position should eat up all the remainders. Missing items are filled with `None`. """ args = deque(args) nargs_spec = deque(nargs_spec) rv = [] spos = None def _fetch(c): try: if spos is None: return c.popleft() else: return c.pop() except IndexError: return None while nargs_spec: nargs = _fetch(nargs_spec) if nargs == 1: rv.append(_fetch(args)) elif nargs > 1: x = [_fetch(args) for _ in range(nargs)] # If we're reversed, we're pulling in the arguments in reverse, # so we need to turn them around. if spos is not None: x.reverse() rv.append(tuple(x)) elif nargs < 0: if spos is not None: raise TypeError('Cannot have two nargs < 0') spos = len(rv) rv.append(None) # spos is the position of the wildcard (star). If it's not `None`, # we fill it with the remainder. 
if spos is not None: rv[spos] = tuple(args) args = [] rv[spos + 1:] = reversed(rv[spos + 1:]) return tuple(rv), list(args) def _error_opt_args(nargs, opt): if nargs == 1: raise BadOptionUsage('%s option requires an argument' % opt) raise BadOptionUsage('%s option requires %d arguments' % (opt, nargs)) def split_opt(opt): first = opt[:1] if first.isalnum(): return '', opt if opt[1:2] == first: return opt[:2], opt[2:] return first, opt[1:] def normalize_opt(opt, ctx): if ctx is None or ctx.token_normalize_func is None: return opt prefix, opt = split_opt(opt) return prefix + ctx.token_normalize_func(opt) def split_arg_string(string): """Given an argument string this attempts to split it into small parts.""" rv = [] for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)"' r'|\S+)\s*', string, re.S): arg = match.group().strip() if arg[:1] == arg[-1:] and arg[:1] in '"\'': arg = arg[1:-1].encode('ascii', 'backslashreplace') \ .decode('unicode-escape') try: arg = type(string)(arg) except UnicodeError: pass rv.append(arg) return rv class Option(object): def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None): self._short_opts = [] self._long_opts = [] self.prefixes = set() for opt in opts: prefix, value = split_opt(opt) if not prefix: raise ValueError('Invalid start character for option (%s)' % opt) self.prefixes.add(prefix[0]) if len(prefix) == 1 and len(value) == 1: self._short_opts.append(opt) else: self._long_opts.append(opt) self.prefixes.add(prefix) if action is None: action = 'store' self.dest = dest self.action = action self.nargs = nargs self.const = const self.obj = obj @property def takes_value(self): return self.action in ('store', 'append') def process(self, value, state): if self.action == 'store': state.opts[self.dest] = value elif self.action == 'store_const': state.opts[self.dest] = self.const elif self.action == 'append': state.opts.setdefault(self.dest, []).append(value) elif self.action == 'append_const': 
state.opts.setdefault(self.dest, []).append(self.const) elif self.action == 'count': state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 else: raise ValueError('unknown action %r' % self.action) state.order.append(self.obj) class Argument(object): def __init__(self, dest, nargs=1, obj=None): self.dest = dest self.nargs = nargs self.obj = obj def process(self, value, state): if self.nargs > 1: holes = sum(1 for x in value if x is None) if holes == len(value): value = None elif holes != 0: raise BadArgumentUsage('argument %s takes %d values' % (self.dest, self.nargs)) state.opts[self.dest] = value state.order.append(self.obj) class ParsingState(object): def __init__(self, rargs): self.opts = {} self.largs = [] self.rargs = rargs self.order = [] class OptionParser(object): """The option parser is an internal class that is ultimately used to parse options and arguments. It's modelled after optparse and brings a similar but vastly simplified API. It should generally not be used directly as the high level Click classes wrap it for you. It's not nearly as extensible as optparse or argparse as it does not implement features that are implemented on a higher level (such as types or defaults). :param ctx: optionally the :class:`~click.Context` where this parser should go with. """ def __init__(self, ctx=None): #: The :class:`~click.Context` for this parser. This might be #: `None` for some advanced use cases. self.ctx = ctx #: This controls how the parser deals with interspersed arguments. #: If this is set to `False`, the parser will stop on the first #: non-option. Click uses this to implement nested subcommands #: safely. self.allow_interspersed_args = True #: This tells the parser how to deal with unknown options. By #: default it will error out (which is sensible), but there is a #: second mode where it will ignore it and continue processing #: after shifting all the unknown options into the resulting args. 
self.ignore_unknown_options = False if ctx is not None: self.allow_interspersed_args = ctx.allow_interspersed_args self.ignore_unknown_options = ctx.ignore_unknown_options self._short_opt = {} self._long_opt = {} self._opt_prefixes = set(['-', '--']) self._args = [] def add_option(self, opts, dest, action=None, nargs=1, const=None, obj=None): """Adds a new option named `dest` to the parser. The destination is not inferred (unlike with optparse) and needs to be explicitly provided. Action can be any of ``store``, ``store_const``, ``append``, ``appnd_const`` or ``count``. The `obj` can be used to identify the option in the order list that is returned from the parser. """ if obj is None: obj = dest opts = [normalize_opt(opt, self.ctx) for opt in opts] option = Option(opts, dest, action=action, nargs=nargs, const=const, obj=obj) self._opt_prefixes.update(option.prefixes) for opt in option._short_opts: self._short_opt[opt] = option for opt in option._long_opts: self._long_opt[opt] = option def add_argument(self, dest, nargs=1, obj=None): """Adds a positional argument named `dest` to the parser. The `obj` can be used to identify the option in the order list that is returned from the parser. """ if obj is None: obj = dest self._args.append(Argument(dest=dest, nargs=nargs, obj=obj)) def parse_args(self, args): """Parses positional arguments and returns ``(values, args, order)`` for the parsed options and arguments as well as the leftover arguments if there are any. The order is a list of objects as they appear on the command line. If arguments appear multiple times they will be memorized multiple times as well. 
""" state = ParsingState(args) try: self._process_args_for_options(state) self._process_args_for_args(state) except UsageError: if self.ctx is None or not self.ctx.resilient_parsing: raise return state.opts, state.largs, state.order def _process_args_for_args(self, state): pargs, args = _unpack_args(state.largs + state.rargs, [x.nargs for x in self._args]) for idx, arg in enumerate(self._args): arg.process(pargs[idx], state) state.largs = args state.rargs = [] def _process_args_for_options(self, state): while state.rargs: arg = state.rargs.pop(0) arglen = len(arg) # Double dashes always handled explicitly regardless of what # prefixes are valid. if arg == '--': return elif arg[:1] in self._opt_prefixes and arglen > 1: self._process_opts(arg, state) elif self.allow_interspersed_args: state.largs.append(arg) else: state.rargs.insert(0, arg) return # Say this is the original argument list: # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] # ^ # (we are about to process arg(i)). # # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of # [arg0, ..., arg(i-1)] (any options and their arguments will have # been removed from largs). # # The while loop will usually consume 1 or more arguments per pass. # If it consumes 1 (eg. arg is an option that takes no arguments), # then after _process_arg() is done the situation is: # # largs = subset of [arg0, ..., arg(i)] # rargs = [arg(i+1), ..., arg(N-1)] # # If allow_interspersed_args is false, largs will always be # *empty* -- still a subset of [arg0, ..., arg(i-1)], but # not a very interesting subset! def _match_long_opt(self, opt, explicit_value, state): if opt not in self._long_opt: possibilities = [word for word in self._long_opt if word.startswith(opt)] raise NoSuchOption(opt, possibilities=possibilities) option = self._long_opt[opt] if option.takes_value: # At this point it's safe to modify rargs by injecting the # explicit value, because no exception is raised in this # branch. 
This means that the inserted value will be fully # consumed. if explicit_value is not None: state.rargs.insert(0, explicit_value) nargs = option.nargs if len(state.rargs) < nargs: _error_opt_args(nargs, opt) elif nargs == 1: value = state.rargs.pop(0) else: value = tuple(state.rargs[:nargs]) del state.rargs[:nargs] elif explicit_value is not None: raise BadOptionUsage('%s option does not take a value' % opt) else: value = None option.process(value, state) def _match_short_opt(self, arg, state): stop = False i = 1 prefix = arg[0] unknown_options = [] for ch in arg[1:]: opt = normalize_opt(prefix + ch, self.ctx) option = self._short_opt.get(opt) i += 1 if not option: if self.ignore_unknown_options: unknown_options.append(ch) continue raise NoSuchOption(opt) if option.takes_value: # Any characters left in arg? Pretend they're the # next arg, and stop consuming characters of arg. if i < len(arg): state.rargs.insert(0, arg[i:]) stop = True nargs = option.nargs if len(state.rargs) < nargs: _error_opt_args(nargs, opt) elif nargs == 1: value = state.rargs.pop(0) else: value = tuple(state.rargs[:nargs]) del state.rargs[:nargs] else: value = None option.process(value, state) if stop: break # If we got any unknown options we re-combinate the string of the # remaining options and re-attach the prefix, then report that # to the state as new larg. This way there is basic combinatorics # that can be achieved while still ignoring unknown arguments. if self.ignore_unknown_options and unknown_options: state.largs.append(prefix + ''.join(unknown_options)) def _process_opts(self, arg, state): explicit_value = None # Long option handling happens in two parts. The first part is # supporting explicitly attached values. In any case, we will try # to long match the option first. 
        # --- tail of OptionParser._process_opts: long-option dispatch ---
        # Split off an explicitly attached value ("--foo=bar" -> "--foo", "bar").
        if '=' in arg:
            long_opt, explicit_value = arg.split('=', 1)
        else:
            long_opt = arg
        norm_long_opt = normalize_opt(long_opt, self.ctx)

        # At this point we will match the (assumed) long option through
        # the long option matching code.  Note that this allows options
        # like "-foo" to be matched as long options.
        try:
            self._match_long_opt(norm_long_opt, explicit_value, state)
        except NoSuchOption:
            # At this point the long option matching failed, and we need
            # to try with short options.  However there is a special rule
            # which says, that if we have a two character options prefix
            # (applies to "--foo" for instance), we do not dispatch to the
            # short option code and will instead raise the no option
            # error.
            if arg[:2] not in self._opt_prefixes:
                return self._match_short_opt(arg, state)
            if not self.ignore_unknown_options:
                # Unknown option and we are not ignoring unknowns:
                # propagate the NoSuchOption error to the caller.
                raise
            # Ignore mode: keep the unrecognized token as a leftover arg.
            state.largs.append(arg)
BaconPancakes/valor
refs/heads/master
lib/aiohttp/connector.py
17
import asyncio import functools import http.cookies import ssl import sys import traceback import warnings from collections import defaultdict from hashlib import md5, sha1, sha256 from itertools import chain from math import ceil from types import MappingProxyType import aiohttp from . import hdrs, helpers from .client import ClientRequest from .errors import (ClientOSError, ClientTimeoutError, FingerprintMismatch, HttpProxyError, ProxyConnectionError, ServerDisconnectedError) from .helpers import is_ip_address, sentinel from .resolver import DefaultResolver __all__ = ('BaseConnector', 'TCPConnector', 'ProxyConnector', 'UnixConnector') PY_343 = sys.version_info >= (3, 4, 3) HASHFUNC_BY_DIGESTLEN = { 16: md5, 20: sha1, 32: sha256, } class Connection: _source_traceback = None _transport = None def __init__(self, connector, key, request, transport, protocol, loop): self._key = key self._connector = connector self._request = request self._transport = transport self._protocol = protocol self._loop = loop self.reader = protocol.reader self.writer = protocol.writer if loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) def __repr__(self): return 'Connection<{}>'.format(self._key) def __del__(self, _warnings=warnings): if self._transport is not None: _warnings.warn('Unclosed connection {!r}'.format(self), ResourceWarning) if self._loop.is_closed(): return self._connector._release( self._key, self._request, self._transport, self._protocol, should_close=True) context = {'client_connection': self, 'message': 'Unclosed connection'} if self._source_traceback is not None: context['source_traceback'] = self._source_traceback self._loop.call_exception_handler(context) @property def loop(self): return self._loop def close(self): if self._transport is not None: self._connector._release( self._key, self._request, self._transport, self._protocol, should_close=True) self._transport = None def release(self): if self._transport is not None: 
self._connector._release( self._key, self._request, self._transport, self._protocol, should_close=False) self._transport = None def detach(self): self._transport = None @property def closed(self): return self._transport is None class BaseConnector(object): """Base connector class. conn_timeout - (optional) Connect timeout. keepalive_timeout - (optional) Keep-alive timeout. force_close - Set to True to force close and do reconnect after each request (and between redirects). limit - The limit of simultaneous connections to the same endpoint. loop - Optional event loop. """ _closed = True # prevent AttributeError in __del__ if ctor was failed _source_traceback = None def __init__(self, *, conn_timeout=None, keepalive_timeout=sentinel, force_close=False, limit=20, loop=None): if force_close: if keepalive_timeout is not None and \ keepalive_timeout is not sentinel: raise ValueError('keepalive_timeout cannot ' 'be set if force_close is True') else: if keepalive_timeout is sentinel: keepalive_timeout = 30 if loop is None: loop = asyncio.get_event_loop() self._closed = False if loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) self._conns = {} self._acquired = defaultdict(set) self._conn_timeout = conn_timeout self._keepalive_timeout = keepalive_timeout self._cleanup_handle = None self._force_close = force_close self._limit = limit self._waiters = defaultdict(list) self._loop = loop self._factory = functools.partial( aiohttp.StreamProtocol, loop=loop, disconnect_error=ServerDisconnectedError) self.cookies = http.cookies.SimpleCookie() def __del__(self, _warnings=warnings): if self._closed: return if not self._conns: return conns = [repr(c) for c in self._conns.values()] self.close() _warnings.warn("Unclosed connector {!r}".format(self), ResourceWarning) context = {'connector': self, 'connections': conns, 'message': 'Unclosed connector'} if self._source_traceback is not None: context['source_traceback'] = self._source_traceback 
self._loop.call_exception_handler(context) def __enter__(self): return self def __exit__(self, *exc): self.close() @property def force_close(self): """Ultimately close connection on releasing if True.""" return self._force_close @property def limit(self): """The limit for simultaneous connections to the same endpoint. Endpoints are the same if they are have equal (host, port, is_ssl) triple. If limit is None the connector has no limit. The default limit size is 20. """ return self._limit def _cleanup(self): """Cleanup unused transports.""" if self._cleanup_handle: self._cleanup_handle.cancel() self._cleanup_handle = None now = self._loop.time() connections = {} timeout = self._keepalive_timeout for key, conns in self._conns.items(): alive = [] for transport, proto, t0 in conns: if transport is not None: if proto and not proto.is_connected(): transport = None else: delta = t0 + self._keepalive_timeout - now if delta < 0: transport.close() transport = None elif delta < timeout: timeout = delta if transport is not None: alive.append((transport, proto, t0)) if alive: connections[key] = alive if connections: self._cleanup_handle = self._loop.call_at( ceil(now + timeout), self._cleanup) self._conns = connections def _start_cleanup_task(self): if self._cleanup_handle is None: now = self._loop.time() self._cleanup_handle = self._loop.call_at( ceil(now + self._keepalive_timeout), self._cleanup) def close(self): """Close all opened transports.""" ret = helpers.create_future(self._loop) ret.set_result(None) if self._closed: return ret self._closed = True try: if self._loop.is_closed(): return ret for key, data in self._conns.items(): for transport, proto, t0 in data: transport.close() for transport in chain(*self._acquired.values()): transport.close() if self._cleanup_handle: self._cleanup_handle.cancel() finally: self._conns.clear() self._acquired.clear() self._cleanup_handle = None return ret @property def closed(self): """Is connector closed. A readonly property. 
""" return self._closed @asyncio.coroutine def connect(self, req): """Get from pool or create new connection.""" key = (req.host, req.port, req.ssl) limit = self._limit if limit is not None: fut = helpers.create_future(self._loop) waiters = self._waiters[key] # The limit defines the maximum number of concurrent connections # for a key. Waiters must be counted against the limit, even before # the underlying connection is created. available = limit - len(waiters) - len(self._acquired[key]) # Don't wait if there are connections available. if available > 0: fut.set_result(None) # This connection will now count towards the limit. waiters.append(fut) try: if limit is not None: yield from fut transport, proto = self._get(key) if transport is None: try: if self._conn_timeout: transport, proto = yield from asyncio.wait_for( self._create_connection(req), self._conn_timeout, loop=self._loop) else: transport, proto = \ yield from self._create_connection(req) except asyncio.TimeoutError as exc: raise ClientTimeoutError( 'Connection timeout to host {0[0]}:{0[1]} ssl:{0[2]}' .format(key)) from exc except OSError as exc: raise ClientOSError( exc.errno, 'Cannot connect to host {0[0]}:{0[1]} ssl:{0[2]} [{1}]' .format(key, exc.strerror)) from exc except: self._release_waiter(key) raise self._acquired[key].add(transport) conn = Connection(self, key, req, transport, proto, self._loop) return conn def _get(self, key): try: conns = self._conns[key] except KeyError: return None, None t1 = self._loop.time() while conns: transport, proto, t0 = conns.pop() if transport is not None and proto.is_connected(): if t1 - t0 > self._keepalive_timeout: transport.close() transport = None else: if not conns: # The very last connection was reclaimed: drop the key del self._conns[key] return transport, proto # No more connections: drop the key del self._conns[key] return None, None def _release_waiter(self, key): waiters = self._waiters[key] while waiters: waiter = waiters.pop(0) if not waiter.done(): 
                # Wake exactly one pending waiter for this key, then stop.
                waiter.set_result(None)
                break

    def _release(self, key, req, transport, protocol, *, should_close=False):
        # Return a transport to the pool for reuse, or close it, and free
        # up a connection slot for `key` so a waiter can proceed.
        if self._closed:
            # acquired connection is already released on connector closing
            return

        acquired = self._acquired[key]
        try:
            acquired.remove(transport)
        except KeyError:  # pragma: no cover
            # this may be a result of non-deterministic order of object
            # finalization due to garbage collection.
            pass
        else:
            if self._limit is not None and len(acquired) < self._limit:
                self._release_waiter(key)

        resp = req.response

        if not should_close:
            if self._force_close:
                should_close = True
            elif resp is not None:
                # Honor the response's own keep-alive decision.
                should_close = resp._should_close

        reader = protocol.reader
        if should_close or (reader.output and not reader.output.at_eof()):
            # Closing was requested, or the response body was not fully
            # consumed -- this connection cannot be reused safely.
            transport.close()
        else:
            # Park the healthy connection for reuse, stamped with the
            # current time so _cleanup() can expire it later.
            conns = self._conns.get(key)
            if conns is None:
                conns = self._conns[key] = []
            conns.append((transport, protocol, self._loop.time()))
            reader.unset_parser()

            self._start_cleanup_task()

    @asyncio.coroutine
    def _create_connection(self, req):
        # Abstract hook: concrete connectors (TCP / Unix / Proxy) implement
        # the actual transport creation.
        raise NotImplementedError()


_SSL_OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)


class TCPConnector(BaseConnector):
    """TCP connector.

    verify_ssl - Set to True to check ssl certifications.
    fingerprint - Pass the binary md5, sha1, or sha256
        digest of the expected certificate in DER format to verify
        that the certificate the server presents matches. See also
        https://en.wikipedia.org/wiki/Transport_Layer_Security#Certificate_pinning
    resolve - (Deprecated) Set to True to do DNS lookup for host name.
    resolver - Enable DNS lookups and use this
        resolver
    use_dns_cache - Use memory cache for DNS lookups.
    family - socket address family
    local_addr - local tuple of (host, port) to bind socket to
    conn_timeout - (optional) Connect timeout.
    keepalive_timeout - (optional) Keep-alive timeout.
    force_close - Set to True to force close and do reconnect
        after each request (and between redirects).
    limit - The limit of simultaneous connections to the same endpoint.
    loop - Optional event loop.
""" def __init__(self, *, verify_ssl=True, fingerprint=None, resolve=sentinel, use_dns_cache=sentinel, family=0, ssl_context=None, local_addr=None, resolver=None, conn_timeout=None, keepalive_timeout=sentinel, force_close=False, limit=20, loop=None): super().__init__(conn_timeout=conn_timeout, keepalive_timeout=keepalive_timeout, force_close=force_close, limit=limit, loop=loop) if not verify_ssl and ssl_context is not None: raise ValueError( "Either disable ssl certificate validation by " "verify_ssl=False or specify ssl_context, not both.") self._verify_ssl = verify_ssl if fingerprint: digestlen = len(fingerprint) hashfunc = HASHFUNC_BY_DIGESTLEN.get(digestlen) if not hashfunc: raise ValueError('fingerprint has invalid length') self._hashfunc = hashfunc self._fingerprint = fingerprint if resolve is not sentinel: warnings.warn(("resolve parameter is deprecated, " "use use_dns_cache instead"), DeprecationWarning, stacklevel=2) if use_dns_cache is not sentinel and resolve is not sentinel: if use_dns_cache != resolve: raise ValueError("use_dns_cache must agree with resolve") _use_dns_cache = use_dns_cache elif use_dns_cache is not sentinel: _use_dns_cache = use_dns_cache elif resolve is not sentinel: _use_dns_cache = resolve else: _use_dns_cache = True if resolver is None: resolver = DefaultResolver(loop=self._loop) self._resolver = resolver self._use_dns_cache = _use_dns_cache self._cached_hosts = {} self._ssl_context = ssl_context self._family = family self._local_addr = local_addr @property def verify_ssl(self): """Do check for ssl certifications?""" return self._verify_ssl @property def fingerprint(self): """Expected ssl certificate fingerprint.""" return self._fingerprint @property def ssl_context(self): """SSLContext instance for https requests. Lazy property, creates context on demand. 
""" if self._ssl_context is None: if not self._verify_ssl: sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23) sslcontext.options |= ssl.OP_NO_SSLv2 sslcontext.options |= ssl.OP_NO_SSLv3 sslcontext.options |= _SSL_OP_NO_COMPRESSION sslcontext.set_default_verify_paths() else: sslcontext = ssl.create_default_context() self._ssl_context = sslcontext return self._ssl_context @property def family(self): """Socket family like AF_INET.""" return self._family @property def use_dns_cache(self): """True if local DNS caching is enabled.""" return self._use_dns_cache @property def cached_hosts(self): """Read-only dict of cached DNS record.""" return MappingProxyType(self._cached_hosts) def clear_dns_cache(self, host=None, port=None): """Remove specified host/port or clear all dns local cache.""" if host is not None and port is not None: self._cached_hosts.pop((host, port), None) elif host is not None or port is not None: raise ValueError("either both host and port " "or none of them are allowed") else: self._cached_hosts.clear() @property def resolve(self): """Do DNS lookup for host name?""" warnings.warn((".resolve property is deprecated, " "use .dns_cache instead"), DeprecationWarning, stacklevel=2) return self.use_dns_cache @property def resolved_hosts(self): """The dict of (host, port) -> (ipaddr, port) pairs.""" warnings.warn((".resolved_hosts property is deprecated, " "use .cached_hosts instead"), DeprecationWarning, stacklevel=2) return self.cached_hosts def clear_resolved_hosts(self, host=None, port=None): """Remove specified host/port or clear all resolve cache.""" warnings.warn((".clear_resolved_hosts() is deprecated, " "use .clear_dns_cache() instead"), DeprecationWarning, stacklevel=2) if host is not None and port is not None: self.clear_dns_cache(host, port) else: self.clear_dns_cache() @asyncio.coroutine def _resolve_host(self, host, port): if is_ip_address(host): return [{'hostname': host, 'host': host, 'port': port, 'family': self._family, 'proto': 0, 'flags': 0}] 
if self._use_dns_cache: key = (host, port) if key not in self._cached_hosts: self._cached_hosts[key] = yield from \ self._resolver.resolve(host, port, family=self._family) return self._cached_hosts[key] else: res = yield from self._resolver.resolve( host, port, family=self._family) return res @asyncio.coroutine def _create_connection(self, req): """Create connection. Has same keyword arguments as BaseEventLoop.create_connection. """ if req.proxy: transport, proto = yield from self._create_proxy_connection(req) else: transport, proto = yield from self._create_direct_connection(req) return transport, proto @asyncio.coroutine def _create_direct_connection(self, req): if req.ssl: sslcontext = self.ssl_context else: sslcontext = None hosts = yield from self._resolve_host(req.host, req.port) exc = None for hinfo in hosts: try: host = hinfo['host'] port = hinfo['port'] transp, proto = yield from self._loop.create_connection( self._factory, host, port, ssl=sslcontext, family=hinfo['family'], proto=hinfo['proto'], flags=hinfo['flags'], server_hostname=hinfo['hostname'] if sslcontext else None, local_addr=self._local_addr) has_cert = transp.get_extra_info('sslcontext') if has_cert and self._fingerprint: sock = transp.get_extra_info('socket') if not hasattr(sock, 'getpeercert'): # Workaround for asyncio 3.5.0 # Starting from 3.5.1 version # there is 'ssl_object' extra info in transport sock = transp._ssl_protocol._sslpipe.ssl_object # gives DER-encoded cert as a sequence of bytes (or None) cert = sock.getpeercert(binary_form=True) assert cert got = self._hashfunc(cert).digest() expected = self._fingerprint if got != expected: transp.close() raise FingerprintMismatch(expected, got, host, port) return transp, proto except OSError as e: exc = e else: raise ClientOSError(exc.errno, 'Can not connect to %s:%s [%s]' % (req.host, req.port, exc.strerror)) from exc @asyncio.coroutine def _create_proxy_connection(self, req): proxy_req = ClientRequest( hdrs.METH_GET, req.proxy, 
headers={hdrs.HOST: req.host}, auth=req.proxy_auth, loop=self._loop) try: # create connection to proxy server transport, proto = yield from self._create_direct_connection( proxy_req) except OSError as exc: raise ProxyConnectionError(*exc.args) from exc if not req.ssl: req.path = '{scheme}://{host}{path}'.format(scheme=req.scheme, host=req.netloc, path=req.path) if hdrs.AUTHORIZATION in proxy_req.headers: auth = proxy_req.headers[hdrs.AUTHORIZATION] del proxy_req.headers[hdrs.AUTHORIZATION] if not req.ssl: req.headers[hdrs.PROXY_AUTHORIZATION] = auth else: proxy_req.headers[hdrs.PROXY_AUTHORIZATION] = auth if req.ssl: # For HTTPS requests over HTTP proxy # we must notify proxy to tunnel connection # so we send CONNECT command: # CONNECT www.python.org:443 HTTP/1.1 # Host: www.python.org # # next we must do TLS handshake and so on # to do this we must wrap raw socket into secure one # asyncio handles this perfectly proxy_req.method = hdrs.METH_CONNECT proxy_req.path = '{}:{}'.format(req.host, req.port) key = (req.host, req.port, req.ssl) conn = Connection(self, key, proxy_req, transport, proto, self._loop) self._acquired[key].add(conn._transport) proxy_resp = proxy_req.send(conn.writer, conn.reader) try: resp = yield from proxy_resp.start(conn, True) except: proxy_resp.close() conn.close() raise else: conn.detach() if resp.status != 200: raise HttpProxyError(code=resp.status, message=resp.reason) rawsock = transport.get_extra_info('socket', default=None) if rawsock is None: raise RuntimeError( "Transport does not expose socket instance") transport.pause_reading() transport, proto = yield from self._loop.create_connection( self._factory, ssl=self.ssl_context, sock=rawsock, server_hostname=req.host) finally: proxy_resp.close() return transport, proto class ProxyConnector(TCPConnector): """Http Proxy connector. Deprecated, use ClientSession.request with proxy parameters. Is still here for backward compatibility. proxy - Proxy URL address. Only HTTP proxy supported. 
proxy_auth - (optional) Proxy HTTP Basic Auth proxy_auth - aiohttp.helpers.BasicAuth conn_timeout - (optional) Connect timeout. keepalive_timeout - (optional) Keep-alive timeout. force_close - Set to True to force close and do reconnect after each request (and between redirects). limit - The limit of simultaneous connections to the same endpoint. loop - Optional event loop. Usage: >>> conn = ProxyConnector(proxy="http://some.proxy.com") >>> session = ClientSession(connector=conn) >>> resp = yield from session.get('http://python.org') """ def __init__(self, proxy, *, proxy_auth=None, force_close=True, conn_timeout=None, keepalive_timeout=sentinel, limit=20, loop=None): warnings.warn("ProxyConnector is deprecated, use " "client.get(url, proxy=proxy_url) instead", DeprecationWarning) super().__init__(force_close=force_close, conn_timeout=conn_timeout, keepalive_timeout=keepalive_timeout, limit=limit, loop=loop) self._proxy = proxy self._proxy_auth = proxy_auth @property def proxy(self): return self._proxy @property def proxy_auth(self): return self._proxy_auth @asyncio.coroutine def _create_connection(self, req): """ Use TCPConnector _create_connection, to emulate old ProxyConnector. """ req.update_proxy(self._proxy, self._proxy_auth) transport, proto = yield from super()._create_connection(req) return transport, proto class UnixConnector(BaseConnector): """Unix socket connector. path - Unix socket path. conn_timeout - (optional) Connect timeout. keepalive_timeout - (optional) Keep-alive timeout. force_close - Set to True to force close and do reconnect after each request (and between redirects). limit - The limit of simultaneous connections to the same endpoint. loop - Optional event loop. 
Usage: >>> conn = UnixConnector(path='/path/to/socket') >>> session = ClientSession(connector=conn) >>> resp = yield from session.get('http://python.org') """ def __init__(self, path, force_close=False, conn_timeout=None, keepalive_timeout=sentinel, limit=20, loop=None): super().__init__(force_close=force_close, conn_timeout=conn_timeout, keepalive_timeout=keepalive_timeout, limit=limit, loop=loop) self._path = path @property def path(self): """Path to unix socket.""" return self._path @asyncio.coroutine def _create_connection(self, req): return (yield from self._loop.create_unix_connection( self._factory, self._path))
Ebag333/Pyfa
refs/heads/master
eos/effects/shipbonushybridtrackingatc2.py
2
# shipBonusHybridTrackingATC2 # # Used by: # Ship: Adrestia type = "passive" def handler(fit, ship, context): fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Medium Hybrid Turret"), "trackingSpeed", ship.getModifiedItemAttr("shipBonusATC2"))
3dfxmadscientist/CBSS
refs/heads/master
addons/hr_recruitment/__openerp__.py
52
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved # $Id$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Recruitment Process', 'version': '1.0', 'category': 'Human Resources', 'sequence': 25, 'summary': 'Jobs, Recruitment, Applications, Job Interviews', 'description': """ Manage job positions and the recruitment process ================================================ This application allows you to easily keep track of jobs, vacancies, applications, interviews... It is integrated with the mail gateway to automatically fetch email sent to <jobs@yourcompany.com> in the list of applications. It's also integrated with the document management system to store and search in the CV base and find the candidate that you are looking for. Similarly, it is integrated with the survey module to allow you to define interviews for different jobs. You can define the different phases of interviews and easily rate the applicant from the kanban view. 
""", 'author': 'OpenERP SA', 'website': 'http://www.openerp.com', 'images': ['images/hr_recruitment_analysis.jpeg','images/hr_recruitment_applicants.jpeg'], 'depends': [ 'base_status', 'decimal_precision', 'hr', 'survey', 'base_calendar', 'fetchmail', ], 'data': [ 'wizard/hr_recruitment_employee_hired.xml', 'wizard/hr_recruitment_create_partner_job_view.xml', 'hr_recruitment_view.xml', 'hr_recruitment_menu.xml', 'security/hr_recruitment_security.xml', 'security/ir.model.access.csv', 'report/hr_recruitment_report_view.xml', 'board_hr_recruitment_statistical_view.xml', 'hr_recruitment_installer_view.xml', 'res_config_view.xml', 'hr_recruitment_data.xml' ], 'demo': ['hr_recruitment_demo.yml'], 'test': ['test/recruitment_process.yml'], 'installable': True, 'auto_install': False, 'application': True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
ralphtheninja/cjdns
refs/heads/master
node_build/dependencies/libuv/build/gyp/test/library/gyptest-static.py
72
#!/usr/bin/env python # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies simple build of a "Hello, world!" program with static libraries, including verifying that libraries are rebuilt correctly when functions move between libraries. """ import TestGyp test = TestGyp.TestGyp() if test.format == 'android': # This test currently fails on android. Investigate why, fix the issues # responsible, and reenable this test on android. See bug: # https://code.google.com/p/gyp/issues/detail?id=436 test.skip_test(message='Test fails on android. Fix and reenable.\n') test.run_gyp('library.gyp', '-Dlibrary=static_library', '-Dmoveable_function=lib1', chdir='src') test.relocate('src', 'relocate/src') test.build('library.gyp', test.ALL, chdir='relocate/src') expect = """\ Hello from program.c Hello from lib1.c Hello from lib2.c Hello from lib1_moveable.c """ test.run_built_executable('program', chdir='relocate/src', stdout=expect) test.run_gyp('library.gyp', '-Dlibrary=static_library', '-Dmoveable_function=lib2', chdir='relocate/src') # Update program.c to force a rebuild. test.sleep() contents = test.read('relocate/src/program.c') contents = contents.replace('Hello', 'Hello again') test.write('relocate/src/program.c', contents) test.build('library.gyp', test.ALL, chdir='relocate/src') expect = """\ Hello again from program.c Hello from lib1.c Hello from lib2.c Hello from lib2_moveable.c """ test.run_built_executable('program', chdir='relocate/src', stdout=expect) test.run_gyp('library.gyp', '-Dlibrary=static_library', '-Dmoveable_function=lib1', chdir='relocate/src') # Update program.c and lib2.c to force a rebuild. test.sleep() contents = test.read('relocate/src/program.c') contents = contents.replace('again', 'again again') test.write('relocate/src/program.c', contents) # TODO(sgk): we have to force a rebuild of lib2 so that it weeds out # the "moved" module. 
This should be done in gyp by adding a dependency # on the generated .vcproj file itself. test.touch('relocate/src/lib2.c') test.build('library.gyp', test.ALL, chdir='relocate/src') expect = """\ Hello again again from program.c Hello from lib1.c Hello from lib2.c Hello from lib1_moveable.c """ test.run_built_executable('program', chdir='relocate/src', stdout=expect) test.pass_test()
Alberto-Beralix/Beralix
refs/heads/master
i386-squashfs-root/usr/share/system-config-printer/troubleshoot/CheckLocalServerPublishing.py
2
#!/usr/bin/python ## Printing troubleshooter ## Copyright (C) 2008 Red Hat, Inc. ## Copyright (C) 2008 Tim Waugh <twaugh@redhat.com> ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import cups from timedops import TimedOperation from base import * class CheckLocalServerPublishing(Question): def __init__ (self, troubleshooter): Question.__init__ (self, troubleshooter, "Is local server publishing?") vbox = self.initial_vbox (_("Server Not Exporting Printers"), _("Although one or more printers are marked " "as being shared, this print server is " "not exporting shared printers to the " "network.") + '\n\n' + _("Enable the 'Publish shared printers " "connected to this system' option in " "the server settings using the printing " "administration tool.") + ' ' + _(TEXT_start_print_admin_tool)) troubleshooter.new_page (vbox, self) def display (self): self.answers = {} cups.setServer ('') parent = self.troubleshooter.get_window () try: c = self.timedop (cups.Connection, parent=parent).run () printers = self.timedop (c.getPrinters, parent=parent).run () if len (printers) == 0: return False for name, printer in printers.iteritems (): if printer.get ('printer-is-shared', False): break attr = self.timedop (c.getPrinterAttributes, args=(name,), parent=parent).run () except RuntimeError: return False except cups.IPPError: return 
False if not printer.get ('printer-is-shared', False): return False if attr.get ('server-is-sharing-printers', True): # server-is-sharing-printers is in CUPS 1.4 return False return True def collect_answer (self): if self.displayed: return { 'local_server_exporting_printers': False } return {} def cancel_operation (self): self.op.cancel () def timedop (self, *args, **kwargs): self.op = TimedOperation (*args, **kwargs) return self.op
tschaume/gnuplot-py
refs/heads/master
gnuplot_Suites.py
2
# $Id$ # This file is provided as part of the Gnuplot.py package for the # convenience of Mac users. It was generated primarily using gensuitemodule # with Mac gnuplot 3.7.1a. Thanks to Anthony M. Ingraldi and Noboru Yamamoto # for helping with this. # file contains # # class gnuplot_Suite # class odds_and_ends # class Standard_Suite # class Miscellaneous_Events # """Suite gnuplot Suite: Events supplied by gnuplot Level 1, version 1 Generated from Alpha:Desktop Folder:gnuplot.1:gnuplot 3.7.1a AETE/AEUT resource version 1/0, language 0, script 0 """ import aetools import MacOS _code = 'GPSE' class gnuplot_Suite: def gnuexec(self, _object=None, _attributes={}, **_arguments): """exec: execute a gnuplot command Required argument: gnuplot command Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'GPSE' _subcode = 'exec' if _arguments: raise TypeError, 'No optional args expected' _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] def plot(self, _object=None, _attributes={}, **_arguments): """plot: create a 2-D plot Required argument: data to be plotted Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'GPLT' _subcode = 'plot' if _arguments: raise TypeError, 'No optional args expected' _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] def splot(self, _object=None, _attributes={}, **_arguments): """splot: create a 3-D plot Required argument: data to be plotted Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'GPLT' _subcode = 'splt' if 
_arguments: raise TypeError, 'No optional args expected' _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] class graph(aetools.ComponentItem): """graph - graph - a subclass of window""" want = 'cGRF' class picture(aetools.NProperty): """picture - gnuplot graph in "PICT" format""" which = 'PICT' want = 'PICT' graph._propdict = { 'picture' : picture, } graph._elemdict = { } _Enum_lyty = { 'line' : 'typ1', # line 'points' : 'typ2', # points 'impulses' : 'typ3', # impulses 'linespoints' : 'typ4', # linespoints 'dots' : 'typ5', # dots 'steps' : 'typ6', # steps 'fsteps' : 'typ7', # fsteps 'errorbars' : 'typ8', # errorbars 'xerrorbars' : 'typ9', # xerrorbars 'yerrorbars' : 'ty10', # yerrorbars 'xyerrorbars' : 'ty11', # xyerrorbars 'boxes' : 'ty12', # boxes 'boxerrorbars' : 'ty13', # boxerrorbars 'boxxyerrorbars' : 'ty14', # boxxyerrorbars 'vector' : 'ty19', # vector } # # Indices of types declared in this module # _classdeclarations = { 'cGRF' : graph, } _propdeclarations = { 'PICT' : picture, } _compdeclarations = { } _enumdeclarations = { 'lyty' : _Enum_lyty, } """Suite odds and ends: Things that should be in some standard suite, but arenÕt Level 1, version 1 Generated from Alpha:Desktop Folder:gnuplot.1:gnuplot 3.7.1a AETE/AEUT resource version 1/0, language 0, script 0 """ import aetools import MacOS _code = 'Odds' class odds_and_ends: def select(self, _object=None, _attributes={}, **_arguments): """select: Select the specified object Required argument: the object to select Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'misc' _subcode = 'slct' if _arguments: raise TypeError, 'No optional args expected' _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, 
_attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] # # Indices of types declared in this module # _classdeclarations = { } _propdeclarations = { } _compdeclarations = { } _enumdeclarations = { } """Suite Standard Suite: Common terms for most applications Level 1, version 1 Generated from Alpha:Desktop Folder:gnuplot.1:gnuplot 3.7.1a AETE/AEUT resource version 1/0, language 0, script 0 """ import aetools import MacOS _code = 'CoRe' class Standard_Suite: _argmap_close = { 'saving' : 'savo', '_in' : 'kfil', } def close(self, _object, _attributes={}, **_arguments): """close: Close an object Required argument: the objects to close Keyword argument saving: specifies whether or not changes should be saved before closing Keyword argument _in: the file in which to save the object Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'core' _subcode = 'clos' aetools.keysubst(_arguments, self._argmap_close) _arguments['----'] = _object aetools.enumsubst(_arguments, 'savo', _Enum_savo) _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] def data_size(self, _object, _attributes={}, **_arguments): """data size: Return the size in bytes of an object Required argument: the object whose data size is to be returned Keyword argument _attributes: AppleEvent attribute dictionary Returns: the size of the object in bytes """ _code = 'core' _subcode = 'dsiz' if _arguments: raise TypeError, 'No optional args expected' _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally 
decode result if _arguments.has_key('----'): return _arguments['----'] def get(self, _object, _attributes={}, **_arguments): """get: Get the data for an object Required argument: the object whose data is to be returned Keyword argument _attributes: AppleEvent attribute dictionary Returns: The data from the object """ _code = 'core' _subcode = 'getd' if _arguments: raise TypeError, 'No optional args expected' _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] _argmap_make = { 'new' : 'kocl', 'at' : 'insh', 'with_data' : 'data', 'with_properties' : 'prdt', } def make(self, _no_object=None, _attributes={}, **_arguments): """make: Make a new element Keyword argument new: the class of the new element Keyword argument at: the location at which to insert the element Keyword argument with_data: the initial data for the element Keyword argument with_properties: the initial values for the properties of the element Keyword argument _attributes: AppleEvent attribute dictionary Returns: Object specifier for the new element """ _code = 'core' _subcode = 'crel' aetools.keysubst(_arguments, self._argmap_make) if _no_object != None: raise TypeError, 'No direct arg expected' _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] def open(self, _object, _attributes={}, **_arguments): """open: Open the specified object(s) Required argument: Objects to open. Can be a list of files or an object specifier. 
Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'aevt' _subcode = 'odoc' if _arguments: raise TypeError, 'No optional args expected' _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] def _print(self, _object, _attributes={}, **_arguments): """print: Print the specified object(s) Required argument: Objects to print. Can be a list of files or an object specifier. Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'aevt' _subcode = 'pdoc' if _arguments: raise TypeError, 'No optional args expected' _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] _argmap_save = { '_in' : 'kfil', 'as' : 'fltp', } def save(self, _object, _attributes={}, **_arguments): """save: save a set of objects Required argument: Objects to save. 
Keyword argument _in: the file in which to save the object(s) Keyword argument as: the file type of the document in which to save the data Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'core' _subcode = 'save' aetools.keysubst(_arguments, self._argmap_save) _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] _argmap_set = { 'to' : 'data', } def set(self, _object, _attributes={}, **_arguments): """set: Set an objectÕs data Required argument: the object to change Keyword argument to: the new value Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'core' _subcode = 'setd' aetools.keysubst(_arguments, self._argmap_set) _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] class application(aetools.ComponentItem): """application - An application program""" want = 'capp' # element 'cwin' as ['indx', 'name', 'rele'] # element 'docu' as ['name'] class window(aetools.ComponentItem): """window - A Window""" want = 'cwin' class bounds(aetools.NProperty): """bounds - the boundary rectangle for the window""" which = 'pbnd' want = 'qdrt' class closeable(aetools.NProperty): """closeable - Does the window have a close box?""" which = 'hclb' want = 'bool' class titled(aetools.NProperty): """titled - Does the window have a title bar?""" which = 'ptit' want = 'bool' class index(aetools.NProperty): """index - the number of the window""" which = 'pidx' want = 'long' class floating(aetools.NProperty): """floating - Does the window float?""" which = 'isfl' want = 'bool' class 
modal(aetools.NProperty): """modal - Is the window modal?""" which = 'pmod' want = 'bool' class resizable(aetools.NProperty): """resizable - Is the window resizable?""" which = 'prsz' want = 'bool' class zoomable(aetools.NProperty): """zoomable - Is the window zoomable?""" which = 'iszm' want = 'bool' class zoomed(aetools.NProperty): """zoomed - Is the window zoomed?""" which = 'pzum' want = 'bool' class name(aetools.NProperty): """name - the title of the window""" which = 'pnam' want = 'itxt' class visible(aetools.NProperty): """visible - is the window visible?""" which = 'pvis' want = 'bool' class position(aetools.NProperty): """position - upper left coordinates of window""" which = 'ppos' want = 'QDpt' class document(aetools.ComponentItem): """document - A Document""" want = 'docu' # repeated property name the title of the document class modified(aetools.NProperty): """modified - Has the document been modified since the last save?""" which = 'imod' want = 'bool' application._propdict = { } application._elemdict = { 'window' : window, 'document' : document, } window._propdict = { 'bounds' : bounds, 'closeable' : closeable, 'titled' : titled, 'index' : index, 'floating' : floating, 'modal' : modal, 'resizable' : resizable, 'zoomable' : zoomable, 'zoomed' : zoomed, 'name' : name, 'visible' : visible, 'position' : position, } window._elemdict = { } document._propdict = { 'name' : name, 'modified' : modified, } document._elemdict = { } _Enum_savo = { 'yes' : 'yes ', # Save objects now 'no' : 'no ', # Do not save objects 'ask' : 'ask ', # Ask the user whether to save } # # Indices of types declared in this module # _classdeclarations = { 'cwin' : window, 'docu' : document, 'capp' : application, } _propdeclarations = { 'ptit' : titled, 'pidx' : index, 'ppos' : position, 'pnam' : name, 'pbnd' : bounds, 'imod' : modified, 'isfl' : floating, 'hclb' : closeable, 'iszm' : zoomable, 'pmod' : modal, 'pzum' : zoomed, 'pvis' : visible, 'prsz' : resizable, } _compdeclarations = 
{ } _enumdeclarations = { 'savo' : _Enum_savo, } """Suite Miscellaneous Events: Useful events that arenÕt in any other suite Level 1, version 1 Generated from Alpha:Desktop Folder:gnuplot.1:gnuplot 3.7.1a AETE/AEUT resource version 1/0, language 0, script 0 """ import aetools import MacOS _code = 'misc' class Miscellaneous_Events: def revert(self, _object, _attributes={}, **_arguments): """revert: Revert an object to the most recently saved version Required argument: object to revert Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'misc' _subcode = 'rvrt' if _arguments: raise TypeError, 'No optional args expected' _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] def do_script(self, _object=None, _attributes={}, **_arguments): """do script: execute a gnuplot script Required argument: a gnuplot script Keyword argument _attributes: AppleEvent attribute dictionary """ _code = 'misc' _subcode = 'dosc' if _arguments: raise TypeError, 'No optional args expected' _arguments['----'] = _object _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes) if _arguments.has_key('errn'): raise aetools.Error, aetools.decodeerror(_arguments) # XXXX Optionally decode result if _arguments.has_key('----'): return _arguments['----'] # # Indices of types declared in this module # _classdeclarations = { } _propdeclarations = { } _compdeclarations = { } _enumdeclarations = { }
agendaTCC/AgendaTCC
refs/heads/master
tccweb/apps/emails/sendmail.py
1
from django.db.models import Q from ast import literal_eval import re from django.contrib.auth import get_user_model as User from disciplinas.models import Disciplina from semestre.models import Semestre from projetos.models import ProjetoDeGraduacao from departamentos.models import Departamento def alunosMatriculadosAtual(semestre): semestre = semestre.filter(atual = True) disciplinas = Disciplina.objects.filter(semestre = semestre) query = Q(aluno_disciplina__in=disciplinas) return query def docentesResponsaveisAtual(semestre): semestre = semestre.filter(atual = True) disciplinas = Disciplina.objects.filter(semestre = semestre) query = Q(professor_disciplina__in = disciplinas) return query def alunosMatriculados(semestre): disciplinas = Disciplina.objects.filter(semestre = semestre) query = Q(aluno_disciplina__in=disciplinas) return query def docentesResponsaveis(semestre): disciplinas = Disciplina.objects.filter(semestre = semestre) query = Q(professor_disciplina__in = disciplinas) return query def alunosInscritosAtual(semestre): semestre = semestre.filter(atual = True) disciplina = Disciplina.objects.filter(semestre=semestre) projetos = ProjetoDeGraduacao.objects.filter(disciplina__in = disciplina) query = Q(projeto_aluno__in = projetos) return query QUERYLIST = { '1':['Enviar e-mail para alunos Matriculados no semestre atual',alunosMatriculadosAtual], '2':['Enviar e-mail para docentes Responsaveis do semestre atual',docentesResponsaveisAtual], '3':['Enviar e-mail para alunos Matriculados(Todos)',alunosMatriculados], '4':['Enviar e-mail para docentes Responsaveis(Todos)',docentesResponsaveis], '5':['Enviar e-mail para alunos inscritos (projeto submetido)',alunosInscritosAtual] } def options(): list =[] for key,option in QUERYLIST.iteritems(): list.append((key,option[0])) OPCOES = tuple(list) return OPCOES def send_emails(options,departamento, assunto = None,corpo = None): if options != []: options = literal_eval(options) query = Q() semestre = 
Semestre.objects.filter(grupo = departamento) for item in options: f = QUERYLIST[item][1] query.add(f(semestre), query.OR) usuarios = User().objects.filter(query) dispatch(usuarios,assunto, corpo) def dispatch(usuarios, assunto, corpo): sended = [] if usuarios: for item in usuarios: if not item in sended: item.email_user(assunto, corpo) sended.append(item) def compose_body(dict,body_text): if dict and body_text: string = body_text for key,value in dict.iteritems(): string = re.sub(r"\{\{\s*\b"+key+r"\b\s*\}\}",value ,string) return string else: return None
CJ8664/servo
refs/heads/master
tests/wpt/web-platform-tests/html/semantics/forms/form-submission-0/form-echo.py
80
def main(request, response): bytes = bytearray(request.raw_input.read()) bytes_string = " ".join("%02x" % b for b in bytes) return ( [("Content-Type", "text/plain")], bytes_string )
zhuwenping/python-for-android
refs/heads/master
python3-alpha/python3-src/Lib/test/crashers/gc_inspection.py
195
""" gc.get_referrers() can be used to see objects before they are fully built. Note that this is only an example. There are many ways to crash Python by using gc.get_referrers(), as well as many extension modules (even when they are using perfectly documented patterns to build objects). Identifying and removing all places that expose to the GC a partially-built object is a long-term project. A patch was proposed on SF specifically for this example but I consider fixing just this single example a bit pointless (#1517042). A fix would include a whole-scale code review, possibly with an API change to decouple object creation and GC registration, and according fixes to the documentation for extension module writers. It's unlikely to happen, though. So this is currently classified as "gc.get_referrers() is dangerous, use only for debugging". """ import gc def g(): marker = object() yield marker # now the marker is in the tuple being constructed [tup] = [x for x in gc.get_referrers(marker) if type(x) is tuple] print(tup) print(tup[1]) tuple(g())
halberom/ansible
refs/heads/devel
lib/ansible/executor/__init__.py
7690
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type
youprofit/ansible
refs/heads/devel
test/units/module_utils/__init__.py
7690
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type
hlin117/scikit-learn
refs/heads/master
examples/linear_model/plot_theilsen.py
100
""" ==================== Theil-Sen Regression ==================== Computes a Theil-Sen Regression on a synthetic dataset. See :ref:`theil_sen_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the Theil-Sen estimator is robust against outliers. It has a breakdown point of about 29.3% in case of a simple linear regression which means that it can tolerate arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional case. The estimation of the model is done by calculating the slopes and intercepts of a subpopulation of all possible combinations of p subsample points. If an intercept is fitted, p must be greater than or equal to n_features + 1. The final slope and intercept is then defined as the spatial median of these slopes and intercepts. In certain cases Theil-Sen performs better than :ref:`RANSAC <ransac_regression>` which is also a robust method. This is illustrated in the second example below where outliers with respect to the x-axis perturb RANSAC. Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in general a priori knowledge about the data and the nature of the outliers is needed. Due to the computational complexity of Theil-Sen it is recommended to use it only for small problems in terms of number of samples and features. For larger problems the ``max_subpopulation`` parameter restricts the magnitude of all possible combinations of p subsample points to a randomly chosen subset and therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger problems with the drawback of losing some of its mathematical properties since it then works on a random subset. 
""" # Author: Florian Wilhelm -- <florian.wilhelm@gmail.com> # License: BSD 3 clause import time import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression, TheilSenRegressor from sklearn.linear_model import RANSACRegressor print(__doc__) estimators = [('OLS', LinearRegression()), ('Theil-Sen', TheilSenRegressor(random_state=42)), ('RANSAC', RANSACRegressor(random_state=42)), ] colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen'} lw = 2 ############################################################################## # Outliers only in the y direction np.random.seed(0) n_samples = 200 # Linear model y = 3*x + N(2, 0.1**2) x = np.random.randn(n_samples) w = 3. c = 2. noise = 0.1 * np.random.randn(n_samples) y = w * x + c + noise # 10% outliers y[-20:] += -20 * x[-20:] X = x[:, np.newaxis] plt.scatter(x, y, color='indigo', marker='x', s=40) line_x = np.array([-3, 3]) for name, estimator in estimators: t0 = time.time() estimator.fit(X, y) elapsed_time = time.time() - t0 y_pred = estimator.predict(line_x.reshape(2, 1)) plt.plot(line_x, y_pred, color=colors[name], linewidth=lw, label='%s (fit time: %.2fs)' % (name, elapsed_time)) plt.axis('tight') plt.legend(loc='upper left') plt.title("Corrupt y") ############################################################################## # Outliers in the X direction np.random.seed(0) # Linear model y = 3*x + N(2, 0.1**2) x = np.random.randn(n_samples) noise = 0.1 * np.random.randn(n_samples) y = 3 * x + 2 + noise # 10% outliers x[-20:] = 9.9 y[-20:] += 22 X = x[:, np.newaxis] plt.figure() plt.scatter(x, y, color='indigo', marker='x', s=40) line_x = np.array([-3, 10]) for name, estimator in estimators: t0 = time.time() estimator.fit(X, y) elapsed_time = time.time() - t0 y_pred = estimator.predict(line_x.reshape(2, 1)) plt.plot(line_x, y_pred, color=colors[name], linewidth=lw, label='%s (fit time: %.2fs)' % (name, elapsed_time)) plt.axis('tight') plt.legend(loc='upper 
left') plt.title("Corrupt x") plt.show()
AndrewGrossman/fuzzy-octo-batman
refs/heads/master
src/accounts/forms.py
69
from __future__ import unicode_literals from django.contrib.auth.forms import AuthenticationForm from django import forms from crispy_forms.helper import FormHelper from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions from authtools import forms as authtoolsforms from django.contrib.auth import forms as authforms from django.core.urlresolvers import reverse class LoginForm(AuthenticationForm): remember_me = forms.BooleanField(required=False, initial=False) def __init__(self, *args, **kwargs): super(LoginForm, self).__init__(*args, **kwargs) self.helper = FormHelper() self.fields["username"].widget.input_type = "email" # ugly hack self.helper.layout = Layout( Field('username', placeholder="Enter Email", autofocus=""), Field('password', placeholder="Enter Password"), HTML('<a href="{}">Forgot Password?</a>'.format( reverse("accounts:password-reset"))), Field('remember_me'), Submit('sign_in', 'Log in', css_class="btn btn-lg btn-primary btn-block"), ) class SignupForm(authtoolsforms.UserCreationForm): def __init__(self, *args, **kwargs): super(SignupForm, self).__init__(*args, **kwargs) self.helper = FormHelper() self.fields["email"].widget.input_type = "email" # ugly hack self.helper.layout = Layout( Field('email', placeholder="Enter Email", autofocus=""), Field('name', placeholder="Enter Full Name"), Field('password1', placeholder="Enter Password"), Field('password2', placeholder="Re-enter Password"), Submit('sign_up', 'Sign up', css_class="btn-warning"), ) class PasswordChangeForm(authforms.PasswordChangeForm): def __init__(self, *args, **kwargs): super(PasswordChangeForm, self).__init__(*args, **kwargs) self.helper = FormHelper() self.helper.layout = Layout( Field('old_password', placeholder="Enter old password", autofocus=""), Field('new_password1', placeholder="Enter new password"), Field('new_password2', placeholder="Enter new password (again)"), 
Submit('pass_change', 'Change Password', css_class="btn-warning"), ) class PasswordResetForm(authtoolsforms.FriendlyPasswordResetForm): def __init__(self, *args, **kwargs): super(PasswordResetForm, self).__init__(*args, **kwargs) self.helper = FormHelper() self.helper.layout = Layout( Field('email', placeholder="Enter email", autofocus=""), Submit('pass_reset', 'Reset Password', css_class="btn-warning"), ) class SetPasswordForm(authforms.SetPasswordForm): def __init__(self, *args, **kwargs): super(SetPasswordForm, self).__init__(*args, **kwargs) self.helper = FormHelper() self.helper.layout = Layout( Field('new_password1', placeholder="Enter new password", autofocus=""), Field('new_password2', placeholder="Enter new password (again)"), Submit('pass_change', 'Change Password', css_class="btn-warning"), )
Sotera/Datawake-Legacy
refs/heads/master
memex-datawake-stream/src/datawakeio/local_entity_data_connector.py
4
""" Copyright 2014 Sotera Defense Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import mysql.connector from datawakeio.data_connector import ExtractedDataConnector class MySqlEntityDataConnector(ExtractedDataConnector): """Provides connection to local mysql database for extracted entity data""" def __init__(self, config): """ :param config: database connection info and table names { user:..., database:..., password:..., host:...., } :return: a new EntityDataConnector for a mysql database """ ExtractedDataConnector.__init__(self) self.config = config self.cnx = None def open(self): """ Open a new database connection. """ self.close() user = self.config['user'] db = self.config['database'] pw = self.config['password'] host = self.config['host'] self.cnx = mysql.connector.connect(user=user, password=pw, host=host, database=db) def close(self): """Close any existing database connection. 
""" if self.cnx is not None: try: self.cnx.close() except: pass finally: self.cnx = None def _checkConn(self): """Open a new database conneciton if one does not currently exist.""" if self.cnx is None: self.open() def insert_entities(self, url, entity_type, entity_values): self._checkConn() cursor = self.cnx.cursor() try: for entity_value in entity_values: sql = "select count(1) from general_extractor_web_index where url = %s and entity_type = %s and entity_value = %s" params = [url,entity_type,entity_value] cursor.execute(sql,params) count = cursor.fetchall()[0][0] if count == 0: sql = "INSERT INTO general_extractor_web_index (url,entity_type,entity_value) VALUES (%s,%s,%s)" cursor.execute(sql,params) self.cnx.commit() cursor.close() except: self.close() raise def insert_domain_entities(self, domain,url, entity_type, entity_values): self._checkConn() cursor = self.cnx.cursor() try: for entity_value in entity_values: sql = "select count(1) from domain_extractor_web_index where domain = %s and url = %s and entity_type = %s and entity_value = %s" params = [domain,url,entity_type,entity_value] cursor.execute(sql,params) count = cursor.fetchall()[0][0] if count == 0: sql = "INSERT INTO domain_extractor_web_index (domain,url,entity_type,entity_value) VALUES (%s,%s,%s,%s)" cursor.execute(sql,params) self.cnx.commit() cursor.close() except: self.close() raise # # DOMAINS #### def get_domain_entity_matches(self, domain, type, values): self._checkConn() cursor = self.cnx.cursor() sql = "" params = [] max = len(values) - 1 for i in range(len(values)): params.append(domain + '\0' + type + '\0' + values[i]) sql = sql + "select rowkey from datawake_domain_entities where rowkey = %s" if i < max: sql = sql + " union all " try: cursor.execute(sql, params) rows = cursor.fetchall() cursor.close() return map(lambda x: x[0].split('\0')[2], rows) except: self.close() raise
hongliang5623/sentry
refs/heads/master
src/sentry/management/commands/celerybeat.py
29
""" Start the celery clock service from the Django management command. """ from __future__ import absolute_import, unicode_literals from celery.bin import beat from sentry.celery import app from sentry.queue.command import CeleryCommand beat = beat.beat(app=app) # this is a reimplementation of the djcelery 'celerybeat' command class Command(CeleryCommand): """Run the celery periodic task scheduler.""" options = (CeleryCommand.options + beat.get_options() + beat.preload_options) help = 'Old alias to the "celery beat" command.' def handle(self, *args, **options): beat.run(*args, **options)
modelrockettier/wget
refs/heads/master
testenv/conf/expected_files.py
6
from difflib import unified_diff import os import sys from conf import hook from exc.test_failed import TestFailed """ Post-Test Hook: ExpectedFiles This is a Post-Test hook that checks the test directory for the files it contains. A dictionary object is passed to it, which contains a mapping of filenames and contents of all the files that the directory is expected to contain. Raises a TestFailed exception if the expected files are not found or if extra files are found, else returns gracefully. """ @hook() class ExpectedFiles: def __init__(self, expected_fs): self.expected_fs = expected_fs @staticmethod def gen_local_fs_snapshot(): snapshot = {} for parent, dirs, files in os.walk('.'): for name in files: # pubring.kbx will be created by libgpgme if $HOME doesn't contain the .gnupg directory. # setting $HOME to CWD (in base_test.py) breaks two Metalink tests, so we skip this file here. if name == 'pubring.kbx': continue f = {'content': ''} file_path = os.path.join(parent, name) with open(file_path) as fp: f['content'] = fp.read() snapshot[file_path[2:]] = f return snapshot def __call__(self, test_obj): local_fs = self.gen_local_fs_snapshot() for file in self.expected_fs: if file.name in local_fs: local_file = local_fs.pop(file.name) formatted_content = test_obj._replace_substring(file.content) if formatted_content != local_file['content']: for line in unified_diff(local_file['content'], formatted_content, fromfile='Actual', tofile='Expected'): print(line, file=sys.stderr) raise TestFailed('Contents of %s do not match' % file.name) else: raise TestFailed('Expected file %s not found.' % file.name) if local_fs: print(local_fs) raise TestFailed('Extra files downloaded.')
mixturemodel-flow/tensorflow
refs/heads/master
tensorflow/contrib/learn/python/learn/datasets/load_csv_test.py
137
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.learn.python.learn import datasets from tensorflow.python.platform import test class LoadCsvTest(test.TestCase): """Test load csv functions.""" def testIris(self): iris = datasets.load_iris() self.assertTupleEqual(iris.data.shape, (150, 4)) self.assertTupleEqual(iris.target.shape, (150,)) def testBoston(self): boston = datasets.load_boston() self.assertTupleEqual(boston.data.shape, (506, 13)) self.assertTupleEqual(boston.target.shape, (506,)) if __name__ == "__main__": test.main()
fengbaicanhe/intellij-community
refs/heads/master
python/helpers/pydev/tests_python/_debugger_case19.py
100
class A: def __init__(self): self.__var = 10 if __name__ == '__main__': a = A() print(a._A__var) # Evaluate 'a.__var' should give a._A__var_ print('TEST SUCEEDED')
Genomon-Project/Genomon
refs/heads/devel
scripts/genomon_pipeline/dna_resource/pre_pmsignature.py
2
#! /usr/bin/env python from genomon_pipeline.stage_task import * class Res_PrePmsignature(Stage_task): task_name = "pre_pmsignature" script_template = """ #!/bin/bash # # Set SGE # #$ -S /bin/bash # set shell in UGE #$ -cwd # execute at the submitted dir pwd # print current working directory hostname # print hostname date # print date set -xv set -eu set -o pipefail if [ -e {output_file}.tmp ]; then rm {output_file}.tmp fi for input_file in {input_files} do if [ `cat $input_file | grep -P '^[^#]' | wc -l` -gt 1 ]; then cut -s -f 1,2,3,5,6 $input_file | tail -n +2 | grep -P '^[^\t]+\t([Cc]hr)?[0-9XY]' > {output_file}.cut cut -s -f 1 {output_file}.cut > {output_file}.cut1 cut -s -f 2 {output_file}.cut | sed "s/^/chr/" | sed -e "s/^chr[Cc]hr/chr/g" > {output_file}.cut2 cut -s -f 3,4,5 {output_file}.cut > {output_file}.cut3 paste {output_file}.cut1 {output_file}.cut2 {output_file}.cut3 >> {output_file}.tmp rm {output_file}.cut {output_file}.cut1 {output_file}.cut2 {output_file}.cut3 fi done if [ -e {output_file}.tmp ]; then mv {output_file}.tmp {output_file} else touch {output_file} fi """ def __init__(self, qsub_option, script_dir): super(Res_PrePmsignature, self).__init__(qsub_option, script_dir)
lgscofield/odoo
refs/heads/8.0
addons/account_budget/tests/test_theoreticalamount.py
229
# -*- coding: utf-8 -*- from datetime import datetime from mock import patch from openerp.tests.common import TransactionCase from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT # --------------------------------------------------------- # Tests # --------------------------------------------------------- class TestTheoreticalAmount(TransactionCase): def setUp(self): super(TestTheoreticalAmount, self).setUp() cr, uid, = self.cr, self.uid crossovered_budget_id = self.registry('crossovered.budget').create(cr, uid, { 'name': 'test budget name', 'code': 'test budget code', 'date_from': '2014-01-01', 'date_to': '2014-12-31', }) crossovered_budget_line_obj = self.registry('crossovered.budget.lines') crossovered_budget_line_id = crossovered_budget_line_obj.create(cr, uid, { 'crossovered_budget_id': crossovered_budget_id, 'general_budget_id': self.ref('account_budget.account_budget_post_sales0'), 'date_from': '2014-01-01', 'date_to': '2014-12-31', 'planned_amount': -364, }) self.line = crossovered_budget_line_obj.browse(cr, uid, crossovered_budget_line_id) self.patcher = patch('openerp.addons.account_budget.account_budget.datetime', wraps=datetime) self.mock_datetime = self.patcher.start() def test_01(self): """Start""" date = datetime.strptime('2014-01-01 00:00:00', DEFAULT_SERVER_DATETIME_FORMAT) self.mock_datetime.now.return_value = date self.assertEqual(self.line.theoritical_amount, 0) def test_02(self): """After 24 hours""" date = datetime.strptime('2014-01-02 00:00:00', DEFAULT_SERVER_DATETIME_FORMAT) self.mock_datetime.now.return_value = date self.assertEqual(self.line.theoritical_amount, -1) def test_03(self): """After 36 hours""" date = datetime.strptime('2014-01-02 12:00:00', DEFAULT_SERVER_DATETIME_FORMAT) self.mock_datetime.now.return_value = date self.assertEqual(self.line.theoritical_amount, -1.5) def test_04(self): """After 48 hours""" date = datetime.strptime('2014-01-03 00:00:00', DEFAULT_SERVER_DATETIME_FORMAT) self.mock_datetime.now.return_value = 
date self.assertEqual(self.line.theoritical_amount, -2) def test_05(self): """After 10 days""" date = datetime.strptime('2014-01-11 00:00:00', DEFAULT_SERVER_DATETIME_FORMAT) self.mock_datetime.now.return_value = date self.assertEqual(self.line.theoritical_amount, -10) def test_06(self): """After 50 days""" date = datetime.strptime('2014-02-20 00:00:00', DEFAULT_SERVER_DATETIME_FORMAT) self.mock_datetime.now.return_value = date self.assertEqual(self.line.theoritical_amount, -50) def test_07(self): """After 182 days, exactly half of the budget line""" date = datetime.strptime('2014-07-02 00:00:00', DEFAULT_SERVER_DATETIME_FORMAT) self.mock_datetime.now.return_value = date self.assertEqual(self.line.theoritical_amount, -182) def test_08(self): """After 308 days at noon""" date = datetime.strptime('2014-11-05 12:00:00', DEFAULT_SERVER_DATETIME_FORMAT) # remember, remember self.mock_datetime.now.return_value = date self.assertEqual(self.line.theoritical_amount, -308.5) def test_09(self): """One day before""" date = datetime.strptime('2014-12-30 00:00:00', DEFAULT_SERVER_DATETIME_FORMAT) self.mock_datetime.now.return_value = date self.assertEqual(self.line.theoritical_amount, -363) def test_10(self): """At last""" date = datetime.strptime('2014-12-31 00:00:00', DEFAULT_SERVER_DATETIME_FORMAT) self.mock_datetime.now.return_value = date self.assertEqual(self.line.theoritical_amount, -364) def tearDown(self): self.patcher.stop() super(TestTheoreticalAmount, self).tearDown()
vmiklos/vmexam
refs/heads/master
python/irssi/chanstat.py
1
import irssi, time, os ignore = [] statfile = os.path.join(os.environ['HOME'], ".irssi", "chanstat") def cmd_chanstat(data, server, witem): def chancmp(a, b): return cmp(a[1], b[1]) prevdate = None datelimit = None datestr = "" if data == "yesterday": datestr = " previous" prevdate = 86400 datelimit = 86400 nick = irssi.active_server().nick current = time.localtime() fro = int(time.mktime(current) - current.tm_hour*3600 - current.tm_min*60 - current.tm_sec) if prevdate: fro -= prevdate to = None if datelimit: to = fro + datelimit sock = open(statfile) lines = [] for i in sock.readlines(): items = i.strip().split(' ') if int(items[0]) > fro and items[2] not in ignore and items[2][0] == "#": if to: if int(items[0]) < to: lines.append(items) else: lines.append(items) sock.close() total = len(lines) chans = {} for i in lines: chan = i[2] if chan not in chans.keys(): chans[chan] = 1 else: chans[chan] += 1 sorted = [] for k, v in chans.items(): sorted.append([k, v]) sorted.sort(chancmp, reverse=True) if nick[-1] == 's': s = "'" else: s = "'s" dstr = "%s%s day: %s" % (s, datestr, " ".join(["%s [%sm]" % (i, j) for i, j in sorted])) if witem: witem.command("/me %s" % dstr) else: print "* %s %s" % (nick, dstr) def timer(): win = irssi.active_win() if win.active and win.active_server: sock = open(statfile, "a") sock.write("%s%s %s\n" % (int(time.time()), win.active_server.tag, win.active.name)) sock.close() irssi.timeout_add(1000*60, timer) timer() irssi.command_bind('chanstat', cmd_chanstat)
albertjiang/gambit_agg_old
refs/heads/master_gamerep_agg
contrib/scripts/build/minint.py
2
# # A Python script to test the Gambit Python wrapper by building # the minimum-unique-integer game # import gambit, sys K = int(sys.argv[2]) N = int(sys.argv[1]) nfg = gambit.NewTable([K for i in range(N)]) # Pre-create outcomes. Outcome 'i' is the outcome where player 'i' wins. for pl in xrange(1, nfg.NumPlayers() + 1): outcome = nfg.NewOutcome() outcome.SetLabel('%d wins' % pl) outcome.SetPayoff(pl, "1") for profile in gambit.StrategyIterator(gambit.StrategySupport(nfg)): choices = [ profile.GetStrategy(pl).GetNumber() for pl in range(1, nfg.NumPlayers() + 1) ] for ch in range(1, K+1): if len([x for x in choices if x == ch]) == 1: # We have a winner profile.SetOutcome(nfg.GetOutcome(choices.index(ch) + 1)) break # If we don't have a winner, leave outcome null, payoffs zero print nfg.AsNfgFile()
CMPUT404W16/social-dist
refs/heads/master
fbook/api/postAPI.py
1
from .api import api from ..db import db from .. models import Post, User, Comment, RemoteUser, Image_Posts from flask_restful import Resource, reqparse from flask.ext.login import current_user from flask import request, abort from bauth import auth from apiHelper import ApiHelper import json import dateutil.parser import datetime helper = ApiHelper() class BasePostAPI(Resource): decorators = [auth.login_required] BASE_URL = "" API_URL = "" def generate_post_response(self, page=0, page_size=50): response = {"query": "posts", "size": page_size} posts = self.posts.order_by(Post.timestamp.desc()).paginate(page+1, page_size) response['count'] = posts.total total_page = posts.total / page_size # check whether the page is the first page if page != 0: response['previous'] = self.API_URL + "?page=%s&size=%s" % (page-1, page_size) if page != total_page-1 and total_page > 1: response['next'] = self.API_URL + "?page=%s&size=%s" % (page+1, page_size) response['posts'] = [self.generate_post(x) for x in posts.items] return response def generate_author(self, id): user = User.query.filter_by(id=id).first() if user is None: user = RemoteUser.query.filter_by(id=id).first() if user is None: u = helper.get('author', {'author_id': id}) if len(u) > 0: u = u[0] author = { "id": u['id'], "host": u['host'], "displayname": u['displayname'], "url": "%s/author/%s" % (u['host'], u['id']), "github": u['github'] } remote = RemoteUser(id=u['id'], host=u['host'], username=u['displayname']) db.session.add(remote) db.session.commit() else: author = { 'id': id, 'host': 'Unknown', 'displayname': 'Unknown', 'url': 'Unknown', "github": 'Unknown' } else: author = { "id": user.id, "host": "http://" + user.host, "displayname": user.username, "url": "http://%s/author/%s" % (user.host, user.id), 'github': user.github } else: author = {"id": user.id, "host": "http://" + user.host, "displayname": user.username, "github": user.github, "url": "http://%s/author/%s" % (user.host, user.id)} return author def 
generate_post(self, cu): post = {"title": cu.title, "content": cu.body, "author": self.generate_author(cu.author_id), "published": cu.timestamp.isoformat(), "id": cu.id, "target": cu.target } query = Image_Posts.query.filter_by(post_id=cu.id).all() if len(query) > 0: image_id = query[0].__dict__['image_id'] post['image'] = 'http://floating-sands-69681.herokuapp.com/image/' + str(image_id) else: post['image'] = None if cu.privacy == 0: post['visibility'] = 'PUBLIC' elif cu.privacy == 1: post['visibility'] = 'PRIVATE' elif cu.privacy == 2: post['visibility'] = 'FRIENDS' elif cu.privacy == 3: post['visibility'] = 'SOMEONE' elif cu.privacy == 4: post['visibility'] = 'SERVERONLY' elif cu.privacy == 5: post['visibility'] = 'FOAF' else: post['visibility'] = 'PUBLIC' post["contentType"] = "text/x-markdown" if cu.markdown == "T" else "text/plain" # 5 comments per page. comments = cu.comments.paginate(1, 5) post["count"] = comments.total if comments.pages > 1: post["next"] = self.BASE_URL + "posts/%s/comments" % cu.id post["comments"] = [self.generate_comment(x) for x in comments.items] return post def generate_comment_response(self, cu, page=0, page_size=50): response = {"query": "comments", "size": page_size} comments = cu.order_by(Comment.timestamp.desc()).paginate(page+1, page_size) response['count'] = comments.total total_page = comments.total / page_size # check whether the page is the first page if page != 0: response['previous'] = self.API_URL + "?page=%s&size=%s" % (page-1, page_size) if page != total_page-1 and total_page > 1: response['next'] = self.API_URL + "?page=%s&size=%s" % (page+1, page_size) response['comments'] = [self.generate_comment(x) for x in comments.items] return response def generate_comment(self, cu): comments = {"comment": cu.body, "contentType": "text/plain", "id": cu.id, "published": cu.timestamp.isoformat(), "author": self.generate_author(cu.author_id), } return comments class PostAPI(BasePostAPI): def get(self, post_id=None): parser = 
reqparse.RequestParser() parser.add_argument('page', type=int, default=0) parser.add_argument('page_size', type=int, default=50) args = parser.parse_args() if post_id is None: self.posts = Post.query # self.posts = Post.query else: self.posts = Post.query.filter_by(id=post_id) # check post exist or not self.posts.first_or_404() return self.generate_post_response(args.page, args.page_size) def put(self, post_id): pass def post(self, post_id): parser = reqparse.RequestParser() parser.add_argument('page', type=int, default=0) parser.add_argument('page_size', type=int, default=50) args = parser.parse_args() self.posts = Post.query.filter_by(id=post_id) self.posts.first_or_404() def options(self): return {"Hello": "World"} class AuthorPost(BasePostAPI): def get(self, author_id=None): parser = reqparse.RequestParser() parser.add_argument('page', type=int, default=0) parser.add_argument('page_size', type=int, default=50) args = parser.parse_args() if author_id is None: # return all post visible to current authenticated user # self.posts = Post.query.filter_by(0) self.posts = Post.query else: # check user exist or not. user = User.query.filter_by(id=author_id).first_or_404() # return all post write by author_id # and visible to current authenticated user self.posts = Post.query.filter_by(author_id=author_id) return self.generate_post_response(args.page, args.page_size) class CommentAPI(BasePostAPI): def get(self, post_id): parser = reqparse.RequestParser() parser.add_argument('page', type=int, default=0) parser.add_argument('page_size', type=int, default=50) args = parser.parse_args() # check user exist or not. 
cu = Post.query.filter_by(id=post_id).first_or_404() return self.generate_comment_response(cu.comments, args.page, args.page_size) def post(self, post_id): post = Post.query.get_or_404(post_id) parser = reqparse.RequestParser() parser.add_argument("author", type=dict, required=True) parser.add_argument("comment", type=str, required=True) parser.add_argument("contentType", default='text/plain', type=str) parser.add_argument("published", default='text/plain', type=str) args = parser.parse_args() author_id = args.author.get("id", None) author_name = args.author.get("displayName", None) if author_id is None or author_name is None: abort(400, "Missing author_id or displayName.") # parse time try: date = dateutil.parser.parse(args.published) except ValueError: date = datetime.datetime.utcnow() comment = Comment(body=args.comment, post=post, timestamp=date, author_id=author_id, author=author_name) comment.set_id() db.session.add(comment) db.session.commit() return self.get(post_id) api.add_resource(PostAPI, '/api/posts', endpoint="public_post") api.add_resource(PostAPI, '/api/posts/') api.add_resource(PostAPI, '/api/posts/<string:post_id>', endpoint="post_id") api.add_resource(AuthorPost, '/api/author/posts', endpoint="author_post") api.add_resource(AuthorPost, '/api/author/<string:author_id>/posts', endpoint='author_post_id') api.add_resource(CommentAPI, '/api/posts/<string:post_id>/comments', endpoint="post_comments")
zhwa/thunder
refs/heads/master
thunder/rdds/fileio/writers.py
8
"""Classes that abstract writing to various types of filesystems. Currently two types of 'filesystem' are supported: * the local file system, via python's native file() objects * Amazon's S3 or Google Storage, using the boto library (only if boto is installed; boto is not a requirement) For each filesystem, three types of writer classes are provided: * parallel writers are intended to serve as a data sink at the end of a Spark workflow. They provide a `writerFcn(kv)` method, which is intended to be used inside a Spark foreach() call (for instance: myrdd.foreach(writer.writerFcn)). They expect to be given key, value pairs where the key is a filename (not including a directory component in the path), and the value is a string buffer. * file writers abstract across the supported filesystems, providing a common writeFile(buf) interface that writes the contents of buf to a file object specified at writer initialization. * collected file writers are intended to be used after a Spark collect() call. They provide similar functionality to the parallel writers, but operate only on the driver rather than distributed across all nodes. This is intended to make it easier to write the results of an analysis to the local filesystem, without requiring NFS or a similar distributed file system available on all worker nodes. 
""" import os import shutil import urllib import urlparse from thunder.rdds.fileio.readers import _BotoClient, getByScheme from thunder.utils.aws import S3ConnectionWithAnon _haveBoto = False try: import boto _haveBoto = True except ImportError: boto = None class LocalFSParallelWriter(object): def __init__(self, dataPath, overwrite=False, **kwargs): self._dataPath = dataPath # thanks stack overflow: # http://stackoverflow.com/questions/5977576/is-there-a-convenient-way-to-map-a-file-uri-to-os-path self._absPath = urllib.url2pathname(urlparse.urlparse(dataPath).path) self._overwrite = overwrite self._checked = False self._checkDirectory() def _checkDirectory(self): if not self._checked: if os.path.isfile(self._absPath): raise ValueError("LocalFSParallelWriter must be initialized with path to directory not file" + " in order to use writerFcn. Got: " + self._dataPath) if os.path.isdir(self._absPath): if self._overwrite: shutil.rmtree(self._absPath) else: raise ValueError("Directory %s already exists, and overwrite is false" % self._dataPath) os.mkdir(self._absPath) self._checked = True def writerFcn(self, kv): label, buf = kv with open(os.path.join(self._absPath, label), 'wb') as f: f.write(buf) class _BotoWriter(_BotoClient): def __init__(self, awsCredentialsOverride=None): super(_BotoWriter, self).__init__(awsCredentialsOverride=awsCredentialsOverride) self._storageScheme = None self._contextActive = False self._conn = None self._keyName = None self._bucket = None def activateContext(self, dataPath, isDirectory): """ Set up a boto connection. 
""" parsed = _BotoClient.parseQuery(dataPath) storageScheme = parsed[0] bucketName = parsed[1] keyName = parsed[2] if storageScheme == 's3' or storageScheme == 's3n': conn = S3ConnectionWithAnon(*self.awsCredentialsOverride.credentials) bucket = conn.get_bucket(bucketName) elif storageScheme == 'gs': conn = boto.storage_uri(bucketName, 'gs') bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + storageScheme) if isDirectory and (not keyName.endswith("/")): keyName += "/" self._storageScheme = storageScheme self._conn = conn self._keyName = keyName self._bucket = bucket self._contextActive = True @property def bucket(self): return self._bucket @property def keyName(self): return self._keyName @property def contextActive(self): return self._contextActive class BotoParallelWriter(_BotoWriter): def __init__(self, dataPath, overwrite=False, awsCredentialsOverride=None): super(BotoParallelWriter, self).__init__(awsCredentialsOverride=awsCredentialsOverride) self._dataPath = dataPath self._overwrite = overwrite def writerFcn(self, kv): if not self.contextActive: self.activateContext(self._dataPath, True) label, buf = kv self._bucket.new_key(self.keyName + label).set_contents_from_string(buf) class LocalFSFileWriter(object): def __init__(self, dataPath, filename, overwrite=False, **kwargs): self._dataPath = dataPath self._filename = filename self._absPath = os.path.join(urllib.url2pathname(urlparse.urlparse(dataPath).path), filename) self._overwrite = overwrite self._checked = False def _checkWriteFile(self): if not self._checked: if os.path.isdir(self._absPath): raise ValueError("LocalFSFileWriter must be initialized with path to file, not directory," + " in order to use writeFile. 
Got path: '%s', filename: '%s'" % (self._dataPath, self._filename)) if (not self._overwrite) and os.path.exists(self._absPath): raise ValueError("File %s already exists, and overwrite is false" % self._dataPath) self._checked = True def writeFile(self, buf): self._checkWriteFile() with open(os.path.join(self._absPath), 'wb') as f: f.write(buf) class BotoFileWriter(_BotoWriter): def __init__(self, dataPath, filename, overwrite=False, awsCredentialsOverride=None): super(BotoFileWriter, self).__init__(awsCredentialsOverride=awsCredentialsOverride) self._dataPath = dataPath self._filename = filename self._overwrite = overwrite def writeFile(self, buf): if not self.contextActive: self.activateContext(self._dataPath, True) self._bucket.new_key(self.keyName + self._filename).set_contents_from_string(buf) class LocalFSCollectedFileWriter(object): def __init__(self, dataPath, overwrite=False, **kwargs): self._dataPath = dataPath self._absPath = urllib.url2pathname(urlparse.urlparse(dataPath).path) self._overwrite = overwrite self._checked = False def _checkDirectory(self): # todo: this is duplicated code with LocalFSParallelWriter if not self._checked: if os.path.isfile(self._absPath): raise ValueError("LocalFSCollectedFileWriter must be initialized with path to directory not file" + " in order to use writerFcn. 
Got: " + self._dataPath) if os.path.isdir(self._absPath): if self._overwrite: shutil.rmtree(self._absPath) else: raise ValueError("Directory %s already exists, and overwrite is false" % self._dataPath) os.mkdir(self._absPath) # will throw error if is already a file self._checked = True def writeCollectedFiles(self, labelBufSequence): self._checkDirectory() for filename, buf in labelBufSequence: absPath = os.path.join(self._absPath, filename) with open(absPath, 'wb') as f: f.write(buf) class BotoCollectedFileWriter(_BotoWriter): # todo: needs to check before writing if overwrite is True def __init__(self, dataPath, overwrite=False, awsCredentialsOverride=None): super(BotoCollectedFileWriter, self).__init__(awsCredentialsOverride=awsCredentialsOverride) self._dataPath = dataPath self._overwrite = overwrite def writeCollectedFiles(self, labelBufSequence): if not self.contextActive: self.activateContext(self._dataPath, True) for filename, buf in labelBufSequence: self._bucket.new_key(self.keyName + filename).set_contents_from_string(buf) SCHEMAS_TO_PARALLELWRITERS = { '': LocalFSParallelWriter, 'file': LocalFSParallelWriter, 'gs': BotoParallelWriter, 's3': BotoParallelWriter, 's3n': BotoParallelWriter, 'hdfs': None, 'http': None, 'https': None, 'ftp': None } SCHEMAS_TO_FILEWRITERS = { '': LocalFSFileWriter, 'file': LocalFSFileWriter, 'gs': BotoFileWriter, 's3': BotoFileWriter, 's3n': BotoFileWriter, 'hdfs': None, 'http': None, 'https': None, 'ftp': None } SCHEMAS_TO_COLLECTEDFILEWRITERS = { '': LocalFSCollectedFileWriter, 'file': LocalFSCollectedFileWriter, 'gs': BotoCollectedFileWriter, 's3': BotoCollectedFileWriter, 's3n': BotoCollectedFileWriter, 'hdfs': None, 'http': None, 'https': None, 'ftp': None } def getParallelWriterForPath(dataPath): """Returns the class of a parallel file writer suitable for the scheme used by `datapath`. The resulting class object must still be instantiated in order to get a usable instance of the class. 
Throws NotImplementedError if the requested scheme is explicitly not supported (e.g. "ftp://"). Returns LocalFSParallelWriter if scheme is absent or not recognized. """ return getByScheme(dataPath, SCHEMAS_TO_PARALLELWRITERS, LocalFSParallelWriter) def getFileWriterForPath(dataPath): """Returns the class of a file writer suitable for the scheme used by `datapath`. The resulting class object must still be instantiated in order to get a usable instance of the class. Throws NotImplementedError if the requested scheme is explicitly not supported (e.g. "ftp://"). Returns LocalFSFileWriter if scheme is absent or not recognized. """ return getByScheme(dataPath, SCHEMAS_TO_FILEWRITERS, LocalFSFileWriter) def getCollectedFileWriterForPath(dataPath): """Returns the class of a collected file writer suitable for the scheme used by `datapath`. The resulting class object must still be instantiated in order to get a usable instance of the class. Throws NotImplementedError if the requested scheme is explicitly not supported (e.g. "ftp://"). Returns LocalFSCollectedFileWriter if scheme is absent or not recognized. """ return getByScheme(dataPath, SCHEMAS_TO_COLLECTEDFILEWRITERS, LocalFSCollectedFileWriter)
40223246/w16b_test
refs/heads/master
static/Brython3.1.3-20150514-095342/Lib/decimal.py
623
# Copyright (c) 2004 Python Software Foundation. # All rights reserved. # Written by Eric Price <eprice at tjhsst.edu> # and Facundo Batista <facundo at taniquetil.com.ar> # and Raymond Hettinger <python at rcn.com> # and Aahz <aahz at pobox.com> # and Tim Peters # This module should be kept in sync with the latest updates of the # IBM specification as it evolves. Those updates will be treated # as bug fixes (deviation from the spec is a compatibility, usability # bug) and will be backported. At this point the spec is stabilizing # and the updates are becoming fewer, smaller, and less significant. """ This is an implementation of decimal floating point arithmetic based on the General Decimal Arithmetic Specification: http://speleotrove.com/decimal/decarith.html and IEEE standard 854-1987: http://en.wikipedia.org/wiki/IEEE_854-1987 Decimal floating point has finite precision with arbitrarily large bounds. The purpose of this module is to support arithmetic using familiar "schoolhouse" rules and to avoid some of the tricky representation issues associated with binary floating point. The package is especially useful for financial applications or for contexts where users have expectations that are at odds with binary floating point (for instance, in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected Decimal('0.00')). 
Here are some examples of using the decimal module: >>> from decimal import * >>> setcontext(ExtendedContext) >>> Decimal(0) Decimal('0') >>> Decimal('1') Decimal('1') >>> Decimal('-.0123') Decimal('-0.0123') >>> Decimal(123456) Decimal('123456') >>> Decimal('123.45e12345678') Decimal('1.2345E+12345680') >>> Decimal('1.33') + Decimal('1.27') Decimal('2.60') >>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41') Decimal('-2.20') >>> dig = Decimal(1) >>> print(dig / Decimal(3)) 0.333333333 >>> getcontext().prec = 18 >>> print(dig / Decimal(3)) 0.333333333333333333 >>> print(dig.sqrt()) 1 >>> print(Decimal(3).sqrt()) 1.73205080756887729 >>> print(Decimal(3) ** 123) 4.85192780976896427E+58 >>> inf = Decimal(1) / Decimal(0) >>> print(inf) Infinity >>> neginf = Decimal(-1) / Decimal(0) >>> print(neginf) -Infinity >>> print(neginf + inf) NaN >>> print(neginf * inf) -Infinity >>> print(dig / 0) Infinity >>> getcontext().traps[DivisionByZero] = 1 >>> print(dig / 0) Traceback (most recent call last): ... ... ... decimal.DivisionByZero: x / 0 >>> c = Context() >>> c.traps[InvalidOperation] = 0 >>> print(c.flags[InvalidOperation]) 0 >>> c.divide(Decimal(0), Decimal(0)) Decimal('NaN') >>> c.traps[InvalidOperation] = 1 >>> print(c.flags[InvalidOperation]) 1 >>> c.flags[InvalidOperation] = 0 >>> print(c.flags[InvalidOperation]) 0 >>> print(c.divide(Decimal(0), Decimal(0))) Traceback (most recent call last): ... ... ... 
decimal.InvalidOperation: 0 / 0 >>> print(c.flags[InvalidOperation]) 1 >>> c.flags[InvalidOperation] = 0 >>> c.traps[InvalidOperation] = 0 >>> print(c.divide(Decimal(0), Decimal(0))) NaN >>> print(c.flags[InvalidOperation]) 1 >>> """ __all__ = [ # Two major classes 'Decimal', 'Context', # Contexts 'DefaultContext', 'BasicContext', 'ExtendedContext', # Exceptions 'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero', 'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow', 'FloatOperation', # Constants for use in setting up contexts 'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING', 'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP', # Functions for manipulating contexts 'setcontext', 'getcontext', 'localcontext', # Limits for the C version for compatibility 'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY', # C version: compile time choice that enables the thread local context 'HAVE_THREADS' ] __version__ = '1.70' # Highest version of the spec this complies with # See http://speleotrove.com/decimal/ import copy as _copy import math as _math import numbers as _numbers import sys try: from collections import namedtuple as _namedtuple DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent') except ImportError: DecimalTuple = lambda *args: args # Rounding ROUND_DOWN = 'ROUND_DOWN' ROUND_HALF_UP = 'ROUND_HALF_UP' ROUND_HALF_EVEN = 'ROUND_HALF_EVEN' ROUND_CEILING = 'ROUND_CEILING' ROUND_FLOOR = 'ROUND_FLOOR' ROUND_UP = 'ROUND_UP' ROUND_HALF_DOWN = 'ROUND_HALF_DOWN' ROUND_05UP = 'ROUND_05UP' # Compatibility with the C version HAVE_THREADS = True if sys.maxsize == 2**63-1: MAX_PREC = 999999999999999999 MAX_EMAX = 999999999999999999 MIN_EMIN = -999999999999999999 else: MAX_PREC = 425000000 MAX_EMAX = 425000000 MIN_EMIN = -425000000 MIN_ETINY = MIN_EMIN - (MAX_PREC-1) # Errors class DecimalException(ArithmeticError): """Base exception class. Used exceptions derive from this. 
If an exception derives from another exception besides this (such as Underflow (Inexact, Rounded, Subnormal) that indicates that it is only called if the others are present. This isn't actually used for anything, though. handle -- Called when context._raise_error is called and the trap_enabler is not set. First argument is self, second is the context. More arguments can be given, those being after the explanation in _raise_error (For example, context._raise_error(NewError, '(-x)!', self._sign) would call NewError().handle(context, self._sign).) To define a new exception, it should be sufficient to have it derive from DecimalException. """ def handle(self, context, *args): pass class Clamped(DecimalException): """Exponent of a 0 changed to fit bounds. This occurs and signals clamped if the exponent of a result has been altered in order to fit the constraints of a specific concrete representation. This may occur when the exponent of a zero result would be outside the bounds of a representation, or when a large normal number would have an encoded exponent that cannot be represented. In this latter case, the exponent is reduced to fit and the corresponding number of zero digits are appended to the coefficient ("fold-down"). """ #brython fixme pass class InvalidOperation(DecimalException): """An invalid operation was performed. Various bad things cause this: Something creates a signaling NaN -INF + INF 0 * (+-)INF (+-)INF / (+-)INF x % 0 (+-)INF % x x._rescale( non-integer ) sqrt(-x) , x > 0 0 ** 0 x ** (non-integer) x ** (+-)INF An operand is invalid The result of the operation after these is a quiet positive NaN, except when the cause is a signaling NaN, in which case the result is also a quiet NaN, but with the original sign, and an optional diagnostic information. 
""" def handle(self, context, *args): if args: ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True) return ans._fix_nan(context) return _NaN class ConversionSyntax(InvalidOperation): """Trying to convert badly formed string. This occurs and signals invalid-operation if an string is being converted to a number and it does not conform to the numeric string syntax. The result is [0,qNaN]. """ def handle(self, context, *args): return _NaN class DivisionByZero(DecimalException, ZeroDivisionError): """Division by 0. This occurs and signals division-by-zero if division of a finite number by zero was attempted (during a divide-integer or divide operation, or a power operation with negative right-hand operand), and the dividend was not zero. The result of the operation is [sign,inf], where sign is the exclusive or of the signs of the operands for divide, or is 1 for an odd power of -0, for power. """ def handle(self, context, sign, *args): return _SignedInfinity[sign] class DivisionImpossible(InvalidOperation): """Cannot perform the division adequately. This occurs and signals invalid-operation if the integer result of a divide-integer or remainder operation had too many digits (would be longer than precision). The result is [0,qNaN]. """ def handle(self, context, *args): return _NaN class DivisionUndefined(InvalidOperation, ZeroDivisionError): """Undefined result of division. This occurs and signals invalid-operation if division by zero was attempted (during a divide-integer, divide, or remainder operation), and the dividend is also zero. The result is [0,qNaN]. """ def handle(self, context, *args): return _NaN class Inexact(DecimalException): """Had to round, losing information. This occurs and signals inexact whenever the result of an operation is not exact (that is, it needed to be rounded and any discarded digits were non-zero), or if an overflow or underflow condition occurs. The result in all cases is unchanged. 
The inexact signal may be tested (or trapped) to determine if a given operation (or sequence of operations) was inexact. """ #brython fix me pass class InvalidContext(InvalidOperation): """Invalid context. Unknown rounding, for example. This occurs and signals invalid-operation if an invalid context was detected during an operation. This can occur if contexts are not checked on creation and either the precision exceeds the capability of the underlying concrete representation or an unknown or unsupported rounding was specified. These aspects of the context need only be checked when the values are required to be used. The result is [0,qNaN]. """ def handle(self, context, *args): return _NaN class Rounded(DecimalException): """Number got rounded (not necessarily changed during rounding). This occurs and signals rounded whenever the result of an operation is rounded (that is, some zero or non-zero digits were discarded from the coefficient), or if an overflow or underflow condition occurs. The result in all cases is unchanged. The rounded signal may be tested (or trapped) to determine if a given operation (or sequence of operations) caused a loss of precision. """ #brython fix me pass class Subnormal(DecimalException): """Exponent < Emin before rounding. This occurs and signals subnormal whenever the result of a conversion or operation is subnormal (that is, its adjusted exponent is less than Emin, before any rounding). The result in all cases is unchanged. The subnormal signal may be tested (or trapped) to determine if a given or operation (or sequence of operations) yielded a subnormal result. """ #brython fix me pass class Overflow(Inexact, Rounded): """Numerical overflow. This occurs and signals overflow if the adjusted exponent of a result (from a conversion or from an operation that is not an attempt to divide by zero), after rounding, would be greater than the largest value that can be handled by the implementation (the value Emax). 
The result depends on the rounding mode: For round-half-up and round-half-even (and for round-half-down and round-up, if implemented), the result of the operation is [sign,inf], where sign is the sign of the intermediate result. For round-down, the result is the largest finite number that can be represented in the current precision, with the sign of the intermediate result. For round-ceiling, the result is the same as for round-down if the sign of the intermediate result is 1, or is [0,inf] otherwise. For round-floor, the result is the same as for round-down if the sign of the intermediate result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded will also be raised. """ def handle(self, context, sign, *args): if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_HALF_DOWN, ROUND_UP): return _SignedInfinity[sign] if sign == 0: if context.rounding == ROUND_CEILING: return _SignedInfinity[sign] return _dec_from_triple(sign, '9'*context.prec, context.Emax-context.prec+1) if sign == 1: if context.rounding == ROUND_FLOOR: return _SignedInfinity[sign] return _dec_from_triple(sign, '9'*context.prec, context.Emax-context.prec+1) class Underflow(Inexact, Rounded, Subnormal): """Numerical underflow with result rounded to 0. This occurs and signals underflow if a result is inexact and the adjusted exponent of the result would be smaller (more negative) than the smallest value that can be handled by the implementation (the value Emin). That is, the result is both inexact and subnormal. The result after an underflow will be a subnormal number rounded, if necessary, so that its exponent is not less than Etiny. This may result in 0 with the sign of the intermediate result and an exponent of Etiny. In all cases, Inexact, Rounded, and Subnormal will also be raised. """ #brython fix me pass class FloatOperation(DecimalException, TypeError): """Enable stricter semantics for mixing floats and Decimals. 
If the signal is not trapped (default), mixing floats and Decimals is permitted in the Decimal() constructor, context.create_decimal() and all comparison operators. Both conversion and comparisons are exact. Any occurrence of a mixed operation is silently recorded by setting FloatOperation in the context flags. Explicit conversions with Decimal.from_float() or context.create_decimal_from_float() do not set the flag. Otherwise (the signal is trapped), only equality comparisons and explicit conversions are silent. All other mixed operations raise FloatOperation. """ #brython fix me pass # List of public traps and flags _signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded, Underflow, InvalidOperation, Subnormal, FloatOperation] # Map conditions (per the spec) to signals _condition_map = {ConversionSyntax:InvalidOperation, DivisionImpossible:InvalidOperation, DivisionUndefined:InvalidOperation, InvalidContext:InvalidOperation} # Valid rounding modes _rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING, ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP) ##### Context Functions ################################################## # The getcontext() and setcontext() function manage access to a thread-local # current context. Py2.4 offers direct support for thread locals. If that # is not available, use threading.current_thread() which is slower but will # work for older Pythons. If threads are not part of the build, create a # mock threading object with threading.local() returning the module namespace. try: import threading except ImportError: # Python was compiled without threads; create a mock object instead class MockThreading(object): def local(self, sys=sys): return sys.modules[__name__] threading = MockThreading() del MockThreading try: threading.local except AttributeError: # To fix reloading, force it to create a new context # Old contexts have different exceptions in their dicts, making problems. 
if hasattr(threading.current_thread(), '__decimal_context__'): del threading.current_thread().__decimal_context__ def setcontext(context): """Set this thread's context to context.""" if context in (DefaultContext, BasicContext, ExtendedContext): context = context.copy() context.clear_flags() threading.current_thread().__decimal_context__ = context def getcontext(): """Returns this thread's context. If this thread does not yet have a context, returns a new context and sets this thread's context. New contexts are copies of DefaultContext. """ try: return threading.current_thread().__decimal_context__ except AttributeError: context = Context() threading.current_thread().__decimal_context__ = context return context else: local = threading.local() if hasattr(local, '__decimal_context__'): del local.__decimal_context__ def getcontext(_local=local): """Returns this thread's context. If this thread does not yet have a context, returns a new context and sets this thread's context. New contexts are copies of DefaultContext. 
""" try: return _local.__decimal_context__ except AttributeError: context = Context() _local.__decimal_context__ = context return context def setcontext(context, _local=local): """Set this thread's context to context.""" if context in (DefaultContext, BasicContext, ExtendedContext): context = context.copy() context.clear_flags() _local.__decimal_context__ = context del threading, local # Don't contaminate the namespace def localcontext(ctx=None): """Return a context manager for a copy of the supplied context Uses a copy of the current context if no context is specified The returned context manager creates a local decimal context in a with statement: def sin(x): with localcontext() as ctx: ctx.prec += 2 # Rest of sin calculation algorithm # uses a precision 2 greater than normal return +s # Convert result to normal precision def sin(x): with localcontext(ExtendedContext): # Rest of sin calculation algorithm # uses the Extended Context from the # General Decimal Arithmetic Specification return +s # Convert result to normal context >>> setcontext(DefaultContext) >>> print(getcontext().prec) 28 >>> with localcontext(): ... ctx = getcontext() ... ctx.prec += 2 ... print(ctx.prec) ... 30 >>> with localcontext(ExtendedContext): ... print(getcontext().prec) ... 9 >>> print(getcontext().prec) 28 """ if ctx is None: ctx = getcontext() return _ContextManager(ctx) ##### Decimal class ####################################################### # Do not subclass Decimal from numbers.Real and do not register it as such # (because Decimals are not interoperable with floats). See the notes in # numbers.py for more detail. 
class Decimal(object): """Floating point class for decimal arithmetic.""" __slots__ = ('_exp','_int','_sign', '_is_special') # Generally, the value of the Decimal instance is given by # (-1)**_sign * _int * 10**_exp # Special values are signified by _is_special == True # We're immutable, so use __new__ not __init__ def __new__(cls, value="0", context=None): """Create a decimal point instance. >>> Decimal('3.14') # string input Decimal('3.14') >>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent) Decimal('3.14') >>> Decimal(314) # int Decimal('314') >>> Decimal(Decimal(314)) # another decimal instance Decimal('314') >>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay Decimal('3.14') """ # Note that the coefficient, self._int, is actually stored as # a string rather than as a tuple of digits. This speeds up # the "digits to integer" and "integer to digits" conversions # that are used in almost every arithmetic operation on # Decimals. This is an internal detail: the as_tuple function # and the Decimal constructor still deal with tuples of # digits. self = object.__new__(cls) # From a string # REs insist on real strings, so we can too. if isinstance(value, str): value=value.strip().lower() if value.startswith("-"): self._sign = 1 value=value[1:] else: self._sign = 0 if value in ('', 'nan'): self._is_special = True self._int = '' #if m.group('signal'): #figure out what a signaling NaN is later # self._exp = 'N' #else: # self._exp = 'n' self._exp='n' return self if value in ('inf', 'infinity'): self._int = '0' self._exp = 'F' self._is_special = True return self import _jsre as re _m=re.match("^\d*\.?\d*(e\+?\d*)?$", value) if not _m: self._is_special = True self._int = '' self._exp='n' return self if '.' 
in value: intpart, fracpart=value.split('.') if 'e' in fracpart: fracpart, exp=fracpart.split('e') exp=int(exp) else: exp=0 #self._int = str(int(intpart+fracpart)) self._int = intpart+fracpart self._exp = exp - len(fracpart) self._is_special = False return self else: #is this a pure int? self._is_special = False if 'e' in value: self._int, _exp=value.split('e') self._exp=int(_exp) #print(self._int, self._exp) else: self._int = value self._exp = 0 return self #m = _parser(value.strip()) #if m is None: if context is None: context = getcontext() return context._raise_error(ConversionSyntax, "Invalid literal for Decimal: %r" % value) #if m.group('sign') == "-": # self._sign = 1 #else: # self._sign = 0 #intpart = m.group('int') #if intpart is not None: # # finite number # fracpart = m.group('frac') or '' # exp = int(m.group('exp') or '0') # self._int = str(int(intpart+fracpart)) # self._exp = exp - len(fracpart) # self._is_special = False #else: # diag = m.group('diag') # if diag is not None: # # NaN # self._int = str(int(diag or '0')).lstrip('0') # if m.group('signal'): # self._exp = 'N' # else: # self._exp = 'n' # else: # # infinity # self._int = '0' # self._exp = 'F' # self._is_special = True #return self # From an integer if isinstance(value, int): if value >= 0: self._sign = 0 else: self._sign = 1 self._exp = 0 self._int = str(abs(value)) self._is_special = False return self # From another decimal if isinstance(value, Decimal): self._exp = value._exp self._sign = value._sign self._int = value._int self._is_special = value._is_special return self # From an internal working value if isinstance(value, _WorkRep): self._sign = value.sign self._int = str(value.int) self._exp = int(value.exp) self._is_special = False return self # tuple/list conversion (possibly from as_tuple()) if isinstance(value, (list,tuple)): if len(value) != 3: raise ValueError('Invalid tuple size in creation of Decimal ' 'from list or tuple. 
The list or tuple ' 'should have exactly three elements.') # process sign. The isinstance test rejects floats if not (isinstance(value[0], int) and value[0] in (0,1)): raise ValueError("Invalid sign. The first value in the tuple " "should be an integer; either 0 for a " "positive number or 1 for a negative number.") self._sign = value[0] if value[2] == 'F': # infinity: value[1] is ignored self._int = '0' self._exp = value[2] self._is_special = True else: # process and validate the digits in value[1] digits = [] for digit in value[1]: if isinstance(digit, int) and 0 <= digit <= 9: # skip leading zeros if digits or digit != 0: digits.append(digit) else: raise ValueError("The second value in the tuple must " "be composed of integers in the range " "0 through 9.") if value[2] in ('n', 'N'): # NaN: digits form the diagnostic self._int = ''.join(map(str, digits)) self._exp = value[2] self._is_special = True elif isinstance(value[2], int): # finite number: digits give the coefficient self._int = ''.join(map(str, digits or [0])) self._exp = value[2] self._is_special = False else: raise ValueError("The third value in the tuple must " "be an integer, or one of the " "strings 'F', 'n', 'N'.") return self if isinstance(value, float): if context is None: context = getcontext() context._raise_error(FloatOperation, "strict semantics for mixing floats and Decimals are " "enabled") value = Decimal.from_float(value) self._exp = value._exp self._sign = value._sign self._int = value._int self._is_special = value._is_special return self raise TypeError("Cannot convert %r to Decimal" % value) # @classmethod, but @decorator is not valid Python 2.3 syntax, so # don't use it (see notes on Py2.3 compatibility at top of file) def from_float(cls, f): """Converts a float to a decimal number, exactly. Note that Decimal.from_float(0.1) is not the same as Decimal('0.1'). 
Since 0.1 is not exactly representable in binary floating point, the value is stored as the nearest representable value which is 0x1.999999999999ap-4. The exact equivalent of the value in decimal is 0.1000000000000000055511151231257827021181583404541015625. >>> Decimal.from_float(0.1) Decimal('0.1000000000000000055511151231257827021181583404541015625') >>> Decimal.from_float(float('nan')) Decimal('NaN') >>> Decimal.from_float(float('inf')) Decimal('Infinity') >>> Decimal.from_float(-float('inf')) Decimal('-Infinity') >>> Decimal.from_float(-0.0) Decimal('-0') """ if isinstance(f, int): # handle integer inputs return cls(f) if not isinstance(f, float): raise TypeError("argument must be int or float.") if _math.isinf(f) or _math.isnan(f): return cls(repr(f)) if _math.copysign(1.0, f) == 1.0: sign = 0 else: sign = 1 n, d = abs(f).as_integer_ratio() k = d.bit_length() - 1 result = _dec_from_triple(sign, str(n*5**k), -k) if cls is Decimal: return result else: return cls(result) from_float = classmethod(from_float) def _isnan(self): """Returns whether the number is not actually one. 0 if a number 1 if NaN 2 if sNaN """ if self._is_special: exp = self._exp if exp == 'n': return 1 elif exp == 'N': return 2 return 0 def _isinfinity(self): """Returns whether the number is infinite 0 if finite or not a number 1 if +INF -1 if -INF """ if self._exp == 'F': if self._sign: return -1 return 1 return 0 def _check_nans(self, other=None, context=None): """Returns whether the number is not actually one. if self, other are sNaN, signal if self, other are NaN return nan return 0 Done before operations. 
""" self_is_nan = self._isnan() if other is None: other_is_nan = False else: other_is_nan = other._isnan() if self_is_nan or other_is_nan: if context is None: context = getcontext() if self_is_nan == 2: return context._raise_error(InvalidOperation, 'sNaN', self) if other_is_nan == 2: return context._raise_error(InvalidOperation, 'sNaN', other) if self_is_nan: return self._fix_nan(context) return other._fix_nan(context) return 0 def _compare_check_nans(self, other, context): """Version of _check_nans used for the signaling comparisons compare_signal, __le__, __lt__, __ge__, __gt__. Signal InvalidOperation if either self or other is a (quiet or signaling) NaN. Signaling NaNs take precedence over quiet NaNs. Return 0 if neither operand is a NaN. """ if context is None: context = getcontext() if self._is_special or other._is_special: if self.is_snan(): return context._raise_error(InvalidOperation, 'comparison involving sNaN', self) elif other.is_snan(): return context._raise_error(InvalidOperation, 'comparison involving sNaN', other) elif self.is_qnan(): return context._raise_error(InvalidOperation, 'comparison involving NaN', self) elif other.is_qnan(): return context._raise_error(InvalidOperation, 'comparison involving NaN', other) return 0 def __bool__(self): """Return True if self is nonzero; otherwise return False. NaNs and infinities are considered nonzero. """ return self._is_special or self._int != '0' def _cmp(self, other): """Compare the two non-NaN decimal instances self and other. Returns -1 if self < other, 0 if self == other and 1 if self > other. 
This routine is for internal use only.""" if self._is_special or other._is_special: self_inf = self._isinfinity() other_inf = other._isinfinity() if self_inf == other_inf: return 0 elif self_inf < other_inf: return -1 else: return 1 # check for zeros; Decimal('0') == Decimal('-0') if not self: if not other: return 0 else: return -((-1)**other._sign) if not other: return (-1)**self._sign # If different signs, neg one is less if other._sign < self._sign: return -1 if self._sign < other._sign: return 1 self_adjusted = self.adjusted() other_adjusted = other.adjusted() if self_adjusted == other_adjusted: self_padded = self._int + '0'*(self._exp - other._exp) other_padded = other._int + '0'*(other._exp - self._exp) if self_padded == other_padded: return 0 elif self_padded < other_padded: return -(-1)**self._sign else: return (-1)**self._sign elif self_adjusted > other_adjusted: return (-1)**self._sign else: # self_adjusted < other_adjusted return -((-1)**self._sign) # Note: The Decimal standard doesn't cover rich comparisons for # Decimals. In particular, the specification is silent on the # subject of what should happen for a comparison involving a NaN. # We take the following approach: # # == comparisons involving a quiet NaN always return False # != comparisons involving a quiet NaN always return True # == or != comparisons involving a signaling NaN signal # InvalidOperation, and return False or True as above if the # InvalidOperation is not trapped. # <, >, <= and >= comparisons involving a (quiet or signaling) # NaN signal InvalidOperation, and return False if the # InvalidOperation is not trapped. # # This behavior is designed to conform as closely as possible to # that specified by IEEE 754. 
def __eq__(self, other, context=None): self, other = _convert_for_comparison(self, other, equality_op=True) if other is NotImplemented: return other if self._check_nans(other, context): return False return self._cmp(other) == 0 def __ne__(self, other, context=None): self, other = _convert_for_comparison(self, other, equality_op=True) if other is NotImplemented: return other if self._check_nans(other, context): return True return self._cmp(other) != 0 def __lt__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other ans = self._compare_check_nans(other, context) if ans: return False return self._cmp(other) < 0 def __le__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other ans = self._compare_check_nans(other, context) if ans: return False return self._cmp(other) <= 0 def __gt__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other ans = self._compare_check_nans(other, context) if ans: return False return self._cmp(other) > 0 def __ge__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other ans = self._compare_check_nans(other, context) if ans: return False return self._cmp(other) >= 0 def compare(self, other, context=None): """Compares one to another. -1 => a < b 0 => a = b 1 => a > b NaN => one is NaN Like __cmp__, but returns Decimal instances. 
""" other = _convert_other(other, raiseit=True) # Compare(NaN, NaN) = NaN if (self._is_special or other and other._is_special): ans = self._check_nans(other, context) if ans: return ans return Decimal(self._cmp(other)) def __hash__(self): """x.__hash__() <==> hash(x)""" # In order to make sure that the hash of a Decimal instance # agrees with the hash of a numerically equal integer, float # or Fraction, we follow the rules for numeric hashes outlined # in the documentation. (See library docs, 'Built-in Types'). if self._is_special: if self.is_snan(): raise TypeError('Cannot hash a signaling NaN value.') elif self.is_nan(): return _PyHASH_NAN else: if self._sign: return -_PyHASH_INF else: return _PyHASH_INF if self._exp >= 0: exp_hash = pow(10, self._exp, _PyHASH_MODULUS) else: exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS) hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS ans = hash_ if self >= 0 else -hash_ return -2 if ans == -1 else ans def as_tuple(self): """Represents the number as a triple tuple. To show the internals exactly as they are. """ return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp) def __repr__(self): """Represents the number as an instance of Decimal.""" # Invariant: eval(repr(d)) == d return "Decimal('%s')" % str(self) def __str__(self, eng=False, context=None): """Return string representation of the number in scientific notation. Captures all of the information in the underlying representation. 
""" sign = ['', '-'][self._sign] if self._is_special: if self._exp == 'F': return sign + 'Infinity' elif self._exp == 'n': return sign + 'NaN' + self._int else: # self._exp == 'N' return sign + 'sNaN' + self._int # number of digits of self._int to left of decimal point leftdigits = self._exp + len(self._int) # dotplace is number of digits of self._int to the left of the # decimal point in the mantissa of the output string (that is, # after adjusting the exponent) if self._exp <= 0 and leftdigits > -6: # no exponent required dotplace = leftdigits elif not eng: # usual scientific notation: 1 digit on left of the point dotplace = 1 elif self._int == '0': # engineering notation, zero dotplace = (leftdigits + 1) % 3 - 1 else: # engineering notation, nonzero dotplace = (leftdigits - 1) % 3 + 1 if dotplace <= 0: intpart = '0' fracpart = '.' + '0'*(-dotplace) + self._int elif dotplace >= len(self._int): intpart = self._int+'0'*(dotplace-len(self._int)) fracpart = '' else: intpart = self._int[:dotplace] fracpart = '.' + self._int[dotplace:] if leftdigits == dotplace: exp = '' else: if context is None: context = getcontext() exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace) return sign + intpart + fracpart + exp def to_eng_string(self, context=None): """Convert to engineering-type string. Engineering notation has an exponent which is a multiple of 3, so there are up to 3 digits left of the decimal place. Same rules for when in exponential and when as a value as in __str__. """ return self.__str__(eng=True, context=context) def __neg__(self, context=None): """Returns a copy with the sign switched. Rounds, if it has reason. """ if self._is_special: ans = self._check_nans(context=context) if ans: return ans if context is None: context = getcontext() if not self and context.rounding != ROUND_FLOOR: # -Decimal('0') is Decimal('0'), not Decimal('-0'), except # in ROUND_FLOOR rounding mode. 
ans = self.copy_abs() else: ans = self.copy_negate() return ans._fix(context) def __pos__(self, context=None): """Returns a copy, unless it is a sNaN. Rounds the number (if more then precision digits) """ if self._is_special: ans = self._check_nans(context=context) if ans: return ans if context is None: context = getcontext() if not self and context.rounding != ROUND_FLOOR: # + (-0) = 0, except in ROUND_FLOOR rounding mode. ans = self.copy_abs() else: ans = Decimal(self) return ans._fix(context) def __abs__(self, round=True, context=None): """Returns the absolute value of self. If the keyword argument 'round' is false, do not round. The expression self.__abs__(round=False) is equivalent to self.copy_abs(). """ if not round: return self.copy_abs() if self._is_special: ans = self._check_nans(context=context) if ans: return ans if self._sign: ans = self.__neg__(context=context) else: ans = self.__pos__(context=context) return ans def __add__(self, other, context=None): """Returns self + other. -INF + INF (or the reverse) cause InvalidOperation errors. """ other = _convert_other(other) if other is NotImplemented: return other if context is None: context = getcontext() if self._is_special or other._is_special: ans = self._check_nans(other, context) if ans: return ans if self._isinfinity(): # If both INF, same sign => same as both, opposite => error. if self._sign != other._sign and other._isinfinity(): return context._raise_error(InvalidOperation, '-INF + INF') return Decimal(self) if other._isinfinity(): return Decimal(other) # Can't both be infinity here exp = min(self._exp, other._exp) negativezero = 0 if context.rounding == ROUND_FLOOR and self._sign != other._sign: # If the answer is 0, the sign should be negative, in this case. 
negativezero = 1 if not self and not other: sign = min(self._sign, other._sign) if negativezero: sign = 1 ans = _dec_from_triple(sign, '0', exp) ans = ans._fix(context) return ans if not self: exp = max(exp, other._exp - context.prec-1) ans = other._rescale(exp, context.rounding) ans = ans._fix(context) return ans if not other: exp = max(exp, self._exp - context.prec-1) ans = self._rescale(exp, context.rounding) ans = ans._fix(context) return ans op1 = _WorkRep(self) op2 = _WorkRep(other) op1, op2 = _normalize(op1, op2, context.prec) result = _WorkRep() if op1.sign != op2.sign: # Equal and opposite if op1.int == op2.int: ans = _dec_from_triple(negativezero, '0', exp) ans = ans._fix(context) return ans if op1.int < op2.int: op1, op2 = op2, op1 # OK, now abs(op1) > abs(op2) if op1.sign == 1: result.sign = 1 op1.sign, op2.sign = op2.sign, op1.sign else: result.sign = 0 # So we know the sign, and op1 > 0. elif op1.sign == 1: result.sign = 1 op1.sign, op2.sign = (0, 0) else: result.sign = 0 # Now, op1 > abs(op2) > 0 if op2.sign == 0: result.int = op1.int + op2.int else: result.int = op1.int - op2.int result.exp = op1.exp ans = Decimal(result) ans = ans._fix(context) return ans __radd__ = __add__ def __sub__(self, other, context=None): """Return self - other""" other = _convert_other(other) if other is NotImplemented: return other if self._is_special or other._is_special: ans = self._check_nans(other, context=context) if ans: return ans # self - other is computed as self + other.copy_negate() return self.__add__(other.copy_negate(), context=context) def __rsub__(self, other, context=None): """Return other - self""" other = _convert_other(other) if other is NotImplemented: return other return other.__sub__(self, context=context) def __mul__(self, other, context=None): """Return self * other. (+-) INF * 0 (or its reverse) raise InvalidOperation. 
""" other = _convert_other(other) if other is NotImplemented: return other if context is None: context = getcontext() resultsign = self._sign ^ other._sign if self._is_special or other._is_special: ans = self._check_nans(other, context) if ans: return ans if self._isinfinity(): if not other: return context._raise_error(InvalidOperation, '(+-)INF * 0') return _SignedInfinity[resultsign] if other._isinfinity(): if not self: return context._raise_error(InvalidOperation, '0 * (+-)INF') return _SignedInfinity[resultsign] resultexp = self._exp + other._exp # Special case for multiplying by zero if not self or not other: ans = _dec_from_triple(resultsign, '0', resultexp) # Fixing in case the exponent is out of bounds ans = ans._fix(context) return ans # Special case for multiplying by power of 10 if self._int == '1': ans = _dec_from_triple(resultsign, other._int, resultexp) ans = ans._fix(context) return ans if other._int == '1': ans = _dec_from_triple(resultsign, self._int, resultexp) ans = ans._fix(context) return ans op1 = _WorkRep(self) op2 = _WorkRep(other) ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp) ans = ans._fix(context) return ans __rmul__ = __mul__ def __truediv__(self, other, context=None): """Return self / other.""" other = _convert_other(other) if other is NotImplemented: return NotImplemented if context is None: context = getcontext() sign = self._sign ^ other._sign if self._is_special or other._is_special: ans = self._check_nans(other, context) if ans: return ans if self._isinfinity() and other._isinfinity(): return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF') if self._isinfinity(): return _SignedInfinity[sign] if other._isinfinity(): context._raise_error(Clamped, 'Division by infinity') return _dec_from_triple(sign, '0', context.Etiny()) # Special cases for zeroes if not other: if not self: return context._raise_error(DivisionUndefined, '0 / 0') return context._raise_error(DivisionByZero, 'x / 0', sign) if not self: 
exp = self._exp - other._exp coeff = 0 else: # OK, so neither = 0, INF or NaN shift = len(other._int) - len(self._int) + context.prec + 1 exp = self._exp - other._exp - shift op1 = _WorkRep(self) op2 = _WorkRep(other) if shift >= 0: coeff, remainder = divmod(op1.int * 10**shift, op2.int) else: coeff, remainder = divmod(op1.int, op2.int * 10**-shift) if remainder: # result is not exact; adjust to ensure correct rounding if coeff % 5 == 0: coeff += 1 else: # result is exact; get as close to ideal exponent as possible ideal_exp = self._exp - other._exp while exp < ideal_exp and coeff % 10 == 0: coeff //= 10 exp += 1 ans = _dec_from_triple(sign, str(coeff), exp) return ans._fix(context) def _divide(self, other, context): """Return (self // other, self % other), to context.prec precision. Assumes that neither self nor other is a NaN, that self is not infinite and that other is nonzero. """ sign = self._sign ^ other._sign if other._isinfinity(): ideal_exp = self._exp else: ideal_exp = min(self._exp, other._exp) expdiff = self.adjusted() - other.adjusted() if not self or other._isinfinity() or expdiff <= -2: return (_dec_from_triple(sign, '0', 0), self._rescale(ideal_exp, context.rounding)) if expdiff <= context.prec: op1 = _WorkRep(self) op2 = _WorkRep(other) if op1.exp >= op2.exp: op1.int *= 10**(op1.exp - op2.exp) else: op2.int *= 10**(op2.exp - op1.exp) q, r = divmod(op1.int, op2.int) if q < 10**context.prec: return (_dec_from_triple(sign, str(q), 0), _dec_from_triple(self._sign, str(r), ideal_exp)) # Here the quotient is too large to be representable ans = context._raise_error(DivisionImpossible, 'quotient too large in //, % or divmod') return ans, ans def __rtruediv__(self, other, context=None): """Swaps self/other and returns __truediv__.""" other = _convert_other(other) if other is NotImplemented: return other return other.__truediv__(self, context=context) def __divmod__(self, other, context=None): """ Return (self // other, self % other) """ other = 
_convert_other(other) if other is NotImplemented: return other if context is None: context = getcontext() ans = self._check_nans(other, context) if ans: return (ans, ans) sign = self._sign ^ other._sign if self._isinfinity(): if other._isinfinity(): ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)') return ans, ans else: return (_SignedInfinity[sign], context._raise_error(InvalidOperation, 'INF % x')) if not other: if not self: ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)') return ans, ans else: return (context._raise_error(DivisionByZero, 'x // 0', sign), context._raise_error(InvalidOperation, 'x % 0')) quotient, remainder = self._divide(other, context) remainder = remainder._fix(context) return quotient, remainder def __rdivmod__(self, other, context=None): """Swaps self/other and returns __divmod__.""" other = _convert_other(other) if other is NotImplemented: return other return other.__divmod__(self, context=context) def __mod__(self, other, context=None): """ self % other """ other = _convert_other(other) if other is NotImplemented: return other if context is None: context = getcontext() ans = self._check_nans(other, context) if ans: return ans if self._isinfinity(): return context._raise_error(InvalidOperation, 'INF % x') elif not other: if self: return context._raise_error(InvalidOperation, 'x % 0') else: return context._raise_error(DivisionUndefined, '0 % 0') remainder = self._divide(other, context)[1] remainder = remainder._fix(context) return remainder def __rmod__(self, other, context=None): """Swaps self/other and returns __mod__.""" other = _convert_other(other) if other is NotImplemented: return other return other.__mod__(self, context=context) def remainder_near(self, other, context=None): """ Remainder nearest to 0- abs(remainder-near) <= other/2 """ if context is None: context = getcontext() other = _convert_other(other, raiseit=True) ans = self._check_nans(other, context) if ans: return ans # self == +/-infinity -> 
InvalidOperation if self._isinfinity(): return context._raise_error(InvalidOperation, 'remainder_near(infinity, x)') # other == 0 -> either InvalidOperation or DivisionUndefined if not other: if self: return context._raise_error(InvalidOperation, 'remainder_near(x, 0)') else: return context._raise_error(DivisionUndefined, 'remainder_near(0, 0)') # other = +/-infinity -> remainder = self if other._isinfinity(): ans = Decimal(self) return ans._fix(context) # self = 0 -> remainder = self, with ideal exponent ideal_exponent = min(self._exp, other._exp) if not self: ans = _dec_from_triple(self._sign, '0', ideal_exponent) return ans._fix(context) # catch most cases of large or small quotient expdiff = self.adjusted() - other.adjusted() if expdiff >= context.prec + 1: # expdiff >= prec+1 => abs(self/other) > 10**prec return context._raise_error(DivisionImpossible) if expdiff <= -2: # expdiff <= -2 => abs(self/other) < 0.1 ans = self._rescale(ideal_exponent, context.rounding) return ans._fix(context) # adjust both arguments to have the same exponent, then divide op1 = _WorkRep(self) op2 = _WorkRep(other) if op1.exp >= op2.exp: op1.int *= 10**(op1.exp - op2.exp) else: op2.int *= 10**(op2.exp - op1.exp) q, r = divmod(op1.int, op2.int) # remainder is r*10**ideal_exponent; other is +/-op2.int * # 10**ideal_exponent. 
Apply correction to ensure that # abs(remainder) <= abs(other)/2 if 2*r + (q&1) > op2.int: r -= op2.int q += 1 if q >= 10**context.prec: return context._raise_error(DivisionImpossible) # result has same sign as self unless r is negative sign = self._sign if r < 0: sign = 1-sign r = -r ans = _dec_from_triple(sign, str(r), ideal_exponent) return ans._fix(context) def __floordiv__(self, other, context=None): """self // other""" other = _convert_other(other) if other is NotImplemented: return other if context is None: context = getcontext() ans = self._check_nans(other, context) if ans: return ans if self._isinfinity(): if other._isinfinity(): return context._raise_error(InvalidOperation, 'INF // INF') else: return _SignedInfinity[self._sign ^ other._sign] if not other: if self: return context._raise_error(DivisionByZero, 'x // 0', self._sign ^ other._sign) else: return context._raise_error(DivisionUndefined, '0 // 0') return self._divide(other, context)[0] def __rfloordiv__(self, other, context=None): """Swaps self/other and returns __floordiv__.""" other = _convert_other(other) if other is NotImplemented: return other return other.__floordiv__(self, context=context) def __float__(self): """Float representation.""" if self._isnan(): if self.is_snan(): raise ValueError("Cannot convert signaling NaN to float") s = "-nan" if self._sign else "nan" else: s = str(self) return float(s) def __int__(self): """Converts self to an int, truncating if necessary.""" if self._is_special: if self._isnan(): raise ValueError("Cannot convert NaN to integer") elif self._isinfinity(): raise OverflowError("Cannot convert infinity to integer") s = (-1)**self._sign if self._exp >= 0: return s*int(self._int)*10**self._exp else: return s*int(self._int[:self._exp] or '0') __trunc__ = __int__ def real(self): return self real = property(real) def imag(self): return Decimal(0) imag = property(imag) def conjugate(self): return self def __complex__(self): return complex(float(self)) def 
_fix_nan(self, context): """Decapitate the payload of a NaN to fit the context""" payload = self._int # maximum length of payload is precision if clamp=0, # precision-1 if clamp=1. max_payload_len = context.prec - context.clamp if len(payload) > max_payload_len: payload = payload[len(payload)-max_payload_len:].lstrip('0') return _dec_from_triple(self._sign, payload, self._exp, True) return Decimal(self) def _fix(self, context): """Round if it is necessary to keep self within prec precision. Rounds and fixes the exponent. Does not raise on a sNaN. Arguments: self - Decimal instance context - context used. """ if self._is_special: if self._isnan(): # decapitate payload if necessary return self._fix_nan(context) else: # self is +/-Infinity; return unaltered return Decimal(self) # if self is zero then exponent should be between Etiny and # Emax if clamp==0, and between Etiny and Etop if clamp==1. Etiny = context.Etiny() Etop = context.Etop() if not self: exp_max = [context.Emax, Etop][context.clamp] new_exp = min(max(self._exp, Etiny), exp_max) if new_exp != self._exp: context._raise_error(Clamped) return _dec_from_triple(self._sign, '0', new_exp) else: return Decimal(self) # exp_min is the smallest allowable exponent of the result, # equal to max(self.adjusted()-context.prec+1, Etiny) exp_min = len(self._int) + self._exp - context.prec if exp_min > Etop: # overflow: exp_min > Etop iff self.adjusted() > Emax ans = context._raise_error(Overflow, 'above Emax', self._sign) context._raise_error(Inexact) context._raise_error(Rounded) return ans self_is_subnormal = exp_min < Etiny if self_is_subnormal: exp_min = Etiny # round if self has too many digits if self._exp < exp_min: digits = len(self._int) + self._exp - exp_min if digits < 0: self = _dec_from_triple(self._sign, '1', exp_min-1) digits = 0 rounding_method = self._pick_rounding_function[context.rounding] changed = rounding_method(self, digits) coeff = self._int[:digits] or '0' if changed > 0: coeff = 
str(int(coeff)+1) if len(coeff) > context.prec: coeff = coeff[:-1] exp_min += 1 # check whether the rounding pushed the exponent out of range if exp_min > Etop: ans = context._raise_error(Overflow, 'above Emax', self._sign) else: ans = _dec_from_triple(self._sign, coeff, exp_min) # raise the appropriate signals, taking care to respect # the precedence described in the specification if changed and self_is_subnormal: context._raise_error(Underflow) if self_is_subnormal: context._raise_error(Subnormal) if changed: context._raise_error(Inexact) context._raise_error(Rounded) if not ans: # raise Clamped on underflow to 0 context._raise_error(Clamped) return ans if self_is_subnormal: context._raise_error(Subnormal) # fold down if clamp == 1 and self has too few digits if context.clamp == 1 and self._exp > Etop: context._raise_error(Clamped) self_padded = self._int + '0'*(self._exp - Etop) return _dec_from_triple(self._sign, self_padded, Etop) # here self was representable to begin with; return unchanged return Decimal(self) # for each of the rounding functions below: # self is a finite, nonzero Decimal # prec is an integer satisfying 0 <= prec < len(self._int) # # each function returns either -1, 0, or 1, as follows: # 1 indicates that self should be rounded up (away from zero) # 0 indicates that self should be truncated, and that all the # digits to be truncated are zeros (so the value is unchanged) # -1 indicates that there are nonzero digits to be truncated def _round_down(self, prec): """Also known as round-towards-0, truncate.""" if _all_zeros(self._int, prec): return 0 else: return -1 def _round_up(self, prec): """Rounds away from 0.""" return -self._round_down(prec) def _round_half_up(self, prec): """Rounds 5 up (away from 0)""" if self._int[prec] in '56789': return 1 elif _all_zeros(self._int, prec): return 0 else: return -1 def _round_half_down(self, prec): """Round 5 down""" if _exact_half(self._int, prec): return -1 else: return self._round_half_up(prec) def 
_round_half_even(self, prec): """Round 5 to even, rest to nearest.""" if _exact_half(self._int, prec) and \ (prec == 0 or self._int[prec-1] in '02468'): return -1 else: return self._round_half_up(prec) def _round_ceiling(self, prec): """Rounds up (not away from 0 if negative.)""" if self._sign: return self._round_down(prec) else: return -self._round_down(prec) def _round_floor(self, prec): """Rounds down (not towards 0 if negative)""" if not self._sign: return self._round_down(prec) else: return -self._round_down(prec) def _round_05up(self, prec): """Round down unless digit prec-1 is 0 or 5.""" if prec and self._int[prec-1] not in '05': return self._round_down(prec) else: return -self._round_down(prec) _pick_rounding_function = dict( ROUND_DOWN = _round_down, ROUND_UP = _round_up, ROUND_HALF_UP = _round_half_up, ROUND_HALF_DOWN = _round_half_down, ROUND_HALF_EVEN = _round_half_even, ROUND_CEILING = _round_ceiling, ROUND_FLOOR = _round_floor, ROUND_05UP = _round_05up, ) def __round__(self, n=None): """Round self to the nearest integer, or to a given precision. If only one argument is supplied, round a finite Decimal instance self to the nearest integer. If self is infinite or a NaN then a Python exception is raised. If self is finite and lies exactly halfway between two integers then it is rounded to the integer with even last digit. >>> round(Decimal('123.456')) 123 >>> round(Decimal('-456.789')) -457 >>> round(Decimal('-3.0')) -3 >>> round(Decimal('2.5')) 2 >>> round(Decimal('3.5')) 4 >>> round(Decimal('Inf')) Traceback (most recent call last): ... OverflowError: cannot round an infinity >>> round(Decimal('NaN')) Traceback (most recent call last): ... ValueError: cannot round a NaN If a second argument n is supplied, self is rounded to n decimal places using the rounding mode for the current context. For an integer n, round(self, -n) is exactly equivalent to self.quantize(Decimal('1En')). 
>>> round(Decimal('123.456'), 0) Decimal('123') >>> round(Decimal('123.456'), 2) Decimal('123.46') >>> round(Decimal('123.456'), -2) Decimal('1E+2') >>> round(Decimal('-Infinity'), 37) Decimal('NaN') >>> round(Decimal('sNaN123'), 0) Decimal('NaN123') """ if n is not None: # two-argument form: use the equivalent quantize call if not isinstance(n, int): raise TypeError('Second argument to round should be integral') exp = _dec_from_triple(0, '1', -n) return self.quantize(exp) # one-argument form if self._is_special: if self.is_nan(): raise ValueError("cannot round a NaN") else: raise OverflowError("cannot round an infinity") return int(self._rescale(0, ROUND_HALF_EVEN)) def __floor__(self): """Return the floor of self, as an integer. For a finite Decimal instance self, return the greatest integer n such that n <= self. If self is infinite or a NaN then a Python exception is raised. """ if self._is_special: if self.is_nan(): raise ValueError("cannot round a NaN") else: raise OverflowError("cannot round an infinity") return int(self._rescale(0, ROUND_FLOOR)) def __ceil__(self): """Return the ceiling of self, as an integer. For a finite Decimal instance self, return the least integer n such that n >= self. If self is infinite or a NaN then a Python exception is raised. """ if self._is_special: if self.is_nan(): raise ValueError("cannot round a NaN") else: raise OverflowError("cannot round an infinity") return int(self._rescale(0, ROUND_CEILING)) def fma(self, other, third, context=None): """Fused multiply-add. Returns self*other+third with no rounding of the intermediate product self*other. self and other are multiplied together, with no rounding of the result. The third operand is then added to the result, and a single final rounding is performed. """ other = _convert_other(other, raiseit=True) third = _convert_other(third, raiseit=True) # compute product; raise InvalidOperation if either operand is # a signaling NaN or if the product is zero times infinity. 
if self._is_special or other._is_special: if context is None: context = getcontext() if self._exp == 'N': return context._raise_error(InvalidOperation, 'sNaN', self) if other._exp == 'N': return context._raise_error(InvalidOperation, 'sNaN', other) if self._exp == 'n': product = self elif other._exp == 'n': product = other elif self._exp == 'F': if not other: return context._raise_error(InvalidOperation, 'INF * 0 in fma') product = _SignedInfinity[self._sign ^ other._sign] elif other._exp == 'F': if not self: return context._raise_error(InvalidOperation, '0 * INF in fma') product = _SignedInfinity[self._sign ^ other._sign] else: product = _dec_from_triple(self._sign ^ other._sign, str(int(self._int) * int(other._int)), self._exp + other._exp) return product.__add__(third, context) def _power_modulo(self, other, modulo, context=None): """Three argument version of __pow__""" other = _convert_other(other) if other is NotImplemented: return other modulo = _convert_other(modulo) if modulo is NotImplemented: return modulo if context is None: context = getcontext() # deal with NaNs: if there are any sNaNs then first one wins, # (i.e. 
behaviour for NaNs is identical to that of fma) self_is_nan = self._isnan() other_is_nan = other._isnan() modulo_is_nan = modulo._isnan() if self_is_nan or other_is_nan or modulo_is_nan: if self_is_nan == 2: return context._raise_error(InvalidOperation, 'sNaN', self) if other_is_nan == 2: return context._raise_error(InvalidOperation, 'sNaN', other) if modulo_is_nan == 2: return context._raise_error(InvalidOperation, 'sNaN', modulo) if self_is_nan: return self._fix_nan(context) if other_is_nan: return other._fix_nan(context) return modulo._fix_nan(context) # check inputs: we apply same restrictions as Python's pow() if not (self._isinteger() and other._isinteger() and modulo._isinteger()): return context._raise_error(InvalidOperation, 'pow() 3rd argument not allowed ' 'unless all arguments are integers') if other < 0: return context._raise_error(InvalidOperation, 'pow() 2nd argument cannot be ' 'negative when 3rd argument specified') if not modulo: return context._raise_error(InvalidOperation, 'pow() 3rd argument cannot be 0') # additional restriction for decimal: the modulus must be less # than 10**prec in absolute value if modulo.adjusted() >= context.prec: return context._raise_error(InvalidOperation, 'insufficient precision: pow() 3rd ' 'argument must not have more than ' 'precision digits') # define 0**0 == NaN, for consistency with two-argument pow # (even though it hurts!) if not other and not self: return context._raise_error(InvalidOperation, 'at least one of pow() 1st argument ' 'and 2nd argument must be nonzero ;' '0**0 is not defined') # compute sign of result if other._iseven(): sign = 0 else: sign = self._sign # convert modulo to a Python integer, and self and other to # Decimal integers (i.e. 
force their exponents to be >= 0) modulo = abs(int(modulo)) base = _WorkRep(self.to_integral_value()) exponent = _WorkRep(other.to_integral_value()) # compute result using integer pow() base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo for i in range(exponent.exp): base = pow(base, 10, modulo) base = pow(base, exponent.int, modulo) return _dec_from_triple(sign, str(base), 0) def _power_exact(self, other, p): """Attempt to compute self**other exactly. Given Decimals self and other and an integer p, attempt to compute an exact result for the power self**other, with p digits of precision. Return None if self**other is not exactly representable in p digits. Assumes that elimination of special cases has already been performed: self and other must both be nonspecial; self must be positive and not numerically equal to 1; other must be nonzero. For efficiency, other._exp should not be too large, so that 10**abs(other._exp) is a feasible calculation.""" # In the comments below, we write x for the value of self and y for the # value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc # and yc positive integers not divisible by 10. # The main purpose of this method is to identify the *failure* # of x**y to be exactly representable with as little effort as # possible. So we look for cheap and easy tests that # eliminate the possibility of x**y being exact. Only if all # these tests are passed do we go on to actually compute x**y. # Here's the main idea. Express y as a rational number m/n, with m and # n relatively prime and n>0. Then for x**y to be exactly # representable (at *any* precision), xc must be the nth power of a # positive integer and xe must be divisible by n. If y is negative # then additionally xc must be a power of either 2 or 5, hence a power # of 2**n or 5**n. 
# # There's a limit to how small |y| can be: if y=m/n as above # then: # # (1) if xc != 1 then for the result to be representable we # need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So # if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <= # 2**(1/|y|), hence xc**|y| < 2 and the result is not # representable. # # (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if # |y| < 1/|xe| then the result is not representable. # # Note that since x is not equal to 1, at least one of (1) and # (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) < # 10**-ye iff len(str(|yc|*nbits(xc)) <= -ye. # # There's also a limit to how large y can be, at least if it's # positive: the normalized result will have coefficient xc**y, # so if it's representable then xc**y < 10**p, and y < # p/log10(xc). Hence if y*log10(xc) >= p then the result is # not exactly representable. # if len(str(abs(yc*xe)) <= -ye then abs(yc*xe) < 10**-ye, # so |y| < 1/xe and the result is not representable. # Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y| # < 1/nbits(xc). x = _WorkRep(self) xc, xe = x.int, x.exp while xc % 10 == 0: xc //= 10 xe += 1 y = _WorkRep(other) yc, ye = y.int, y.exp while yc % 10 == 0: yc //= 10 ye += 1 # case where xc == 1: result is 10**(xe*y), with xe*y # required to be an integer if xc == 1: xe *= yc # result is now 10**(xe * 10**ye); xe * 10**ye must be integral while xe % 10 == 0: xe //= 10 ye += 1 if ye < 0: return None exponent = xe * 10**ye if y.sign == 1: exponent = -exponent # if other is a nonnegative integer, use ideal exponent if other._isinteger() and other._sign == 0: ideal_exponent = self._exp*int(other) zeros = min(exponent-ideal_exponent, p-1) else: zeros = 0 return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros) # case where y is negative: xc must be either a power # of 2 or a power of 5. 
if y.sign == 1: last_digit = xc % 10 if last_digit in (2,4,6,8): # quick test for power of 2 if xc & -xc != xc: return None # now xc is a power of 2; e is its exponent e = _nbits(xc)-1 # We now have: # # x = 2**e * 10**xe, e > 0, and y < 0. # # The exact result is: # # x**y = 5**(-e*y) * 10**(e*y + xe*y) # # provided that both e*y and xe*y are integers. Note that if # 5**(-e*y) >= 10**p, then the result can't be expressed # exactly with p digits of precision. # # Using the above, we can guard against large values of ye. # 93/65 is an upper bound for log(10)/log(5), so if # # ye >= len(str(93*p//65)) # # then # # -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5), # # so 5**(-e*y) >= 10**p, and the coefficient of the result # can't be expressed in p digits. # emax >= largest e such that 5**e < 10**p. emax = p*93//65 if ye >= len(str(emax)): return None # Find -e*y and -xe*y; both must be integers e = _decimal_lshift_exact(e * yc, ye) xe = _decimal_lshift_exact(xe * yc, ye) if e is None or xe is None: return None if e > emax: return None xc = 5**e elif last_digit == 5: # e >= log_5(xc) if xc is a power of 5; we have # equality all the way up to xc=5**2658 e = _nbits(xc)*28//65 xc, remainder = divmod(5**e, xc) if remainder: return None while xc % 5 == 0: xc //= 5 e -= 1 # Guard against large values of ye, using the same logic as in # the 'xc is a power of 2' branch. 10/3 is an upper bound for # log(10)/log(2). 
emax = p*10//3 if ye >= len(str(emax)): return None e = _decimal_lshift_exact(e * yc, ye) xe = _decimal_lshift_exact(xe * yc, ye) if e is None or xe is None: return None if e > emax: return None xc = 2**e else: return None if xc >= 10**p: return None xe = -e-xe return _dec_from_triple(0, str(xc), xe) # now y is positive; find m and n such that y = m/n if ye >= 0: m, n = yc*10**ye, 1 else: if xe != 0 and len(str(abs(yc*xe))) <= -ye: return None xc_bits = _nbits(xc) if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye: return None m, n = yc, 10**(-ye) while m % 2 == n % 2 == 0: m //= 2 n //= 2 while m % 5 == n % 5 == 0: m //= 5 n //= 5 # compute nth root of xc*10**xe if n > 1: # if 1 < xc < 2**n then xc isn't an nth power if xc != 1 and xc_bits <= n: return None xe, rem = divmod(xe, n) if rem != 0: return None # compute nth root of xc using Newton's method a = 1 << -(-_nbits(xc)//n) # initial estimate while True: q, r = divmod(xc, a**(n-1)) if a <= q: break else: a = (a*(n-1) + q)//n if not (a == q and r == 0): return None xc = a # now xc*10**xe is the nth root of the original xc*10**xe # compute mth power of xc*10**xe # if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m > # 10**p and the result is not representable. if xc > 1 and m > p*100//_log10_lb(xc): return None xc = xc**m xe *= m if xc > 10**p: return None # by this point the result *is* exactly representable # adjust the exponent to get as close as possible to the ideal # exponent, if necessary str_xc = str(xc) if other._isinteger() and other._sign == 0: ideal_exponent = self._exp*int(other) zeros = min(xe-ideal_exponent, p-len(str_xc)) else: zeros = 0 return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros) def __pow__(self, other, modulo=None, context=None): """Return self ** other [ % modulo]. With two arguments, compute self**other. With three arguments, compute (self**other) % modulo. 
For the three argument form, the following restrictions on the arguments hold: - all three arguments must be integral - other must be nonnegative - either self or other (or both) must be nonzero - modulo must be nonzero and must have at most p digits, where p is the context precision. If any of these restrictions is violated the InvalidOperation flag is raised. The result of pow(self, other, modulo) is identical to the result that would be obtained by computing (self**other) % modulo with unbounded precision, but is computed more efficiently. It is always exact. """ if modulo is not None: return self._power_modulo(other, modulo, context) other = _convert_other(other) if other is NotImplemented: return other if context is None: context = getcontext() # either argument is a NaN => result is NaN ans = self._check_nans(other, context) if ans: return ans # 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity) if not other: if not self: return context._raise_error(InvalidOperation, '0 ** 0') else: return _One # result has sign 1 iff self._sign is 1 and other is an odd integer result_sign = 0 if self._sign == 1: if other._isinteger(): if not other._iseven(): result_sign = 1 else: # -ve**noninteger = NaN # (-0)**noninteger = 0**noninteger if self: return context._raise_error(InvalidOperation, 'x ** y with x negative and y not an integer') # negate self, without doing any unwanted rounding self = self.copy_negate() # 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity if not self: if other._sign == 0: return _dec_from_triple(result_sign, '0', 0) else: return _SignedInfinity[result_sign] # Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0 if self._isinfinity(): if other._sign == 0: return _SignedInfinity[result_sign] else: return _dec_from_triple(result_sign, '0', 0) # 1**other = 1, but the choice of exponent and the flags # depend on the exponent of self, and on whether other is a # positive integer, a negative integer, or neither if self == _One: if other._isinteger(): 
# exp = max(self._exp*max(int(other), 0), # 1-context.prec) but evaluating int(other) directly # is dangerous until we know other is small (other # could be 1e999999999) if other._sign == 1: multiplier = 0 elif other > context.prec: multiplier = context.prec else: multiplier = int(other) exp = self._exp * multiplier if exp < 1-context.prec: exp = 1-context.prec context._raise_error(Rounded) else: context._raise_error(Inexact) context._raise_error(Rounded) exp = 1-context.prec return _dec_from_triple(result_sign, '1'+'0'*-exp, exp) # compute adjusted exponent of self self_adj = self.adjusted() # self ** infinity is infinity if self > 1, 0 if self < 1 # self ** -infinity is infinity if self < 1, 0 if self > 1 if other._isinfinity(): if (other._sign == 0) == (self_adj < 0): return _dec_from_triple(result_sign, '0', 0) else: return _SignedInfinity[result_sign] # from here on, the result always goes through the call # to _fix at the end of this function. ans = None exact = False # crude test to catch cases of extreme overflow/underflow. If # log10(self)*other >= 10**bound and bound >= len(str(Emax)) # then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence # self**other >= 10**(Emax+1), so overflow occurs. The test # for underflow is similar. 
bound = self._log10_exp_bound() + other.adjusted() if (self_adj >= 0) == (other._sign == 0): # self > 1 and other +ve, or self < 1 and other -ve # possibility of overflow if bound >= len(str(context.Emax)): ans = _dec_from_triple(result_sign, '1', context.Emax+1) else: # self > 1 and other -ve, or self < 1 and other +ve # possibility of underflow to 0 Etiny = context.Etiny() if bound >= len(str(-Etiny)): ans = _dec_from_triple(result_sign, '1', Etiny-1) # try for an exact result with precision +1 if ans is None: ans = self._power_exact(other, context.prec + 1) if ans is not None: if result_sign == 1: ans = _dec_from_triple(1, ans._int, ans._exp) exact = True # usual case: inexact result, x**y computed directly as exp(y*log(x)) if ans is None: p = context.prec x = _WorkRep(self) xc, xe = x.int, x.exp y = _WorkRep(other) yc, ye = y.int, y.exp if y.sign == 1: yc = -yc # compute correctly rounded result: start with precision +3, # then increase precision until result is unambiguously roundable extra = 3 while True: coeff, exp = _dpower(xc, xe, yc, ye, p+extra) if coeff % (5*10**(len(str(coeff))-p-1)): break extra += 3 ans = _dec_from_triple(result_sign, str(coeff), exp) # unlike exp, ln and log10, the power function respects the # rounding mode; no need to switch to ROUND_HALF_EVEN here # There's a difficulty here when 'other' is not an integer and # the result is exact. In this case, the specification # requires that the Inexact flag be raised (in spite of # exactness), but since the result is exact _fix won't do this # for us. (Correspondingly, the Underflow signal should also # be raised for subnormal results.) We can't directly raise # these signals either before or after calling _fix, since # that would violate the precedence for signals. So we wrap # the ._fix call in a temporary context, and reraise # afterwards. if exact and not other._isinteger(): # pad with zeros up to length context.prec+1 if necessary; this # ensures that the Rounded signal will be raised. 
if len(ans._int) <= context.prec: expdiff = context.prec + 1 - len(ans._int) ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff, ans._exp-expdiff) # create a copy of the current context, with cleared flags/traps newcontext = context.copy() newcontext.clear_flags() for exception in _signals: newcontext.traps[exception] = 0 # round in the new context ans = ans._fix(newcontext) # raise Inexact, and if necessary, Underflow newcontext._raise_error(Inexact) if newcontext.flags[Subnormal]: newcontext._raise_error(Underflow) # propagate signals to the original context; _fix could # have raised any of Overflow, Underflow, Subnormal, # Inexact, Rounded, Clamped. Overflow needs the correct # arguments. Note that the order of the exceptions is # important here. if newcontext.flags[Overflow]: context._raise_error(Overflow, 'above Emax', ans._sign) for exception in Underflow, Subnormal, Inexact, Rounded, Clamped: if newcontext.flags[exception]: context._raise_error(exception) else: ans = ans._fix(context) return ans def __rpow__(self, other, context=None): """Swaps self/other and returns __pow__.""" other = _convert_other(other) if other is NotImplemented: return other return other.__pow__(self, context=context) def normalize(self, context=None): """Normalize- strip trailing 0s, change anything equal to 0 to 0e0""" if context is None: context = getcontext() if self._is_special: ans = self._check_nans(context=context) if ans: return ans dup = self._fix(context) if dup._isinfinity(): return dup if not dup: return _dec_from_triple(dup._sign, '0', 0) exp_max = [context.Emax, context.Etop()][context.clamp] end = len(dup._int) exp = dup._exp while dup._int[end-1] == '0' and exp < exp_max: exp += 1 end -= 1 return _dec_from_triple(dup._sign, dup._int[:end], exp) def quantize(self, exp, rounding=None, context=None, watchexp=True): """Quantize self so its exponent is the same as that of exp. Similar to self._rescale(exp._exp) but with error checking. 
""" exp = _convert_other(exp, raiseit=True) if context is None: context = getcontext() if rounding is None: rounding = context.rounding if self._is_special or exp._is_special: ans = self._check_nans(exp, context) if ans: return ans if exp._isinfinity() or self._isinfinity(): if exp._isinfinity() and self._isinfinity(): return Decimal(self) # if both are inf, it is OK return context._raise_error(InvalidOperation, 'quantize with one INF') # if we're not watching exponents, do a simple rescale if not watchexp: ans = self._rescale(exp._exp, rounding) # raise Inexact and Rounded where appropriate if ans._exp > self._exp: context._raise_error(Rounded) if ans != self: context._raise_error(Inexact) return ans # exp._exp should be between Etiny and Emax if not (context.Etiny() <= exp._exp <= context.Emax): return context._raise_error(InvalidOperation, 'target exponent out of bounds in quantize') if not self: ans = _dec_from_triple(self._sign, '0', exp._exp) return ans._fix(context) self_adjusted = self.adjusted() if self_adjusted > context.Emax: return context._raise_error(InvalidOperation, 'exponent of quantize result too large for current context') if self_adjusted - exp._exp + 1 > context.prec: return context._raise_error(InvalidOperation, 'quantize result has too many digits for current context') ans = self._rescale(exp._exp, rounding) if ans.adjusted() > context.Emax: return context._raise_error(InvalidOperation, 'exponent of quantize result too large for current context') if len(ans._int) > context.prec: return context._raise_error(InvalidOperation, 'quantize result has too many digits for current context') # raise appropriate flags if ans and ans.adjusted() < context.Emin: context._raise_error(Subnormal) if ans._exp > self._exp: if ans != self: context._raise_error(Inexact) context._raise_error(Rounded) # call to fix takes care of any necessary folddown, and # signals Clamped if necessary ans = ans._fix(context) return ans def same_quantum(self, other, context=None): 
"""Return True if self and other have the same exponent; otherwise return False. If either operand is a special value, the following rules are used: * return True if both operands are infinities * return True if both operands are NaNs * otherwise, return False. """ other = _convert_other(other, raiseit=True) if self._is_special or other._is_special: return (self.is_nan() and other.is_nan() or self.is_infinite() and other.is_infinite()) return self._exp == other._exp def _rescale(self, exp, rounding): """Rescale self so that the exponent is exp, either by padding with zeros or by truncating digits, using the given rounding mode. Specials are returned without change. This operation is quiet: it raises no flags, and uses no information from the context. exp = exp to scale to (an integer) rounding = rounding mode """ if self._is_special: return Decimal(self) if not self: return _dec_from_triple(self._sign, '0', exp) if self._exp >= exp: # pad answer with zeros if necessary return _dec_from_triple(self._sign, self._int + '0'*(self._exp - exp), exp) # too many digits; round and lose data. If self.adjusted() < # exp-1, replace self by 10**(exp-1) before rounding digits = len(self._int) + self._exp - exp if digits < 0: self = _dec_from_triple(self._sign, '1', exp-1) digits = 0 this_function = self._pick_rounding_function[rounding] changed = this_function(self, digits) coeff = self._int[:digits] or '0' if changed == 1: coeff = str(int(coeff)+1) return _dec_from_triple(self._sign, coeff, exp) def _round(self, places, rounding): """Round a nonzero, nonspecial Decimal to a fixed number of significant figures, using the given rounding mode. Infinities, NaNs and zeros are returned unaltered. This operation is quiet: it raises no flags, and uses no information from the context. 
""" if places <= 0: raise ValueError("argument should be at least 1 in _round") if self._is_special or not self: return Decimal(self) ans = self._rescale(self.adjusted()+1-places, rounding) # it can happen that the rescale alters the adjusted exponent; # for example when rounding 99.97 to 3 significant figures. # When this happens we end up with an extra 0 at the end of # the number; a second rescale fixes this. if ans.adjusted() != self.adjusted(): ans = ans._rescale(ans.adjusted()+1-places, rounding) return ans def to_integral_exact(self, rounding=None, context=None): """Rounds to a nearby integer. If no rounding mode is specified, take the rounding mode from the context. This method raises the Rounded and Inexact flags when appropriate. See also: to_integral_value, which does exactly the same as this method except that it doesn't raise Inexact or Rounded. """ if self._is_special: ans = self._check_nans(context=context) if ans: return ans return Decimal(self) if self._exp >= 0: return Decimal(self) if not self: return _dec_from_triple(self._sign, '0', 0) if context is None: context = getcontext() if rounding is None: rounding = context.rounding ans = self._rescale(0, rounding) if ans != self: context._raise_error(Inexact) context._raise_error(Rounded) return ans def to_integral_value(self, rounding=None, context=None): """Rounds to the nearest integer, without raising inexact, rounded.""" if context is None: context = getcontext() if rounding is None: rounding = context.rounding if self._is_special: ans = self._check_nans(context=context) if ans: return ans return Decimal(self) if self._exp >= 0: return Decimal(self) else: return self._rescale(0, rounding) # the method name changed, but we provide also the old one, for compatibility to_integral = to_integral_value def sqrt(self, context=None): """Return the square root of self.""" if context is None: context = getcontext() if self._is_special: ans = self._check_nans(context=context) if ans: return ans if 
self._isinfinity() and self._sign == 0: return Decimal(self) if not self: # exponent = self._exp // 2. sqrt(-0) = -0 ans = _dec_from_triple(self._sign, '0', self._exp // 2) return ans._fix(context) if self._sign == 1: return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0') # At this point self represents a positive number. Let p be # the desired precision and express self in the form c*100**e # with c a positive real number and e an integer, c and e # being chosen so that 100**(p-1) <= c < 100**p. Then the # (exact) square root of self is sqrt(c)*10**e, and 10**(p-1) # <= sqrt(c) < 10**p, so the closest representable Decimal at # precision p is n*10**e where n = round_half_even(sqrt(c)), # the closest integer to sqrt(c) with the even integer chosen # in the case of a tie. # # To ensure correct rounding in all cases, we use the # following trick: we compute the square root to an extra # place (precision p+1 instead of precision p), rounding down. # Then, if the result is inexact and its last digit is 0 or 5, # we increase the last digit to 1 or 6 respectively; if it's # exact we leave the last digit alone. Now the final round to # p places (or fewer in the case of underflow) will round # correctly and raise the appropriate flags. # use an extra digit of precision prec = context.prec+1 # write argument in the form c*100**e where e = self._exp//2 # is the 'ideal' exponent, to be used if the square root is # exactly representable. l is the number of 'digits' of c in # base 100, so that 100**(l-1) <= c < 100**l. 
op = _WorkRep(self) e = op.exp >> 1 if op.exp & 1: c = op.int * 10 l = (len(self._int) >> 1) + 1 else: c = op.int l = len(self._int)+1 >> 1 # rescale so that c has exactly prec base 100 'digits' shift = prec-l if shift >= 0: c *= 100**shift exact = True else: c, remainder = divmod(c, 100**-shift) exact = not remainder e -= shift # find n = floor(sqrt(c)) using Newton's method n = 10**prec while True: q = c//n if n <= q: break else: n = n + q >> 1 exact = exact and n*n == c if exact: # result is exact; rescale to use ideal exponent e if shift >= 0: # assert n % 10**shift == 0 n //= 10**shift else: n *= 10**-shift e += shift else: # result is not exact; fix last digit as described above if n % 5 == 0: n += 1 ans = _dec_from_triple(0, str(n), e) # round, and fit to current context context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) ans = ans._fix(context) context.rounding = rounding return ans def max(self, other, context=None): """Returns the larger value. Like max(self, other) except if one is not a number, returns NaN (and signals if one is sNaN). Also rounds. """ other = _convert_other(other, raiseit=True) if context is None: context = getcontext() if self._is_special or other._is_special: # If one operand is a quiet NaN and the other is number, then the # number is always returned sn = self._isnan() on = other._isnan() if sn or on: if on == 1 and sn == 0: return self._fix(context) if sn == 1 and on == 0: return other._fix(context) return self._check_nans(other, context) c = self._cmp(other) if c == 0: # If both operands are finite and equal in numerical value # then an ordering is applied: # # If the signs differ then max returns the operand with the # positive sign and min returns the operand with the negative sign # # If the signs are the same then the exponent is used to select # the result. This is exactly the ordering used in compare_total. 
c = self.compare_total(other) if c == -1: ans = other else: ans = self return ans._fix(context) def min(self, other, context=None): """Returns the smaller value. Like min(self, other) except if one is not a number, returns NaN (and signals if one is sNaN). Also rounds. """ other = _convert_other(other, raiseit=True) if context is None: context = getcontext() if self._is_special or other._is_special: # If one operand is a quiet NaN and the other is number, then the # number is always returned sn = self._isnan() on = other._isnan() if sn or on: if on == 1 and sn == 0: return self._fix(context) if sn == 1 and on == 0: return other._fix(context) return self._check_nans(other, context) c = self._cmp(other) if c == 0: c = self.compare_total(other) if c == -1: ans = self else: ans = other return ans._fix(context) def _isinteger(self): """Returns whether self is an integer""" if self._is_special: return False if self._exp >= 0: return True rest = self._int[self._exp:] return rest == '0'*len(rest) def _iseven(self): """Returns True if self is even. Assumes self is an integer.""" if not self or self._exp > 0: return True return self._int[-1+self._exp] in '02468' def adjusted(self): """Return the adjusted exponent of self""" try: return self._exp + len(self._int) - 1 # If NaN or Infinity, self._exp is string except TypeError: return 0 def canonical(self): """Returns the same Decimal object. As we do not have different encodings for the same number, the received object already is in its canonical form. """ return self def compare_signal(self, other, context=None): """Compares self to the other operand numerically. It's pretty much like compare(), but all NaNs signal, with signaling NaNs taking precedence over quiet NaNs. 
""" other = _convert_other(other, raiseit = True) ans = self._compare_check_nans(other, context) if ans: return ans return self.compare(other, context=context) def compare_total(self, other, context=None): """Compares self to other using the abstract representations. This is not like the standard compare, which use their numerical value. Note that a total ordering is defined for all possible abstract representations. """ other = _convert_other(other, raiseit=True) # if one is negative and the other is positive, it's easy if self._sign and not other._sign: return _NegativeOne if not self._sign and other._sign: return _One sign = self._sign # let's handle both NaN types self_nan = self._isnan() other_nan = other._isnan() if self_nan or other_nan: if self_nan == other_nan: # compare payloads as though they're integers self_key = len(self._int), self._int other_key = len(other._int), other._int if self_key < other_key: if sign: return _One else: return _NegativeOne if self_key > other_key: if sign: return _NegativeOne else: return _One return _Zero if sign: if self_nan == 1: return _NegativeOne if other_nan == 1: return _One if self_nan == 2: return _NegativeOne if other_nan == 2: return _One else: if self_nan == 1: return _One if other_nan == 1: return _NegativeOne if self_nan == 2: return _One if other_nan == 2: return _NegativeOne if self < other: return _NegativeOne if self > other: return _One if self._exp < other._exp: if sign: return _One else: return _NegativeOne if self._exp > other._exp: if sign: return _NegativeOne else: return _One return _Zero def compare_total_mag(self, other, context=None): """Compares self to other using abstract repr., ignoring sign. Like compare_total, but with operand's sign ignored and assumed to be 0. """ other = _convert_other(other, raiseit=True) s = self.copy_abs() o = other.copy_abs() return s.compare_total(o) def copy_abs(self): """Returns a copy with the sign set to 0. 
""" return _dec_from_triple(0, self._int, self._exp, self._is_special) def copy_negate(self): """Returns a copy with the sign inverted.""" if self._sign: return _dec_from_triple(0, self._int, self._exp, self._is_special) else: return _dec_from_triple(1, self._int, self._exp, self._is_special) def copy_sign(self, other, context=None): """Returns self with the sign of other.""" other = _convert_other(other, raiseit=True) return _dec_from_triple(other._sign, self._int, self._exp, self._is_special) def exp(self, context=None): """Returns e ** self.""" if context is None: context = getcontext() # exp(NaN) = NaN ans = self._check_nans(context=context) if ans: return ans # exp(-Infinity) = 0 if self._isinfinity() == -1: return _Zero # exp(0) = 1 if not self: return _One # exp(Infinity) = Infinity if self._isinfinity() == 1: return Decimal(self) # the result is now guaranteed to be inexact (the true # mathematical result is transcendental). There's no need to # raise Rounded and Inexact here---they'll always be raised as # a result of the call to _fix. p = context.prec adj = self.adjusted() # we only need to do any computation for quite a small range # of adjusted exponents---for example, -29 <= adj <= 10 for # the default context. For smaller exponent the result is # indistinguishable from 1 at the given precision, while for # larger exponent the result either overflows or underflows. 
if self._sign == 0 and adj > len(str((context.Emax+1)*3)): # overflow ans = _dec_from_triple(0, '1', context.Emax+1) elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)): # underflow to 0 ans = _dec_from_triple(0, '1', context.Etiny()-1) elif self._sign == 0 and adj < -p: # p+1 digits; final round will raise correct flags ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p) elif self._sign == 1 and adj < -p-1: # p+1 digits; final round will raise correct flags ans = _dec_from_triple(0, '9'*(p+1), -p-1) # general case else: op = _WorkRep(self) c, e = op.int, op.exp if op.sign == 1: c = -c # compute correctly rounded result: increase precision by # 3 digits at a time until we get an unambiguously # roundable result extra = 3 while True: coeff, exp = _dexp(c, e, p+extra) if coeff % (5*10**(len(str(coeff))-p-1)): break extra += 3 ans = _dec_from_triple(0, str(coeff), exp) # at this stage, ans should round correctly with *any* # rounding mode, not just with ROUND_HALF_EVEN context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) ans = ans._fix(context) context.rounding = rounding return ans def is_canonical(self): """Return True if self is canonical; otherwise return False. Currently, the encoding of a Decimal instance is always canonical, so this method returns True for any Decimal. """ return True def is_finite(self): """Return True if self is finite; otherwise return False. A Decimal instance is considered finite if it is neither infinite nor a NaN. 
""" return not self._is_special def is_infinite(self): """Return True if self is infinite; otherwise return False.""" return self._exp == 'F' def is_nan(self): """Return True if self is a qNaN or sNaN; otherwise return False.""" return self._exp in ('n', 'N') def is_normal(self, context=None): """Return True if self is a normal number; otherwise return False.""" if self._is_special or not self: return False if context is None: context = getcontext() return context.Emin <= self.adjusted() def is_qnan(self): """Return True if self is a quiet NaN; otherwise return False.""" return self._exp == 'n' def is_signed(self): """Return True if self is negative; otherwise return False.""" return self._sign == 1 def is_snan(self): """Return True if self is a signaling NaN; otherwise return False.""" return self._exp == 'N' def is_subnormal(self, context=None): """Return True if self is subnormal; otherwise return False.""" if self._is_special or not self: return False if context is None: context = getcontext() return self.adjusted() < context.Emin def is_zero(self): """Return True if self is a zero; otherwise return False.""" return not self._is_special and self._int == '0' def _ln_exp_bound(self): """Compute a lower bound for the adjusted exponent of self.ln(). In other words, compute r such that self.ln() >= 10**r. Assumes that self is finite and positive and that self != 1. 
""" # for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1 adj = self._exp + len(self._int) - 1 if adj >= 1: # argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10) return len(str(adj*23//10)) - 1 if adj <= -2: # argument <= 0.1 return len(str((-1-adj)*23//10)) - 1 op = _WorkRep(self) c, e = op.int, op.exp if adj == 0: # 1 < self < 10 num = str(c-10**-e) den = str(c) return len(num) - len(den) - (num < den) # adj == -1, 0.1 <= self < 1 return e + len(str(10**-e - c)) - 1 def ln(self, context=None): """Returns the natural (base e) logarithm of self.""" if context is None: context = getcontext() # ln(NaN) = NaN ans = self._check_nans(context=context) if ans: return ans # ln(0.0) == -Infinity if not self: return _NegativeInfinity # ln(Infinity) = Infinity if self._isinfinity() == 1: return _Infinity # ln(1.0) == 0.0 if self == _One: return _Zero # ln(negative) raises InvalidOperation if self._sign == 1: return context._raise_error(InvalidOperation, 'ln of a negative value') # result is irrational, so necessarily inexact op = _WorkRep(self) c, e = op.int, op.exp p = context.prec # correctly rounded result: repeatedly increase precision by 3 # until we get an unambiguously roundable result places = p - self._ln_exp_bound() + 2 # at least p+3 places while True: coeff = _dlog(c, e, places) # assert len(str(abs(coeff)))-p >= 1 if coeff % (5*10**(len(str(abs(coeff)))-p-1)): break places += 3 ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places) context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) ans = ans._fix(context) context.rounding = rounding return ans def _log10_exp_bound(self): """Compute a lower bound for the adjusted exponent of self.log10(). In other words, find r such that self.log10() >= 10**r. Assumes that self is finite and positive and that self != 1. """ # For x >= 10 or x < 0.1 we only need a bound on the integer # part of log10(self), and this comes directly from the # exponent of x. 
        # For 0.1 <= x <= 10 we use the inequalities
        # 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
        # (1-1/x)/2.31 > 0.  If x < 1 then |log10(x)| > (1-x)/2.31 > 0

        # adjusted exponent of self: self = coeff * 10**adj with 1 <= coeff < 10
        adj = self._exp + len(self._int) - 1
        if adj >= 1:
            # self >= 10
            return len(str(adj))-1
        if adj <= -2:
            # self < 0.1
            return len(str(-1-adj))-1
        op = _WorkRep(self)
        c, e = op.int, op.exp
        if adj == 0:
            # 1 < self < 10
            num = str(c-10**-e)
            den = str(231*c)
            return len(num) - len(den) - (num < den) + 2
        # adj == -1, 0.1 <= self < 1
        num = str(10**-e-c)
        return len(num) + e - (num < "231") - 1

    def log10(self, context=None):
        """Returns the base 10 logarithm of self."""
        if context is None:
            context = getcontext()

        # log10(NaN) = NaN
        ans = self._check_nans(context=context)
        if ans:
            return ans

        # log10(0.0) == -Infinity
        if not self:
            return _NegativeInfinity

        # log10(Infinity) = Infinity
        if self._isinfinity() == 1:
            return _Infinity

        # log10(negative or -Infinity) raises InvalidOperation
        if self._sign == 1:
            return context._raise_error(InvalidOperation,
                                        'log10 of a negative value')

        # log10(10**n) = n
        if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
            # answer may need rounding
            ans = Decimal(self._exp + len(self._int) - 1)
        else:
            # result is irrational, so necessarily inexact
            op = _WorkRep(self)
            c, e = op.int, op.exp
            p = context.prec

            # correctly rounded result: repeatedly increase precision
            # until result is unambiguously roundable
            places = p-self._log10_exp_bound()+2
            while True:
                coeff = _dlog10(c, e, places)
                # assert len(str(abs(coeff)))-p >= 1
                if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
                    break
                places += 3
            ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)

        # round the result using HALF_EVEN, then restore the caller's rounding
        context = context._shallow_copy()
        rounding = context._set_rounding(ROUND_HALF_EVEN)
        ans = ans._fix(context)
        context.rounding = rounding
        return ans

    def logb(self, context=None):
        """ Returns the exponent of the magnitude of self's MSD.

        The result is the integer which is the exponent of the magnitude
        of the most significant digit of self (as though it were truncated
        to a single digit while maintaining the value of that digit and
        without limiting the resulting exponent).
        """
        # logb(NaN) = NaN
        ans = self._check_nans(context=context)
        if ans:
            return ans

        if context is None:
            context = getcontext()

        # logb(+/-Inf) = +Inf
        if self._isinfinity():
            return _Infinity

        # logb(0) = -Inf, DivisionByZero
        if not self:
            return context._raise_error(DivisionByZero, 'logb(0)', 1)

        # otherwise, simply return the adjusted exponent of self, as a
        # Decimal.  Note that no attempt is made to fit the result
        # into the current context.
        ans = Decimal(self.adjusted())
        return ans._fix(context)

    def _islogical(self):
        """Return True if self is a logical operand.

        For being logical, it must be a finite number with a sign of 0,
        an exponent of 0, and a coefficient whose digits must all be
        either 0 or 1.
        """
        if self._sign != 0 or self._exp != 0:
            return False
        for dig in self._int:
            if dig not in '01':
                return False
        return True

    def _fill_logical(self, context, opa, opb):
        """Zero-pad or truncate opa and opb to exactly context.prec digits."""
        dif = context.prec - len(opa)
        if dif > 0:
            opa = '0'*dif + opa
        elif dif < 0:
            opa = opa[-context.prec:]
        dif = context.prec - len(opb)
        if dif > 0:
            opb = '0'*dif + opb
        elif dif < 0:
            opb = opb[-context.prec:]
        return opa, opb

    def logical_and(self, other, context=None):
        """Applies an 'and' operation between self and other's digits."""
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        if not self._islogical() or not other._islogical():
            return context._raise_error(InvalidOperation)

        # fill to context.prec
        (opa, opb) = self._fill_logical(context, self._int, other._int)

        # make the operation, and clean starting zeroes
        result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
        return _dec_from_triple(0, result.lstrip('0') or '0', 0)

    def logical_invert(self, context=None):
        """Invert all its digits."""
        if context is None:
            context = getcontext()
        # invert == xor with a coefficient of all ones at current precision
        return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
                                context)

    def logical_or(self, other, context=None):
        """Applies an 'or' operation between self and other's digits."""
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        if not self._islogical() or not other._islogical():
            return context._raise_error(InvalidOperation)

        # fill to context.prec
        (opa, opb) = self._fill_logical(context, self._int, other._int)

        # make the operation, and clean starting zeroes
        result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
        return _dec_from_triple(0, result.lstrip('0') or '0', 0)

    def logical_xor(self, other, context=None):
        """Applies an 'xor' operation between self and other's digits."""
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        if not self._islogical() or not other._islogical():
            return context._raise_error(InvalidOperation)

        # fill to context.prec
        (opa, opb) = self._fill_logical(context, self._int, other._int)

        # make the operation, and clean starting zeroes
        result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
        return _dec_from_triple(0, result.lstrip('0') or '0', 0)

    def max_mag(self, other, context=None):
        """Compares the values numerically with their sign ignored."""
        other = _convert_other(other, raiseit=True)

        if context is None:
            context = getcontext()

        if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is number, then the
            # number is always returned
            sn = self._isnan()
            on = other._isnan()
            if sn or on:
                if on == 1 and sn == 0:
                    return self._fix(context)
                if sn == 1 and on == 0:
                    return other._fix(context)
                return self._check_nans(other, context)

        c = self.copy_abs()._cmp(other.copy_abs())
        if c == 0:
            # equal magnitudes: break the tie with the total ordering
            c = self.compare_total(other)

        if c == -1:
            ans = other
        else:
            ans = self

        return ans._fix(context)

    def min_mag(self, other, context=None):
        """Compares the values numerically with their sign ignored."""
        other = _convert_other(other, raiseit=True)

        if context is None:
            context = getcontext()

        if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is number, then the
            # number is always returned
            sn = self._isnan()
            on = other._isnan()
            if sn or on:
                if on == 1 and sn == 0:
                    return self._fix(context)
                if sn == 1 and on == 0:
                    return other._fix(context)
                return self._check_nans(other, context)

        c = self.copy_abs()._cmp(other.copy_abs())
        if c == 0:
            # equal magnitudes: break the tie with the total ordering
            c = self.compare_total(other)

        if c == -1:
            ans = self
        else:
            ans = other

        return ans._fix(context)
    def next_minus(self, context=None):
        """Returns the largest representable number smaller than itself."""
        if context is None:
            context = getcontext()

        ans = self._check_nans(context=context)
        if ans:
            return ans

        if self._isinfinity() == -1:
            return _NegativeInfinity
        if self._isinfinity() == 1:
            # largest finite number at this precision
            return _dec_from_triple(0, '9'*context.prec, context.Etop())

        context = context.copy()
        context._set_rounding(ROUND_FLOOR)
        context._ignore_all_flags()
        new_self = self._fix(context)
        if new_self != self:
            return new_self
        # subtract one ulp-below-Etiny quantum to step down
        return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
                            context)

    def next_plus(self, context=None):
        """Returns the smallest representable number larger than itself."""
        if context is None:
            context = getcontext()

        ans = self._check_nans(context=context)
        if ans:
            return ans

        if self._isinfinity() == 1:
            return _Infinity
        if self._isinfinity() == -1:
            # most negative finite number at this precision
            return _dec_from_triple(1, '9'*context.prec, context.Etop())

        context = context.copy()
        context._set_rounding(ROUND_CEILING)
        context._ignore_all_flags()
        new_self = self._fix(context)
        if new_self != self:
            return new_self
        # add one ulp-below-Etiny quantum to step up
        return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
                            context)

    def next_toward(self, other, context=None):
        """Returns the number closest to self, in the direction towards other.

        The result is the closest representable number to self
        (excluding self) that is in the direction towards other,
        unless both have the same value.  If the two operands are
        numerically equal, then the result is a copy of self with the
        sign set to be the same as the sign of other.
        """
        other = _convert_other(other, raiseit=True)

        if context is None:
            context = getcontext()

        ans = self._check_nans(other, context)
        if ans:
            return ans

        comparison = self._cmp(other)
        if comparison == 0:
            return self.copy_sign(other)

        if comparison == -1:
            ans = self.next_plus(context)
        else: # comparison == 1
            ans = self.next_minus(context)

        # decide which flags to raise using value of ans
        if ans._isinfinity():
            context._raise_error(Overflow,
                                 'Infinite result from next_toward',
                                 ans._sign)
            context._raise_error(Inexact)
            context._raise_error(Rounded)
        elif ans.adjusted() < context.Emin:
            context._raise_error(Underflow)
            context._raise_error(Subnormal)
            context._raise_error(Inexact)
            context._raise_error(Rounded)
            # if precision == 1 then we don't raise Clamped for a
            # result 0E-Etiny.
            if not ans:
                context._raise_error(Clamped)

        return ans

    def number_class(self, context=None):
        """Returns an indication of the class of self.

        The class is one of the following strings:
          sNaN
          NaN
          -Infinity
          -Normal
          -Subnormal
          -Zero
          +Zero
          +Subnormal
          +Normal
          +Infinity
        """
        if self.is_snan():
            return "sNaN"
        if self.is_qnan():
            return "NaN"
        inf = self._isinfinity()
        if inf == 1:
            return "+Infinity"
        if inf == -1:
            return "-Infinity"
        if self.is_zero():
            if self._sign:
                return "-Zero"
            else:
                return "+Zero"
        if context is None:
            context = getcontext()
        if self.is_subnormal(context=context):
            if self._sign:
                return "-Subnormal"
            else:
                return "+Subnormal"
        # just a normal, regular, boring number, :)
        if self._sign:
            return "-Normal"
        else:
            return "+Normal"

    def radix(self):
        """Just returns 10, as this is Decimal, :)"""
        return Decimal(10)

    def rotate(self, other, context=None):
        """Returns a rotated copy of self, value-of-other times."""
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        ans = self._check_nans(other, context)
        if ans:
            return ans

        # rotation count must be an integer in [-prec, prec]
        if other._exp != 0:
            return context._raise_error(InvalidOperation)
        if not (-context.prec <= int(other) <= context.prec):
            return context._raise_error(InvalidOperation)

        if self._isinfinity():
            return Decimal(self)

        # get values, pad if necessary
        torot = int(other)
        rotdig = self._int
        topad = context.prec - len(rotdig)
        if topad > 0:
            rotdig = '0'*topad + rotdig
        elif topad < 0:
            rotdig = rotdig[-topad:]

        # let's rotate!
        rotated = rotdig[torot:] + rotdig[:torot]
        return _dec_from_triple(self._sign,
                                rotated.lstrip('0') or '0', self._exp)
    def scaleb(self, other, context=None):
        """Returns self operand after adding the second value to its exp."""
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        ans = self._check_nans(other, context)
        if ans:
            return ans

        # scale must be an integer within the spec's bounds
        if other._exp != 0:
            return context._raise_error(InvalidOperation)
        liminf = -2 * (context.Emax + context.prec)
        limsup =  2 * (context.Emax + context.prec)
        if not (liminf <= int(other) <= limsup):
            return context._raise_error(InvalidOperation)

        if self._isinfinity():
            return Decimal(self)

        d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
        d = d._fix(context)
        return d

    def shift(self, other, context=None):
        """Returns a shifted copy of self, value-of-other times."""
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        ans = self._check_nans(other, context)
        if ans:
            return ans

        # shift count must be an integer in [-prec, prec]
        if other._exp != 0:
            return context._raise_error(InvalidOperation)
        if not (-context.prec <= int(other) <= context.prec):
            return context._raise_error(InvalidOperation)

        if self._isinfinity():
            return Decimal(self)

        # get values, pad if necessary
        torot = int(other)
        rotdig = self._int
        topad = context.prec - len(rotdig)
        if topad > 0:
            rotdig = '0'*topad + rotdig
        elif topad < 0:
            rotdig = rotdig[-topad:]

        # let's shift!
        if torot < 0:
            shifted = rotdig[:torot]
        else:
            shifted = rotdig + '0'*torot
            shifted = shifted[-context.prec:]

        return _dec_from_triple(self._sign,
                                shifted.lstrip('0') or '0', self._exp)

    # Support for pickling, copy, and deepcopy
    def __reduce__(self):
        return (self.__class__, (str(self),))

    def __copy__(self):
        if type(self) is Decimal:
            return self     # I'm immutable; therefore I am my own clone
        return self.__class__(str(self))

    def __deepcopy__(self, memo):
        if type(self) is Decimal:
            return self     # My components are also immutable
        return self.__class__(str(self))

    # PEP 3101 support.  the _localeconv keyword argument should be
    # considered private: it's provided for ease of testing only.
    def __format__(self, specifier, context=None, _localeconv=None):
        """Format a Decimal instance according to the given specifier.

        The specifier should be a standard format specifier, with the
        form described in PEP 3101.  Formatting types 'e', 'E', 'f',
        'F', 'g', 'G', 'n' and '%' are supported.  If the formatting
        type is omitted it defaults to 'g' or 'G', depending on the
        value of context.capitals.
        """

        # Note: PEP 3101 says that if the type is not present then
        # there should be at least one digit after the decimal point.
        # We take the liberty of ignoring this requirement for
        # Decimal---it's presumably there to make sure that
        # format(float, '') behaves similarly to str(float).
        if context is None:
            context = getcontext()

        spec = _parse_format_specifier(specifier, _localeconv=_localeconv)

        # special values don't care about the type or precision
        if self._is_special:
            sign = _format_sign(self._sign, spec)
            body = str(self.copy_abs())
            return _format_align(sign, body, spec)

        # a type of None defaults to 'g' or 'G', depending on context
        if spec['type'] is None:
            spec['type'] = ['g', 'G'][context.capitals]

        # if type is '%', adjust exponent of self accordingly
        if spec['type'] == '%':
            self = _dec_from_triple(self._sign, self._int, self._exp+2)

        # round if necessary, taking rounding mode from the context
        rounding = context.rounding
        precision = spec['precision']
        if precision is not None:
            if spec['type'] in 'eE':
                self = self._round(precision+1, rounding)
            elif spec['type'] in 'fF%':
                self = self._rescale(-precision, rounding)
            elif spec['type'] in 'gG' and len(self._int) > precision:
                self = self._round(precision, rounding)
        # special case: zeros with a positive exponent can't be
        # represented in fixed point; rescale them to 0e0.
        if not self and self._exp > 0 and spec['type'] in 'fF%':
            self = self._rescale(0, rounding)

        # figure out placement of the decimal point
        leftdigits = self._exp + len(self._int)
        if spec['type'] in 'eE':
            if not self and precision is not None:
                dotplace = 1 - precision
            else:
                dotplace = 1
        elif spec['type'] in 'fF%':
            dotplace = leftdigits
        elif spec['type'] in 'gG':
            if self._exp <= 0 and leftdigits > -6:
                dotplace = leftdigits
            else:
                dotplace = 1

        # find digits before and after decimal point, and get exponent
        if dotplace < 0:
            intpart = '0'
            fracpart = '0'*(-dotplace) + self._int
        elif dotplace > len(self._int):
            intpart = self._int + '0'*(dotplace-len(self._int))
            fracpart = ''
        else:
            intpart = self._int[:dotplace] or '0'
            fracpart = self._int[dotplace:]
        exp = leftdigits-dotplace

        # done with the decimal-specific stuff;  hand over the rest
        # of the formatting to the _format_number function
        return _format_number(self._sign, intpart, fracpart, exp, spec)

def _dec_from_triple(sign, coefficient, exponent, special=False):
    """Create a decimal instance directly, without any validation,
    normalization (e.g. removal of leading zeros) or argument
    conversion.

    This function is for *internal use only*.
    """

    self = object.__new__(Decimal)
    self._sign = sign
    self._int = coefficient
    self._exp = exponent
    self._is_special = special

    return self

# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
_numbers.Number.register(Decimal)

##### Context class #######################################################

class _ContextManager(object):
    """Context manager class to support localcontext().

      Sets a copy of the supplied context in __enter__() and restores
      the previous decimal context in __exit__()
    """
    def __init__(self, new_context):
        self.new_context = new_context.copy()
    def __enter__(self):
        self.saved_context = getcontext()
        setcontext(self.new_context)
        return self.new_context
    def __exit__(self, t, v, tb):
        setcontext(self.saved_context)
class Context(object):
    """Contains the context for a Decimal instance.

    Contains:
    prec - precision (for use in rounding, division, square roots..)
    rounding - rounding type (how you round)
    traps - If traps[exception] = 1, then the exception is
                    raised when it is caused.  Otherwise, a value is
                    substituted in.
    flags  - When an exception is caused, flags[exception] is set.
             (Whether or not the trap_enabler is set)
             Should be reset by user of Decimal instance.
    Emin -   Minimum exponent
    Emax -   Maximum exponent
    capitals -      If 1, 1*10^1 is printed as 1E+1.
                    If 0, printed as 1e1
    clamp -  If 1, change exponents if too high (Default 0)
    """

    def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
                       capitals=None, clamp=None, flags=None, traps=None,
                       _ignored_flags=None):
        # Set defaults; for everything except flags and _ignored_flags,
        # inherit from DefaultContext.
        try:
            dc = DefaultContext
        except NameError:
            # DefaultContext not defined yet: we are constructing
            # DefaultContext itself, so all keyword values are supplied.
            pass

        self.prec = prec if prec is not None else dc.prec
        self.rounding = rounding if rounding is not None else dc.rounding
        self.Emin = Emin if Emin is not None else dc.Emin
        self.Emax = Emax if Emax is not None else dc.Emax
        self.capitals = capitals if capitals is not None else dc.capitals
        self.clamp = clamp if clamp is not None else dc.clamp

        if _ignored_flags is None:
            self._ignored_flags = []
        else:
            self._ignored_flags = _ignored_flags

        if traps is None:
            self.traps = dc.traps.copy()
        elif not isinstance(traps, dict):
            # a list of signals: set exactly those, clear the rest
            self.traps = dict((s, int(s in traps)) for s in _signals + traps)
        else:
            self.traps = traps

        if flags is None:
            self.flags = dict.fromkeys(_signals, 0)
        elif not isinstance(flags, dict):
            # a list of signals: set exactly those, clear the rest
            self.flags = dict((s, int(s in flags)) for s in _signals + flags)
        else:
            self.flags = flags

    def _set_integer_check(self, name, value, vmin, vmax):
        # Validate an integer attribute against [vmin, vmax]; the string
        # sentinels '-inf'/'inf' mark an unbounded side.
        if not isinstance(value, int):
            raise TypeError("%s must be an integer" % name)
        if vmin == '-inf':
            if value > vmax:
                raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
        elif vmax == 'inf':
            if value < vmin:
                raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
        else:
            if value < vmin or value > vmax:
                raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value))
        return object.__setattr__(self, name, value)

    def _set_signal_dict(self, name, d):
        # Validate that d maps exactly the known signals before storing it.
        if not isinstance(d, dict):
            raise TypeError("%s must be a signal dict" % d)
        for key in d:
            if not key in _signals:
                raise KeyError("%s is not a valid signal dict" % d)
        for key in _signals:
            if not key in d:
                raise KeyError("%s is not a valid signal dict" % d)
        return object.__setattr__(self, name, d)

    def __setattr__(self, name, value):
        if name == 'prec':
            return self._set_integer_check(name, value, 1, 'inf')
        elif name == 'Emin':
            return self._set_integer_check(name, value, '-inf', 0)
        elif name == 'Emax':
            return self._set_integer_check(name, value, 0, 'inf')
        elif name == 'capitals':
            return self._set_integer_check(name, value, 0, 1)
        elif name == 'clamp':
            return self._set_integer_check(name, value, 0, 1)
        elif name == 'rounding':
            if not value in _rounding_modes:
                # raise TypeError even for strings to have consistency
                # among various implementations.
                raise TypeError("%s: invalid rounding mode" % value)
            return object.__setattr__(self, name, value)
        elif name == 'flags' or name == 'traps':
            return self._set_signal_dict(name, value)
        elif name == '_ignored_flags':
            return object.__setattr__(self, name, value)
        else:
            raise AttributeError(
                "'decimal.Context' object has no attribute '%s'" % name)

    def __delattr__(self, name):
        raise AttributeError("%s cannot be deleted" % name)

    # Support for pickling, copy, and deepcopy
    def __reduce__(self):
        flags = [sig for sig, v in self.flags.items() if v]
        traps = [sig for sig, v in self.traps.items() if v]
        return (self.__class__,
                (self.prec, self.rounding, self.Emin, self.Emax,
                 self.capitals, self.clamp, flags, traps))

    def __repr__(self):
        """Show the current context."""
        s = []
        s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
                 'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
                 'clamp=%(clamp)d'
                 % vars(self))
        names = [f.__name__ for f, v in self.flags.items() if v]
        s.append('flags=[' + ', '.join(names) + ']')
        names = [t.__name__ for t, v in self.traps.items() if v]
        s.append('traps=[' + ', '.join(names) + ']')
        return ', '.join(s) + ')'
    def clear_flags(self):
        """Reset all flags to zero"""
        for flag in self.flags:
            self.flags[flag] = 0

    def clear_traps(self):
        """Reset all traps to zero"""
        for flag in self.traps:
            self.traps[flag] = 0

    def _shallow_copy(self):
        """Returns a shallow copy from self."""
        nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
                     self.capitals, self.clamp, self.flags, self.traps,
                     self._ignored_flags)
        return nc

    def copy(self):
        """Returns a deep copy from self."""
        nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
                     self.capitals, self.clamp,
                     self.flags.copy(), self.traps.copy(),
                     self._ignored_flags)
        return nc
    __copy__ = copy

    def _raise_error(self, condition, explanation = None, *args):
        """Handles an error

        If the flag is in _ignored_flags, returns the default response.
        Otherwise, it sets the flag, then, if the corresponding
        trap_enabler is set, it reraises the exception.  Otherwise, it returns
        the default value after setting the flag.
        """
        error = _condition_map.get(condition, condition)
        if error in self._ignored_flags:
            # Don't touch the flag
            return error().handle(self, *args)

        self.flags[error] = 1
        if not self.traps[error]:
            # The errors define how to handle themselves.
            return condition().handle(self, *args)

        # Errors should only be risked on copies of the context
        # self._ignored_flags = []
        raise error(explanation)

    def _ignore_all_flags(self):
        """Ignore all flags, if they are raised"""
        return self._ignore_flags(*_signals)

    def _ignore_flags(self, *flags):
        """Ignore the flags, if they are raised"""
        # Do not mutate-- This way, copies of a context leave the original
        # alone.
        self._ignored_flags = (self._ignored_flags + list(flags))
        return list(flags)

    def _regard_flags(self, *flags):
        """Stop ignoring the flags, if they are raised"""
        if flags and isinstance(flags[0], (tuple,list)):
            flags = flags[0]
        for flag in flags:
            self._ignored_flags.remove(flag)

    # We inherit object.__hash__, so we must deny this explicitly
    __hash__ = None

    def Etiny(self):
        """Returns Etiny (= Emin - prec + 1)"""
        return int(self.Emin - self.prec + 1)

    def Etop(self):
        """Returns maximum exponent (= Emax - prec + 1)"""
        return int(self.Emax - self.prec + 1)

    def _set_rounding(self, type):
        """Sets the rounding type.

        Sets the rounding type, and returns the current (previous)
        rounding type.  Often used like:

        context = context.copy()
        # so you don't change the calling context
        # if an error occurs in the middle.
        rounding = context._set_rounding(ROUND_UP)
        val = self.__sub__(other, context=context)
        context._set_rounding(rounding)

        This will make it round up for that operation.
        """
        rounding = self.rounding
        self.rounding = type
        return rounding

    def create_decimal(self, num='0'):
        """Creates a new Decimal instance but using self as context.

        This method implements the to-number operation of the
        IBM Decimal specification."""

        if isinstance(num, str) and num != num.strip():
            return self._raise_error(ConversionSyntax,
                                     "no trailing or leading whitespace is "
                                     "permitted.")

        d = Decimal(num, context=self)
        if d._isnan() and len(d._int) > self.prec - self.clamp:
            return self._raise_error(ConversionSyntax,
                                     "diagnostic info too long in NaN")
        return d._fix(self)

    def create_decimal_from_float(self, f):
        """Creates a new Decimal instance from a float but rounding using self
        as the context.

        >>> context = Context(prec=5, rounding=ROUND_DOWN)
        >>> context.create_decimal_from_float(3.1415926535897932)
        Decimal('3.1415')
        >>> context = Context(prec=5, traps=[Inexact])
        >>> context.create_decimal_from_float(3.1415926535897932)
        Traceback (most recent call last):
            ...
        decimal.Inexact: None

        """
        d = Decimal.from_float(f)       # An exact conversion
        return d._fix(self)             # Apply the context rounding

    # Methods
    def abs(self, a):
        """Returns the absolute value of the operand.

        If the operand is negative, the result is the same as using the minus
        operation on the operand.  Otherwise, the result is the same as using
        the plus operation on the operand.

        >>> ExtendedContext.abs(Decimal('2.1'))
        Decimal('2.1')
        >>> ExtendedContext.abs(Decimal('-100'))
        Decimal('100')
        >>> ExtendedContext.abs(Decimal('101.5'))
        Decimal('101.5')
        >>> ExtendedContext.abs(Decimal('-101.5'))
        Decimal('101.5')
        >>> ExtendedContext.abs(-1)
        Decimal('1')
        """
        a = _convert_other(a, raiseit=True)
        return a.__abs__(context=self)

    def add(self, a, b):
        """Return the sum of the two operands.

        >>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
        Decimal('19.00')
        >>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
        Decimal('1.02E+4')
        >>> ExtendedContext.add(1, Decimal(2))
        Decimal('3')
        >>> ExtendedContext.add(Decimal(8), 5)
        Decimal('13')
        >>> ExtendedContext.add(5, 5)
        Decimal('10')
        """
        a = _convert_other(a, raiseit=True)
        r = a.__add__(b, context=self)
        if r is NotImplemented:
            raise TypeError("Unable to convert %s to Decimal" % b)
        else:
            return r

    def _apply(self, a):
        # Fix a to this context and return its string form.
        return str(a._fix(self))

    def canonical(self, a):
        """Returns the same Decimal object.

        As we do not have different encodings for the same number, the
        received object already is in its canonical form.

        >>> ExtendedContext.canonical(Decimal('2.50'))
        Decimal('2.50')
        """
        if not isinstance(a, Decimal):
            raise TypeError("canonical requires a Decimal as an argument.")
        return a.canonical()

    def compare(self, a, b):
        """Compares values numerically.

        If the signs of the operands differ, a value representing each operand
        ('-1' if the operand is less than zero, '0' if the operand is zero or
        negative zero, or '1' if the operand is greater than zero) is used in
        place of that operand for the comparison instead of the actual
        operand.

        The comparison is then effected by subtracting the second operand from
        the first and then returning a value according to the result of the
        subtraction:  '-1' if the result is less than zero, '0' if the result is
        zero or negative zero, or '1' if the result is greater than zero.

        >>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
        Decimal('-1')
        >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
        Decimal('0')
        >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
        Decimal('0')
        >>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
        Decimal('1')
        >>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
        Decimal('1')
        >>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
        Decimal('-1')
        >>> ExtendedContext.compare(1, 2)
        Decimal('-1')
        >>> ExtendedContext.compare(Decimal(1), 2)
        Decimal('-1')
        >>> ExtendedContext.compare(1, Decimal(2))
        Decimal('-1')
        """
        a = _convert_other(a, raiseit=True)
        return a.compare(b, context=self)
    def compare_signal(self, a, b):
        """Compares the values of the two operands numerically.

        It's pretty much like compare(), but all NaNs signal, with signaling
        NaNs taking precedence over quiet NaNs.

        >>> c = ExtendedContext
        >>> c.compare_signal(Decimal('2.1'), Decimal('3'))
        Decimal('-1')
        >>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
        Decimal('0')
        >>> c.flags[InvalidOperation] = 0
        >>> print(c.flags[InvalidOperation])
        0
        >>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
        Decimal('NaN')
        >>> print(c.flags[InvalidOperation])
        1
        >>> c.flags[InvalidOperation] = 0
        >>> print(c.flags[InvalidOperation])
        0
        >>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
        Decimal('NaN')
        >>> print(c.flags[InvalidOperation])
        1
        >>> c.compare_signal(-1, 2)
        Decimal('-1')
        >>> c.compare_signal(Decimal(-1), 2)
        Decimal('-1')
        >>> c.compare_signal(-1, Decimal(2))
        Decimal('-1')
        """
        a = _convert_other(a, raiseit=True)
        return a.compare_signal(b, context=self)

    def compare_total(self, a, b):
        """Compares two operands using their abstract representation.

        This is not like the standard compare, which use their numerical
        value. Note that a total ordering is defined for all possible abstract
        representations.

        >>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
        Decimal('-1')
        >>> ExtendedContext.compare_total(Decimal('-127'),  Decimal('12'))
        Decimal('-1')
        >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
        Decimal('-1')
        >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
        Decimal('0')
        >>> ExtendedContext.compare_total(Decimal('12.3'),  Decimal('12.300'))
        Decimal('1')
        >>> ExtendedContext.compare_total(Decimal('12.3'),  Decimal('NaN'))
        Decimal('-1')
        >>> ExtendedContext.compare_total(1, 2)
        Decimal('-1')
        >>> ExtendedContext.compare_total(Decimal(1), 2)
        Decimal('-1')
        >>> ExtendedContext.compare_total(1, Decimal(2))
        Decimal('-1')
        """
        a = _convert_other(a, raiseit=True)
        return a.compare_total(b)

    def compare_total_mag(self, a, b):
        """Compares two operands using their abstract representation ignoring sign.

        Like compare_total, but with operand's sign ignored and assumed to be 0.
        """
        a = _convert_other(a, raiseit=True)
        return a.compare_total_mag(b)

    def copy_abs(self, a):
        """Returns a copy of the operand with the sign set to 0.

        >>> ExtendedContext.copy_abs(Decimal('2.1'))
        Decimal('2.1')
        >>> ExtendedContext.copy_abs(Decimal('-100'))
        Decimal('100')
        >>> ExtendedContext.copy_abs(-1)
        Decimal('1')
        """
        a = _convert_other(a, raiseit=True)
        return a.copy_abs()

    def copy_decimal(self, a):
        """Returns a copy of the decimal object.

        >>> ExtendedContext.copy_decimal(Decimal('2.1'))
        Decimal('2.1')
        >>> ExtendedContext.copy_decimal(Decimal('-1.00'))
        Decimal('-1.00')
        >>> ExtendedContext.copy_decimal(1)
        Decimal('1')
        """
        a = _convert_other(a, raiseit=True)
        return Decimal(a)

    def copy_negate(self, a):
        """Returns a copy of the operand with the sign inverted.

        >>> ExtendedContext.copy_negate(Decimal('101.5'))
        Decimal('-101.5')
        >>> ExtendedContext.copy_negate(Decimal('-101.5'))
        Decimal('101.5')
        >>> ExtendedContext.copy_negate(1)
        Decimal('-1')
        """
        a = _convert_other(a, raiseit=True)
        return a.copy_negate()

    def copy_sign(self, a, b):
        """Copies the second operand's sign to the first one.

        In detail, it returns a copy of the first operand with the sign
        equal to the sign of the second operand.

        >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
        Decimal('1.50')
        >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
        Decimal('1.50')
        >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
        Decimal('-1.50')
        >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
        Decimal('-1.50')
        >>> ExtendedContext.copy_sign(1, -2)
        Decimal('-1')
        >>> ExtendedContext.copy_sign(Decimal(1), -2)
        Decimal('-1')
        >>> ExtendedContext.copy_sign(1, Decimal(-2))
        Decimal('-1')
        """
        a = _convert_other(a, raiseit=True)
        return a.copy_sign(b)

    def divide(self, a, b):
        """Decimal division in a specified context.

        >>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
        Decimal('0.333333333')
        >>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
        Decimal('0.666666667')
        >>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
        Decimal('2.5')
        >>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
        Decimal('0.1')
        >>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
        Decimal('1')
        >>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
        Decimal('4.00')
        >>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
        Decimal('1.20')
        >>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
        Decimal('10')
        >>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
        Decimal('1000')
        >>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
        Decimal('1.20E+6')
        >>> ExtendedContext.divide(5, 5)
        Decimal('1')
        >>> ExtendedContext.divide(Decimal(5), 5)
        Decimal('1')
        >>> ExtendedContext.divide(5, Decimal(5))
        Decimal('1')
        """
        a = _convert_other(a, raiseit=True)
        r = a.__truediv__(b, context=self)
        if r is NotImplemented:
            raise TypeError("Unable to convert %s to Decimal" % b)
        else:
            return r

    def divide_int(self, a, b):
        """Divides two numbers and returns the integer part of the result.

        >>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
        Decimal('0')
        >>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
        Decimal('3')
        >>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
        Decimal('3')
        >>> ExtendedContext.divide_int(10, 3)
        Decimal('3')
        >>> ExtendedContext.divide_int(Decimal(10), 3)
        Decimal('3')
        >>> ExtendedContext.divide_int(10, Decimal(3))
        Decimal('3')
        """
        a = _convert_other(a, raiseit=True)
        r = a.__floordiv__(b, context=self)
        if r is NotImplemented:
            raise TypeError("Unable to convert %s to Decimal" % b)
        else:
            return r

    def divmod(self, a, b):
        """Return (a // b, a % b).

        >>> ExtendedContext.divmod(Decimal(8), Decimal(3))
        (Decimal('2'), Decimal('2'))
        >>> ExtendedContext.divmod(Decimal(8), Decimal(4))
        (Decimal('2'), Decimal('0'))
        >>> ExtendedContext.divmod(8, 4)
        (Decimal('2'), Decimal('0'))
        >>> ExtendedContext.divmod(Decimal(8), 4)
        (Decimal('2'), Decimal('0'))
        >>> ExtendedContext.divmod(8, Decimal(4))
        (Decimal('2'), Decimal('0'))
        """
        a = _convert_other(a, raiseit=True)
        r = a.__divmod__(b, context=self)
        if r is NotImplemented:
            raise TypeError("Unable to convert %s to Decimal" % b)
        else:
            return r
    def exp(self, a):
        """Returns e ** a.

        >>> c = ExtendedContext.copy()
        >>> c.Emin = -999
        >>> c.Emax = 999
        >>> c.exp(Decimal('-Infinity'))
        Decimal('0')
        >>> c.exp(Decimal('-1'))
        Decimal('0.367879441')
        >>> c.exp(Decimal('0'))
        Decimal('1')
        >>> c.exp(Decimal('1'))
        Decimal('2.71828183')
        >>> c.exp(Decimal('0.693147181'))
        Decimal('2.00000000')
        >>> c.exp(Decimal('+Infinity'))
        Decimal('Infinity')
        >>> c.exp(10)
        Decimal('22026.4658')
        """
        a =_convert_other(a, raiseit=True)
        return a.exp(context=self)

    def fma(self, a, b, c):
        """Returns a multiplied by b, plus c.

        The first two operands are multiplied together, using multiply,
        the third operand is then added to the result of that
        multiplication, using add, all with only one final rounding.

        >>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
        Decimal('22')
        >>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
        Decimal('-8')
        >>> ExtendedContext.fma(Decimal('888565290'),
        ...                     Decimal('1557.96930'), Decimal('-86087.7578'))
        Decimal('1.38435736E+12')
        >>> ExtendedContext.fma(1, 3, 4)
        Decimal('7')
        >>> ExtendedContext.fma(1, Decimal(3), 4)
        Decimal('7')
        >>> ExtendedContext.fma(1, 3, Decimal(4))
        Decimal('7')
        """
        a = _convert_other(a, raiseit=True)
        return a.fma(b, c, context=self)

    def is_canonical(self, a):
        """Return True if the operand is canonical; otherwise return False.

        Currently, the encoding of a Decimal instance is always
        canonical, so this method returns True for any Decimal.

        >>> ExtendedContext.is_canonical(Decimal('2.50'))
        True
        """
        if not isinstance(a, Decimal):
            raise TypeError("is_canonical requires a Decimal as an argument.")
        return a.is_canonical()

    def is_finite(self, a):
        """Return True if the operand is finite; otherwise return False.

        A Decimal instance is considered finite if it is neither
        infinite nor a NaN.

        >>> ExtendedContext.is_finite(Decimal('2.50'))
        True
        >>> ExtendedContext.is_finite(Decimal('-0.3'))
        True
        >>> ExtendedContext.is_finite(Decimal('0'))
        True
        >>> ExtendedContext.is_finite(Decimal('Inf'))
        False
        >>> ExtendedContext.is_finite(Decimal('NaN'))
        False
        >>> ExtendedContext.is_finite(1)
        True
        """
        a = _convert_other(a, raiseit=True)
        return a.is_finite()

    def is_infinite(self, a):
        """Return True if the operand is infinite; otherwise return False.

        >>> ExtendedContext.is_infinite(Decimal('2.50'))
        False
        >>> ExtendedContext.is_infinite(Decimal('-Inf'))
        True
        >>> ExtendedContext.is_infinite(Decimal('NaN'))
        False
        >>> ExtendedContext.is_infinite(1)
        False
        """
        a = _convert_other(a, raiseit=True)
        return a.is_infinite()

    def is_nan(self, a):
        """Return True if the operand is a qNaN or sNaN; otherwise return
        False.

        >>> ExtendedContext.is_nan(Decimal('2.50'))
        False
        >>> ExtendedContext.is_nan(Decimal('NaN'))
        True
        >>> ExtendedContext.is_nan(Decimal('-sNaN'))
        True
        >>> ExtendedContext.is_nan(1)
        False
        """
        a = _convert_other(a, raiseit=True)
        return a.is_nan()

    def is_normal(self, a):
        """Return True if the operand is a normal number; otherwise return
        False.

        >>> c = ExtendedContext.copy()
        >>> c.Emin = -999
        >>> c.Emax = 999
        >>> c.is_normal(Decimal('2.50'))
        True
        >>> c.is_normal(Decimal('0.1E-999'))
        False
        >>> c.is_normal(Decimal('0.00'))
        False
        >>> c.is_normal(Decimal('-Inf'))
        False
        >>> c.is_normal(Decimal('NaN'))
        False
        >>> c.is_normal(1)
        True
        """
        a = _convert_other(a, raiseit=True)
        return a.is_normal(context=self)

    def is_qnan(self, a):
        """Return True if the operand is a quiet NaN; otherwise return False.

        >>> ExtendedContext.is_qnan(Decimal('2.50'))
        False
        >>> ExtendedContext.is_qnan(Decimal('NaN'))
        True
        >>> ExtendedContext.is_qnan(Decimal('sNaN'))
        False
        >>> ExtendedContext.is_qnan(1)
        False
        """
        a = _convert_other(a, raiseit=True)
        return a.is_qnan()

    def is_signed(self, a):
        """Return True if the operand is negative; otherwise return False.

        >>> ExtendedContext.is_signed(Decimal('2.50'))
        False
        >>> ExtendedContext.is_signed(Decimal('-12'))
        True
        >>> ExtendedContext.is_signed(Decimal('-0'))
        True
        >>> ExtendedContext.is_signed(8)
        False
        >>> ExtendedContext.is_signed(-8)
        True
        """
        a = _convert_other(a, raiseit=True)
        return a.is_signed()

    def is_snan(self, a):
        """Return True if the operand is a signaling NaN;
        otherwise return False.

        >>> ExtendedContext.is_snan(Decimal('2.50'))
        False
        >>> ExtendedContext.is_snan(Decimal('NaN'))
        False
        >>> ExtendedContext.is_snan(Decimal('sNaN'))
        True
        >>> ExtendedContext.is_snan(1)
        False
        """
        a = _convert_other(a, raiseit=True)
        return a.is_snan()

    def is_subnormal(self, a):
        """Return True if the operand is subnormal; otherwise return False.

        >>> c = ExtendedContext.copy()
        >>> c.Emin = -999
        >>> c.Emax = 999
        >>> c.is_subnormal(Decimal('2.50'))
        False
        >>> c.is_subnormal(Decimal('0.1E-999'))
        True
        >>> c.is_subnormal(Decimal('0.00'))
        False
        >>> c.is_subnormal(Decimal('-Inf'))
        False
        >>> c.is_subnormal(Decimal('NaN'))
        False
        >>> c.is_subnormal(1)
        False
        """
        a = _convert_other(a, raiseit=True)
        return a.is_subnormal(context=self)

    def is_zero(self, a):
        """Return True if the operand is a zero; otherwise return False.

        >>> ExtendedContext.is_zero(Decimal('0'))
        True
        >>> ExtendedContext.is_zero(Decimal('2.50'))
        False
        >>> ExtendedContext.is_zero(Decimal('-0E+2'))
        True
        >>> ExtendedContext.is_zero(1)
        False
        >>> ExtendedContext.is_zero(0)
        True
        """
        a = _convert_other(a, raiseit=True)
        return a.is_zero()

    def ln(self, a):
        """Returns the natural (base e) logarithm of the operand.

        >>> c = ExtendedContext.copy()
        >>> c.Emin = -999
        >>> c.Emax = 999
        >>> c.ln(Decimal('0'))
        Decimal('-Infinity')
        >>> c.ln(Decimal('1.000'))
        Decimal('0')
        >>> c.ln(Decimal('2.71828183'))
        Decimal('1.00000000')
        >>> c.ln(Decimal('10'))
        Decimal('2.30258509')
        >>> c.ln(Decimal('+Infinity'))
        Decimal('Infinity')
        >>> c.ln(1)
        Decimal('0')
        """
        a = _convert_other(a, raiseit=True)
        return a.ln(context=self)

    def log10(self, a):
        """Returns the base 10 logarithm of the operand.

        >>> c = ExtendedContext.copy()
        >>> c.Emin = -999
        >>> c.Emax = 999
        >>> c.log10(Decimal('0'))
        Decimal('-Infinity')
        >>> c.log10(Decimal('0.001'))
        Decimal('-3')
        >>> c.log10(Decimal('1.000'))
        Decimal('0')
        >>> c.log10(Decimal('2'))
        Decimal('0.301029996')
        >>> c.log10(Decimal('10'))
        Decimal('1')
        >>> c.log10(Decimal('70'))
        Decimal('1.84509804')
        >>> c.log10(Decimal('+Infinity'))
        Decimal('Infinity')
        >>> c.log10(0)
        Decimal('-Infinity')
        >>> c.log10(1)
        Decimal('0')
        """
        a = _convert_other(a, raiseit=True)
        return a.log10(context=self)

    def logb(self, a):
        """ Returns the exponent of the magnitude of the operand's MSD.

        The result is the integer which is the exponent of the magnitude
        of the most significant digit of the operand (as though the
        operand were truncated to a single digit while maintaining the
        value of that digit and without limiting the resulting exponent).

        >>> ExtendedContext.logb(Decimal('250'))
        Decimal('2')
        >>> ExtendedContext.logb(Decimal('2.50'))
        Decimal('0')
        >>> ExtendedContext.logb(Decimal('0.03'))
        Decimal('-2')
        >>> ExtendedContext.logb(Decimal('0'))
        Decimal('-Infinity')
        >>> ExtendedContext.logb(1)
        Decimal('0')
        >>> ExtendedContext.logb(10)
        Decimal('1')
        >>> ExtendedContext.logb(100)
        Decimal('2')
        """
        a = _convert_other(a, raiseit=True)
        return a.logb(context=self)

    def logical_and(self, a, b):
        """Applies the logical operation 'and' between each operand's digits.

        The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('0'), Decimal('1')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('1'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('1'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010')) Decimal('1000') >>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10')) Decimal('10') >>> ExtendedContext.logical_and(110, 1101) Decimal('100') >>> ExtendedContext.logical_and(Decimal(110), 1101) Decimal('100') >>> ExtendedContext.logical_and(110, Decimal(1101)) Decimal('100') """ a = _convert_other(a, raiseit=True) return a.logical_and(b, context=self) def logical_invert(self, a): """Invert all the digits in the operand. The operand must be a logical number. >>> ExtendedContext.logical_invert(Decimal('0')) Decimal('111111111') >>> ExtendedContext.logical_invert(Decimal('1')) Decimal('111111110') >>> ExtendedContext.logical_invert(Decimal('111111111')) Decimal('0') >>> ExtendedContext.logical_invert(Decimal('101010101')) Decimal('10101010') >>> ExtendedContext.logical_invert(1101) Decimal('111110010') """ a = _convert_other(a, raiseit=True) return a.logical_invert(context=self) def logical_or(self, a, b): """Applies the logical operation 'or' between each operand's digits. The operands must be both logical numbers. 
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_or(Decimal('0'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_or(Decimal('1'), Decimal('0')) Decimal('1') >>> ExtendedContext.logical_or(Decimal('1'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010')) Decimal('1110') >>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10')) Decimal('1110') >>> ExtendedContext.logical_or(110, 1101) Decimal('1111') >>> ExtendedContext.logical_or(Decimal(110), 1101) Decimal('1111') >>> ExtendedContext.logical_or(110, Decimal(1101)) Decimal('1111') """ a = _convert_other(a, raiseit=True) return a.logical_or(b, context=self) def logical_xor(self, a, b): """Applies the logical operation 'xor' between each operand's digits. The operands must be both logical numbers. >>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0')) Decimal('1') >>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1')) Decimal('0') >>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010')) Decimal('110') >>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10')) Decimal('1101') >>> ExtendedContext.logical_xor(110, 1101) Decimal('1011') >>> ExtendedContext.logical_xor(Decimal(110), 1101) Decimal('1011') >>> ExtendedContext.logical_xor(110, Decimal(1101)) Decimal('1011') """ a = _convert_other(a, raiseit=True) return a.logical_xor(b, context=self) def max(self, a, b): """max compares two values numerically and returns the maximum. If either operand is a NaN then the general rules apply. Otherwise, the operands are compared as though by the compare operation. If they are numerically equal then the left-hand operand is chosen as the result. Otherwise the maximum (closer to positive infinity) of the two operands is chosen as the result. 
>>> ExtendedContext.max(Decimal('3'), Decimal('2')) Decimal('3') >>> ExtendedContext.max(Decimal('-10'), Decimal('3')) Decimal('3') >>> ExtendedContext.max(Decimal('1.0'), Decimal('1')) Decimal('1') >>> ExtendedContext.max(Decimal('7'), Decimal('NaN')) Decimal('7') >>> ExtendedContext.max(1, 2) Decimal('2') >>> ExtendedContext.max(Decimal(1), 2) Decimal('2') >>> ExtendedContext.max(1, Decimal(2)) Decimal('2') """ a = _convert_other(a, raiseit=True) return a.max(b, context=self) def max_mag(self, a, b): """Compares the values numerically with their sign ignored. >>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN')) Decimal('7') >>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10')) Decimal('-10') >>> ExtendedContext.max_mag(1, -2) Decimal('-2') >>> ExtendedContext.max_mag(Decimal(1), -2) Decimal('-2') >>> ExtendedContext.max_mag(1, Decimal(-2)) Decimal('-2') """ a = _convert_other(a, raiseit=True) return a.max_mag(b, context=self) def min(self, a, b): """min compares two values numerically and returns the minimum. If either operand is a NaN then the general rules apply. Otherwise, the operands are compared as though by the compare operation. If they are numerically equal then the left-hand operand is chosen as the result. Otherwise the minimum (closer to negative infinity) of the two operands is chosen as the result. >>> ExtendedContext.min(Decimal('3'), Decimal('2')) Decimal('2') >>> ExtendedContext.min(Decimal('-10'), Decimal('3')) Decimal('-10') >>> ExtendedContext.min(Decimal('1.0'), Decimal('1')) Decimal('1.0') >>> ExtendedContext.min(Decimal('7'), Decimal('NaN')) Decimal('7') >>> ExtendedContext.min(1, 2) Decimal('1') >>> ExtendedContext.min(Decimal(1), 2) Decimal('1') >>> ExtendedContext.min(1, Decimal(29)) Decimal('1') """ a = _convert_other(a, raiseit=True) return a.min(b, context=self) def min_mag(self, a, b): """Compares the values numerically with their sign ignored. 
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2')) Decimal('-2') >>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN')) Decimal('-3') >>> ExtendedContext.min_mag(1, -2) Decimal('1') >>> ExtendedContext.min_mag(Decimal(1), -2) Decimal('1') >>> ExtendedContext.min_mag(1, Decimal(-2)) Decimal('1') """ a = _convert_other(a, raiseit=True) return a.min_mag(b, context=self) def minus(self, a): """Minus corresponds to unary prefix minus in Python. The operation is evaluated using the same rules as subtract; the operation minus(a) is calculated as subtract('0', a) where the '0' has the same exponent as the operand. >>> ExtendedContext.minus(Decimal('1.3')) Decimal('-1.3') >>> ExtendedContext.minus(Decimal('-1.3')) Decimal('1.3') >>> ExtendedContext.minus(1) Decimal('-1') """ a = _convert_other(a, raiseit=True) return a.__neg__(context=self) def multiply(self, a, b): """multiply multiplies two operands. If either operand is a special value then the general rules apply. Otherwise, the operands are multiplied together ('long multiplication'), resulting in a number which may be as long as the sum of the lengths of the two operands. >>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3')) Decimal('3.60') >>> ExtendedContext.multiply(Decimal('7'), Decimal('3')) Decimal('21') >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8')) Decimal('0.72') >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0')) Decimal('-0.0') >>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321')) Decimal('4.28135971E+11') >>> ExtendedContext.multiply(7, 7) Decimal('49') >>> ExtendedContext.multiply(Decimal(7), 7) Decimal('49') >>> ExtendedContext.multiply(7, Decimal(7)) Decimal('49') """ a = _convert_other(a, raiseit=True) r = a.__mul__(b, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r def next_minus(self, a): """Returns the largest representable number smaller than a. 
>>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> ExtendedContext.next_minus(Decimal('1')) Decimal('0.999999999') >>> c.next_minus(Decimal('1E-1007')) Decimal('0E-1007') >>> ExtendedContext.next_minus(Decimal('-1.00000003')) Decimal('-1.00000004') >>> c.next_minus(Decimal('Infinity')) Decimal('9.99999999E+999') >>> c.next_minus(1) Decimal('0.999999999') """ a = _convert_other(a, raiseit=True) return a.next_minus(context=self) def next_plus(self, a): """Returns the smallest representable number larger than a. >>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> ExtendedContext.next_plus(Decimal('1')) Decimal('1.00000001') >>> c.next_plus(Decimal('-1E-1007')) Decimal('-0E-1007') >>> ExtendedContext.next_plus(Decimal('-1.00000003')) Decimal('-1.00000002') >>> c.next_plus(Decimal('-Infinity')) Decimal('-9.99999999E+999') >>> c.next_plus(1) Decimal('1.00000001') """ a = _convert_other(a, raiseit=True) return a.next_plus(context=self) def next_toward(self, a, b): """Returns the number closest to a, in direction towards b. The result is the closest representable number from the first operand (but not the first operand) that is in the direction towards the second operand, unless the operands have the same value. 
>>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.next_toward(Decimal('1'), Decimal('2')) Decimal('1.00000001') >>> c.next_toward(Decimal('-1E-1007'), Decimal('1')) Decimal('-0E-1007') >>> c.next_toward(Decimal('-1.00000003'), Decimal('0')) Decimal('-1.00000002') >>> c.next_toward(Decimal('1'), Decimal('0')) Decimal('0.999999999') >>> c.next_toward(Decimal('1E-1007'), Decimal('-100')) Decimal('0E-1007') >>> c.next_toward(Decimal('-1.00000003'), Decimal('-10')) Decimal('-1.00000004') >>> c.next_toward(Decimal('0.00'), Decimal('-0.0000')) Decimal('-0.00') >>> c.next_toward(0, 1) Decimal('1E-1007') >>> c.next_toward(Decimal(0), 1) Decimal('1E-1007') >>> c.next_toward(0, Decimal(1)) Decimal('1E-1007') """ a = _convert_other(a, raiseit=True) return a.next_toward(b, context=self) def normalize(self, a): """normalize reduces an operand to its simplest form. Essentially a plus operation with all trailing zeros removed from the result. >>> ExtendedContext.normalize(Decimal('2.1')) Decimal('2.1') >>> ExtendedContext.normalize(Decimal('-2.0')) Decimal('-2') >>> ExtendedContext.normalize(Decimal('1.200')) Decimal('1.2') >>> ExtendedContext.normalize(Decimal('-120')) Decimal('-1.2E+2') >>> ExtendedContext.normalize(Decimal('120.00')) Decimal('1.2E+2') >>> ExtendedContext.normalize(Decimal('0.00')) Decimal('0') >>> ExtendedContext.normalize(6) Decimal('6') """ a = _convert_other(a, raiseit=True) return a.normalize(context=self) def number_class(self, a): """Returns an indication of the class of the operand. 
The class is one of the following strings: -sNaN -NaN -Infinity -Normal -Subnormal -Zero +Zero +Subnormal +Normal +Infinity >>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.number_class(Decimal('Infinity')) '+Infinity' >>> c.number_class(Decimal('1E-10')) '+Normal' >>> c.number_class(Decimal('2.50')) '+Normal' >>> c.number_class(Decimal('0.1E-999')) '+Subnormal' >>> c.number_class(Decimal('0')) '+Zero' >>> c.number_class(Decimal('-0')) '-Zero' >>> c.number_class(Decimal('-0.1E-999')) '-Subnormal' >>> c.number_class(Decimal('-1E-10')) '-Normal' >>> c.number_class(Decimal('-2.50')) '-Normal' >>> c.number_class(Decimal('-Infinity')) '-Infinity' >>> c.number_class(Decimal('NaN')) 'NaN' >>> c.number_class(Decimal('-NaN')) 'NaN' >>> c.number_class(Decimal('sNaN')) 'sNaN' >>> c.number_class(123) '+Normal' """ a = _convert_other(a, raiseit=True) return a.number_class(context=self) def plus(self, a): """Plus corresponds to unary prefix plus in Python. The operation is evaluated using the same rules as add; the operation plus(a) is calculated as add('0', a) where the '0' has the same exponent as the operand. >>> ExtendedContext.plus(Decimal('1.3')) Decimal('1.3') >>> ExtendedContext.plus(Decimal('-1.3')) Decimal('-1.3') >>> ExtendedContext.plus(-1) Decimal('-1') """ a = _convert_other(a, raiseit=True) return a.__pos__(context=self) def power(self, a, b, modulo=None): """Raises a to the power of b, to modulo if given. With two arguments, compute a**b. If a is negative then b must be integral. The result will be inexact unless b is integral and the result is finite and can be expressed exactly in 'precision' digits. With three arguments, compute (a**b) % modulo. 
For the three argument form, the following restrictions on the arguments hold: - all three arguments must be integral - b must be nonnegative - at least one of a or b must be nonzero - modulo must be nonzero and have at most 'precision' digits The result of pow(a, b, modulo) is identical to the result that would be obtained by computing (a**b) % modulo with unbounded precision, but is computed more efficiently. It is always exact. >>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.power(Decimal('2'), Decimal('3')) Decimal('8') >>> c.power(Decimal('-2'), Decimal('3')) Decimal('-8') >>> c.power(Decimal('2'), Decimal('-3')) Decimal('0.125') >>> c.power(Decimal('1.7'), Decimal('8')) Decimal('69.7575744') >>> c.power(Decimal('10'), Decimal('0.301029996')) Decimal('2.00000000') >>> c.power(Decimal('Infinity'), Decimal('-1')) Decimal('0') >>> c.power(Decimal('Infinity'), Decimal('0')) Decimal('1') >>> c.power(Decimal('Infinity'), Decimal('1')) Decimal('Infinity') >>> c.power(Decimal('-Infinity'), Decimal('-1')) Decimal('-0') >>> c.power(Decimal('-Infinity'), Decimal('0')) Decimal('1') >>> c.power(Decimal('-Infinity'), Decimal('1')) Decimal('-Infinity') >>> c.power(Decimal('-Infinity'), Decimal('2')) Decimal('Infinity') >>> c.power(Decimal('0'), Decimal('0')) Decimal('NaN') >>> c.power(Decimal('3'), Decimal('7'), Decimal('16')) Decimal('11') >>> c.power(Decimal('-3'), Decimal('7'), Decimal('16')) Decimal('-11') >>> c.power(Decimal('-3'), Decimal('8'), Decimal('16')) Decimal('1') >>> c.power(Decimal('3'), Decimal('7'), Decimal('-16')) Decimal('11') >>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789')) Decimal('11729830') >>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729')) Decimal('-0') >>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537')) Decimal('1') >>> ExtendedContext.power(7, 7) Decimal('823543') >>> ExtendedContext.power(Decimal(7), 7) Decimal('823543') >>> ExtendedContext.power(7, Decimal(7), 2) Decimal('1') """ a 
= _convert_other(a, raiseit=True) r = a.__pow__(b, modulo, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r def quantize(self, a, b): """Returns a value equal to 'a' (rounded), having the exponent of 'b'. The coefficient of the result is derived from that of the left-hand operand. It may be rounded using the current rounding setting (if the exponent is being increased), multiplied by a positive power of ten (if the exponent is being decreased), or is unchanged (if the exponent is already equal to that of the right-hand operand). Unlike other operations, if the length of the coefficient after the quantize operation would be greater than precision then an Invalid operation condition is raised. This guarantees that, unless there is an error condition, the exponent of the result of a quantize is always equal to that of the right-hand operand. Also unlike other operations, quantize will never raise Underflow, even if the result is subnormal and inexact. 
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001')) Decimal('2.170') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01')) Decimal('2.17') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1')) Decimal('2.2') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0')) Decimal('2') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1')) Decimal('0E+1') >>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity')) Decimal('-Infinity') >>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity')) Decimal('NaN') >>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1')) Decimal('-0') >>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5')) Decimal('-0E+5') >>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2')) Decimal('NaN') >>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2')) Decimal('NaN') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1')) Decimal('217.0') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0')) Decimal('217') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1')) Decimal('2.2E+2') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2')) Decimal('2E+2') >>> ExtendedContext.quantize(1, 2) Decimal('1') >>> ExtendedContext.quantize(Decimal(1), 2) Decimal('1') >>> ExtendedContext.quantize(1, Decimal(2)) Decimal('1') """ a = _convert_other(a, raiseit=True) return a.quantize(b, context=self) def radix(self): """Just returns 10, as this is Decimal, :) >>> ExtendedContext.radix() Decimal('10') """ return Decimal(10) def remainder(self, a, b): """Returns the remainder from integer division. The result is the residue of the dividend after the operation of calculating integer division as described for divide-integer, rounded to precision digits if necessary. The sign of the result, if non-zero, is the same as that of the original dividend. 
This operation will fail under the same conditions as integer division (that is, if integer division on the same two operands would fail, the remainder cannot be calculated). >>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3')) Decimal('2.1') >>> ExtendedContext.remainder(Decimal('10'), Decimal('3')) Decimal('1') >>> ExtendedContext.remainder(Decimal('-10'), Decimal('3')) Decimal('-1') >>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1')) Decimal('0.2') >>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3')) Decimal('0.1') >>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3')) Decimal('1.0') >>> ExtendedContext.remainder(22, 6) Decimal('4') >>> ExtendedContext.remainder(Decimal(22), 6) Decimal('4') >>> ExtendedContext.remainder(22, Decimal(6)) Decimal('4') """ a = _convert_other(a, raiseit=True) r = a.__mod__(b, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r def remainder_near(self, a, b): """Returns to be "a - b * n", where n is the integer nearest the exact value of "x / b" (if two integers are equally near then the even one is chosen). If the result is equal to 0 then its sign will be the sign of a. This operation will fail under the same conditions as integer division (that is, if integer division on the same two operands would fail, the remainder cannot be calculated). 
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3')) Decimal('-0.9') >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6')) Decimal('-2') >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3')) Decimal('1') >>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3')) Decimal('-1') >>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1')) Decimal('0.2') >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3')) Decimal('0.1') >>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3')) Decimal('-0.3') >>> ExtendedContext.remainder_near(3, 11) Decimal('3') >>> ExtendedContext.remainder_near(Decimal(3), 11) Decimal('3') >>> ExtendedContext.remainder_near(3, Decimal(11)) Decimal('3') """ a = _convert_other(a, raiseit=True) return a.remainder_near(b, context=self) def rotate(self, a, b): """Returns a rotated copy of a, b times. The coefficient of the result is a rotated copy of the digits in the coefficient of the first operand. The number of places of rotation is taken from the absolute value of the second operand, with the rotation being to the left if the second operand is positive or to the right otherwise. >>> ExtendedContext.rotate(Decimal('34'), Decimal('8')) Decimal('400000003') >>> ExtendedContext.rotate(Decimal('12'), Decimal('9')) Decimal('12') >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2')) Decimal('891234567') >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0')) Decimal('123456789') >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2')) Decimal('345678912') >>> ExtendedContext.rotate(1333333, 1) Decimal('13333330') >>> ExtendedContext.rotate(Decimal(1333333), 1) Decimal('13333330') >>> ExtendedContext.rotate(1333333, Decimal(1)) Decimal('13333330') """ a = _convert_other(a, raiseit=True) return a.rotate(b, context=self) def same_quantum(self, a, b): """Returns True if the two operands have the same exponent. 
The result is never affected by either the sign or the coefficient of either operand. >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001')) False >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01')) True >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1')) False >>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf')) True >>> ExtendedContext.same_quantum(10000, -1) True >>> ExtendedContext.same_quantum(Decimal(10000), -1) True >>> ExtendedContext.same_quantum(10000, Decimal(-1)) True """ a = _convert_other(a, raiseit=True) return a.same_quantum(b) def scaleb (self, a, b): """Returns the first operand after adding the second value its exp. >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2')) Decimal('0.0750') >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0')) Decimal('7.50') >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3')) Decimal('7.50E+3') >>> ExtendedContext.scaleb(1, 4) Decimal('1E+4') >>> ExtendedContext.scaleb(Decimal(1), 4) Decimal('1E+4') >>> ExtendedContext.scaleb(1, Decimal(4)) Decimal('1E+4') """ a = _convert_other(a, raiseit=True) return a.scaleb(b, context=self) def shift(self, a, b): """Returns a shifted copy of a, b times. The coefficient of the result is a shifted copy of the digits in the coefficient of the first operand. The number of places to shift is taken from the absolute value of the second operand, with the shift being to the left if the second operand is positive or to the right otherwise. Digits shifted into the coefficient are zeros. 
>>> ExtendedContext.shift(Decimal('34'), Decimal('8')) Decimal('400000000') >>> ExtendedContext.shift(Decimal('12'), Decimal('9')) Decimal('0') >>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2')) Decimal('1234567') >>> ExtendedContext.shift(Decimal('123456789'), Decimal('0')) Decimal('123456789') >>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2')) Decimal('345678900') >>> ExtendedContext.shift(88888888, 2) Decimal('888888800') >>> ExtendedContext.shift(Decimal(88888888), 2) Decimal('888888800') >>> ExtendedContext.shift(88888888, Decimal(2)) Decimal('888888800') """ a = _convert_other(a, raiseit=True) return a.shift(b, context=self) def sqrt(self, a): """Square root of a non-negative number to context precision. If the result must be inexact, it is rounded using the round-half-even algorithm. >>> ExtendedContext.sqrt(Decimal('0')) Decimal('0') >>> ExtendedContext.sqrt(Decimal('-0')) Decimal('-0') >>> ExtendedContext.sqrt(Decimal('0.39')) Decimal('0.624499800') >>> ExtendedContext.sqrt(Decimal('100')) Decimal('10') >>> ExtendedContext.sqrt(Decimal('1')) Decimal('1') >>> ExtendedContext.sqrt(Decimal('1.0')) Decimal('1.0') >>> ExtendedContext.sqrt(Decimal('1.00')) Decimal('1.0') >>> ExtendedContext.sqrt(Decimal('7')) Decimal('2.64575131') >>> ExtendedContext.sqrt(Decimal('10')) Decimal('3.16227766') >>> ExtendedContext.sqrt(2) Decimal('1.41421356') >>> ExtendedContext.prec 9 """ a = _convert_other(a, raiseit=True) return a.sqrt(context=self) def subtract(self, a, b): """Return the difference between the two operands. 
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07')) Decimal('0.23') >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30')) Decimal('0.00') >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07')) Decimal('-0.77') >>> ExtendedContext.subtract(8, 5) Decimal('3') >>> ExtendedContext.subtract(Decimal(8), 5) Decimal('3') >>> ExtendedContext.subtract(8, Decimal(5)) Decimal('3') """ a = _convert_other(a, raiseit=True) r = a.__sub__(b, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r def to_eng_string(self, a): """Converts a number to a string, using scientific notation. The operation is not affected by the context. """ a = _convert_other(a, raiseit=True) return a.to_eng_string(context=self) def to_sci_string(self, a): """Converts a number to a string, using scientific notation. The operation is not affected by the context. """ a = _convert_other(a, raiseit=True) return a.__str__(context=self) def to_integral_exact(self, a): """Rounds to an integer. When the operand has a negative exponent, the result is the same as using the quantize() operation using the given operand as the left-hand-operand, 1E+0 as the right-hand-operand, and the precision of the operand as the precision setting; Inexact and Rounded flags are allowed in this operation. The rounding mode is taken from the context. 
>>> ExtendedContext.to_integral_exact(Decimal('2.1')) Decimal('2') >>> ExtendedContext.to_integral_exact(Decimal('100')) Decimal('100') >>> ExtendedContext.to_integral_exact(Decimal('100.0')) Decimal('100') >>> ExtendedContext.to_integral_exact(Decimal('101.5')) Decimal('102') >>> ExtendedContext.to_integral_exact(Decimal('-101.5')) Decimal('-102') >>> ExtendedContext.to_integral_exact(Decimal('10E+5')) Decimal('1.0E+6') >>> ExtendedContext.to_integral_exact(Decimal('7.89E+77')) Decimal('7.89E+77') >>> ExtendedContext.to_integral_exact(Decimal('-Inf')) Decimal('-Infinity') """ a = _convert_other(a, raiseit=True) return a.to_integral_exact(context=self) def to_integral_value(self, a): """Rounds to an integer. When the operand has a negative exponent, the result is the same as using the quantize() operation using the given operand as the left-hand-operand, 1E+0 as the right-hand-operand, and the precision of the operand as the precision setting, except that no flags will be set. The rounding mode is taken from the context. 
        >>> ExtendedContext.to_integral_value(Decimal('2.1'))
        Decimal('2')
        >>> ExtendedContext.to_integral_value(Decimal('100'))
        Decimal('100')
        >>> ExtendedContext.to_integral_value(Decimal('100.0'))
        Decimal('100')
        >>> ExtendedContext.to_integral_value(Decimal('101.5'))
        Decimal('102')
        >>> ExtendedContext.to_integral_value(Decimal('-101.5'))
        Decimal('-102')
        >>> ExtendedContext.to_integral_value(Decimal('10E+5'))
        Decimal('1.0E+6')
        >>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
        Decimal('7.89E+77')
        >>> ExtendedContext.to_integral_value(Decimal('-Inf'))
        Decimal('-Infinity')
        """
        a = _convert_other(a, raiseit=True)
        return a.to_integral_value(context=self)

    # the method name changed, but we provide also the old one, for compatibility
    to_integral = to_integral_value

class _WorkRep(object):
    # Mutable scratch representation of a Decimal used internally by the
    # arithmetic routines: the value represented is sign * int * 10**exp.
    # Cheaper to manipulate than an immutable Decimal during computation.
    __slots__ = ('sign','int','exp')
    # sign: 0 or 1
    # int: int
    # exp: None, int, or string

    def __init__(self, value=None):
        # value may be None (an empty placeholder), a Decimal (unpack its
        # internal fields), or a (sign, int, exp) triple.
        if value is None:
            self.sign = None
            self.int = 0
            self.exp = None
        elif isinstance(value, Decimal):
            self.sign = value._sign
            self.int = int(value._int)
            self.exp = value._exp
        else:
            # assert isinstance(value, tuple)
            self.sign = value[0]
            self.int = value[1]
            self.exp = value[2]

    def __repr__(self):
        return "(%r, %r, %r)" % (self.sign, self.int, self.exp)

    __str__ = __repr__

def _normalize(op1, op2, prec = 0):
    """Normalizes op1, op2 to have the same exp and length of coefficient.

    Done during addition.
    """
    # 'tmp' gets the operand with the larger exponent, 'other' the smaller;
    # tmp will be padded with zeros down to other's exponent below.
    if op1.exp < op2.exp:
        tmp = op2
        other = op1
    else:
        tmp = op1
        other = op2

    # Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
    # Then adding 10**exp to tmp has the same effect (after rounding)
    # as adding any positive quantity smaller than 10**exp; similarly
    # for subtraction.  So if other is smaller than 10**exp we replace
    # it with 10**exp.  This avoids tmp.exp - other.exp getting too large.
    tmp_len = len(str(tmp.int))
    other_len = len(str(other.int))
    exp = tmp.exp + min(-1, tmp_len - prec - 2)
    if other_len + other.exp - 1 < exp:
        other.int = 1
        other.exp = exp

    # Pad tmp's coefficient with zeros so both operands share one exponent.
    tmp.int *= 10 ** (tmp.exp - other.exp)
    tmp.exp = other.exp
    return op1, op2

##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####

# Number of bits needed to represent an integer; used by the power/log code.
_nbits = int.bit_length

def _decimal_lshift_exact(n, e):
    """ Given integers n and e, return n * 10**e if it's an integer, else None.

    The computation is designed to avoid computing large powers of 10
    unnecessarily.

    >>> _decimal_lshift_exact(3, 4)
    30000
    >>> _decimal_lshift_exact(300, -999999999)  # returns None

    """
    if n == 0:
        return 0
    elif e >= 0:
        return n * 10**e
    else:
        # val_n = largest power of 10 dividing n.
        str_n = str(abs(n))
        val_n = len(str_n) - len(str_n.rstrip('0'))
        # Dividing by more than val_n trailing-zero digits would discard
        # nonzero digits, so the result would not be an integer.
        return None if val_n < -e else n // 10**-e

def _sqrt_nearest(n, a):
    """Closest integer to the square root of the positive integer n.  a is
    an initial approximation to the square root.  Any positive integer
    will do for a, but the closer a is to the square root of n the
    faster convergence will be.

    """
    if n <= 0 or a <= 0:
        raise ValueError("Both arguments to _sqrt_nearest should be positive.")

    # Newton's iteration in pure integer arithmetic.  Note that by operator
    # precedence 'a--n//a>>1' parses as (a - (-n//a)) >> 1, i.e. the
    # floor of (a + ceil(n/a)) / 2.
    b=0
    while a != b:
        b, a = a, a--n//a>>1
    return a

def _rshift_nearest(x, shift):
    """Given an integer x and a nonnegative integer shift, return closest
    integer to x / 2**shift; use round-to-even in case of a tie.

    """
    b, q = 1 << shift, x >> shift
    # q = floor(x / 2**shift); round up when the discarded bits exceed half,
    # or equal exactly half with q odd (round-half-even).
    return q + (2*(x & (b-1)) + (q&1) > b)

def _div_nearest(a, b):
    """Closest integer to a/b, a and b positive integers; rounds to even
    in the case of a tie.

    """
    q, r = divmod(a, b)
    # Same round-half-even trick as _rshift_nearest, in base b.
    return q + (2*r + (q&1) > b)

def _ilog(x, M, L = 8):
    """Integer approximation to M*log(x/M), with absolute error boundable
    in terms only of x/M.

    Given positive integers x and M, return an integer approximation to
    M * log(x/M).  For L = 8 and 0.1 <= x/M <= 10 the difference
    between the approximation and the exact result is at most 22.
For L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In both cases these are upper bounds on the error; it will usually be much smaller.""" # The basic algorithm is the following: let log1p be the function # log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use # the reduction # # log1p(y) = 2*log1p(y/(1+sqrt(1+y))) # # repeatedly until the argument to log1p is small (< 2**-L in # absolute value). For small y we can use the Taylor series # expansion # # log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T # # truncating at T such that y**T is small enough. The whole # computation is carried out in a form of fixed-point arithmetic, # with a real number z being represented by an integer # approximation to z*M. To avoid loss of precision, the y below # is actually an integer approximation to 2**R*y*M, where R is the # number of reductions performed so far. y = x-M # argument reduction; R = number of reductions performed R = 0 while (R <= L and abs(y) << L-R >= M or R > L and abs(y) >> R-L >= M): y = _div_nearest((M*y) << 1, M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M)) R += 1 # Taylor series with T terms T = -int(-10*len(str(M))//(3*L)) yshift = _rshift_nearest(y, R) w = _div_nearest(M, T) for k in range(T-1, 0, -1): w = _div_nearest(M, k) - _div_nearest(yshift*w, M) return _div_nearest(w*y, M) def _dlog10(c, e, p): """Given integers c, e and p with c > 0, p >= 0, compute an integer approximation to 10**p * log10(c*10**e), with an absolute error of at most 1. Assumes that c*10**e is not exactly 1.""" # increase precision by 2; compensate for this by dividing # final result by 100 p += 2 # write c*10**e as d*10**f with either: # f >= 0 and 1 <= d <= 10, or # f <= 0 and 0.1 <= d <= 1. 
# Thus for c*10**e close to 1, f = 0 l = len(str(c)) f = e+l - (e+l >= 1) if p > 0: M = 10**p k = e+p-f if k >= 0: c *= 10**k else: c = _div_nearest(c, 10**-k) log_d = _ilog(c, M) # error < 5 + 22 = 27 log_10 = _log10_digits(p) # error < 1 log_d = _div_nearest(log_d*M, log_10) log_tenpower = f*M # exact else: log_d = 0 # error < 2.31 log_tenpower = _div_nearest(f, 10**-p) # error < 0.5 return _div_nearest(log_tenpower+log_d, 100) def _dlog(c, e, p): """Given integers c, e and p with c > 0, compute an integer approximation to 10**p * log(c*10**e), with an absolute error of at most 1. Assumes that c*10**e is not exactly 1.""" # Increase precision by 2. The precision increase is compensated # for at the end with a division by 100. p += 2 # rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10, # or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e) # as 10**p * log(d) + 10**p*f * log(10). l = len(str(c)) f = e+l - (e+l >= 1) # compute approximation to 10**p*log(d), with error < 27 if p > 0: k = e+p-f if k >= 0: c *= 10**k else: c = _div_nearest(c, 10**-k) # error of <= 0.5 in c # _ilog magnifies existing error in c by a factor of at most 10 log_d = _ilog(c, 10**p) # error < 5 + 22 = 27 else: # p <= 0: just approximate the whole thing by 0; error < 2.31 log_d = 0 # compute approximation to f*10**p*log(10), with error < 11. if f: extra = len(str(abs(f)))-1 if p + extra >= 0: # error in f * _log10_digits(p+extra) < |f| * 1 = |f| # after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11 f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra) else: f_log_ten = 0 else: f_log_ten = 0 # error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1 return _div_nearest(f_log_ten + log_d, 100) class _Log10Memoize(object): """Class to compute, store, and allow retrieval of, digits of the constant log(10) = 2.302585.... 
This constant is needed by Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__.""" def __init__(self): self.digits = "23025850929940456840179914546843642076011014886" def getdigits(self, p): """Given an integer p >= 0, return floor(10**p)*log(10). For example, self.getdigits(3) returns 2302. """ # digits are stored as a string, for quick conversion to # integer in the case that we've already computed enough # digits; the stored digits should always be correct # (truncated, not rounded to nearest). if p < 0: raise ValueError("p should be nonnegative") if p >= len(self.digits): # compute p+3, p+6, p+9, ... digits; continue until at # least one of the extra digits is nonzero extra = 3 while True: # compute p+extra digits, correct to within 1ulp M = 10**(p+extra+2) digits = str(_div_nearest(_ilog(10*M, M), 100)) if digits[-extra:] != '0'*extra: break extra += 3 # keep all reliable digits so far; remove trailing zeros # and next nonzero digit self.digits = digits.rstrip('0')[:-1] return int(self.digits[:p+1]) _log10_digits = _Log10Memoize().getdigits def _iexp(x, M, L=8): """Given integers x and M, M > 0, such that x/M is small in absolute value, compute an integer approximation to M*exp(x/M). For 0 <= x/M <= 2.4, the absolute error in the result is bounded by 60 (and is usually much smaller).""" # Algorithm: to compute exp(z) for a real number z, first divide z # by a suitable power R of 2 so that |z/2**R| < 2**-L. Then # compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor # series # # expm1(x) = x + x**2/2! + x**3/3! + ... # # Now use the identity # # expm1(2x) = expm1(x)*(expm1(x)+2) # # R times to compute the sequence expm1(z/2**R), # expm1(z/2**(R-1)), ... , exp(z/2), exp(z). # Find R such that x/2**R/M <= 2**-L R = _nbits((x<<L)//M) # Taylor series. 
(2**L)**T > M T = -int(-10*len(str(M))//(3*L)) y = _div_nearest(x, T) Mshift = M<<R for i in range(T-1, 0, -1): y = _div_nearest(x*(Mshift + y), Mshift * i) # Expansion for k in range(R-1, -1, -1): Mshift = M<<(k+2) y = _div_nearest(y*(y+Mshift), Mshift) return M+y def _dexp(c, e, p): """Compute an approximation to exp(c*10**e), with p decimal places of precision. Returns integers d, f such that: 10**(p-1) <= d <= 10**p, and (d-1)*10**f < exp(c*10**e) < (d+1)*10**f In other words, d*10**f is an approximation to exp(c*10**e) with p digits of precision, and with an error in d of at most 1. This is almost, but not quite, the same as the error being < 1ulp: when d = 10**(p-1) the error could be up to 10 ulp.""" # we'll call iexp with M = 10**(p+2), giving p+3 digits of precision p += 2 # compute log(10) with extra precision = adjusted exponent of c*10**e extra = max(0, e + len(str(c)) - 1) q = p + extra # compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q), # rounding down shift = e+q if shift >= 0: cshift = c*10**shift else: cshift = c//10**-shift quot, rem = divmod(cshift, _log10_digits(q)) # reduce remainder back to original precision rem = _div_nearest(rem, 10**extra) # error in result of _iexp < 120; error after division < 0.62 return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3 def _dpower(xc, xe, yc, ye, p): """Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that: 10**(p-1) <= c <= 10**p, and (c-1)*10**e < x**y < (c+1)*10**e in other words, c*10**e is an approximation to x**y with p digits of precision, and with an error in c of at most 1. (This is almost, but not quite, the same as the error being < 1ulp: when c == 10**(p-1) we can only guarantee error < 10ulp.) We assume that: x is positive and not equal to 1, and y is nonzero. 
""" # Find b such that 10**(b-1) <= |y| <= 10**b b = len(str(abs(yc))) + ye # log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point lxc = _dlog(xc, xe, p+b+1) # compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1) shift = ye-b if shift >= 0: pc = lxc*yc*10**shift else: pc = _div_nearest(lxc*yc, 10**-shift) if pc == 0: # we prefer a result that isn't exactly 1; this makes it # easier to compute a correctly rounded result in __pow__ if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1: coeff, exp = 10**(p-1)+1, 1-p else: coeff, exp = 10**p-1, -p else: coeff, exp = _dexp(pc, -(p+1), p+1) coeff = _div_nearest(coeff, 10) exp += 1 return coeff, exp def _log10_lb(c, correction = { '1': 100, '2': 70, '3': 53, '4': 40, '5': 31, '6': 23, '7': 16, '8': 10, '9': 5}): """Compute a lower bound for 100*log10(c) for a positive integer c.""" if c <= 0: raise ValueError("The argument to _log10_lb should be nonnegative.") str_c = str(c) return 100*len(str_c) - correction[str_c[0]] ##### Helper Functions #################################################### def _convert_other(other, raiseit=False, allow_float=False): """Convert other to Decimal. Verifies that it's ok to use in an implicit construction. If allow_float is true, allow conversion from float; this is used in the comparison methods (__eq__ and friends). """ if isinstance(other, Decimal): return other if isinstance(other, int): return Decimal(other) if allow_float and isinstance(other, float): return Decimal.from_float(other) if raiseit: raise TypeError("Unable to convert %s to Decimal" % other) return NotImplemented def _convert_for_comparison(self, other, equality_op=False): """Given a Decimal instance self and a Python object other, return a pair (s, o) of Decimal instances such that "s op o" is equivalent to "self op other" for any of the 6 comparison operators "op". 
""" if isinstance(other, Decimal): return self, other # Comparison with a Rational instance (also includes integers): # self op n/d <=> self*d op n (for n and d integers, d positive). # A NaN or infinity can be left unchanged without affecting the # comparison result. if isinstance(other, _numbers.Rational): if not self._is_special: self = _dec_from_triple(self._sign, str(int(self._int) * other.denominator), self._exp) return self, Decimal(other.numerator) # Comparisons with float and complex types. == and != comparisons # with complex numbers should succeed, returning either True or False # as appropriate. Other comparisons return NotImplemented. if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0: other = other.real if isinstance(other, float): context = getcontext() if equality_op: context.flags[FloatOperation] = 1 else: context._raise_error(FloatOperation, "strict semantics for mixing floats and Decimals are enabled") return self, Decimal.from_float(other) return NotImplemented, NotImplemented ##### Setup Specific Contexts ############################################ # The default context prototype used by Context() # Is mutable, so that new contexts can have different default values DefaultContext = Context( prec=17, rounding=ROUND_HALF_EVEN, traps=[DivisionByZero, Overflow, InvalidOperation], flags=[], Emax=308, Emin=-324, capitals=1, clamp=0 ) # Pre-made alternate contexts offered by the specification # Don't change these; the user should be able to select these # contexts and be able to reproduce results from other implementations # of the spec. BasicContext = Context( prec=9, rounding=ROUND_HALF_UP, traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow], flags=[], ) ExtendedContext = Context( prec=9, rounding=ROUND_HALF_EVEN, traps=[], flags=[], ) ##### crud for parsing strings ############################################# # # Regular expression used for parsing numeric strings. Additional # comments: # # 1. 
Uncomment the two '\s*' lines to allow leading and/or trailing # whitespace. But note that the specification disallows whitespace in # a numeric string. # # 2. For finite numbers (not infinities and NaNs) the body of the # number between the optional sign and the optional exponent must have # at least one decimal digit, possibly after the decimal point. The # lookahead expression '(?=\d|\.\d)' checks this. #import re #_parser = re.compile(r""" # A numeric string consists of: # \s* # (?P<sign>[-+])? # an optional sign, followed by either... # ( # (?=\d|\.\d) # ...a number (with at least one digit) # (?P<int>\d*) # having a (possibly empty) integer part # (\.(?P<frac>\d*))? # followed by an optional fractional part # (E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or... # | # Inf(inity)? # ...an infinity, or... # | # (?P<signal>s)? # ...an (optionally signaling) # NaN # NaN # (?P<diag>\d*) # with (possibly empty) diagnostic info. # ) # \s* # \Z #""", re.VERBOSE | re.IGNORECASE).match import _jsre as re _all_zeros = re.compile('0*$').match _exact_half = re.compile('50*$').match ##### PEP3101 support functions ############################################## # The functions in this section have little to do with the Decimal # class, and could potentially be reused or adapted for other pure # Python numeric classes that want to implement __format__ # # A format specifier for Decimal looks like: # # [[fill]align][sign][#][0][minimumwidth][,][.precision][type] #_parse_format_specifier_regex = re.compile(r"""\A #(?: # (?P<fill>.)? # (?P<align>[<>=^]) #)? #(?P<sign>[-+ ])? #(?P<alt>\#)? #(?P<zeropad>0)? #(?P<minimumwidth>(?!0)\d+)? #(?P<thousands_sep>,)? #(?:\.(?P<precision>0|(?!0)\d+))? #(?P<type>[eEfFgGn%])? #\Z #""", re.VERBOSE|re.DOTALL) del re # The locale module is only needed for the 'n' format specifier. The # rest of the PEP 3101 code functions quite happily without it, so we # don't care too much if locale isn't present. 
try: import locale as _locale except ImportError: pass def _parse_format_specifier(format_spec, _localeconv=None): """Parse and validate a format specifier. Turns a standard numeric format specifier into a dict, with the following entries: fill: fill character to pad field to minimum width align: alignment type, either '<', '>', '=' or '^' sign: either '+', '-' or ' ' minimumwidth: nonnegative integer giving minimum width zeropad: boolean, indicating whether to pad with zeros thousands_sep: string to use as thousands separator, or '' grouping: grouping for thousands separators, in format used by localeconv decimal_point: string to use for decimal point precision: nonnegative integer giving precision, or None type: one of the characters 'eEfFgG%', or None """ m = _parse_format_specifier_regex.match(format_spec) if m is None: raise ValueError("Invalid format specifier: " + format_spec) # get the dictionary format_dict = m.groupdict() # zeropad; defaults for fill and alignment. If zero padding # is requested, the fill and align fields should be absent. fill = format_dict['fill'] align = format_dict['align'] format_dict['zeropad'] = (format_dict['zeropad'] is not None) if format_dict['zeropad']: if fill is not None: raise ValueError("Fill character conflicts with '0'" " in format specifier: " + format_spec) if align is not None: raise ValueError("Alignment conflicts with '0' in " "format specifier: " + format_spec) format_dict['fill'] = fill or ' ' # PEP 3101 originally specified that the default alignment should # be left; it was later agreed that right-aligned makes more sense # for numeric types. See http://bugs.python.org/issue6857. 
format_dict['align'] = align or '>' # default sign handling: '-' for negative, '' for positive if format_dict['sign'] is None: format_dict['sign'] = '-' # minimumwidth defaults to 0; precision remains None if not given format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0') if format_dict['precision'] is not None: format_dict['precision'] = int(format_dict['precision']) # if format type is 'g' or 'G' then a precision of 0 makes little # sense; convert it to 1. Same if format type is unspecified. if format_dict['precision'] == 0: if format_dict['type'] is None or format_dict['type'] in 'gGn': format_dict['precision'] = 1 # determine thousands separator, grouping, and decimal separator, and # add appropriate entries to format_dict if format_dict['type'] == 'n': # apart from separators, 'n' behaves just like 'g' format_dict['type'] = 'g' if _localeconv is None: _localeconv = _locale.localeconv() if format_dict['thousands_sep'] is not None: raise ValueError("Explicit thousands separator conflicts with " "'n' type in format specifier: " + format_spec) format_dict['thousands_sep'] = _localeconv['thousands_sep'] format_dict['grouping'] = _localeconv['grouping'] format_dict['decimal_point'] = _localeconv['decimal_point'] else: if format_dict['thousands_sep'] is None: format_dict['thousands_sep'] = '' format_dict['grouping'] = [3, 0] format_dict['decimal_point'] = '.' return format_dict def _format_align(sign, body, spec): """Given an unpadded, non-aligned numeric string 'body' and sign string 'sign', add padding and alignment conforming to the given format specifier dictionary 'spec' (as produced by parse_format_specifier). """ # how much extra space do we have to play with? 
minimumwidth = spec['minimumwidth'] fill = spec['fill'] padding = fill*(minimumwidth - len(sign) - len(body)) align = spec['align'] if align == '<': result = sign + body + padding elif align == '>': result = padding + sign + body elif align == '=': result = sign + padding + body elif align == '^': half = len(padding)//2 result = padding[:half] + sign + body + padding[half:] else: raise ValueError('Unrecognised alignment field') return result def _group_lengths(grouping): """Convert a localeconv-style grouping into a (possibly infinite) iterable of integers representing group lengths. """ # The result from localeconv()['grouping'], and the input to this # function, should be a list of integers in one of the # following three forms: # # (1) an empty list, or # (2) nonempty list of positive integers + [0] # (3) list of positive integers + [locale.CHAR_MAX], or from itertools import chain, repeat if not grouping: return [] elif grouping[-1] == 0 and len(grouping) >= 2: return chain(grouping[:-1], repeat(grouping[-2])) elif grouping[-1] == _locale.CHAR_MAX: return grouping[:-1] else: raise ValueError('unrecognised format for grouping') def _insert_thousands_sep(digits, spec, min_width=1): """Insert thousands separators into a digit string. spec is a dictionary whose keys should include 'thousands_sep' and 'grouping'; typically it's the result of parsing the format specifier using _parse_format_specifier. The min_width keyword argument gives the minimum length of the result, which will be padded on the left with zeros if necessary. If necessary, the zero padding adds an extra '0' on the left to avoid a leading thousands separator. For example, inserting commas every three digits in '123456', with min_width=8, gives '0,123,456', even though that has length 9. 
""" sep = spec['thousands_sep'] grouping = spec['grouping'] groups = [] for l in _group_lengths(grouping): if l <= 0: raise ValueError("group length should be positive") # max(..., 1) forces at least 1 digit to the left of a separator l = min(max(len(digits), min_width, 1), l) groups.append('0'*(l - len(digits)) + digits[-l:]) digits = digits[:-l] min_width -= l if not digits and min_width <= 0: break min_width -= len(sep) else: l = max(len(digits), min_width, 1) groups.append('0'*(l - len(digits)) + digits[-l:]) return sep.join(reversed(groups)) def _format_sign(is_negative, spec): """Determine sign character.""" if is_negative: return '-' elif spec['sign'] in ' +': return spec['sign'] else: return '' def _format_number(is_negative, intpart, fracpart, exp, spec): """Format a number, given the following data: is_negative: true if the number is negative, else false intpart: string of digits that must appear before the decimal point fracpart: string of digits that must come after the point exp: exponent, as an integer spec: dictionary resulting from parsing the format specifier This function uses the information in spec to: insert separators (decimal separator and thousands separators) format the sign format the exponent add trailing '%' for the '%' type zero-pad if necessary fill and align if necessary """ sign = _format_sign(is_negative, spec) if fracpart or spec['alt']: fracpart = spec['decimal_point'] + fracpart if exp != 0 or spec['type'] in 'eE': echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']] fracpart += "{0}{1:+}".format(echar, exp) if spec['type'] == '%': fracpart += '%' if spec['zeropad']: min_width = spec['minimumwidth'] - len(fracpart) - len(sign) else: min_width = 0 intpart = _insert_thousands_sep(intpart, spec, min_width) return _format_align(sign, intpart+fracpart, spec) ##### Useful Constants (internal use only) ################################ # Reusable defaults _Infinity = Decimal('Inf') _NegativeInfinity = Decimal('-Inf') _NaN = 
Decimal('NaN') _Zero = Decimal(0) _One = Decimal(1) _NegativeOne = Decimal(-1) # _SignedInfinity[sign] is infinity w/ that sign _SignedInfinity = (_Infinity, _NegativeInfinity) # Constants related to the hash implementation; hash(x) is based # on the reduction of x modulo _PyHASH_MODULUS _PyHASH_MODULUS = sys.hash_info.modulus # hash values to use for positive and negative infinities, and nans _PyHASH_INF = sys.hash_info.inf _PyHASH_NAN = sys.hash_info.nan # _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS _PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS) del sys try: import _decimal except ImportError: pass else: s1 = set(dir()) s2 = set(dir(_decimal)) for name in s1 - s2: del globals()[name] del s1, s2, name from _decimal import * if __name__ == '__main__': import doctest, decimal doctest.testmod(decimal)
qiankunshe/sky_engine
refs/heads/master
sky/tools/webkitpy/layout_tests/process_json_data_unittest.py
59
# Copyright (C) 2014 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import json import unittest from webkitpy.layout_tests.generate_results_dashboard import ProcessJsonData class ProcessJsonDataTester(unittest.TestCase): def test_check_failing_results(self): valid_json_data = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}}}}} valid_json_data_1 = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'expected': u'TEXT', u'actual': u'TEXT', u'is_unexpected': True}}}}}} valid_json_data_2 = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name_2': {u'test_2.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}, u'test_name': {u'test.html': {u'expected': u'TEXT', u'actual': u'TEXT', u'is_unexpected': True}}}}}} expected_result = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'archived_results': [u'TEXT', u'PASS']}}}}}} process_json_data = ProcessJsonData(valid_json_data, [valid_json_data_1], [valid_json_data_2]) actual_result = process_json_data.generate_archived_result() self.assertEqual(expected_result, actual_result) def test_check_full_results(self): valid_json_data = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name_2': {u'test_2.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}}}}} valid_json_data_1 = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'expected': u'TEXT', u'actual': u'TEXT', u'is_unexpected': True}}}}}} valid_json_data_2 = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name_2': {u'test_2.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}, u'test_name': {u'test.html': {u'expected': u'TEXT', u'actual': u'TEXT', u'is_unexpected': True}}}}}} expected_result = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name_2': {u'test_2.html': {u'archived_results': [u'TEXT', u'TEXT']}}}}}} 
process_json_data = ProcessJsonData(valid_json_data, [valid_json_data_1], [valid_json_data_2]) actual_result = process_json_data.generate_archived_result() self.assertEqual(expected_result, actual_result) def test_null_check(self): valid_json_data = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}}}}} expected_result = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'archived_results': [u'TEXT']}}}}}} process_json_data = ProcessJsonData(valid_json_data, [], []) actual_result = process_json_data.generate_archived_result() self.assertEqual(expected_result, actual_result)
greyfenrir/taurus
refs/heads/master
tests/resources/selenium/external_logging.py
1
# coding=utf-8 import logging import random import string import sys import unittest from time import time, sleep import apiritif import os import re from selenium import webdriver from selenium.common.exceptions import NoSuchElementException, TimeoutException from selenium.webdriver.common.by import By from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.support.ui import Select from selenium.webdriver.support import expected_conditions as econd from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.common.keys import Keys from bzt.resources.selenium_extras import add_logging_handlers, get_locator, dialogs_replace class TestSample(unittest.TestCase): def setUp(self): self.vars = {} timeout = 30.0 self.driver = None options = webdriver.ChromeOptions() options.add_argument('--no-sandbox') options.add_argument('--disable-dev-shm-usage') self.driver = webdriver.Chrome(service_log_path='/somewhere/webdriver.log', options=options) self.driver.implicitly_wait(timeout) add_logging_handlers() apiritif.put_into_thread_store(timeout=timeout, func_mode=False, driver=self.driver, windows={}, scenario_name='sample') def _1_Test(self): with apiritif.smart_transaction('Test'): apiritif.external_log('start: go(http://blazedemo.com/)') self.driver.get('http://blazedemo.com/') dialogs_replace() apiritif.external_log('end: go(http://blazedemo.com/)') apiritif.external_log('start: log(leaving blazedemo)') apiritif.external_log('leaving blazedemo') apiritif.external_log('end: log(leaving blazedemo)') def test_sample(self): self._1_Test() def tearDown(self): if self.driver: self.driver.quit()
riggsd/solartime
refs/heads/dev
solartime.py
1
""" The :mod:`solartime` module provides the means to calculate dawn, sunrise, solar noon, sunset, and dusk, at a specific latitude/longitude. The module provides one main class, :class:`SolarTime`. :class:`SolarTime` * Calculates events in the UTC timezone. Example usage :: >>> from datetime import date >>> from pytz import timezone >>> from solartime import SolarTime >>> today = date(2014, 1, 19) >>> localtz = timezone('US/Eastern') >>> lat, lon = 38.0, -79.0 >>> sun = SolarTime() >>> schedule = sun.sun_utc(today, lat, lon) >>> sunset = schedule['sunset'].astimezone(localtz) >>> print(sunset) 2014-01-19 17:24:43-05:00 """ import datetime from math import cos, sin, tan, acos, asin, floor, radians, degrees try: import pytz except ImportError: raise ImportError('The solartime module requires the pytz module to be available.') try: basestring # Python 2/3 compatibility except NameError: basestring = str __all__ = ['SolarTime', 'SolarError'] __version__ = '0.1b1' __license__ = 'Apache 2.0' __author__ = 'Simon Kennedy <code@sffjunkie.co.uk>, David Riggs <driggs@myotisoft.com>' class SolarError(Exception): pass class SolarTime(object): def __init__(self, solar_depression=6): """Create a SolarTime calculator. :param solar_depression: Number of degrees the sun must be below the horizon for dawn/dusk calculation :type number or str: Either number of degrees, or one of 'civil', 'nautical', 'astronomical' """ self._depression = 6 self.solar_depression = solar_depression # Set default depression in degrees @property def solar_depression(self): """The number of degrees the sun must be below the horizon for the dawn/dusk calc. 
Can either be set as a number of degrees below the horizon or as one of the following strings ============= ======= String Degrees ============= ======= civil 6.0 nautical 12.0 astronomical 18.0 ============= ======= """ return self._depression @solar_depression.setter def solar_depression(self, depression): if isinstance(depression, basestring): try: self._depression = { 'civil': 6, 'nautical': 12, 'astronomical': 18}[depression] except: raise KeyError("solar_depression must be either a number or one of 'civil', 'nautical' or 'astronomical'") else: self._depression = float(depression) def sun_utc(self, date, latitude, longitude): """Calculate all the info for the sun at once. :param date: Date to calculate for. :type date: :class:`datetime.date` :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :rtype: Dictionary with keys ``dawn``, ``sunrise``, ``noon``, ``sunset`` and ``dusk`` """ return { 'dawn': self.dawn_utc(date, latitude, longitude), 'sunrise': self.sunrise_utc(date, latitude, longitude), 'noon': self.solar_noon_utc(date, longitude), 'sunset': self.sunset_utc(date, latitude, longitude), 'dusk': self.dusk_utc(date, latitude, longitude) } def dawn_utc(self, date, latitude, longitude): """Calculate dawn time in the UTC timezone. :param date: Date to calculate for. :type date: datetime.date :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :rtype: date/time in UTC timezone """ try: return self._calc_time(date, latitude, longitude, self._depression) except: raise SolarError('Sun remains below the horizon on this day, at this location.') def sunrise_utc(self, date, latitude, longitude): """Calculate sunrise time in the UTC timezone. :param date: Date to calculate for. 
:type date: datetime.date :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :rtype: date/time in UTC timezone """ try: return self._calc_time(date, latitude, longitude, 0.833) except: raise SolarError('Sun remains below the horizon on this day, at this location.') def solar_noon_utc(self, date, longitude): """Calculate solar noon time in the UTC timezone. :param date: Date to calculate for. :type date: datetime.date :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :rtype: date/time in UTC timezone """ julianday = self._julianday(date) newt = self._jday_to_jcentury(julianday + 0.5 + -longitude / 360.0) eqtime = self._eq_of_time(newt) timeUTC = 720.0 + (-longitude * 4.0) - eqtime timeUTC /= 60.0 hour = int(timeUTC) minute = int((timeUTC - hour) * 60) second = int((((timeUTC - hour) * 60) - minute) * 60) if second > 59: second -= 60 minute += 1 elif second < 0: second += 60 minute -= 1 if minute > 59: minute -= 60 hour += 1 elif minute < 0: minute += 60 hour -= 1 if hour > 23: hour -= 24 date += datetime.timedelta(days=1) elif hour < 0: hour += 24 date -= datetime.timedelta(days=1) return datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc) def sunset_utc(self, date, latitude, longitude): """Calculate sunset time in the UTC timezone. :param date: Date to calculate for. 
:type date: datetime.date :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :rtype: date/time in UTC timezone """ try: return self._calc_time(date, latitude, longitude, -0.833) except: raise SolarError('Sun remains below the horizon on this day, at this location.') def dusk_utc(self, date, latitude, longitude): """Calculate dusk time in the UTC timezone. :param date: Date to calculate for. :type date: datetime.date :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :rtype: date/time in UTC timezone """ try: return self._calc_time(date, latitude, longitude, - self._depression) except: raise SolarError('Sun remains below the horizon on this day, at this location.') def _julianday(self, date, timezone=None): day = date.day month = date.month year = date.year if timezone is not None: offset = timezone.localize(datetime.datetime(year, month, day)).utcoffset() offset = offset.total_seconds() / 1440.0 day += offset + 0.5 if month <= 2: year -= 1 month += 12 A = floor(year / 100.0) B = 2 - A + floor(A / 4.0) jd = floor(365.25 * (year + 4716)) + floor(30.6001 * (month + 1)) + day - 1524.5 if jd > 2299160.4999999: jd += B return jd def _jday_to_jcentury(self, julianday): return (julianday - 2451545.0) / 36525.0 def _jcentury_to_jday(self, juliancentury): return (juliancentury * 36525.0) + 2451545.0 def _mean_obliquity_of_ecliptic(self, juliancentury): seconds = 21.448 - juliancentury * (46.815 + juliancentury * (0.00059 - juliancentury * 0.001813)) return 23.0 + (26.0 + (seconds / 60.0)) / 60.0 def _obliquity_correction(self, juliancentury): e0 = self._mean_obliquity_of_ecliptic(juliancentury) omega = 125.04 - 1934.136 * juliancentury return e0 + 0.00256 * cos(radians(omega)) def _geom_mean_long_sun(self, juliancentury): 
l0 = 280.46646 + juliancentury * (36000.76983 + 0.0003032 * juliancentury) return l0 % 360.0 def _eccentrilocation_earth_orbit(self, juliancentury): return 0.016708634 - juliancentury * (0.000042037 + 0.0000001267 * juliancentury) def _geom_mean_anomaly_sun(self, juliancentury): return 357.52911 + juliancentury * (35999.05029 - 0.0001537 * juliancentury) def _eq_of_time(self, juliancentury): epsilon = self._obliquity_correction(juliancentury) l0 = self._geom_mean_long_sun(juliancentury) e = self._eccentrilocation_earth_orbit(juliancentury) m = self._geom_mean_anomaly_sun(juliancentury) y = tan(radians(epsilon) / 2.0) y = y * y sin2l0 = sin(2.0 * radians(l0)) sinm = sin(radians(m)) cos2l0 = cos(2.0 * radians(l0)) sin4l0 = sin(4.0 * radians(l0)) sin2m = sin(2.0 * radians(m)) Etime = y * sin2l0 - 2.0 * e * sinm + 4.0 * e * y * sinm * cos2l0 - 0.5 * y * y * sin4l0 - 1.25 * e * e * sin2m return degrees(Etime) * 4.0 def _sun_eq_of_center(self, juliancentury): m = self._geom_mean_anomaly_sun(juliancentury) mrad = radians(m) sinm = sin(mrad) sin2m = sin(mrad + mrad) sin3m = sin(mrad + mrad + mrad) c = sinm * (1.914602 - juliancentury * (0.004817 + 0.000014 * juliancentury)) + sin2m * (0.019993 - 0.000101 * juliancentury) + sin3m * 0.000289 return c def _sun_true_long(self, juliancentury): l0 = self._geom_mean_long_sun(juliancentury) c = self._sun_eq_of_center(juliancentury) return l0 + c def _sun_apparent_long(self, juliancentury): O = self._sun_true_long(juliancentury) omega = 125.04 - 1934.136 * juliancentury return O - 0.00569 - 0.00478 * sin(radians(omega)) def _sun_declination(self, juliancentury): e = self._obliquity_correction(juliancentury) lambd = self._sun_apparent_long(juliancentury) sint = sin(radians(e)) * sin(radians(lambd)) return degrees(asin(sint)) def _hour_angle(self, latitude, solar_dec, solar_depression): latRad = radians(latitude) sdRad = radians(solar_dec) HA = (acos(cos(radians(90 + solar_depression)) / (cos(latRad) * cos(sdRad)) - tan(latRad) * 
tan(sdRad))) return HA def _calc_time(self, date, latitude, longitude, depression): julianday = self._julianday(date) if latitude > 89.8: latitude = 89.8 if latitude < -89.8: latitude = -89.8 t = self._jday_to_jcentury(julianday) eqtime = self._eq_of_time(t) solarDec = self._sun_declination(t) hourangle = -self._hour_angle(latitude, solarDec, 0.833) delta = -longitude - degrees(hourangle) timeDiff = 4.0 * delta timeUTC = 720.0 + timeDiff - eqtime newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0) eqtime = self._eq_of_time(newt) solarDec = self._sun_declination(newt) if depression < 0: depression = abs(depression) hourangle = -self._hour_angle(latitude, solarDec, depression) else: hourangle = self._hour_angle(latitude, solarDec, depression) delta = -longitude - degrees(hourangle) timeDiff = 4 * delta timeUTC = 720 + timeDiff - eqtime timeUTC /= 60.0 hour = int(timeUTC) minute = int((timeUTC - hour) * 60) second = int((((timeUTC - hour) * 60) - minute) * 60) if second > 59: second -= 60 minute += 1 elif second < 0: second += 60 minute -= 1 if minute > 59: minute -= 60 hour += 1 elif minute < 0: minute += 60 hour -= 1 if hour > 23: hour -= 24 date += datetime.timedelta(days=1) elif hour < 0: hour += 24 date -= datetime.timedelta(days=1) return datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)
Pixelblaster/kotti_contentgenerator
refs/heads/master
kotti_contentgenerator/tests/test_views.py
2
from kotti.testing import DummyRequest from mock import MagicMock class TestGeneratorView: def make_one(self): from kotti_contentgenerator.views import GeneratorView return GeneratorView({}, DummyRequest()) def test_it(self, db_session, monkeypatch): from kotti.resources import Document, File, Image from kotti_contentgenerator.generator import default_factories params = {} class DummyGenerator: def __init__(self, **kwargs): self.factories = default_factories params.update(kwargs) def generate(self, root): return 100 monkeypatch.setattr( 'kotti_contentgenerator.generator.Generator', DummyGenerator) view = self.make_one() appstruct = {'content_types': set([u'class kotti.resources.Document', u'class kotti.resources.File', u'class kotti.resources.Image']), 'depth': 2, 'root_types': set([u'class kotti.resources.Document']), 'seed': 1, 'sub_level': 10, 'top_level': 10, 'users': 10} view.generate_success(appstruct) assert set(params['content_types']) == set([Document, File, Image]) assert params['root_types'] == [Document] def test_view_schema(self, db_session, monkeypatch): from kotti_contentgenerator.views import make_generator_schema from kotti_contentgenerator.generator import Generator schema = make_generator_schema(Generator()) choices = \ set([(u'class kotti.resources.File', u'class kotti.resources.File'), (u'class kotti.security.Principal', u'class kotti.security.Principal'), (u'class kotti.resources.Document', u'class kotti.resources.Document'), (u'class kotti.resources.Image', u'class kotti.resources.Image' )]) assert set(schema['root_types'].widget.values) == choices assert set(schema['content_types'].widget.values) == choices
mas2df/racefinder
refs/heads/master
processing/test/race_distance_regex_test.py
1
import unittest from processing.utils import getDistanceAndType, getDistanceUnitAndType data = [ ["5K run", "5", "K", "run"], ["10K run", "10", "K", "run"], ["8K run", "8", "K", "run"], ["5K novelty run", "5", "K", "novelty run"], ["5K walk", "5", "K", "walk"], ["1M fun run", "1", "M", "fun run"], ["1M walk", "1", "M", "walk"], ["13.1M run", "13.1", "M", "run"], ["27.2M trail run", "27.2", "M", "trail run"], ["fun run", "", "", "fun run"], ["triathlon", "", "", "triathlon"], ["kid's run", "", "", "kid's run"], ["track meet", "", "", "track meet"], ["track-meet", "", "", "track-meet"], ] class MyTest(unittest.TestCase): def test_getDistanceUnitAndType(self): for test_data in data: print "Testing: " + test_data[0] result = getDistanceUnitAndType(test_data[0]) self.assertEqual(test_data[1], result[0]) self.assertEqual(test_data[2], result[1]) self.assertEqual(test_data[3], result[2]) def test_getDistanceAndType(self): for test_data in data: print "Testing: " + test_data[0] result = getDistanceAndType(test_data[0]) if test_data[1] and test_data[2]: self.assertEqual((test_data[1] + " " + test_data[2]), result[0]) else: self.assertEqual("", result[0]) self.assertEqual(test_data[3], result[1])
santidediego/LearningDjango
refs/heads/master
lib/python3.5/site-packages/pip/_vendor/pkg_resources/__init__.py
252
""" Package resource API -------------------- A resource is a logical file contained within a package, or a logical subdirectory thereof. The package resource API expects resource names to have their path parts separated with ``/``, *not* whatever the local path separator is. Do not use os.path operations to manipulate resource names being passed into the API. The package resource API is designed to work with normal filesystem packages, .egg files, and unpacked .egg files. It can also work in a limited way with .zip files and with custom PEP 302 loaders that support the ``get_data()`` method. """ from __future__ import absolute_import import sys import os import io import time import re import types import zipfile import zipimport import warnings import stat import functools import pkgutil import token import symbol import operator import platform import collections import plistlib import email.parser import tempfile import textwrap from pkgutil import get_importer try: import _imp except ImportError: # Python 3.2 compatibility import imp as _imp PY3 = sys.version_info > (3,) PY2 = not PY3 if PY3: from urllib.parse import urlparse, urlunparse if PY2: from urlparse import urlparse, urlunparse if PY3: string_types = str, else: string_types = str, eval('unicode') iteritems = (lambda i: i.items()) if PY3 else lambda i: i.iteritems() # capture these to bypass sandboxing from os import utime try: from os import mkdir, rename, unlink WRITE_SUPPORT = True except ImportError: # no write support, probably under GAE WRITE_SUPPORT = False from os import open as os_open from os.path import isdir, split # Avoid try/except due to potential problems with delayed import mechanisms. 
if sys.version_info >= (3, 3) and sys.implementation.name == "cpython": import importlib.machinery as importlib_machinery else: importlib_machinery = None try: import parser except ImportError: pass import pip._vendor.packaging.version import pip._vendor.packaging.specifiers packaging = pip._vendor.packaging # declare some globals that will be defined later to # satisfy the linters. require = None working_set = None class PEP440Warning(RuntimeWarning): """ Used when there is an issue with a version or specifier not complying with PEP 440. """ class _SetuptoolsVersionMixin(object): def __hash__(self): return super(_SetuptoolsVersionMixin, self).__hash__() def __lt__(self, other): if isinstance(other, tuple): return tuple(self) < other else: return super(_SetuptoolsVersionMixin, self).__lt__(other) def __le__(self, other): if isinstance(other, tuple): return tuple(self) <= other else: return super(_SetuptoolsVersionMixin, self).__le__(other) def __eq__(self, other): if isinstance(other, tuple): return tuple(self) == other else: return super(_SetuptoolsVersionMixin, self).__eq__(other) def __ge__(self, other): if isinstance(other, tuple): return tuple(self) >= other else: return super(_SetuptoolsVersionMixin, self).__ge__(other) def __gt__(self, other): if isinstance(other, tuple): return tuple(self) > other else: return super(_SetuptoolsVersionMixin, self).__gt__(other) def __ne__(self, other): if isinstance(other, tuple): return tuple(self) != other else: return super(_SetuptoolsVersionMixin, self).__ne__(other) def __getitem__(self, key): return tuple(self)[key] def __iter__(self): component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE) replace = { 'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@', }.get def _parse_version_parts(s): for part in component_re.split(s): part = replace(part, part) if not part or part == '.': continue if part[:1] in '0123456789': # pad for numeric comparison yield part.zfill(8) else: yield '*'+part # ensure that 
alpha/beta/candidate are before final yield '*final' def old_parse_version(s): parts = [] for part in _parse_version_parts(s.lower()): if part.startswith('*'): # remove '-' before a prerelease tag if part < '*final': while parts and parts[-1] == '*final-': parts.pop() # remove trailing zeros from each series of numeric parts while parts and parts[-1] == '00000000': parts.pop() parts.append(part) return tuple(parts) # Warn for use of this function warnings.warn( "You have iterated over the result of " "pkg_resources.parse_version. This is a legacy behavior which is " "inconsistent with the new version class introduced in setuptools " "8.0. In most cases, conversion to a tuple is unnecessary. For " "comparison of versions, sort the Version instances directly. If " "you have another use case requiring the tuple, please file a " "bug with the setuptools project describing that need.", RuntimeWarning, stacklevel=1, ) for part in old_parse_version(str(self)): yield part class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version): pass class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin, packaging.version.LegacyVersion): pass def parse_version(v): try: return SetuptoolsVersion(v) except packaging.version.InvalidVersion: return SetuptoolsLegacyVersion(v) _state_vars = {} def _declare_state(vartype, **kw): globals().update(kw) _state_vars.update(dict.fromkeys(kw, vartype)) def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): state[k] = g['_sget_'+v](g[k]) return state def __setstate__(state): g = globals() for k, v in state.items(): g['_sset_'+_state_vars[k]](k, g[k], v) return state def _sget_dict(val): return val.copy() def _sset_dict(key, ob, state): ob.clear() ob.update(state) def _sget_object(val): return val.__getstate__() def _sset_object(key, ob, state): ob.__setstate__(state) _sget_none = _sset_none = lambda *args: None def get_supported_platform(): """Return this platform's maximum compatible version. 
distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. """ plat = get_build_platform() m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) except ValueError: # not Mac OS X pass return plat __all__ = [ # Basic resource access and distribution/entry point discovery 'require', 'run_script', 'get_provider', 'get_distribution', 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', 'resource_string', 'resource_stream', 'resource_filename', 'resource_listdir', 'resource_exists', 'resource_isdir', # Environmental control 'declare_namespace', 'working_set', 'add_activation_listener', 'find_distributions', 'set_extraction_path', 'cleanup_resources', 'get_default_cache', # Primary implementation classes 'Environment', 'WorkingSet', 'ResourceManager', 'Distribution', 'Requirement', 'EntryPoint', # Exceptions 'ResolutionError', 'VersionConflict', 'DistributionNotFound', 'UnknownExtra', 'ExtractionError', # Warnings 'PEP440Warning', # Parsing functions and string utilities 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', # filesystem utilities 'ensure_directory', 'normalize_path', # Distribution "precedence" constants 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', # "Provider" interfaces, implementations, and 
registration/lookup APIs 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', 'register_finder', 'register_namespace_handler', 'register_loader_type', 'fixup_namespace_packages', 'get_importer', # Deprecated/backward compatibility only 'run_main', 'AvailableDistributions', ] class ResolutionError(Exception): """Abstract base for dependency resolution errors""" def __repr__(self): return self.__class__.__name__+repr(self.args) class VersionConflict(ResolutionError): """ An already-installed version conflicts with the requested version. Should be initialized with the installed Distribution and the requested Requirement. """ _template = "{self.dist} is installed but {self.req} is required" @property def dist(self): return self.args[0] @property def req(self): return self.args[1] def report(self): return self._template.format(**locals()) def with_context(self, required_by): """ If required_by is non-empty, return a version of self that is a ContextualVersionConflict. """ if not required_by: return self args = self.args + (required_by,) return ContextualVersionConflict(*args) class ContextualVersionConflict(VersionConflict): """ A VersionConflict that accepts a third parameter, the set of the requirements that required the installed Distribution. 
""" _template = VersionConflict._template + ' by {self.required_by}' @property def required_by(self): return self.args[2] class DistributionNotFound(ResolutionError): """A requested distribution was not found""" _template = ("The '{self.req}' distribution was not found " "and is required by {self.requirers_str}") @property def req(self): return self.args[0] @property def requirers(self): return self.args[1] @property def requirers_str(self): if not self.requirers: return 'the application' return ', '.join(self.requirers) def report(self): return self._template.format(**locals()) def __str__(self): return self.report() class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" _provider_factories = {} PY_MAJOR = sys.version[:3] EGG_DIST = 3 BINARY_DIST = 2 SOURCE_DIST = 1 CHECKOUT_DIST = 0 DEVELOP_DIST = -1 def register_loader_type(loader_type, provider_factory): """Register `provider_factory` to make providers for `loader_type` `loader_type` is the type or class of a PEP 302 ``module.__loader__``, and `provider_factory` is a function that, passed a *module* object, returns an ``IResourceProvider`` for that module. 
""" _provider_factories[loader_type] = provider_factory def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] try: module = sys.modules[moduleOrReq] except KeyError: __import__(moduleOrReq) module = sys.modules[moduleOrReq] loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module) def _macosx_vers(_cache=[]): if not _cache: version = platform.mac_ver()[0] # fallback for MacPorts if version == '': plist = '/System/Library/CoreServices/SystemVersion.plist' if os.path.exists(plist): if hasattr(plistlib, 'readPlist'): plist_content = plistlib.readPlist(plist) if 'ProductVersion' in plist_content: version = plist_content['ProductVersion'] _cache.append(version.split('.')) return _cache[0] def _macosx_arch(machine): return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and Mac OS X. 
""" try: # Python 2.7 or >=3.2 from sysconfig import get_platform except ImportError: from distutils.util import get_platform plat = get_platform() if sys.platform == "darwin" and not plat.startswith('macosx-'): try: version = _macosx_vers() machine = os.uname()[4].replace(" ", "_") return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), _macosx_arch(machine)) except ValueError: # if someone is running a non-Mac darwin system, this will fall # through to the default implementation pass return plat macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") # XXX backward compat get_platform = get_build_platform def compatible_platforms(provided, required): """Can code for the `provided` platform run on the `required` platform? Returns true if either platform is ``None``, or the platforms are equal. XXX Needs compatibility checks for Linux and other unixy OSes. """ if provided is None or required is None or provided==required: # easy case return True # Mac OS X special cases reqMac = macosVersionString.match(required) if reqMac: provMac = macosVersionString.match(provided) # is this a Mac package? if not provMac: # this is backwards compatibility for packages built before # setuptools 0.6. All packages built after this point will # use the new macosx designation. provDarwin = darwinVersionString.match(provided) if provDarwin: dversion = int(provDarwin.group(1)) macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) if dversion == 7 and macosversion >= "10.3" or \ dversion == 8 and macosversion >= "10.4": return True # egg isn't macosx or legacy darwin return False # are they the same major version and machine type? if provMac.group(1) != reqMac.group(1) or \ provMac.group(3) != reqMac.group(3): return False # is the required OS major update >= the provided one? 
if int(provMac.group(2)) > int(reqMac.group(2)): return False return True # XXX Linux and other platforms' special cases should go here return False def run_script(dist_spec, script_name): """Locate distribution `dist_spec` and run its `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name require(dist_spec)[0].run_script(script_name, ns) # backward compatibility run_main = run_script def get_distribution(dist): """Return a current distribution object for a Requirement or string""" if isinstance(dist, string_types): dist = Requirement.parse(dist) if isinstance(dist, Requirement): dist = get_provider(dist) if not isinstance(dist, Distribution): raise TypeError("Expected string, Requirement, or Distribution", dist) return dist def load_entry_point(dist, group, name): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name) def get_entry_map(dist, group=None): """Return the entry point map for `group`, or the full entry map""" return get_distribution(dist).get_entry_map(group) def get_entry_info(dist, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return get_distribution(dist).get_entry_info(group, name) class IMetadataProvider: def has_metadata(name): """Does the package's distribution contain the named metadata?""" def get_metadata(name): """The named metadata resource as a string""" def get_metadata_lines(name): """Yield named metadata resource as list of non-blank non-comment lines Leading and trailing whitespace is stripped from each line, and lines with ``#`` as the first non-blank character are omitted.""" def metadata_isdir(name): """Is the named metadata a directory? 
(like ``os.path.isdir()``)""" def metadata_listdir(name): """List of metadata names in the directory (like ``os.listdir()``)""" def run_script(script_name, namespace): """Execute the named script in the supplied namespace dictionary""" class IResourceProvider(IMetadataProvider): """An object that provides access to package resources""" def get_resource_filename(manager, resource_name): """Return a true filesystem path for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_stream(manager, resource_name): """Return a readable file-like object for `resource_name` `manager` must be an ``IResourceManager``""" def get_resource_string(manager, resource_name): """Return a string containing the contents of `resource_name` `manager` must be an ``IResourceManager``""" def has_resource(resource_name): """Does the package contain the named resource?""" def resource_isdir(resource_name): """Is the named resource a directory? (like ``os.path.isdir()``)""" def resource_listdir(resource_name): """List of resource names in the directory (like ``os.listdir()``)""" class WorkingSet(object): """A collection of active distributions on sys.path (or a similar list)""" def __init__(self, entries=None): """Create working set from list of path entries (default=sys.path)""" self.entries = [] self.entry_keys = {} self.by_key = {} self.callbacks = [] if entries is None: entries = sys.path for entry in entries: self.add_entry(entry) @classmethod def _build_master(cls): """ Prepare the master working set. """ ws = cls() try: from __main__ import __requires__ except ImportError: # The main program does not list any requirements return ws # ensure the requirements are met try: ws.require(__requires__) except VersionConflict: return cls._build_from_requirements(__requires__) return ws @classmethod def _build_from_requirements(cls, req_spec): """ Build a working set from a requirement spec. Rewrites sys.path. 
""" # try it without defaults already on sys.path # by starting with an empty path ws = cls([]) reqs = parse_requirements(req_spec) dists = ws.resolve(reqs, Environment()) for dist in dists: ws.add(dist) # add any missing entries from sys.path for entry in sys.path: if entry not in ws.entries: ws.add_entry(entry) # then copy back to sys.path sys.path[:] = ws.entries return ws def add_entry(self, entry): """Add a path item to ``.entries``, finding any distributions on it ``find_distributions(entry, True)`` is used to find distributions corresponding to the path entry, and they are added. `entry` is always appended to ``.entries``, even if it is already present. (This is because ``sys.path`` can contain the same value more than once, and the ``.entries`` of the ``sys.path`` WorkingSet should always equal ``sys.path``.) """ self.entry_keys.setdefault(entry, []) self.entries.append(entry) for dist in find_distributions(entry, True): self.add(dist, entry, False) def __contains__(self, dist): """True if `dist` is the active distribution for its project""" return self.by_key.get(dist.key) == dist def find(self, req): """Find a distribution matching requirement `req` If there is an active distribution for the requested project, this returns it as long as it meets the version requirement specified by `req`. But, if there is an active distribution for the project and it does *not* meet the `req` requirement, ``VersionConflict`` is raised. If there is no active distribution for the requested project, ``None`` is returned. """ dist = self.by_key.get(req.key) if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) return dist def iter_entry_points(self, group, name=None): """Yield entry point objects from `group` matching `name` If `name` is None, yields all entry points in `group` from all distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). 
""" for dist in self: entries = dist.get_entry_map(group) if name is None: for ep in entries.values(): yield ep elif name in entries: yield entries[name] def run_script(self, requires, script_name): """Locate distribution for `requires` and run `script_name` script""" ns = sys._getframe(1).f_globals name = ns['__name__'] ns.clear() ns['__name__'] = name self.require(requires)[0].run_script(script_name, ns) def __iter__(self): """Yield distributions for non-duplicate projects in the working set The yield order is the order in which the items' path entries were added to the working set. """ seen = {} for item in self.entries: if item not in self.entry_keys: # workaround a cache issue continue for key in self.entry_keys[item]: if key not in seen: seen[key]=1 yield self.by_key[key] def add(self, dist, entry=None, insert=True, replace=False): """Add `dist` to working set, associated with `entry` If `entry` is unspecified, it defaults to the ``.location`` of `dist`. On exit from this routine, `entry` is added to the end of the working set's ``.entries`` (if it wasn't already present). `dist` is only added to the working set if it's for a project that doesn't already have a distribution in the set, unless `replace=True`. If it's added, any callbacks registered with the ``subscribe()`` method will be called. """ if insert: dist.insert_on(self.entries, entry) if entry is None: entry = dist.location keys = self.entry_keys.setdefault(entry,[]) keys2 = self.entry_keys.setdefault(dist.location,[]) if not replace and dist.key in self.by_key: # ignore hidden distros return self.by_key[dist.key] = dist if dist.key not in keys: keys.append(dist.key) if dist.key not in keys2: keys2.append(dist.key) self._added_new(dist) def resolve(self, requirements, env=None, installer=None, replace_conflicting=False): """List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. 
`env`, if supplied, should be an ``Environment`` instance. If not supplied, it defaults to all distributions available within any entry or distribution in the working set. `installer`, if supplied, will be invoked with each requirement that cannot be met by an already-installed distribution; it should return a ``Distribution`` or ``None``. Unless `replace_conflicting=True`, raises a VersionConflict exception if any requirements are found on the path that have the correct name but the wrong version. Otherwise, if an `installer` is supplied it will be invoked to obtain the correct version of the requirement and activate it. """ # set up the stack requirements = list(requirements)[::-1] # set of processed requirements processed = {} # key -> dist best = {} to_activate = [] # Mapping of requirement to set of distributions that required it; # useful for reporting info about conflicts. required_by = collections.defaultdict(set) while requirements: # process dependencies breadth-first req = requirements.pop(0) if req in processed: # Ignore cyclic or redundant dependencies continue dist = best.get(req.key) if dist is None: # Find the best distribution and add it to the map dist = self.by_key.get(req.key) if dist is None or (dist not in req and replace_conflicting): ws = self if env is None: if dist is None: env = Environment(self.entries) else: # Use an empty environment and workingset to avoid # any further conflicts with the conflicting # distribution env = Environment([]) ws = WorkingSet([]) dist = best[req.key] = env.best_match(req, ws, installer) if dist is None: requirers = required_by.get(req, None) raise DistributionNotFound(req, requirers) to_activate.append(dist) if dist not in req: # Oops, the "best" so far conflicts with a dependency dependent_req = required_by[req] raise VersionConflict(dist, req).with_context(dependent_req) # push the new requirements onto the stack new_requirements = dist.requires(req.extras)[::-1] requirements.extend(new_requirements) # 
Register the new requirements needed by req for new_requirement in new_requirements: required_by[new_requirement].add(req.project_name) processed[req] = True # return list of distros to activate return to_activate def find_plugins(self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: distributions, errors = working_set.find_plugins( Environment(plugin_dirlist) ) # add plugins+libs to sys.path map(working_set.add, distributions) # display errors print('Could not load', errors) The `plugin_env` should be an ``Environment`` instance that contains only distributions that are in the project's "plugin directory" or directories. The `full_env`, if supplied, should be an ``Environment`` contains all currently-available distributions. If `full_env` is not supplied, one is created automatically from the ``WorkingSet`` this method is called on, which will typically mean that every directory on ``sys.path`` will be scanned for distributions. `installer` is a standard installer callback as used by the ``resolve()`` method. The `fallback` flag indicates whether we should attempt to resolve older versions of a plugin if the newest version cannot be resolved. This method returns a 2-tuple: (`distributions`, `error_info`), where `distributions` is a list of the distributions found in `plugin_env` that were loadable, along with any other distributions that are needed to resolve their dependencies. `error_info` is a dictionary mapping unloadable plugin distributions to an exception instance describing the error that occurred. Usually this will be a ``DistributionNotFound`` or ``VersionConflict`` instance. 
""" plugin_projects = list(plugin_env) # scan project names in alphabetic order plugin_projects.sort() error_info = {} distributions = {} if full_env is None: env = Environment(self.entries) env += plugin_env else: env = full_env + plugin_env shadow_set = self.__class__([]) # put all our entries in shadow_set list(map(shadow_set.add, self)) for project_name in plugin_projects: for dist in plugin_env[project_name]: req = [dist.as_requirement()] try: resolvees = shadow_set.resolve(req, env, installer) except ResolutionError as v: # save error info error_info[dist] = v if fallback: # try the next older version of project continue else: # give up on this project, keep going break else: list(map(shadow_set.add, resolvees)) distributions.update(dict.fromkeys(resolvees)) # success, no need to try any more versions of this project break distributions = list(distributions) distributions.sort() return distributions, error_info def require(self, *requirements): """Ensure that distributions matching `requirements` are activated `requirements` must be a string or a (possibly-nested) sequence thereof, specifying the distributions and versions required. The return value is a sequence of the distributions that needed to be activated to fulfill the requirements; all relevant distributions are included, even if they were already activated in this working set. 
""" needed = self.resolve(parse_requirements(requirements)) for dist in needed: self.add(dist) return needed def subscribe(self, callback): """Invoke `callback` for all distributions (including existing ones)""" if callback in self.callbacks: return self.callbacks.append(callback) for dist in self: callback(dist) def _added_new(self, dist): for callback in self.callbacks: callback(dist) def __getstate__(self): return ( self.entries[:], self.entry_keys.copy(), self.by_key.copy(), self.callbacks[:] ) def __setstate__(self, e_k_b_c): entries, keys, by_key, callbacks = e_k_b_c self.entries = entries[:] self.entry_keys = keys.copy() self.by_key = by_key.copy() self.callbacks = callbacks[:] class Environment(object): """Searchable snapshot of distributions on a search path""" def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): """Snapshot distributions available on a search path Any distributions found on `search_path` are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an optional string naming the desired version of Python (e.g. ``'3.3'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you wish to map *all* distributions, not just those compatible with the running platform or Python version. """ self._distmap = {} self.platform = platform self.python = python self.scan(search_path) def can_add(self, dist): """Is distribution `dist` acceptable for this environment? The distribution must match the platform and python version requirements specified when this environment was created, or False is returned. 
""" return (self.python is None or dist.py_version is None or dist.py_version==self.python) \ and compatible_platforms(dist.platform, self.platform) def remove(self, dist): """Remove `dist` from the environment""" self._distmap[dist.key].remove(dist) def scan(self, search_path=None): """Scan `search_path` for distributions usable in this environment Any distributions found are added to the environment. `search_path` should be a sequence of ``sys.path`` items. If not supplied, ``sys.path`` is used. Only distributions conforming to the platform/python version defined at initialization are added. """ if search_path is None: search_path = sys.path for item in search_path: for dist in find_distributions(item): self.add(dist) def __getitem__(self, project_name): """Return a newest-to-oldest list of distributions for `project_name` Uses case-insensitive `project_name` comparison, assuming all the project's distributions use their project's name converted to all lowercase as their key. """ distribution_key = project_name.lower() return self._distmap.get(distribution_key, []) def add(self, dist): """Add `dist` if we ``can_add()`` it and it has not already been added """ if self.can_add(dist) and dist.has_version(): dists = self._distmap.setdefault(dist.key, []) if dist not in dists: dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) def best_match(self, req, working_set, installer=None): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a suitable distribution is already active. (This may raise ``VersionConflict`` if an unsuitable version of the project is already active in the specified `working_set`.) If a suitable distribution isn't active, this method returns the newest distribution in the environment that meets the ``Requirement`` in `req`. 
If no suitable distribution is found, and `installer` is supplied, then the result of calling the environment's ``obtain(req, installer)`` method will be returned. """ dist = working_set.find(req) if dist is not None: return dist for dist in self[req.key]: if dist in req: return dist # try to download/install return self.obtain(req, installer) def obtain(self, requirement, installer=None): """Obtain a distribution matching `requirement` (e.g. via download) Obtain a distro that matches requirement (e.g. via download). In the base ``Environment`` class, this routine just returns ``installer(requirement)``, unless `installer` is None, in which case None is returned instead. This method is a hook that allows subclasses to attempt other ways of obtaining a distribution before falling back to the `installer` argument.""" if installer is not None: return installer(requirement) def __iter__(self): """Yield the unique project names of the available distributions""" for key in self._distmap.keys(): if self[key]: yield key def __iadd__(self, other): """In-place addition of a distribution or environment""" if isinstance(other, Distribution): self.add(other) elif isinstance(other, Environment): for project in other: for dist in other[project]: self.add(dist) else: raise TypeError("Can't add %r to environment" % (other,)) return self def __add__(self, other): """Add an environment or distribution to an environment""" new = self.__class__([], platform=None, python=None) for env in self, other: new += env return new # XXX backward compatibility AvailableDistributions = Environment class ExtractionError(RuntimeError): """An error occurred extracting a resource The following attributes are available from instances of this exception: manager The resource manager that raised this exception cache_path The base directory for resource extraction original_error The exception instance that caused extraction to fail """ class ResourceManager: """Manage resource extraction and packages""" 
extraction_path = None def __init__(self): self.cached_files = {} def resource_exists(self, package_or_requirement, resource_name): """Does the named resource exist?""" return get_provider(package_or_requirement).has_resource(resource_name) def resource_isdir(self, package_or_requirement, resource_name): """Is the named resource an existing directory?""" return get_provider(package_or_requirement).resource_isdir( resource_name ) def resource_filename(self, package_or_requirement, resource_name): """Return a true filesystem path for specified resource""" return get_provider(package_or_requirement).get_resource_filename( self, resource_name ) def resource_stream(self, package_or_requirement, resource_name): """Return a readable file-like object for specified resource""" return get_provider(package_or_requirement).get_resource_stream( self, resource_name ) def resource_string(self, package_or_requirement, resource_name): """Return specified resource as a string""" return get_provider(package_or_requirement).get_resource_string( self, resource_name ) def resource_listdir(self, package_or_requirement, resource_name): """List the contents of the named resource directory""" return get_provider(package_or_requirement).resource_listdir( resource_name ) def extraction_error(self): """Give an error message for problems extracting file(s)""" old_exc = sys.exc_info()[1] cache_path = self.extraction_path or get_default_cache() err = ExtractionError("""Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: %s The Python egg cache directory is currently set to: %s Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. 
""" % (old_exc, cache_path) ) err.manager = self err.cache_path = cache_path err.original_error = old_exc raise err def get_cache_path(self, archive_name, names=()): """Return absolute location in cache for `archive_name` and `names` The parent directory of the resulting path will be created if it does not already exist. `archive_name` should be the base filename of the enclosing egg (which may not be the name of the enclosing zipfile!), including its ".egg" extension. `names`, if provided, should be a sequence of path name parts "under" the egg's extraction location. This method should only be called by resource providers that need to obtain an extraction location, and only for names they intend to extract, as it tracks the generated names for possible cleanup later. """ extract_path = self.extraction_path or get_default_cache() target_path = os.path.join(extract_path, archive_name+'-tmp', *names) try: _bypass_ensure_directory(target_path) except: self.extraction_error() self._warn_unsafe_extraction_path(extract_path) self.cached_files[target_path] = 1 return target_path @staticmethod def _warn_unsafe_extraction_path(path): """ If the default extraction path is overridden and set to an insecure location, such as /tmp, it opens up an opportunity for an attacker to replace an extracted file with an unauthorized payload. Warn the user if a known insecure location is used. See Distribute #375 for more details. """ if os.name == 'nt' and not path.startswith(os.environ['windir']): # On Windows, permissions are generally restrictive by default # and temp directories are not writable by other users, so # bypass the warning. return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: msg = ("%s is writable by group/others and vulnerable to attack " "when " "used with get_resource_filename. Consider a more secure " "location (set with .set_extraction_path or the " "PYTHON_EGG_CACHE environment variable)." 
% path) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): """Perform any platform-specific postprocessing of `tempname` This is where Mac header rewrites should be done; other platforms don't have anything special they should do. Resource providers should call this method ONLY after successfully extracting a compressed resource. They must NOT call it on resources that are already in the filesystem. `tempname` is the current (temporary) name of the file, and `filename` is the name it will be renamed to by the caller after this routine returns. """ if os.name == 'posix': # Make the resource executable mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 os.chmod(tempname, mode) def set_extraction_path(self, path): """Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) """ if self.cached_files: raise ValueError( "Can't change extraction path, files already extracted" ) self.extraction_path = path def cleanup_resources(self, force=False): """ Delete all extracted resource files and directories, returning a list of the file and directory names that could not be successfully removed. 
This function does not have any concurrency protection, so it should generally only be called when the extraction path is a temporary directory exclusive to a single process. This method is not automatically called; you must call it explicitly or register it as an ``atexit`` function if you wish to ensure cleanup of a temporary directory used for extractions. """ # XXX def get_default_cache(): """Determine the default cache location This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the "Application Data" directory. On all other systems, it's "~/.python-eggs". """ try: return os.environ['PYTHON_EGG_CACHE'] except KeyError: pass if os.name!='nt': return os.path.expanduser('~/.python-eggs') # XXX this may be locale-specific! app_data = 'Application Data' app_homes = [ # best option, should be locale-safe (('APPDATA',), None), (('USERPROFILE',), app_data), (('HOMEDRIVE','HOMEPATH'), app_data), (('HOMEPATH',), app_data), (('HOME',), None), # 95/98/ME (('WINDIR',), app_data), ] for keys, subdir in app_homes: dirname = '' for key in keys: if key in os.environ: dirname = os.path.join(dirname, os.environ[key]) else: break else: if subdir: dirname = os.path.join(dirname, subdir) return os.path.join(dirname, 'Python-Eggs') else: raise RuntimeError( "Please set the PYTHON_EGG_CACHE enviroment variable" ) def safe_name(name): """Convert an arbitrary string to a standard distribution name Any runs of non-alphanumeric/. characters are replaced with a single '-'. 
""" return re.sub('[^A-Za-z0-9.]+', '-', name) def safe_version(version): """ Convert an arbitrary string to a standard version string """ try: # normalize the version return str(packaging.version.Version(version)) except packaging.version.InvalidVersion: version = version.replace(' ','.') return re.sub('[^A-Za-z0-9.]+', '-', version) def safe_extra(extra): """Convert an arbitrary string to a standard 'extra' name Any runs of non-alphanumeric characters are replaced with a single '_', and the result is always lowercased. """ return re.sub('[^A-Za-z0-9.]+', '_', extra).lower() def to_filename(name): """Convert a project or version name to its filename-escaped form Any '-' characters are currently replaced with '_'. """ return name.replace('-','_') class MarkerEvaluation(object): values = { 'os_name': lambda: os.name, 'sys_platform': lambda: sys.platform, 'python_full_version': platform.python_version, 'python_version': lambda: platform.python_version()[:3], 'platform_version': platform.version, 'platform_machine': platform.machine, 'python_implementation': platform.python_implementation, } @classmethod def is_invalid_marker(cls, text): """ Validate text as a PEP 426 environment marker; return an exception if invalid or False otherwise. """ try: cls.evaluate_marker(text) except SyntaxError as e: return cls.normalize_exception(e) return False @staticmethod def normalize_exception(exc): """ Given a SyntaxError from a marker evaluation, normalize the error message: - Remove indications of filename and line number. - Replace platform-specific error messages with standard error messages. """ subs = { 'unexpected EOF while parsing': 'invalid syntax', 'parenthesis is never closed': 'invalid syntax', } exc.filename = None exc.lineno = None exc.msg = subs.get(exc.msg, exc.msg) return exc @classmethod def and_test(cls, nodelist): # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! 
items = [ cls.interpret(nodelist[i]) for i in range(1, len(nodelist), 2) ] return functools.reduce(operator.and_, items) @classmethod def test(cls, nodelist): # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! items = [ cls.interpret(nodelist[i]) for i in range(1, len(nodelist), 2) ] return functools.reduce(operator.or_, items) @classmethod def atom(cls, nodelist): t = nodelist[1][0] if t == token.LPAR: if nodelist[2][0] == token.RPAR: raise SyntaxError("Empty parentheses") return cls.interpret(nodelist[2]) msg = "Language feature not supported in environment markers" raise SyntaxError(msg) @classmethod def comparison(cls, nodelist): if len(nodelist) > 4: msg = "Chained comparison not allowed in environment markers" raise SyntaxError(msg) comp = nodelist[2][1] cop = comp[1] if comp[0] == token.NAME: if len(nodelist[2]) == 3: if cop == 'not': cop = 'not in' else: cop = 'is not' try: cop = cls.get_op(cop) except KeyError: msg = repr(cop) + " operator not allowed in environment markers" raise SyntaxError(msg) return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3])) @classmethod def get_op(cls, op): ops = { symbol.test: cls.test, symbol.and_test: cls.and_test, symbol.atom: cls.atom, symbol.comparison: cls.comparison, 'not in': lambda x, y: x not in y, 'in': lambda x, y: x in y, '==': operator.eq, '!=': operator.ne, '<': operator.lt, '>': operator.gt, '<=': operator.le, '>=': operator.ge, } if hasattr(symbol, 'or_test'): ops[symbol.or_test] = cls.test return ops[op] @classmethod def evaluate_marker(cls, text, extra=None): """ Evaluate a PEP 426 environment marker on CPython 2.4+. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. This implementation uses the 'parser' module, which is not implemented on Jython and has been superseded by the 'ast' module in Python 2.6 and later. 
""" return cls.interpret(parser.expr(text).totuple(1)[1]) @classmethod def _markerlib_evaluate(cls, text): """ Evaluate a PEP 426 environment marker using markerlib. Return a boolean indicating the marker result in this environment. Raise SyntaxError if marker is invalid. """ from pip._vendor import _markerlib # markerlib implements Metadata 1.2 (PEP 345) environment markers. # Translate the variables to Metadata 2.0 (PEP 426). env = _markerlib.default_environment() for key in env.keys(): new_key = key.replace('.', '_') env[new_key] = env.pop(key) try: result = _markerlib.interpret(text, env) except NameError as e: raise SyntaxError(e.args[0]) return result if 'parser' not in globals(): # Fall back to less-complete _markerlib implementation if 'parser' module # is not available. evaluate_marker = _markerlib_evaluate @classmethod def interpret(cls, nodelist): while len(nodelist)==2: nodelist = nodelist[1] try: op = cls.get_op(nodelist[0]) except KeyError: raise SyntaxError("Comparison or logical expression expected") return op(nodelist) @classmethod def evaluate(cls, nodelist): while len(nodelist)==2: nodelist = nodelist[1] kind = nodelist[0] name = nodelist[1] if kind==token.NAME: try: op = cls.values[name] except KeyError: raise SyntaxError("Unknown name %r" % name) return op() if kind==token.STRING: s = nodelist[1] if not cls._safe_string(s): raise SyntaxError( "Only plain strings allowed in environment markers") return s[1:-1] msg = "Language feature not supported in environment markers" raise SyntaxError(msg) @staticmethod def _safe_string(cand): return ( cand[:1] in "'\"" and not cand.startswith('"""') and not cand.startswith("'''") and '\\' not in cand ) invalid_marker = MarkerEvaluation.is_invalid_marker evaluate_marker = MarkerEvaluation.evaluate_marker class NullProvider: """Try to implement resources and metadata for arbitrary PEP 302 loaders""" egg_name = None egg_info = None loader = None def __init__(self, module): self.loader = getattr(module, 
'__loader__', None) self.module_path = os.path.dirname(getattr(module, '__file__', '')) def get_resource_filename(self, manager, resource_name): return self._fn(self.module_path, resource_name) def get_resource_stream(self, manager, resource_name): return io.BytesIO(self.get_resource_string(manager, resource_name)) def get_resource_string(self, manager, resource_name): return self._get(self._fn(self.module_path, resource_name)) def has_resource(self, resource_name): return self._has(self._fn(self.module_path, resource_name)) def has_metadata(self, name): return self.egg_info and self._has(self._fn(self.egg_info, name)) if sys.version_info <= (3,): def get_metadata(self, name): if not self.egg_info: return "" return self._get(self._fn(self.egg_info, name)) else: def get_metadata(self, name): if not self.egg_info: return "" return self._get(self._fn(self.egg_info, name)).decode("utf-8") def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) def resource_isdir(self, resource_name): return self._isdir(self._fn(self.module_path, resource_name)) def metadata_isdir(self, name): return self.egg_info and self._isdir(self._fn(self.egg_info, name)) def resource_listdir(self, resource_name): return self._listdir(self._fn(self.module_path, resource_name)) def metadata_listdir(self, name): if self.egg_info: return self._listdir(self._fn(self.egg_info, name)) return [] def run_script(self, script_name, namespace): script = 'scripts/'+script_name if not self.has_metadata(script): raise ResolutionError("No script named %r" % script_name) script_text = self.get_metadata(script).replace('\r\n', '\n') script_text = script_text.replace('\r', '\n') script_filename = self._fn(self.egg_info, script) namespace['__file__'] = script_filename if os.path.exists(script_filename): source = open(script_filename).read() code = compile(source, script_filename, 'exec') exec(code, namespace, namespace) else: from linecache import cache cache[script_filename] = ( 
len(script_text), 0, script_text.split('\n'), script_filename ) script_code = compile(script_text, script_filename,'exec') exec(script_code, namespace, namespace) def _has(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _isdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _listdir(self, path): raise NotImplementedError( "Can't perform this operation for unregistered loader type" ) def _fn(self, base, resource_name): if resource_name: return os.path.join(base, *resource_name.split('/')) return base def _get(self, path): if hasattr(self.loader, 'get_data'): return self.loader.get_data(path) raise NotImplementedError( "Can't perform this operation for loaders without 'get_data()'" ) register_loader_type(object, NullProvider) class EggProvider(NullProvider): """Provider based on a virtual filesystem""" def __init__(self, module): NullProvider.__init__(self, module) self._setup_prefix() def _setup_prefix(self): # we assume here that our metadata may be nested inside a "basket" # of multiple eggs; that's why we use module_path instead of .archive path = self.module_path old = None while path!=old: if path.lower().endswith('.egg'): self.egg_name = os.path.basename(path) self.egg_info = os.path.join(path, 'EGG-INFO') self.egg_root = path break old = path path, base = os.path.split(path) class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" def _has(self, path): return os.path.exists(path) def _isdir(self, path): return os.path.isdir(path) def _listdir(self, path): return os.listdir(path) def get_resource_stream(self, manager, resource_name): return open(self._fn(self.module_path, resource_name), 'rb') def _get(self, path): with open(path, 'rb') as stream: return stream.read() register_loader_type(type(None), DefaultProvider) if importlib_machinery is not None: 
register_loader_type(importlib_machinery.SourceFileLoader, DefaultProvider) class EmptyProvider(NullProvider): """Provider that returns nothing for all requests""" _isdir = _has = lambda self, path: False _get = lambda self, path: '' _listdir = lambda self, path: [] module_path = None def __init__(self): pass empty_provider = EmptyProvider() class ZipManifests(dict): """ zip manifest builder """ @classmethod def build(cls, path): """ Build a dictionary similar to the zipimport directory caches, except instead of tuples, store ZipInfo objects. Use a platform-specific path separator (os.sep) for the path keys for compatibility with pypy on Windows. """ with ContextualZipFile(path) as zfile: items = ( ( name.replace('/', os.sep), zfile.getinfo(name), ) for name in zfile.namelist() ) return dict(items) load = build class MemoizedZipManifests(ZipManifests): """ Memoized zipfile manifests. """ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') def load(self, path): """ Load a manifest at path or return a suitable manifest already loaded. 
""" path = os.path.normpath(path) mtime = os.stat(path).st_mtime if path not in self or self[path].mtime != mtime: manifest = self.build(path) self[path] = self.manifest_mod(manifest, mtime) return self[path].manifest class ContextualZipFile(zipfile.ZipFile): """ Supplement ZipFile class to support context manager for Python 2.6 """ def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def __new__(cls, *args, **kwargs): """ Construct a ZipFile or ContextualZipFile as appropriate """ if hasattr(zipfile.ZipFile, '__exit__'): return zipfile.ZipFile(*args, **kwargs) return super(ContextualZipFile, cls).__new__(cls) class ZipProvider(EggProvider): """Resource support for zips and eggs""" eagers = None _zip_manifests = MemoizedZipManifests() def __init__(self, module): EggProvider.__init__(self, module) self.zip_pre = self.loader.archive+os.sep def _zipinfo_name(self, fspath): # Convert a virtual filename (full path to file) into a zipfile subpath # usable with the zipimport directory cache for our target archive if fspath.startswith(self.zip_pre): return fspath[len(self.zip_pre):] raise AssertionError( "%s is not a subpath of %s" % (fspath, self.zip_pre) ) def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. 
# pseudo-fs path fspath = self.zip_pre+zip_path if fspath.startswith(self.egg_root+os.sep): return fspath[len(self.egg_root)+1:].split(os.sep) raise AssertionError( "%s is not a subpath of %s" % (fspath, self.egg_root) ) @property def zipinfo(self): return self._zip_manifests.load(self.loader.archive) def get_resource_filename(self, manager, resource_name): if not self.egg_name: raise NotImplementedError( "resource_filename() only supported for .egg, not .zip" ) # no need to lock for extraction, since we use temp names zip_path = self._resource_to_zip(resource_name) eagers = self._get_eager_resources() if '/'.join(self._parts(zip_path)) in eagers: for name in eagers: self._extract_resource(manager, self._eager_to_zip(name)) return self._extract_resource(manager, zip_path) @staticmethod def _get_date_and_size(zip_stat): size = zip_stat.file_size # ymdhms+wday, yday, dst date_time = zip_stat.date_time + (0, 0, -1) # 1980 offset already done timestamp = time.mktime(date_time) return timestamp, size def _extract_resource(self, manager, zip_path): if zip_path in self._index(): for name in self._index()[zip_path]: last = self._extract_resource( manager, os.path.join(zip_path, name) ) # return the extracted directory name return os.path.dirname(last) timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not WRITE_SUPPORT: raise IOError('"os.rename" and "os.unlink" are not supported ' 'on this platform') try: real_path = manager.get_cache_path( self.egg_name, self._parts(zip_path) ) if self._is_current(real_path, zip_path): return real_path outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) os.write(outf, self.loader.get_data(zip_path)) os.close(outf) utime(tmpnam, (timestamp, timestamp)) manager.postprocess(tmpnam, real_path) try: rename(tmpnam, real_path) except os.error: if os.path.isfile(real_path): if self._is_current(real_path, zip_path): # the file became current since it was checked above, # so proceed. 
return real_path # Windows, del old file and retry elif os.name=='nt': unlink(real_path) rename(tmpnam, real_path) return real_path raise except os.error: # report a user-friendly error manager.extraction_error() return real_path def _is_current(self, file_path, zip_path): """ Return True if the file_path is current for this zip_path """ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not os.path.isfile(file_path): return False stat = os.stat(file_path) if stat.st_size!=size or stat.st_mtime!=timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) with open(file_path, 'rb') as f: file_contents = f.read() return zip_contents == file_contents def _get_eager_resources(self): if self.eagers is None: eagers = [] for name in ('native_libs.txt', 'eager_resources.txt'): if self.has_metadata(name): eagers.extend(self.get_metadata_lines(name)) self.eagers = eagers return self.eagers def _index(self): try: return self._dirindex except AttributeError: ind = {} for path in self.zipinfo: parts = path.split(os.sep) while parts: parent = os.sep.join(parts[:-1]) if parent in ind: ind[parent].append(parts[-1]) break else: ind[parent] = [parts.pop()] self._dirindex = ind return ind def _has(self, fspath): zip_path = self._zipinfo_name(fspath) return zip_path in self.zipinfo or zip_path in self._index() def _isdir(self, fspath): return self._zipinfo_name(fspath) in self._index() def _listdir(self, fspath): return list(self._index().get(self._zipinfo_name(fspath), ())) def _eager_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.egg_root, resource_name)) def _resource_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.module_path, resource_name)) register_loader_type(zipimport.zipimporter, ZipProvider) class FileMetadata(EmptyProvider): """Metadata handler for standalone PKG-INFO files Usage:: metadata = FileMetadata("/path/to/PKG-INFO") This provider rejects all data and metadata 
requests except for PKG-INFO, which is treated as existing, and will be the contents of the file at the provided location. """ def __init__(self, path): self.path = path def has_metadata(self, name): return name=='PKG-INFO' def get_metadata(self, name): if name=='PKG-INFO': with open(self.path,'rU') as f: metadata = f.read() return metadata raise KeyError("No metadata except PKG-INFO is available") def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) class PathMetadata(DefaultProvider): """Metadata provider for egg directories Usage:: # Development eggs: egg_info = "/path/to/PackageName.egg-info" base_dir = os.path.dirname(egg_info) metadata = PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] dist = Distribution(basedir, project_name=dist_name, metadata=metadata) # Unpacked egg directories: egg_path = "/path/to/PackageName-ver-pyver-etc.egg" metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) dist = Distribution.from_filename(egg_path, metadata=metadata) """ def __init__(self, path, egg_info): self.module_path = path self.egg_info = egg_info class EggMetadata(ZipProvider): """Metadata provider for .egg files""" def __init__(self, importer): """Create a metadata provider from a zipimporter""" self.zip_pre = importer.archive+os.sep self.loader = importer if importer.prefix: self.module_path = os.path.join(importer.archive, importer.prefix) else: self.module_path = importer.archive self._setup_prefix() _declare_state('dict', _distribution_finders = {}) def register_finder(importer_type, distribution_finder): """Register `distribution_finder` to find distributions in sys.path items `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `distribution_finder` is a callable that, passed a path item and the importer instance, yields ``Distribution`` instances found on that path item. 
See ``pkg_resources.find_on_path`` for an example.""" _distribution_finders[importer_type] = distribution_finder def find_distributions(path_item, only=False): """Yield distributions accessible via `path_item`""" importer = get_importer(path_item) finder = _find_adapter(_distribution_finders, importer) return finder(importer, path_item, only) def find_eggs_in_zip(importer, path_item, only=False): """ Find eggs in zip files; possibly multiple nested eggs. """ if importer.archive.endswith('.whl'): # wheels are not supported with this finder # they don't have PKG-INFO metadata, and won't ever contain eggs return metadata = EggMetadata(importer) if metadata.has_metadata('PKG-INFO'): yield Distribution.from_filename(path_item, metadata=metadata) if only: # don't yield nested distros return for subitem in metadata.resource_listdir('/'): if subitem.endswith('.egg'): subpath = os.path.join(path_item, subitem) for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath): yield dist register_finder(zipimport.zipimporter, find_eggs_in_zip) def find_nothing(importer, path_item, only=False): return () register_finder(object, find_nothing) def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) if os.path.isdir(path_item) and os.access(path_item, os.R_OK): if path_item.lower().endswith('.egg'): # unpacked egg yield Distribution.from_filename( path_item, metadata=PathMetadata( path_item, os.path.join(path_item,'EGG-INFO') ) ) else: # scan for .egg and .egg-info in directory for entry in os.listdir(path_item): lower = entry.lower() if lower.endswith('.egg-info') or lower.endswith('.dist-info'): fullpath = os.path.join(path_item, entry) if os.path.isdir(fullpath): # egg-info directory, allow getting metadata metadata = PathMetadata(path_item, fullpath) else: metadata = FileMetadata(fullpath) yield Distribution.from_location( path_item, entry, metadata, precedence=DEVELOP_DIST ) 
elif not only and lower.endswith('.egg'): dists = find_distributions(os.path.join(path_item, entry)) for dist in dists: yield dist elif not only and lower.endswith('.egg-link'): with open(os.path.join(path_item, entry)) as entry_file: entry_lines = entry_file.readlines() for line in entry_lines: if not line.strip(): continue path = os.path.join(path_item, line.rstrip()) dists = find_distributions(path) for item in dists: yield item break register_finder(pkgutil.ImpImporter, find_on_path) if importlib_machinery is not None: register_finder(importlib_machinery.FileFinder, find_on_path) _declare_state('dict', _namespace_handlers={}) _declare_state('dict', _namespace_packages={}) def register_namespace_handler(importer_type, namespace_handler): """Register `namespace_handler` to declare namespace packages `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item handler), and `namespace_handler` is a callable like this:: def namespace_handler(importer, path_entry, moduleName, module): # return a path_entry to use for child packages Namespace handlers are only called if the importer object has already agreed that it can handle the relevant path item, and they should only return a subpath if the module __path__ does not already contain an equivalent subpath. For an example namespace handler, see ``pkg_resources.file_ns_handler``. 
""" _namespace_handlers[importer_type] = namespace_handler def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" importer = get_importer(path_item) if importer is None: return None loader = importer.find_module(packageName) if loader is None: return None module = sys.modules.get(packageName) if module is None: module = sys.modules[packageName] = types.ModuleType(packageName) module.__path__ = [] _set_parent_ns(packageName) elif not hasattr(module,'__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) if subpath is not None: path = module.__path__ path.append(subpath) loader.load_module(packageName) for path_item in path: if path_item not in module.__path__: module.__path__.append(path_item) return subpath def declare_namespace(packageName): """Declare that package 'packageName' is a namespace package""" _imp.acquire_lock() try: if packageName in _namespace_packages: return path, parent = sys.path, None if '.' 
in packageName: parent = '.'.join(packageName.split('.')[:-1]) declare_namespace(parent) if parent not in _namespace_packages: __import__(parent) try: path = sys.modules[parent].__path__ except AttributeError: raise TypeError("Not a package:", parent) # Track what packages are namespaces, so when new path items are added, # they can be updated _namespace_packages.setdefault(parent,[]).append(packageName) _namespace_packages.setdefault(packageName,[]) for path_item in path: # Ensure all the parent's path items are reflected in the child, # if they apply _handle_ns(packageName, path_item) finally: _imp.release_lock() def fixup_namespace_packages(path_item, parent=None): """Ensure that previously-declared namespace packages include path_item""" _imp.acquire_lock() try: for package in _namespace_packages.get(parent,()): subpath = _handle_ns(package, path_item) if subpath: fixup_namespace_packages(subpath, package) finally: _imp.release_lock() def file_ns_handler(importer, path_item, packageName, module): """Compute an ns-package subpath for a filesystem or zipfile importer""" subpath = os.path.join(path_item, packageName.split('.')[-1]) normalized = _normalize_cached(subpath) for item in module.__path__: if _normalize_cached(item)==normalized: break else: # Only return the path if it's not already there return subpath register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) register_namespace_handler(zipimport.zipimporter, file_ns_handler) if importlib_machinery is not None: register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) def null_ns_handler(importer, path_item, packageName, module): return None register_namespace_handler(object, null_ns_handler) def normalize_path(filename): """Normalize a file/dir name for comparison purposes""" return os.path.normcase(os.path.realpath(filename)) def _normalize_cached(filename, _cache={}): try: return _cache[filename] except KeyError: _cache[filename] = result = normalize_path(filename) return 
result def _set_parent_ns(packageName): parts = packageName.split('.') name = parts.pop() if parts: parent = '.'.join(parts) setattr(sys.modules[parent], name, sys.modules[packageName]) def yield_lines(strs): """Yield non-empty/non-comment lines of a string or sequence""" if isinstance(strs, string_types): for s in strs.splitlines(): s = s.strip() # skip blank lines/comments if s and not s.startswith('#'): yield s else: for ss in strs: for s in yield_lines(ss): yield s # whitespace and comment LINE_END = re.compile(r"\s*(#.*)?$").match # line continuation CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # Distribution or extra DISTRO = re.compile(r"\s*((\w|[-.])+)").match # ver. info VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match # comma between items COMMA = re.compile(r"\s*,").match OBRACKET = re.compile(r"\s*\[").match CBRACKET = re.compile(r"\s*\]").match MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" (?P<name>[^-]+) ( -(?P<ver>[^-]+) ( -py(?P<pyver>[^-]+) ( -(?P<plat>.+) )? )? )? """, re.VERBOSE | re.IGNORECASE, ).match class EntryPoint(object): """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): if not MODULE(module_name): raise ValueError("Invalid module name", module_name) self.name = name self.module_name = module_name self.attrs = tuple(attrs) self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras self.dist = dist def __str__(self): s = "%s = %s" % (self.name, self.module_name) if self.attrs: s += ':' + '.'.join(self.attrs) if self.extras: s += ' [%s]' % ','.join(self.extras) return s def __repr__(self): return "EntryPoint.parse(%r)" % str(self) def load(self, require=True, *args, **kwargs): """ Require packages for this EntryPoint, then resolve it. """ if not require or args or kwargs: warnings.warn( "Parameters to load are deprecated. 
Call .resolve and " ".require separately.", DeprecationWarning, stacklevel=2, ) if require: self.require(*args, **kwargs) return self.resolve() def resolve(self): """ Resolve the entry point from its module and attrs. """ module = __import__(self.module_name, fromlist=['__name__'], level=0) try: return functools.reduce(getattr, self.attrs, module) except AttributeError as exc: raise ImportError(str(exc)) def require(self, env=None, installer=None): if self.extras and not self.dist: raise UnknownExtra("Can't require() without a distribution", self) reqs = self.dist.requires(self.extras) items = working_set.resolve(reqs, env, installer) list(map(working_set.add, items)) pattern = re.compile( r'\s*' r'(?P<name>.+?)\s*' r'=\s*' r'(?P<module>[\w.]+)\s*' r'(:\s*(?P<attr>[\w.]+))?\s*' r'(?P<extras>\[.*\])?\s*$' ) @classmethod def parse(cls, src, dist=None): """Parse a single entry point from string `src` Entry point syntax follows the form:: name = some.module:some.attr [extra1, extra2] The entry name and module name are required, but the ``:attrs`` and ``[extras]`` parts are optional """ m = cls.pattern.match(src) if not m: msg = "EntryPoint must be in 'name=module:attrs [extras]' format" raise ValueError(msg, src) res = m.groupdict() extras = cls._parse_extras(res['extras']) attrs = res['attr'].split('.') if res['attr'] else () return cls(res['name'], res['module'], attrs, extras, dist) @classmethod def _parse_extras(cls, extras_spec): if not extras_spec: return () req = Requirement.parse('x' + extras_spec) if req.specs: raise ValueError() return req.extras @classmethod def parse_group(cls, group, lines, dist=None): """Parse an entry point group""" if not MODULE(group): raise ValueError("Invalid group name", group) this = {} for line in yield_lines(lines): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) this[ep.name]=ep return this @classmethod def parse_map(cls, data, dist=None): """Parse a map of entry point 
groups""" if isinstance(data, dict): data = data.items() else: data = split_sections(data) maps = {} for group, lines in data: if group is None: if not lines: continue raise ValueError("Entry points must be listed in groups") group = group.strip() if group in maps: raise ValueError("Duplicate group name", group) maps[group] = cls.parse_group(group, lines, dist) return maps def _remove_md5_fragment(location): if not location: return '' parsed = urlparse(location) if parsed[-1].startswith('md5='): return urlunparse(parsed[:-1] + ('',)) return location class Distribution(object): """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' def __init__(self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST): self.project_name = safe_name(project_name or 'Unknown') if version is not None: self._version = safe_version(version) self.py_version = py_version self.platform = platform self.location = location self.precedence = precedence self._provider = metadata or empty_provider @classmethod def from_location(cls, location, basename, metadata=None,**kw): project_name, version, py_version, platform = [None]*4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: # .dist-info gets much metadata differently match = EGG_NAME(basename) if match: project_name, version, py_version, platform = match.group( 'name','ver','pyver','plat' ) cls = _distributionImpl[ext.lower()] return cls( location, metadata, project_name=project_name, version=version, py_version=py_version, platform=platform, **kw ) @property def hashcmp(self): return ( self.parsed_version, self.precedence, self.key, _remove_md5_fragment(self.location), self.py_version or '', self.platform or '', ) def __hash__(self): return hash(self.hashcmp) def __lt__(self, other): return self.hashcmp < other.hashcmp def __le__(self, other): return self.hashcmp <= other.hashcmp def __gt__(self, other): return 
self.hashcmp > other.hashcmp def __ge__(self, other): return self.hashcmp >= other.hashcmp def __eq__(self, other): if not isinstance(other, self.__class__): # It's not a Distribution, so they are not equal return False return self.hashcmp == other.hashcmp def __ne__(self, other): return not self == other # These properties have to be lazy so that we don't have to load any # metadata until/unless it's actually needed. (i.e., some distributions # may not know their name or version without loading PKG-INFO) @property def key(self): try: return self._key except AttributeError: self._key = key = self.project_name.lower() return key @property def parsed_version(self): if not hasattr(self, "_parsed_version"): self._parsed_version = parse_version(self.version) return self._parsed_version def _warn_legacy_version(self): LV = packaging.version.LegacyVersion is_legacy = isinstance(self._parsed_version, LV) if not is_legacy: return # While an empty version is technically a legacy version and # is not a valid PEP 440 version, it's also unlikely to # actually come from someone and instead it is more likely that # it comes from setuptools attempting to parse a filename and # including it in the list. So for that we'll gate this warning # on if the version is anything at all or not. if not self.version: return tmpl = textwrap.dedent(""" '{project_name} ({version})' is being parsed as a legacy, non PEP 440, version. You may find odd behavior and sort order. In particular it will be sorted as less than 0.0. It is recommended to migrate to PEP 440 compatible versions. 
""").strip().replace('\n', ' ') warnings.warn(tmpl.format(**vars(self)), PEP440Warning) @property def version(self): try: return self._version except AttributeError: for line in self._get_metadata(self.PKG_INFO): if line.lower().startswith('version:'): self._version = safe_version(line.split(':',1)[1].strip()) return self._version else: tmpl = "Missing 'Version:' header and/or %s file" raise ValueError(tmpl % self.PKG_INFO, self) @property def _dep_map(self): try: return self.__dep_map except AttributeError: dm = self.__dep_map = {None: []} for name in 'requires.txt', 'depends.txt': for extra, reqs in split_sections(self._get_metadata(name)): if extra: if ':' in extra: extra, marker = extra.split(':', 1) if invalid_marker(marker): # XXX warn reqs=[] elif not evaluate_marker(marker): reqs=[] extra = safe_extra(extra) or None dm.setdefault(extra,[]).extend(parse_requirements(reqs)) return dm def requires(self, extras=()): """List of Requirements needed for this distro if `extras` are used""" dm = self._dep_map deps = [] deps.extend(dm.get(None, ())) for ext in extras: try: deps.extend(dm[safe_extra(ext)]) except KeyError: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) ) return deps def _get_metadata(self, name): if self.has_metadata(name): for line in self.get_metadata_lines(name): yield line def activate(self, path=None): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: path = sys.path self.insert_on(path) if path is sys.path: fixup_namespace_packages(self.location) for pkg in self._get_metadata('namespace_packages.txt'): if pkg in sys.modules: declare_namespace(pkg) def egg_name(self): """Return what this distribution's standard .egg filename should be""" filename = "%s-%s-py%s" % ( to_filename(self.project_name), to_filename(self.version), self.py_version or PY_MAJOR ) if self.platform: filename += '-' + self.platform return filename def __repr__(self): if self.location: return "%s (%s)" % (self, 
self.location) else: return str(self) def __str__(self): try: version = getattr(self, 'version', None) except ValueError: version = None version = version or "[unknown version]" return "%s %s" % (self.project_name, version) def __getattr__(self, attr): """Delegate all unrecognized public attributes to .metadata provider""" if attr.startswith('_'): raise AttributeError(attr) return getattr(self._provider, attr) @classmethod def from_filename(cls, filename, metadata=None, **kw): return cls.from_location( _normalize_cached(filename), os.path.basename(filename), metadata, **kw ) def as_requirement(self): """Return a ``Requirement`` that matches this distribution exactly""" if isinstance(self.parsed_version, packaging.version.Version): spec = "%s==%s" % (self.project_name, self.parsed_version) else: spec = "%s===%s" % (self.project_name, self.parsed_version) return Requirement.parse(spec) def load_entry_point(self, group, name): """Return the `name` entry point of `group` or raise ImportError""" ep = self.get_entry_info(group, name) if ep is None: raise ImportError("Entry point %r not found" % ((group, name),)) return ep.load() def get_entry_map(self, group=None): """Return the entry point map for `group`, or the full entry map""" try: ep_map = self._ep_map except AttributeError: ep_map = self._ep_map = EntryPoint.parse_map( self._get_metadata('entry_points.txt'), self ) if group is not None: return ep_map.get(group,{}) return ep_map def get_entry_info(self, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return self.get_entry_map(group).get(name) def insert_on(self, path, loc = None): """Insert self.location in path before its nearest parent directory""" loc = loc or self.location if not loc: return nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) npath= [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: break elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, 
give it precedence over its directory if path is sys.path: self.check_version_conflict() path.insert(p, loc) npath.insert(p, nloc) break else: if path is sys.path: self.check_version_conflict() path.append(loc) return # p is the spot where we found or inserted loc; now remove duplicates while True: try: np = npath.index(nloc, p+1) except ValueError: break else: del npath[np], path[np] # ha! p = np return def check_version_conflict(self): if self.key == 'setuptools': # ignore the inevitable setuptools self-conflicts :( return nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) loc = normalize_path(self.location) for modname in self._get_metadata('top_level.txt'): if (modname not in sys.modules or modname in nsp or modname in _namespace_packages): continue if modname in ('pkg_resources', 'setuptools', 'site'): continue fn = getattr(sys.modules[modname], '__file__', None) if fn and (normalize_path(fn).startswith(loc) or fn.startswith(self.location)): continue issue_warning( "Module %s was already imported from %s, but %s is being added" " to sys.path" % (modname, fn, self.location), ) def has_version(self): try: self.version except ValueError: issue_warning("Unbuilt egg for " + repr(self)) return False return True def clone(self,**kw): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): kw.setdefault(attr, getattr(self, attr, None)) kw.setdefault('metadata', self._provider) return self.__class__(**kw) @property def extras(self): return [dep for dep in self._dep_map if dep] class DistInfoDistribution(Distribution): """Wrap an actual or potential sys.path entry w/metadata, .dist-info style""" PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @property def _parsed_pkg_info(self): """Parse and cache metadata""" try: return self._pkg_info except AttributeError: metadata = self.get_metadata(self.PKG_INFO) self._pkg_info = 
email.parser.Parser().parsestr(metadata) return self._pkg_info @property def _dep_map(self): try: return self.__dep_map except AttributeError: self.__dep_map = self._compute_dependencies() return self.__dep_map def _preparse_requirement(self, requires_dist): """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz') Split environment marker, add == prefix to version specifiers as necessary, and remove parenthesis. """ parts = requires_dist.split(';', 1) + [''] distvers = parts[0].strip() mark = parts[1].strip() distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers) distvers = distvers.replace('(', '').replace(')', '') return (distvers, mark) def _compute_dependencies(self): """Recompute this distribution's dependencies.""" from pip._vendor._markerlib import compile as compile_marker dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: distvers, mark = self._preparse_requirement(req) parsed = next(parse_requirements(distvers)) parsed.marker_fn = compile_marker(mark) reqs.append(parsed) def reqs_for_extra(extra): for req in reqs: if req.marker_fn(override={'extra':extra}): yield req common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: extra = safe_extra(extra.strip()) dm[extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm _distributionImpl = { '.egg': Distribution, '.egg-info': Distribution, '.dist-info': DistInfoDistribution, } def issue_warning(*args,**kw): level = 1 g = globals() try: # find the first stack frame that is *not* code in # the pkg_resources module, to use for the warning while sys._getframe(level).f_globals is g: level += 1 except ValueError: pass warnings.warn(stacklevel=level + 1, *args, **kw) class RequirementParseError(ValueError): def __str__(self): return ' '.join(self.args) def parse_requirements(strs): """Yield ``Requirement`` objects for each specification in 
`strs` `strs` must be a string, or a (possibly-nested) iterable thereof. """ # create a steppable iterator, so we can handle \-continuations lines = iter(yield_lines(strs)) def scan_list(ITEM, TERMINATOR, line, p, groups, item_name): items = [] while not TERMINATOR(line, p): if CONTINUE(line, p): try: line = next(lines) p = 0 except StopIteration: msg = "\\ must not appear on the last nonblank line" raise RequirementParseError(msg) match = ITEM(line, p) if not match: msg = "Expected " + item_name + " in" raise RequirementParseError(msg, line, "at", line[p:]) items.append(match.group(*groups)) p = match.end() match = COMMA(line, p) if match: # skip the comma p = match.end() elif not TERMINATOR(line, p): msg = "Expected ',' or end-of-list in" raise RequirementParseError(msg, line, "at", line[p:]) match = TERMINATOR(line, p) # skip the terminator, if any if match: p = match.end() return line, p, items for line in lines: match = DISTRO(line) if not match: raise RequirementParseError("Missing distribution spec", line) project_name = match.group(1) p = match.end() extras = [] match = OBRACKET(line, p) if match: p = match.end() line, p, extras = scan_list( DISTRO, CBRACKET, line, p, (1,), "'extra' name" ) line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2), "version spec") specs = [(op, val) for op, val in specs] yield Requirement(project_name, specs, extras) class Requirement: def __init__(self, project_name, specs, extras): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" self.unsafe_name, project_name = project_name, safe_name(project_name) self.project_name, self.key = project_name, project_name.lower() self.specifier = packaging.specifiers.SpecifierSet( ",".join(["".join([x, y]) for x, y in specs]) ) self.specs = specs self.extras = tuple(map(safe_extra, extras)) self.hashCmp = ( self.key, self.specifier, frozenset(self.extras), ) self.__hash = hash(self.hashCmp) def __str__(self): extras = ','.join(self.extras) if extras: extras = 
'[%s]' % extras return '%s%s%s' % (self.project_name, extras, self.specifier) def __eq__(self, other): return ( isinstance(other, Requirement) and self.hashCmp == other.hashCmp ) def __ne__(self, other): return not self == other def __contains__(self, item): if isinstance(item, Distribution): if item.key != self.key: return False item = item.version # Allow prereleases always in order to match the previous behavior of # this method. In the future this should be smarter and follow PEP 440 # more accurately. return self.specifier.contains(item, prereleases=True) def __hash__(self): return self.__hash def __repr__(self): return "Requirement.parse(%r)" % str(self) @staticmethod def parse(s): reqs = list(parse_requirements(s)) if reqs: if len(reqs) == 1: return reqs[0] raise ValueError("Expected only one requirement", s) raise ValueError("No requirements found", s) def _get_mro(cls): """Get an mro for a type or classic class""" if not isinstance(cls, type): class cls(cls, object): pass return cls.__mro__[1:] return cls.__mro__ def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" for t in _get_mro(getattr(ob, '__class__', type(ob))): if t in registry: return registry[t] def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) if not os.path.isdir(dirname): os.makedirs(dirname) def _bypass_ensure_directory(path): """Sandbox-bypassing version of ensure_directory()""" if not WRITE_SUPPORT: raise IOError('"os.mkdir" not supported on this platform.') dirname, filename = split(path) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) mkdir(dirname, 0o755) def split_sections(s): """Split a string or iterable thereof into (section, content) pairs Each ``section`` is a stripped version of the section header ("[section]") and each ``content`` is a list of stripped lines excluding blank lines and comment-only lines. 
If there are any such lines before the first section header, they're returned in a first ``section`` of ``None``. """ section = None content = [] for line in yield_lines(s): if line.startswith("["): if line.endswith("]"): if section or content: yield section, content section = line[1:-1].strip() content = [] else: raise ValueError("Invalid section heading", line) else: content.append(line) # wrap up last segment yield section, content def _mkstemp(*args,**kw): old_open = os.open try: # temporarily bypass sandboxing os.open = os_open return tempfile.mkstemp(*args,**kw) finally: # and then put it back os.open = old_open # Silence the PEP440Warning by default, so that end users don't get hit by it # randomly just because they use pkg_resources. We want to append the rule # because we want earlier uses of filterwarnings to take precedence over this # one. warnings.filterwarnings("ignore", category=PEP440Warning, append=True) # from jaraco.functools 1.3 def _call_aside(f, *args, **kwargs): f(*args, **kwargs) return f @_call_aside def _initialize(g=globals()): "Set up global resource manager (deliberately not state-saved)" manager = ResourceManager() g['_manager'] = manager for name in dir(manager): if not name.startswith('_'): g[name] = getattr(manager, name) @_call_aside def _initialize_master_working_set(): """ Prepare the master working set and make the ``require()`` API available. This function has explicit effects on the global state of pkg_resources. It is intended to be invoked once at the initialization of this module. Invocation by other packages is unsupported and done at their own risk. 
""" working_set = WorkingSet._build_master() _declare_state('object', working_set=working_set) require = working_set.require iter_entry_points = working_set.iter_entry_points add_activation_listener = working_set.subscribe run_script = working_set.run_script # backward compatibility run_main = run_script # Activate all distributions already on sys.path, and ensure that # all distributions added to the working set in the future (e.g. by # calling ``require()``) will get activated as well. add_activation_listener(lambda dist: dist.activate()) working_set.entries=[] # match order list(map(working_set.add_entry, sys.path)) globals().update(locals())
chatelak/RMG-Py
refs/heads/master
rmgpy/data/thermoTest.py
2
#!/usr/bin/python # -*- coding: utf-8 -*- import os import unittest from external.wip import work_in_progress from rmgpy import settings from rmgpy.species import Species from rmgpy.data.thermo import ThermoDatabase from rmgpy.molecule.molecule import Molecule ################################################################################ class TestThermoDatabase(unittest.TestCase): """ Contains unit tests of the ThermoDatabase class. """ # Only load these once to save time database = ThermoDatabase() database.load(os.path.join(settings['database.directory'], 'thermo')) # oldDatabase = ThermoDatabase() # oldDatabase.loadOld(os.path.join(settings['database.directory'], '../output/RMG_database')) def setUp(self): """ A function run before each unit test in this class. """ self.database = self.__class__.database # self.oldDatabase = self.__class__.oldDatabase self.Tlist = [300, 400, 500, 600, 800, 1000, 1500] self.testCases = [ # SMILES symm H298 S298 Cp300 Cp400 Cp500 Cp600 Cp800 Cp1000 Cp1500 # 1,3-hexadiene decomposition products ['C=CC=CCC', 3, 13.45, 86.37, 29.49, 37.67, 44.54, 50.12, 58.66, 64.95, 74.71], ['[CH]=CC=CCC', 3, 72.55, 87.76, 29.30, 36.92, 43.18, 48.20, 55.84, 61.46, 70.18], ['C=[C]C=CCC', 3, 61.15, 87.08, 29.68, 36.91, 43.03, 48.11, 55.96, 61.78, 71.54], ['C=C[C]=CCC', 3, 61.15, 87.08, 29.68, 36.91, 43.03, 48.11, 55.96, 61.78, 71.54], ['C=CC=[C]CC', 3, 70.35, 88.18, 29.15, 36.46, 42.6, 47.6, 55.32, 61.04, 69.95], ['C=CC=C[CH]C', 6, 38.24, 84.41, 27.79, 35.46, 41.94, 47.43, 55.74, 61.92, 71.86], ['C=CC=CC[CH2]', 2, 62.45, 89.78, 28.72, 36.31, 42.63, 47.72, 55.50, 61.21, 70.05], ['[CH3]', 6, 34.81, 46.37, 9.14, 10.18, 10.81, 11.34, 12.57, 13.71, 15.2], ['C=CC=C[CH2]', 2, 46.11, 75.82, 22.54, 28.95, 34.24, 38.64, 45.14, 49.97, 57.85], ['[CH2]C', 6, 28.6, 59.87, 11.73, 14.47, 17.05, 19.34, 23.02, 25.91, 31.53], ['C=CC=[CH]', 1, 85.18, 69.37, 18.93, 23.55, 27.16, 29.92, 34.02, 37.03, 41.81], ['C=[CH]', 1, 71.62, 56.61, 10.01, 11.97, 13.66, 15.08, 17.32, 
19.05, 21.85], ['[CH]=CCC', 3, 58.99, 75.0, 20.38, 25.34, 29.68, 33.36, 39.14, 43.48, 50.22], # Cyclic Structures ['C1CCCCC1', 12, -29.45, 69.71, 27.20, 37.60, 46.60, 54.80, 67.50, 76.20, 88.50], ['C1CCC1', 8, 6.51, 63.35, 17.39, 23.91, 29.86, 34.76, 42.40, 47.98, 56.33], ['C1C=CC=C1', 2, 32.5, 65.5, 18.16, 24.71, 30.25, 34.7, 41.25, 45.83, 52.61], ] @work_in_progress def testNewThermoGeneration(self): """ Test that the new ThermoDatabase generates appropriate thermo data. """ for smiles, symm, H298, S298, Cp300, Cp400, Cp500, Cp600, Cp800, Cp1000, Cp1500 in self.testCases: Cplist = [Cp300, Cp400, Cp500, Cp600, Cp800, Cp1000, Cp1500] molecule=Molecule(SMILES=smiles) species = Species(molecule=molecule) species.generateResonanceIsomers() species.molecule[0] thermoData = self.database.getThermoDataFromGroups(species) molecule = species.molecule[0] for mol in species.molecule[1:]: thermoData0 = self.database.getAllThermoData(Species(molecule=[mol]))[0][0] for data in self.database.getAllThermoData(Species(molecule=[mol]))[1:]: if data.getEnthalpy(298) < thermoData0.getEnthalpy(298): thermoData0 = data if thermoData0.getEnthalpy(298) < thermoData.getEnthalpy(298): thermoData = thermoData0 molecule = mol self.assertAlmostEqual(H298, thermoData.getEnthalpy(298) / 4184, places=1, msg="H298 error for {0}".format(smiles)) self.assertAlmostEqual(S298, thermoData.getEntropy(298) / 4.184, places=1, msg="S298 error for {0}".format(smiles)) for T, Cp in zip(self.Tlist, Cplist): self.assertAlmostEqual(Cp, thermoData.getHeatCapacity(T) / 4.184, places=1, msg="Cp{1} error for {0}".format(smiles,T)) def testSymmetryContributionRadicals(self): """ Test that the symmetry contribution is correctly added for radicals estimated via the HBI method. 
""" spc = Species(molecule=[Molecule().fromSMILES('[CH3]')]) thermoData_lib = self.database.getThermoDataFromLibraries(spc)[0] thermoData_ga = self.database.getThermoDataFromGroups(spc) self.assertAlmostEqual(thermoData_lib.getEntropy(298.), thermoData_ga.getEntropy(298.), 0) @work_in_progress def testSymmetryNumberGeneration(self): """ Test we generate symmetry numbers correctly. This uses the new thermo database to generate the H298, used to select the stablest resonance isomer. """ for smiles, symm, H298, S298, Cp300, Cp400, Cp500, Cp600, Cp800, Cp1000, Cp1500 in self.testCases: molecule=Molecule(SMILES=smiles) species = Species(molecule=molecule) species.generateResonanceIsomers() thermoData = self.database.getThermoDataFromGroups(Species(molecule=[species.molecule[0]])) # pick the molecule with lowest H298 molecule = species.molecule[0] for mol in species.molecule[1:]: thermoData0 = self.database.getAllThermoData(Species(molecule=[mol]))[0][0] for data in self.database.getAllThermoData(Species(molecule=[mol]))[1:]: if data.getEnthalpy(298) < thermoData0.getEnthalpy(298): thermoData0 = data if thermoData0.getEnthalpy(298) < thermoData.getEnthalpy(298): thermoData = thermoData0 molecule = mol self.assertEqual(molecule.calculateSymmetryNumber(), symm, msg="Symmetry number error for {0}".format(smiles)) # @work_in_progress # def testOldThermoGeneration(self): # """ # Test that the old ThermoDatabase generates relatively accurate thermo data. 
# """ # for smiles, symm, H298, S298, Cp300, Cp400, Cp500, Cp600, Cp800, Cp1000, Cp1500 in self.testCases: # Cplist = [Cp300, Cp400, Cp500, Cp600, Cp800, Cp1000, Cp1500] # species = Species(molecule=[Molecule(SMILES=smiles)]) # species.generateResonanceIsomers() # thermoData = self.oldDatabase.getThermoData(Species(molecule=[species.molecule[0]])) # molecule = species.molecule[0] # for mol in species.molecule[1:]: # thermoData0 = self.oldDatabase.getAllThermoData(Species(molecule=[mol]))[0][0] # for data in self.oldDatabase.getAllThermoData(Species(molecule=[mol]))[1:]: # if data.getEnthalpy(298) < thermoData0.getEnthalpy(298): # thermoData0 = data # if thermoData0.getEnthalpy(298) < thermoData.getEnthalpy(298): # thermoData = thermoData0 # molecule = mol # # self.assertAlmostEqual(H298, thermoData.getEnthalpy(298) / 4184, places=1, msg="H298 error for {0}".format(smiles)) # self.assertAlmostEqual(S298, thermoData.getEntropy(298) / 4.184, places=1, msg="S298 error for {0}".format(smiles)) # for T, Cp in zip(self.Tlist, Cplist): # self.assertAlmostEqual(Cp, thermoData.getHeatCapacity(T) / 4.184, places=1, msg="Cp{1} error for {0}".format(smiles, T)) class TestThermoDatabaseAromatics(TestThermoDatabase): """ Test only Aromatic species. A copy of the above class, but with different test compounds """ def setUp(self): TestThermoDatabase.setUp(self) self.testCases = [ # SMILES symm H298 S298 Cp300 Cp400 Cp500 Cp600 Cp800 Cp1000 Cp1500 ['c1ccccc1', 12, 19.80, 64.24, 19.44, 26.64, 32.76, 37.80, 45.24, 50.46, 58.38], ['c1ccc2ccccc2c1', 4, 36.0, 79.49, 31.94, 42.88, 52.08, 59.62, 70.72, 78.68, 90.24], ] def __init__(self, *args, **kwargs): super(TestThermoDatabaseAromatics, self).__init__(*args, **kwargs) self._testMethodDoc = self._testMethodDoc.strip().split('\n')[0] + " for Aromatics.\n" class TestCyclicThermo(unittest.TestCase): """ Contains unit tests of the ThermoDatabase class. 
""" database = ThermoDatabase() database.load(os.path.join(settings['database.directory'], 'thermo')) def setUp(self): """ A function run before each unit test in this class. """ self.database = self.__class__.database def testComputeGroupAdditivityThermoForTwoRingMolecule(self): """ The molecule being tested has two rings, one is 13cyclohexadiene5methylene the other is benzene ring. This method is to test thermo estimation will give two different corrections accordingly. """ spec = Species().fromSMILES('CCCCCCCCCCCC(CC=C1C=CC=CC1)c1ccccc1') spec.generateResonanceIsomers() thermo = self.database.getThermoDataFromGroups(spec) ringGroups, polycyclicGroups = self.database.getRingGroupsFromComments(thermo) self.assertEqual(len(ringGroups),2) self.assertEqual(len(polycyclicGroups),0) expected_matchedRingsLabels = ['13cyclohexadiene5methylene', 'Benzene'] expected_matchedRings = [self.database.groups['ring'].entries[label] for label in expected_matchedRingsLabels] self.assertEqual(set(ringGroups), set(expected_matchedRings)) def testThermoForMonocyclicAndPolycyclicSameMolecule(self): """ Test a molecule that has both a polycyclic and a monocyclic ring in the same molecule """ spec = Species().fromSMILES('C(CCC1C2CCC1CC2)CC1CCC1') spec.generateResonanceIsomers() thermo = self.database.getThermoDataFromGroups(spec) ringGroups, polycyclicGroups = self.database.getRingGroupsFromComments(thermo) self.assertEqual(len(ringGroups),1) self.assertEqual(len(polycyclicGroups),1) expected_matchedRingsLabels = ['Cyclobutane'] expected_matchedRings = [self.database.groups['ring'].entries[label] for label in expected_matchedRingsLabels] self.assertEqual(set(ringGroups), set(expected_matchedRings)) expected_matchedPolyringsLabels = ['norbornane'] expected_matchedPolyrings = [self.database.groups['polycyclic'].entries[label] for label in expected_matchedPolyringsLabels] self.assertEqual(set(polycyclicGroups), set(expected_matchedPolyrings)) def testPolycyclicPicksBestThermo(self): """ 
Test that RMG prioritizes thermo correctly and chooses the thermo from the isomer which has a non generic polycyclic ring correction """ spec = Species().fromSMILES('C1=C[C]2CCC=C2C1') spec.generateResonanceIsomers() thermoDataList = [] for molecule in spec.molecule: thermo = self.database.estimateRadicalThermoViaHBI(molecule, self.database.computeGroupAdditivityThermo) thermoDataList.append(thermo) thermoDataList.sort(key=lambda x: x.getEnthalpy(298)) most_stable_thermo = thermoDataList[0] ringGroups, polycyclicGroups = self.database.getRingGroupsFromComments(most_stable_thermo) selected_thermo = self.database.getThermoDataFromGroups(spec) self.assertNotEqual(selected_thermo, thermoDataList) selected_ringGroups, selected_polycyclicGroups = self.database.getRingGroupsFromComments(selected_thermo) # The group used to estimate the most stable thermo is the generic polycyclic group and # therefore is not selected. Note that this unit test will have to change if the correction is fixed later. self.assertEqual(polycyclicGroups[0].label, 'PolycyclicRing') self.assertEqual(selected_polycyclicGroups[0].label, 'C12CCC=C1CC=C2') ################################################################################ if __name__ == '__main__': unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
lancezlin/pylearn2
refs/heads/master
pylearn2/scripts/papers/jia_huang_wkshp_11/assemble.py
44
from __future__ import print_function import numpy as np import os from theano.compat.six.moves import xrange #check that the right files are present names = os.listdir('.') if 'features.npy' in names: print("Not doing anything, features.npy already exists.") quit(0) chunk_names = [ 'features_A.npy', 'features_B.npy', 'features_C.npy', 'features_D.npy', 'features_E.npy' ] for name in chunk_names: assert name in names for name in chunk_names: if name.startswith('features') and name.endswith('.npy'): if name not in chunk_names: print("I'm not sure what to do with "+name) print("The existence of this file makes me think extract_features.yaml has changed") print("I don't want to do something incorrect so I'm going to give up.") quit(-1) #Do the conversion print('loading '+chunk_names[0]) first_chunk = np.load(chunk_names[0]) final_shape = list(first_chunk.shape) final_shape[0] = 50000 print('making output') X = np.zeros(final_shape,dtype='float32') idx = first_chunk.shape[0] X[0:idx,:] = first_chunk for i in xrange(1, len(chunk_names)): arg = chunk_names[i] print('loading '+arg) chunk = np.load(arg) chunk_span = chunk.shape[0] X[idx:idx+chunk_span,...] = chunk idx += chunk_span print("Saving features.npy...") np.save('features.npy',X) print("Deleting the chunks...") for chunk_name in chunk_names: os.remove(chunk_name)
m-ober/byceps
refs/heads/master
byceps/blueprints/admin/email/authorization.py
1
""" byceps.blueprints.admin.email.authorization ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :Copyright: 2006-2020 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from byceps.util.authorization import create_permission_enum EmailConfigPermission = create_permission_enum('email_config', [ 'view', ])
open-homeautomation/home-assistant
refs/heads/dev
tests/components/test_google.py
13
"""The tests for the Google Calendar component.""" import logging import unittest from unittest.mock import patch import homeassistant.components.google as google from homeassistant.bootstrap import setup_component from tests.common import get_test_home_assistant _LOGGER = logging.getLogger(__name__) class TestGoogle(unittest.TestCase): """Test the Google component.""" def setUp(self): # pylint: disable=invalid-name """Setup things to be run when tests are started.""" self.hass = get_test_home_assistant() def tearDown(self): # pylint: disable=invalid-name """Stop everything that was started.""" self.hass.stop() @patch('homeassistant.components.google.do_authentication') def test_setup_component(self, mock_do_auth): """Test setup component.""" config = { 'google': { 'client_id': 'id', 'client_secret': 'secret', } } self.assertTrue(setup_component(self.hass, 'google', config)) def test_get_calendar_info(self): """Test getting the calendar info.""" calendar = { 'id': 'qwertyuiopasdfghjklzxcvbnm@import.calendar.google.com', 'etag': '"3584134138943410"', 'timeZone': 'UTC', 'accessRole': 'reader', 'foregroundColor': '#000000', 'selected': True, 'kind': 'calendar#calendarListEntry', 'backgroundColor': '#16a765', 'description': 'Test Calendar', 'summary': 'We are, we are, a... Test Calendar', 'colorId': '8', 'defaultReminders': [], 'track': True } calendar_info = google.get_calendar_info(self.hass, calendar) self.assertEquals(calendar_info, { 'cal_id': 'qwertyuiopasdfghjklzxcvbnm@import.calendar.google.com', 'entities': [{ 'device_id': 'we_are_we_are_a_test_calendar', 'name': 'We are, we are, a... 
Test Calendar', 'track': True, }] }) def test_found_calendar(self): """Test when a calendar is found.""" # calendar = { # 'id': 'qwertyuiopasdfghjklzxcvbnm@import.calendar.google.com', # 'etag': '"3584134138943410"', # 'timeZone': 'UTC', # 'accessRole': 'reader', # 'foregroundColor': '#000000', # 'selected': True, # 'kind': 'calendar#calendarListEntry', # 'backgroundColor': '#16a765', # 'description': 'Test Calendar', # 'summary': 'We are, we are, a... Test Calendar', # 'colorId': '8', # 'defaultReminders': [], # 'track': True # } # self.assertIsInstance(self.hass.data[google.DATA_INDEX], dict) # self.assertEquals(self.hass.data[google.DATA_INDEX], {}) calendar_service = google.GoogleCalendarService( self.hass.config.path(google.TOKEN_FILE)) self.assertTrue(google.setup_services(self.hass, True, calendar_service)) # self.hass.services.call('google', 'found_calendar', calendar, # blocking=True) # TODO: Fix this # self.assertTrue(self.hass.data[google.DATA_INDEX] # # .get(calendar['id'], None) is not None)
tonybaloney/st2contrib
refs/heads/master
packs/github/actions/add_status.py
15
from github import GithubObject

from lib.base import BaseGithubAction

__all__ = [
    'AddCommitStatusAction'
]


class AddCommitStatusAction(BaseGithubAction):
    """StackStorm action that attaches a status to a GitHub commit."""

    def run(self, user, repo, sha, state, target_url=None, description=None):
        """Create a commit status for ``sha`` in ``user``/``repo``.

        :param user: Login of the repository owner.
        :param repo: Repository name.
        :param sha: SHA of the commit to annotate.
        :param state: Status state string accepted by the GitHub commit
                      status API (e.g. 'pending', 'success').
        :param target_url: Optional URL the status links to.
        :param description: Optional short description of the status.
        :return: True on success; PyGithub raises on API errors.
        """
        # PyGithub distinguishes "omit this field" (NotSet sentinel)
        # from an explicit None, so map falsy values to NotSet.
        target_url = target_url or GithubObject.NotSet
        description = description or GithubObject.NotSet

        # Resolve owner -> repository -> commit via the authenticated
        # client provided by BaseGithubAction.
        user = self._client.get_user(user)
        repo = user.get_repo(repo)

        commit = repo.get_commit(sha)
        commit.create_status(state=state,
                             target_url=target_url,
                             description=description)

        return True
niknow/scipy
refs/heads/master
scipy/stats/vonmises.py
139
from __future__ import division, print_function, absolute_import

import numpy as np
import scipy.stats
from scipy.special import i0


def von_mises_cdf_series(k, x, p):
    """Von Mises CDF at ``x`` via a ``p``-term backward recurrence.

    Evaluates the Fourier series of the von Mises distribution with
    concentration ``k`` using a continued-fraction style recursion,
    stepping the angle multiples sin(n*x), cos(n*x) down from n = p.
    """
    x = float(x)
    sin_x = np.sin(x)
    cos_x = np.cos(x)
    # Recurrence state starts at the highest retained harmonic, n = p.
    sin_nx = np.sin(p * x)
    cos_nx = np.cos(p * x)
    frac = 0
    total = 0
    n = p - 1
    while n > 0:
        # Angle-subtraction identities: step (n+1)*x down to n*x.
        sin_nx, cos_nx = (sin_nx * cos_x - cos_nx * sin_x,
                          cos_nx * cos_x + sin_nx * sin_x)
        frac = 1. / (2 * n / k + frac)
        total = frac * (sin_nx / n + total)
        n -= 1
    return 0.5 + x / (2 * np.pi) + total / np.pi


def von_mises_cdf_normalapprox(k, x):
    """Normal approximation to the von Mises CDF for large ``k``."""
    scale = np.sqrt(2 / np.pi) * np.exp(k) / i0(k)
    return scipy.stats.norm.cdf(scale * np.sin(x / 2.))


def von_mises_cdf(k, x):
    """Von Mises CDF with concentration ``k`` evaluated at ``x``.

    ``x`` is first reduced to the principal branch around zero; the
    2*pi multiple removed is added back to the result, preserving the
    "unwrapped" convention of the original implementation.  A series
    evaluation is used for moderate ``k`` and a normal approximation
    above the cutoff.
    """
    # Reduce x modulo 2*pi to the branch centred on zero, remembering
    # the offset so it can be restored on return.
    turns = 2 * np.pi * np.round(x / (2 * np.pi))
    x = x - turns
    k = float(k)
    # Cutoff and term-count coefficients; per the original comment,
    # these should give about 12 decimal digits.
    CK = 50
    a = [28., 0.5, 100., 5.0]
    if not (k < CK):
        result = von_mises_cdf_normalapprox(k, x)
    else:
        num_terms = int(np.ceil(a[0] + a[1] * k - a[2] / (k + a[3])))
        result = np.clip(von_mises_cdf_series(k, x, num_terms), 0, 1)
    return result + turns
Ben-Healey/PGE-Extension
refs/heads/master
pge-0.3/src/pge2d.py
2
#!/usr/bin/env python # # # def rgb (r, g, b):
trungdong/prov
refs/heads/master
src/prov/identifier.py
1
__author__ = "Trung Dong Huynh" __email__ = "trungdong@donggiang.com" class Identifier(object): """Base class for all identifiers and also represents xsd:anyURI.""" # TODO: make Identifier an "abstract" base class and move xsd:anyURI # into a subclass def __init__(self, uri): """ Constructor. :param uri: URI string for the long namespace identifier. """ self._uri = str(uri) # Ensure this is a unicode string @property def uri(self): """Identifier's URI.""" return self._uri def __str__(self): return self._uri def __eq__(self, other): return self.uri == other.uri if isinstance(other, Identifier) else False def __hash__(self): return hash((self.uri, self.__class__)) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self._uri) def provn_representation(self): """PROV-N representation of qualified name in a string.""" return '"%s" %%%% xsd:anyURI' % self._uri class QualifiedName(Identifier): """Qualified name of an identifier in a particular namespace.""" def __init__(self, namespace, localpart): """ Constructor. :param namespace: Namespace to use for qualified name resolution. :param localpart: Portion of identifier not part of the namespace prefix. """ Identifier.__init__(self, "".join([namespace.uri, localpart])) self._namespace = namespace self._localpart = localpart self._str = ( ":".join([namespace.prefix, localpart]) if namespace.prefix else localpart ) @property def namespace(self): """Namespace of qualified name.""" return self._namespace @property def localpart(self): """Local part of qualified name.""" return self._localpart def __str__(self): return self._str def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self._str) def __hash__(self): return hash(self.uri) def provn_representation(self): """PROV-N representation of qualified name in a string.""" return "'%s'" % self._str class Namespace(object): """PROV Namespace.""" def __init__(self, prefix, uri): """ Constructor. :param prefix: String short hand prefix for the namespace. 
:param uri: URI string for the long namespace identifier. """ self._prefix = prefix self._uri = uri self._cache = dict() @property def uri(self): """Namespace URI.""" return self._uri @property def prefix(self): """Namespace prefix.""" return self._prefix def contains(self, identifier): """ Indicates whether the identifier provided is contained in this namespace. :param identifier: Identifier to check. :return: bool """ uri = ( identifier if isinstance(identifier, str) else (identifier.uri if isinstance(identifier, Identifier) else None) ) return uri.startswith(self._uri) if uri else False def qname(self, identifier): """ Returns the qualified name of the identifier given using the namespace prefix. :param identifier: Identifier to resolve to a qualified name. :return: :py:class:`QualifiedName` """ uri = ( identifier if isinstance(identifier, str) else (identifier.uri if isinstance(identifier, Identifier) else None) ) if uri and uri.startswith(self._uri): return QualifiedName(self, uri[len(self._uri) :]) else: return None def __eq__(self, other): return ( (self._uri == other.uri and self._prefix == other.prefix) if isinstance(other, Namespace) else False ) def __ne__(self, other): return ( not isinstance(other, Namespace) or self._uri != other.uri or self._prefix != other.prefix ) def __hash__(self): return hash((self._uri, self._prefix)) def __repr__(self): return "<%s: %s {%s}>" % (self.__class__.__name__, self._prefix, self._uri) def __getitem__(self, localpart): if localpart in self._cache: return self._cache[localpart] else: qname = QualifiedName(self, localpart) self._cache[localpart] = qname return qname
manasi24/jiocloud-tempest-qatempest
refs/heads/master
tempest/api/identity/admin/v2/test_roles.py
11
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from six import moves
from tempest_lib.common.utils import data_utils

from tempest.api.identity import base
from tempest import test


class RolesTestJSON(base.BaseIdentityV2AdminTest):
    """Tests for role CRUD and user-role assignment via Identity v2 admin."""

    @classmethod
    def resource_setup(cls):
        """Create five randomly named roles shared by all tests in the class."""
        super(RolesTestJSON, cls).resource_setup()
        for _ in moves.xrange(5):
            role_name = data_utils.rand_name(name='role')
            role = cls.client.create_role(role_name)
            # Track created roles so the base class can clean them up.
            cls.data.roles.append(role)

    def _get_role_params(self):
        """Provision a test user, tenant and role; return them as a tuple."""
        self.data.setup_test_user()
        self.data.setup_test_role()
        user = self.get_user_by_name(self.data.test_user)
        tenant = self.get_tenant_by_name(self.data.test_tenant)
        role = self.get_role_by_name(self.data.test_role)
        return (user, tenant, role)

    def assert_role_in_role_list(self, role, roles):
        """Fail unless ``role`` (matched by id) appears in ``roles``."""
        found = False
        for user_role in roles:
            if user_role['id'] == role['id']:
                found = True
        self.assertTrue(found, "assigned role was not in list")

    @test.idempotent_id('75d9593f-50b7-4fcf-bd64-e3fb4a278e23')
    def test_list_roles(self):
        """Return a list of all roles."""
        body = self.client.list_roles()
        # All roles created in resource_setup must be listed.
        found = [role for role in body if role in self.data.roles]
        self.assertTrue(any(found))
        self.assertEqual(len(found), len(self.data.roles))

    @test.idempotent_id('c62d909d-6c21-48c0-ae40-0a0760e6db5e')
    def test_role_create_delete(self):
        """Role should be created, verified, and deleted."""
        role_name = data_utils.rand_name(name='role-test')
        body = self.client.create_role(role_name)
        self.assertEqual(role_name, body['name'])

        # Verify the new role shows up in the listing.
        body = self.client.list_roles()
        found = [role for role in body if role['name'] == role_name]
        self.assertTrue(any(found))

        body = self.client.delete_role(found[0]['id'])

        # After deletion the role must no longer be listed.
        body = self.client.list_roles()
        found = [role for role in body if role['name'] == role_name]
        self.assertFalse(any(found))

    @test.idempotent_id('db6870bd-a6ed-43be-a9b1-2f10a5c9994f')
    def test_get_role_by_id(self):
        """Get a role by its id."""
        self.data.setup_test_role()
        role_id = self.data.role['id']
        role_name = self.data.role['name']
        body = self.client.get_role(role_id)
        self.assertEqual(role_id, body['id'])
        self.assertEqual(role_name, body['name'])

    @test.idempotent_id('0146f675-ffbd-4208-b3a4-60eb628dbc5e')
    def test_assign_user_role(self):
        """Assign a role to a user on a tenant."""
        (user, tenant, role) = self._get_role_params()
        self.client.assign_user_role(tenant['id'], user['id'], role['id'])
        roles = self.client.list_user_roles(tenant['id'], user['id'])
        self.assert_role_in_role_list(role, roles)

    @test.idempotent_id('f0b9292c-d3ba-4082-aa6c-440489beef69')
    def test_remove_user_role(self):
        """Remove a role assigned to a user on a tenant."""
        (user, tenant, role) = self._get_role_params()
        user_role = self.client.assign_user_role(tenant['id'],
                                                 user['id'], role['id'])
        # Removal takes the assignment's id returned by assign_user_role.
        self.client.remove_user_role(tenant['id'], user['id'],
                                     user_role['id'])

    @test.idempotent_id('262e1e3e-ed71-4edd-a0e5-d64e83d66d05')
    def test_list_user_roles(self):
        """List roles assigned to a user on tenant."""
        (user, tenant, role) = self._get_role_params()
        self.client.assign_user_role(tenant['id'], user['id'], role['id'])
        roles = self.client.list_user_roles(tenant['id'], user['id'])
        self.assert_role_in_role_list(role, roles)
eckucukoglu/arm-linux-gnueabihf
refs/heads/master
arm-linux-gnueabihf/libc/usr/lib/python2.7/test/test_xml_etree.py
37
# xml.etree test. This file contains enough tests to make sure that # all included components work as they should. # Large parts are extracted from the upstream test suite. # IMPORTANT: the same doctests are run from "test_xml_etree_c" in # order to ensure consistency between the C implementation and the # Python implementation. # # For this purpose, the module-level "ET" symbol is temporarily # monkey-patched when running the "test_xml_etree_c" test suite. # Don't re-import "xml.etree.ElementTree" module in the docstring, # except if the test is specific to the Python implementation. import sys import cgi from test import test_support from test.test_support import findfile from xml.etree import ElementTree as ET SIMPLE_XMLFILE = findfile("simple.xml", subdir="xmltestdata") SIMPLE_NS_XMLFILE = findfile("simple-ns.xml", subdir="xmltestdata") SAMPLE_XML = """\ <body> <tag class='a'>text</tag> <tag class='b' /> <section> <tag class='b' id='inner'>subtext</tag> </section> </body> """ SAMPLE_SECTION = """\ <section> <tag class='b' id='inner'>subtext</tag> <nexttag /> <nextsection> <tag /> </nextsection> </section> """ SAMPLE_XML_NS = """ <body xmlns="http://effbot.org/ns"> <tag>text</tag> <tag /> <section> <tag>subtext</tag> </section> </body> """ def sanity(): """ Import sanity. 
>>> from xml.etree import ElementTree >>> from xml.etree import ElementInclude >>> from xml.etree import ElementPath """ def check_method(method): if not hasattr(method, '__call__'): print method, "not callable" def serialize(elem, to_string=True, **options): import StringIO file = StringIO.StringIO() tree = ET.ElementTree(elem) tree.write(file, **options) if to_string: return file.getvalue() else: file.seek(0) return file def summarize(elem): if elem.tag == ET.Comment: return "<Comment>" return elem.tag def summarize_list(seq): return [summarize(elem) for elem in seq] def normalize_crlf(tree): for elem in tree.iter(): if elem.text: elem.text = elem.text.replace("\r\n", "\n") if elem.tail: elem.tail = elem.tail.replace("\r\n", "\n") def check_string(string): len(string) for char in string: if len(char) != 1: print "expected one-character string, got %r" % char new_string = string + "" new_string = string + " " string[:0] def check_mapping(mapping): len(mapping) keys = mapping.keys() items = mapping.items() for key in keys: item = mapping[key] mapping["key"] = "value" if mapping["key"] != "value": print "expected value string, got %r" % mapping["key"] def check_element(element): if not ET.iselement(element): print "not an element" if not hasattr(element, "tag"): print "no tag member" if not hasattr(element, "attrib"): print "no attrib member" if not hasattr(element, "text"): print "no text member" if not hasattr(element, "tail"): print "no tail member" check_string(element.tag) check_mapping(element.attrib) if element.text is not None: check_string(element.text) if element.tail is not None: check_string(element.tail) for elem in element: check_element(elem) # -------------------------------------------------------------------- # element tree tests def interface(): r""" Test element tree interface. 
>>> element = ET.Element("tag") >>> check_element(element) >>> tree = ET.ElementTree(element) >>> check_element(tree.getroot()) >>> element = ET.Element("t\xe4g", key="value") >>> tree = ET.ElementTree(element) >>> repr(element) # doctest: +ELLIPSIS "<Element 't\\xe4g' at 0x...>" >>> element = ET.Element("tag", key="value") Make sure all standard element methods exist. >>> check_method(element.append) >>> check_method(element.extend) >>> check_method(element.insert) >>> check_method(element.remove) >>> check_method(element.getchildren) >>> check_method(element.find) >>> check_method(element.iterfind) >>> check_method(element.findall) >>> check_method(element.findtext) >>> check_method(element.clear) >>> check_method(element.get) >>> check_method(element.set) >>> check_method(element.keys) >>> check_method(element.items) >>> check_method(element.iter) >>> check_method(element.itertext) >>> check_method(element.getiterator) These methods return an iterable. See bug 6472. >>> check_method(element.iter("tag").next) >>> check_method(element.iterfind("tag").next) >>> check_method(element.iterfind("*").next) >>> check_method(tree.iter("tag").next) >>> check_method(tree.iterfind("tag").next) >>> check_method(tree.iterfind("*").next) These aliases are provided: >>> assert ET.XML == ET.fromstring >>> assert ET.PI == ET.ProcessingInstruction >>> assert ET.XMLParser == ET.XMLTreeBuilder """ def simpleops(): """ Basic method sanity checks. 
>>> elem = ET.XML("<body><tag/></body>") >>> serialize(elem) '<body><tag /></body>' >>> e = ET.Element("tag2") >>> elem.append(e) >>> serialize(elem) '<body><tag /><tag2 /></body>' >>> elem.remove(e) >>> serialize(elem) '<body><tag /></body>' >>> elem.insert(0, e) >>> serialize(elem) '<body><tag2 /><tag /></body>' >>> elem.remove(e) >>> elem.extend([e]) >>> serialize(elem) '<body><tag /><tag2 /></body>' >>> elem.remove(e) >>> element = ET.Element("tag", key="value") >>> serialize(element) # 1 '<tag key="value" />' >>> subelement = ET.Element("subtag") >>> element.append(subelement) >>> serialize(element) # 2 '<tag key="value"><subtag /></tag>' >>> element.insert(0, subelement) >>> serialize(element) # 3 '<tag key="value"><subtag /><subtag /></tag>' >>> element.remove(subelement) >>> serialize(element) # 4 '<tag key="value"><subtag /></tag>' >>> element.remove(subelement) >>> serialize(element) # 5 '<tag key="value" />' >>> element.remove(subelement) Traceback (most recent call last): ValueError: list.remove(x): x not in list >>> serialize(element) # 6 '<tag key="value" />' >>> element[0:0] = [subelement, subelement, subelement] >>> serialize(element[1]) '<subtag />' >>> element[1:9] == [element[1], element[2]] True >>> element[:9:2] == [element[0], element[2]] True >>> del element[1:2] >>> serialize(element) '<tag key="value"><subtag /><subtag /></tag>' """ def cdata(): """ Test CDATA handling (etc). >>> serialize(ET.XML("<tag>hello</tag>")) '<tag>hello</tag>' >>> serialize(ET.XML("<tag>&#104;&#101;&#108;&#108;&#111;</tag>")) '<tag>hello</tag>' >>> serialize(ET.XML("<tag><![CDATA[hello]]></tag>")) '<tag>hello</tag>' """ # Only with Python implementation def simplefind(): """ Test find methods using the elementpath fallback. 
>>> from xml.etree import ElementTree >>> CurrentElementPath = ElementTree.ElementPath >>> ElementTree.ElementPath = ElementTree._SimpleElementPath() >>> elem = ElementTree.XML(SAMPLE_XML) >>> elem.find("tag").tag 'tag' >>> ElementTree.ElementTree(elem).find("tag").tag 'tag' >>> elem.findtext("tag") 'text' >>> elem.findtext("tog") >>> elem.findtext("tog", "default") 'default' >>> ElementTree.ElementTree(elem).findtext("tag") 'text' >>> summarize_list(elem.findall("tag")) ['tag', 'tag'] >>> summarize_list(elem.findall(".//tag")) ['tag', 'tag', 'tag'] Path syntax doesn't work in this case. >>> elem.find("section/tag") >>> elem.findtext("section/tag") >>> summarize_list(elem.findall("section/tag")) [] >>> ElementTree.ElementPath = CurrentElementPath """ def find(): """ Test find methods (including xpath syntax). >>> elem = ET.XML(SAMPLE_XML) >>> elem.find("tag").tag 'tag' >>> ET.ElementTree(elem).find("tag").tag 'tag' >>> elem.find("section/tag").tag 'tag' >>> elem.find("./tag").tag 'tag' >>> ET.ElementTree(elem).find("./tag").tag 'tag' >>> ET.ElementTree(elem).find("/tag").tag 'tag' >>> elem[2] = ET.XML(SAMPLE_SECTION) >>> elem.find("section/nexttag").tag 'nexttag' >>> ET.ElementTree(elem).find("section/tag").tag 'tag' >>> ET.ElementTree(elem).find("tog") >>> ET.ElementTree(elem).find("tog/foo") >>> elem.findtext("tag") 'text' >>> elem.findtext("section/nexttag") '' >>> elem.findtext("section/nexttag", "default") '' >>> elem.findtext("tog") >>> elem.findtext("tog", "default") 'default' >>> ET.ElementTree(elem).findtext("tag") 'text' >>> ET.ElementTree(elem).findtext("tog/foo") >>> ET.ElementTree(elem).findtext("tog/foo", "default") 'default' >>> ET.ElementTree(elem).findtext("./tag") 'text' >>> ET.ElementTree(elem).findtext("/tag") 'text' >>> elem.findtext("section/tag") 'subtext' >>> ET.ElementTree(elem).findtext("section/tag") 'subtext' >>> summarize_list(elem.findall(".")) ['body'] >>> summarize_list(elem.findall("tag")) ['tag', 'tag'] >>> 
summarize_list(elem.findall("tog")) [] >>> summarize_list(elem.findall("tog/foo")) [] >>> summarize_list(elem.findall("*")) ['tag', 'tag', 'section'] >>> summarize_list(elem.findall(".//tag")) ['tag', 'tag', 'tag', 'tag'] >>> summarize_list(elem.findall("section/tag")) ['tag'] >>> summarize_list(elem.findall("section//tag")) ['tag', 'tag'] >>> summarize_list(elem.findall("section/*")) ['tag', 'nexttag', 'nextsection'] >>> summarize_list(elem.findall("section//*")) ['tag', 'nexttag', 'nextsection', 'tag'] >>> summarize_list(elem.findall("section/.//*")) ['tag', 'nexttag', 'nextsection', 'tag'] >>> summarize_list(elem.findall("*/*")) ['tag', 'nexttag', 'nextsection'] >>> summarize_list(elem.findall("*//*")) ['tag', 'nexttag', 'nextsection', 'tag'] >>> summarize_list(elem.findall("*/tag")) ['tag'] >>> summarize_list(elem.findall("*/./tag")) ['tag'] >>> summarize_list(elem.findall("./tag")) ['tag', 'tag'] >>> summarize_list(elem.findall(".//tag")) ['tag', 'tag', 'tag', 'tag'] >>> summarize_list(elem.findall("././tag")) ['tag', 'tag'] >>> summarize_list(elem.findall(".//tag[@class]")) ['tag', 'tag', 'tag'] >>> summarize_list(elem.findall(".//tag[@class='a']")) ['tag'] >>> summarize_list(elem.findall(".//tag[@class='b']")) ['tag', 'tag'] >>> summarize_list(elem.findall(".//tag[@id]")) ['tag'] >>> summarize_list(elem.findall(".//section[tag]")) ['section'] >>> summarize_list(elem.findall(".//section[element]")) [] >>> summarize_list(elem.findall("../tag")) [] >>> summarize_list(elem.findall("section/../tag")) ['tag', 'tag'] >>> summarize_list(ET.ElementTree(elem).findall("./tag")) ['tag', 'tag'] Following example is invalid in 1.2. A leading '*' is assumed in 1.3. >>> elem.findall("section//") == elem.findall("section//*") True ET's Path module handles this case incorrectly; this gives a warning in 1.3, and the behaviour will be modified in 1.4. 
>>> summarize_list(ET.ElementTree(elem).findall("/tag")) ['tag', 'tag'] >>> elem = ET.XML(SAMPLE_XML_NS) >>> summarize_list(elem.findall("tag")) [] >>> summarize_list(elem.findall("{http://effbot.org/ns}tag")) ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag'] >>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag")) ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag'] """ def file_init(): """ >>> import StringIO >>> stringfile = StringIO.StringIO(SAMPLE_XML) >>> tree = ET.ElementTree(file=stringfile) >>> tree.find("tag").tag 'tag' >>> tree.find("section/tag").tag 'tag' >>> tree = ET.ElementTree(file=SIMPLE_XMLFILE) >>> tree.find("element").tag 'element' >>> tree.find("element/../empty-element").tag 'empty-element' """ def bad_find(): """ Check bad or unsupported path expressions. >>> elem = ET.XML(SAMPLE_XML) >>> elem.findall("/tag") Traceback (most recent call last): SyntaxError: cannot use absolute path on element """ def path_cache(): """ Check that the path cache behaves sanely. >>> elem = ET.XML(SAMPLE_XML) >>> for i in range(10): ET.ElementTree(elem).find('./'+str(i)) >>> cache_len_10 = len(ET.ElementPath._cache) >>> for i in range(10): ET.ElementTree(elem).find('./'+str(i)) >>> len(ET.ElementPath._cache) == cache_len_10 True >>> for i in range(20): ET.ElementTree(elem).find('./'+str(i)) >>> len(ET.ElementPath._cache) > cache_len_10 True >>> for i in range(600): ET.ElementTree(elem).find('./'+str(i)) >>> len(ET.ElementPath._cache) < 500 True """ def copy(): """ Test copy handling (etc). >>> import copy >>> e1 = ET.XML("<tag>hello<foo/></tag>") >>> e2 = copy.copy(e1) >>> e3 = copy.deepcopy(e1) >>> e1.find("foo").tag = "bar" >>> serialize(e1) '<tag>hello<bar /></tag>' >>> serialize(e2) '<tag>hello<bar /></tag>' >>> serialize(e3) '<tag>hello<foo /></tag>' """ def attrib(): """ Test attribute handling. 
>>> elem = ET.Element("tag") >>> elem.get("key") # 1.1 >>> elem.get("key", "default") # 1.2 'default' >>> elem.set("key", "value") >>> elem.get("key") # 1.3 'value' >>> elem = ET.Element("tag", key="value") >>> elem.get("key") # 2.1 'value' >>> elem.attrib # 2.2 {'key': 'value'} >>> attrib = {"key": "value"} >>> elem = ET.Element("tag", attrib) >>> attrib.clear() # check for aliasing issues >>> elem.get("key") # 3.1 'value' >>> elem.attrib # 3.2 {'key': 'value'} >>> attrib = {"key": "value"} >>> elem = ET.Element("tag", **attrib) >>> attrib.clear() # check for aliasing issues >>> elem.get("key") # 4.1 'value' >>> elem.attrib # 4.2 {'key': 'value'} >>> elem = ET.Element("tag", {"key": "other"}, key="value") >>> elem.get("key") # 5.1 'value' >>> elem.attrib # 5.2 {'key': 'value'} >>> elem = ET.Element('test') >>> elem.text = "aa" >>> elem.set('testa', 'testval') >>> elem.set('testb', 'test2') >>> ET.tostring(elem) '<test testa="testval" testb="test2">aa</test>' >>> sorted(elem.keys()) ['testa', 'testb'] >>> sorted(elem.items()) [('testa', 'testval'), ('testb', 'test2')] >>> elem.attrib['testb'] 'test2' >>> elem.attrib['testb'] = 'test1' >>> elem.attrib['testc'] = 'test2' >>> ET.tostring(elem) '<test testa="testval" testb="test1" testc="test2">aa</test>' """ def makeelement(): """ Test makeelement handling. >>> elem = ET.Element("tag") >>> attrib = {"key": "value"} >>> subelem = elem.makeelement("subtag", attrib) >>> if subelem.attrib is attrib: ... 
print "attrib aliasing" >>> elem.append(subelem) >>> serialize(elem) '<tag><subtag key="value" /></tag>' >>> elem.clear() >>> serialize(elem) '<tag />' >>> elem.append(subelem) >>> serialize(elem) '<tag><subtag key="value" /></tag>' >>> elem.extend([subelem, subelem]) >>> serialize(elem) '<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>' >>> elem[:] = [subelem] >>> serialize(elem) '<tag><subtag key="value" /></tag>' >>> elem[:] = tuple([subelem]) >>> serialize(elem) '<tag><subtag key="value" /></tag>' """ def parsefile(): """ Test parsing from file. >>> tree = ET.parse(SIMPLE_XMLFILE) >>> normalize_crlf(tree) >>> tree.write(sys.stdout) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> >>> tree = ET.parse(SIMPLE_NS_XMLFILE) >>> normalize_crlf(tree) >>> tree.write(sys.stdout) <ns0:root xmlns:ns0="namespace"> <ns0:element key="value">text</ns0:element> <ns0:element>text</ns0:element>tail <ns0:empty-element /> </ns0:root> >>> with open(SIMPLE_XMLFILE) as f: ... data = f.read() >>> parser = ET.XMLParser() >>> parser.version # doctest: +ELLIPSIS 'Expat ...' 
>>> parser.feed(data) >>> print serialize(parser.close()) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> >>> parser = ET.XMLTreeBuilder() # 1.2 compatibility >>> parser.feed(data) >>> print serialize(parser.close()) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> >>> target = ET.TreeBuilder() >>> parser = ET.XMLParser(target=target) >>> parser.feed(data) >>> print serialize(parser.close()) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> """ def parseliteral(): """ >>> element = ET.XML("<html><body>text</body></html>") >>> ET.ElementTree(element).write(sys.stdout) <html><body>text</body></html> >>> element = ET.fromstring("<html><body>text</body></html>") >>> ET.ElementTree(element).write(sys.stdout) <html><body>text</body></html> >>> sequence = ["<html><body>", "text</bo", "dy></html>"] >>> element = ET.fromstringlist(sequence) >>> print ET.tostring(element) <html><body>text</body></html> >>> print "".join(ET.tostringlist(element)) <html><body>text</body></html> >>> ET.tostring(element, "ascii") "<?xml version='1.0' encoding='ascii'?>\\n<html><body>text</body></html>" >>> _, ids = ET.XMLID("<html><body>text</body></html>") >>> len(ids) 0 >>> _, ids = ET.XMLID("<html><body id='body'>text</body></html>") >>> len(ids) 1 >>> ids["body"].tag 'body' """ def iterparse(): """ Test iterparse interface. >>> iterparse = ET.iterparse >>> context = iterparse(SIMPLE_XMLFILE) >>> action, elem = next(context) >>> print action, elem.tag end element >>> for action, elem in context: ... print action, elem.tag end element end empty-element end root >>> context.root.tag 'root' >>> context = iterparse(SIMPLE_NS_XMLFILE) >>> for action, elem in context: ... 
print action, elem.tag end {namespace}element end {namespace}element end {namespace}empty-element end {namespace}root >>> events = () >>> context = iterparse(SIMPLE_XMLFILE, events) >>> for action, elem in context: ... print action, elem.tag >>> events = () >>> context = iterparse(SIMPLE_XMLFILE, events=events) >>> for action, elem in context: ... print action, elem.tag >>> events = ("start", "end") >>> context = iterparse(SIMPLE_XMLFILE, events) >>> for action, elem in context: ... print action, elem.tag start root start element end element start element end element start empty-element end empty-element end root >>> events = ("start", "end", "start-ns", "end-ns") >>> context = iterparse(SIMPLE_NS_XMLFILE, events) >>> for action, elem in context: ... if action in ("start", "end"): ... print action, elem.tag ... else: ... print action, elem start-ns ('', 'namespace') start {namespace}root start {namespace}element end {namespace}element start {namespace}element end {namespace}element start {namespace}empty-element end {namespace}empty-element end {namespace}root end-ns None >>> events = ("start", "end", "bogus") >>> with open(SIMPLE_XMLFILE, "rb") as f: ... iterparse(f, events) Traceback (most recent call last): ValueError: unknown event 'bogus' >>> import StringIO >>> source = StringIO.StringIO( ... "<?xml version='1.0' encoding='iso-8859-1'?>\\n" ... "<body xmlns='http://&#233;ffbot.org/ns'\\n" ... " xmlns:cl\\xe9='http://effbot.org/ns'>text</body>\\n") >>> events = ("start-ns",) >>> context = iterparse(source, events) >>> for action, elem in context: ... print action, elem start-ns ('', u'http://\\xe9ffbot.org/ns') start-ns (u'cl\\xe9', 'http://effbot.org/ns') >>> source = StringIO.StringIO("<document />junk") >>> try: ... for action, elem in iterparse(source): ... print action, elem.tag ... except ET.ParseError, v: ... 
print v end document junk after document element: line 1, column 12 """ def writefile(): """ >>> elem = ET.Element("tag") >>> elem.text = "text" >>> serialize(elem) '<tag>text</tag>' >>> ET.SubElement(elem, "subtag").text = "subtext" >>> serialize(elem) '<tag>text<subtag>subtext</subtag></tag>' Test tag suppression >>> elem.tag = None >>> serialize(elem) 'text<subtag>subtext</subtag>' >>> elem.insert(0, ET.Comment("comment")) >>> serialize(elem) # assumes 1.3 'text<!--comment--><subtag>subtext</subtag>' >>> elem[0] = ET.PI("key", "value") >>> serialize(elem) 'text<?key value?><subtag>subtext</subtag>' """ def custom_builder(): """ Test parser w. custom builder. >>> with open(SIMPLE_XMLFILE) as f: ... data = f.read() >>> class Builder: ... def start(self, tag, attrib): ... print "start", tag ... def end(self, tag): ... print "end", tag ... def data(self, text): ... pass >>> builder = Builder() >>> parser = ET.XMLParser(target=builder) >>> parser.feed(data) start root start element end element start element end element start empty-element end empty-element end root >>> with open(SIMPLE_NS_XMLFILE) as f: ... data = f.read() >>> class Builder: ... def start(self, tag, attrib): ... print "start", tag ... def end(self, tag): ... print "end", tag ... def data(self, text): ... pass ... def pi(self, target, data): ... print "pi", target, repr(data) ... def comment(self, data): ... print "comment", repr(data) >>> builder = Builder() >>> parser = ET.XMLParser(target=builder) >>> parser.feed(data) pi pi 'data' comment ' comment ' start {namespace}root start {namespace}element end {namespace}element start {namespace}element end {namespace}element start {namespace}empty-element end {namespace}empty-element end {namespace}root """ def getchildren(): """ Test Element.getchildren() >>> with open(SIMPLE_XMLFILE, "r") as f: ... tree = ET.parse(f) >>> for elem in tree.getroot().iter(): ... 
summarize_list(elem.getchildren()) ['element', 'element', 'empty-element'] [] [] [] >>> for elem in tree.getiterator(): ... summarize_list(elem.getchildren()) ['element', 'element', 'empty-element'] [] [] [] >>> elem = ET.XML(SAMPLE_XML) >>> len(elem.getchildren()) 3 >>> len(elem[2].getchildren()) 1 >>> elem[:] == elem.getchildren() True >>> child1 = elem[0] >>> child2 = elem[2] >>> del elem[1:2] >>> len(elem.getchildren()) 2 >>> child1 == elem[0] True >>> child2 == elem[1] True >>> elem[0:2] = [child2, child1] >>> child2 == elem[0] True >>> child1 == elem[1] True >>> child1 == elem[0] False >>> elem.clear() >>> elem.getchildren() [] """ def writestring(): """ >>> elem = ET.XML("<html><body>text</body></html>") >>> ET.tostring(elem) '<html><body>text</body></html>' >>> elem = ET.fromstring("<html><body>text</body></html>") >>> ET.tostring(elem) '<html><body>text</body></html>' """ def check_encoding(encoding): """ >>> check_encoding("ascii") >>> check_encoding("us-ascii") >>> check_encoding("iso-8859-1") >>> check_encoding("iso-8859-15") >>> check_encoding("cp437") >>> check_encoding("mac-roman") """ ET.XML("<?xml version='1.0' encoding='%s'?><xml />" % encoding) def encoding(): r""" Test encoding issues. 
>>> elem = ET.Element("tag") >>> elem.text = u"abc" >>> serialize(elem) '<tag>abc</tag>' >>> serialize(elem, encoding="utf-8") '<tag>abc</tag>' >>> serialize(elem, encoding="us-ascii") '<tag>abc</tag>' >>> serialize(elem, encoding="iso-8859-1") "<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>abc</tag>" >>> elem.text = "<&\"\'>" >>> serialize(elem) '<tag>&lt;&amp;"\'&gt;</tag>' >>> serialize(elem, encoding="utf-8") '<tag>&lt;&amp;"\'&gt;</tag>' >>> serialize(elem, encoding="us-ascii") # cdata characters '<tag>&lt;&amp;"\'&gt;</tag>' >>> serialize(elem, encoding="iso-8859-1") '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag>&lt;&amp;"\'&gt;</tag>' >>> elem.attrib["key"] = "<&\"\'>" >>> elem.text = None >>> serialize(elem) '<tag key="&lt;&amp;&quot;\'&gt;" />' >>> serialize(elem, encoding="utf-8") '<tag key="&lt;&amp;&quot;\'&gt;" />' >>> serialize(elem, encoding="us-ascii") '<tag key="&lt;&amp;&quot;\'&gt;" />' >>> serialize(elem, encoding="iso-8859-1") '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="&lt;&amp;&quot;\'&gt;" />' >>> elem.text = u'\xe5\xf6\xf6<>' >>> elem.attrib.clear() >>> serialize(elem) '<tag>&#229;&#246;&#246;&lt;&gt;</tag>' >>> serialize(elem, encoding="utf-8") '<tag>\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;</tag>' >>> serialize(elem, encoding="us-ascii") '<tag>&#229;&#246;&#246;&lt;&gt;</tag>' >>> serialize(elem, encoding="iso-8859-1") "<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>\xe5\xf6\xf6&lt;&gt;</tag>" >>> elem.attrib["key"] = u'\xe5\xf6\xf6<>' >>> elem.text = None >>> serialize(elem) '<tag key="&#229;&#246;&#246;&lt;&gt;" />' >>> serialize(elem, encoding="utf-8") '<tag key="\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;" />' >>> serialize(elem, encoding="us-ascii") '<tag key="&#229;&#246;&#246;&lt;&gt;" />' >>> serialize(elem, encoding="iso-8859-1") '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="\xe5\xf6\xf6&lt;&gt;" />' """ def methods(): r""" Test serialization methods. 
>>> e = ET.XML("<html><link/><script>1 &lt; 2</script></html>") >>> e.tail = "\n" >>> serialize(e) '<html><link /><script>1 &lt; 2</script></html>\n' >>> serialize(e, method=None) '<html><link /><script>1 &lt; 2</script></html>\n' >>> serialize(e, method="xml") '<html><link /><script>1 &lt; 2</script></html>\n' >>> serialize(e, method="html") '<html><link><script>1 < 2</script></html>\n' >>> serialize(e, method="text") '1 < 2\n' """ def iterators(): """ Test iterators. >>> e = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>") >>> summarize_list(e.iter()) ['html', 'body', 'i'] >>> summarize_list(e.find("body").iter()) ['body', 'i'] >>> summarize(next(e.iter())) 'html' >>> "".join(e.itertext()) 'this is a paragraph...' >>> "".join(e.find("body").itertext()) 'this is a paragraph.' >>> next(e.itertext()) 'this is a ' Method iterparse should return an iterator. See bug 6472. >>> sourcefile = serialize(e, to_string=False) >>> next(ET.iterparse(sourcefile)) # doctest: +ELLIPSIS ('end', <Element 'i' at 0x...>) >>> tree = ET.ElementTree(None) >>> tree.iter() Traceback (most recent call last): AttributeError: 'NoneType' object has no attribute 'iter' """ ENTITY_XML = """\ <!DOCTYPE points [ <!ENTITY % user-entities SYSTEM 'user-entities.xml'> %user-entities; ]> <document>&entity;</document> """ def entity(): """ Test entity handling. 1) good entities >>> e = ET.XML("<document title='&#x8230;'>test</document>") >>> serialize(e) '<document title="&#33328;">test</document>' 2) bad entities >>> ET.XML("<document>&entity;</document>") Traceback (most recent call last): ParseError: undefined entity: line 1, column 10 >>> ET.XML(ENTITY_XML) Traceback (most recent call last): ParseError: undefined entity &entity;: line 5, column 10 3) custom entity >>> parser = ET.XMLParser() >>> parser.entity["entity"] = "text" >>> parser.feed(ENTITY_XML) >>> root = parser.close() >>> serialize(root) '<document>text</document>' """ def error(xml): """ Test error handling. 
>>> issubclass(ET.ParseError, SyntaxError) True >>> error("foo").position (1, 0) >>> error("<tag>&foo;</tag>").position (1, 5) >>> error("foobar<").position (1, 6) """ try: ET.XML(xml) except ET.ParseError: return sys.exc_value def namespace(): """ Test namespace issues. 1) xml namespace >>> elem = ET.XML("<tag xml:lang='en' />") >>> serialize(elem) # 1.1 '<tag xml:lang="en" />' 2) other "well-known" namespaces >>> elem = ET.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />") >>> serialize(elem) # 2.1 '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />' >>> elem = ET.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />") >>> serialize(elem) # 2.2 '<html:html xmlns:html="http://www.w3.org/1999/xhtml" />' >>> elem = ET.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />") >>> serialize(elem) # 2.3 '<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />' 3) unknown namespaces >>> elem = ET.XML(SAMPLE_XML_NS) >>> print serialize(elem) <ns0:body xmlns:ns0="http://effbot.org/ns"> <ns0:tag>text</ns0:tag> <ns0:tag /> <ns0:section> <ns0:tag>subtext</ns0:tag> </ns0:section> </ns0:body> """ def qname(): """ Test QName handling. 
1) decorated tags >>> elem = ET.Element("{uri}tag") >>> serialize(elem) # 1.1 '<ns0:tag xmlns:ns0="uri" />' >>> elem = ET.Element(ET.QName("{uri}tag")) >>> serialize(elem) # 1.2 '<ns0:tag xmlns:ns0="uri" />' >>> elem = ET.Element(ET.QName("uri", "tag")) >>> serialize(elem) # 1.3 '<ns0:tag xmlns:ns0="uri" />' >>> elem = ET.Element(ET.QName("uri", "tag")) >>> subelem = ET.SubElement(elem, ET.QName("uri", "tag1")) >>> subelem = ET.SubElement(elem, ET.QName("uri", "tag2")) >>> serialize(elem) # 1.4 '<ns0:tag xmlns:ns0="uri"><ns0:tag1 /><ns0:tag2 /></ns0:tag>' 2) decorated attributes >>> elem.clear() >>> elem.attrib["{uri}key"] = "value" >>> serialize(elem) # 2.1 '<ns0:tag xmlns:ns0="uri" ns0:key="value" />' >>> elem.clear() >>> elem.attrib[ET.QName("{uri}key")] = "value" >>> serialize(elem) # 2.2 '<ns0:tag xmlns:ns0="uri" ns0:key="value" />' 3) decorated values are not converted by default, but the QName wrapper can be used for values >>> elem.clear() >>> elem.attrib["{uri}key"] = "{uri}value" >>> serialize(elem) # 3.1 '<ns0:tag xmlns:ns0="uri" ns0:key="{uri}value" />' >>> elem.clear() >>> elem.attrib["{uri}key"] = ET.QName("{uri}value") >>> serialize(elem) # 3.2 '<ns0:tag xmlns:ns0="uri" ns0:key="ns0:value" />' >>> elem.clear() >>> subelem = ET.Element("tag") >>> subelem.attrib["{uri1}key"] = ET.QName("{uri2}value") >>> elem.append(subelem) >>> elem.append(subelem) >>> serialize(elem) # 3.3 '<ns0:tag xmlns:ns0="uri" xmlns:ns1="uri1" xmlns:ns2="uri2"><tag ns1:key="ns2:value" /><tag ns1:key="ns2:value" /></ns0:tag>' 4) Direct QName tests >>> str(ET.QName('ns', 'tag')) '{ns}tag' >>> str(ET.QName('{ns}tag')) '{ns}tag' >>> q1 = ET.QName('ns', 'tag') >>> q2 = ET.QName('ns', 'tag') >>> q1 == q2 True >>> q2 = ET.QName('ns', 'other-tag') >>> q1 == q2 False >>> q1 == 'ns:tag' False >>> q1 == '{ns}tag' True """ def doctype_public(): """ Test PUBLIC doctype. >>> elem = ET.XML('<!DOCTYPE html PUBLIC' ... ' "-//W3C//DTD XHTML 1.0 Transitional//EN"' ... 
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
    ...   '<html>text</html>')

    """

def xpath_tokenizer(p):
    """
    Test the XPath tokenizer.

    >>> # tests from the xml specification
    >>> xpath_tokenizer("*")
    ['*']
    >>> xpath_tokenizer("text()")
    ['text', '()']
    >>> xpath_tokenizer("@name")
    ['@', 'name']
    >>> xpath_tokenizer("@*")
    ['@', '*']
    >>> xpath_tokenizer("para[1]")
    ['para', '[', '1', ']']
    >>> xpath_tokenizer("para[last()]")
    ['para', '[', 'last', '()', ']']
    >>> xpath_tokenizer("*/para")
    ['*', '/', 'para']
    >>> xpath_tokenizer("/doc/chapter[5]/section[2]")
    ['/', 'doc', '/', 'chapter', '[', '5', ']', '/', 'section', '[', '2', ']']
    >>> xpath_tokenizer("chapter//para")
    ['chapter', '//', 'para']
    >>> xpath_tokenizer("//para")
    ['//', 'para']
    >>> xpath_tokenizer("//olist/item")
    ['//', 'olist', '/', 'item']
    >>> xpath_tokenizer(".")
    ['.']
    >>> xpath_tokenizer(".//para")
    ['.', '//', 'para']
    >>> xpath_tokenizer("..")
    ['..']
    >>> xpath_tokenizer("../@lang")
    ['..', '/', '@', 'lang']
    >>> xpath_tokenizer("chapter[title]")
    ['chapter', '[', 'title', ']']
    >>> xpath_tokenizer("employee[@secretary and @assistant]")
    ['employee', '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']']

    >>> # additional tests
    >>> xpath_tokenizer("{http://spam}egg")
    ['{http://spam}egg']
    >>> xpath_tokenizer("./spam.egg")
    ['.', '/', 'spam.egg']
    >>> xpath_tokenizer(".//{http://spam}egg")
    ['.', '//', '{http://spam}egg']
    """
    from xml.etree import ElementPath
    out = []
    for op, tag in ElementPath.xpath_tokenizer(p):
        # Each yielded token is an (op, tag) pair where only one half is
        # non-empty; flatten to whichever half carries the token text.
        out.append(op or tag)
    return out

def processinginstruction():
    """
    Test ProcessingInstruction directly

    >>> ET.tostring(ET.ProcessingInstruction('test', 'instruction'))
    '<?test instruction?>'
    >>> ET.tostring(ET.PI('test', 'instruction'))
    '<?test instruction?>'

    Issue #2746

    >>> ET.tostring(ET.PI('test', '<testing&>'))
    '<?test <testing&>?>'
    >>> ET.tostring(ET.PI('test', u'<testing&>\xe3'), 'latin1')
    "<?xml version='1.0' encoding='latin1'?>\\n<?test <testing&>\\xe3?>"
    """

#
# xinclude tests (samples from
appendix C of the xinclude specification) XINCLUDE = {} XINCLUDE["C1.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>120 Mz is adequate for an average home user.</p> <xi:include href="disclaimer.xml"/> </document> """ XINCLUDE["disclaimer.xml"] = """\ <?xml version='1.0'?> <disclaimer> <p>The opinions represented herein represent those of the individual and should not be interpreted as official policy endorsed by this organization.</p> </disclaimer> """ XINCLUDE["C2.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>This document has been accessed <xi:include href="count.txt" parse="text"/> times.</p> </document> """ XINCLUDE["count.txt"] = "324387" XINCLUDE["C2b.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>This document has been <em>accessed</em> <xi:include href="count.txt" parse="text"/> times.</p> </document> """ XINCLUDE["C3.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>The following is the source of the "data.xml" resource:</p> <example><xi:include href="data.xml" parse="text"/></example> </document> """ XINCLUDE["data.xml"] = """\ <?xml version='1.0'?> <data> <item><![CDATA[Brooks & Shields]]></item> </data> """ XINCLUDE["C5.xml"] = """\ <?xml version='1.0'?> <div xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="example.txt" parse="text"> <xi:fallback> <xi:include href="fallback-example.txt" parse="text"> <xi:fallback><a href="mailto:bob@example.org">Report error</a></xi:fallback> </xi:include> </xi:fallback> </xi:include> </div> """ XINCLUDE["default.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>Example.</p> <xi:include href="{}"/> </document> """.format(cgi.escape(SIMPLE_XMLFILE, True)) def xinclude_loader(href, parse="xml", encoding=None): try: data = XINCLUDE[href] except KeyError: raise IOError("resource not found") if 
parse == "xml": from xml.etree.ElementTree import XML return XML(data) return data def xinclude(): r""" Basic inclusion example (XInclude C.1) >>> from xml.etree import ElementTree as ET >>> from xml.etree import ElementInclude >>> document = xinclude_loader("C1.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print serialize(document) # C1 <document> <p>120 Mz is adequate for an average home user.</p> <disclaimer> <p>The opinions represented herein represent those of the individual and should not be interpreted as official policy endorsed by this organization.</p> </disclaimer> </document> Textual inclusion example (XInclude C.2) >>> document = xinclude_loader("C2.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print serialize(document) # C2 <document> <p>This document has been accessed 324387 times.</p> </document> Textual inclusion after sibling element (based on modified XInclude C.2) >>> document = xinclude_loader("C2b.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print(serialize(document)) # C2b <document> <p>This document has been <em>accessed</em> 324387 times.</p> </document> Textual inclusion of XML example (XInclude C.3) >>> document = xinclude_loader("C3.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print serialize(document) # C3 <document> <p>The following is the source of the "data.xml" resource:</p> <example>&lt;?xml version='1.0'?&gt; &lt;data&gt; &lt;item&gt;&lt;![CDATA[Brooks &amp; Shields]]&gt;&lt;/item&gt; &lt;/data&gt; </example> </document> Fallback example (XInclude C.5) Note! 
Fallback support is not yet implemented >>> document = xinclude_loader("C5.xml") >>> ElementInclude.include(document, xinclude_loader) Traceback (most recent call last): IOError: resource not found >>> # print serialize(document) # C5 """ def xinclude_default(): """ >>> from xml.etree import ElementInclude >>> document = xinclude_loader("default.xml") >>> ElementInclude.include(document) >>> print serialize(document) # default <document> <p>Example.</p> <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> </document> """ # # badly formatted xi:include tags XINCLUDE_BAD = {} XINCLUDE_BAD["B1.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>120 Mz is adequate for an average home user.</p> <xi:include href="disclaimer.xml" parse="BAD_TYPE"/> </document> """ XINCLUDE_BAD["B2.xml"] = """\ <?xml version='1.0'?> <div xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:fallback></xi:fallback> </div> """ def xinclude_failures(): r""" Test failure to locate included XML file. >>> from xml.etree import ElementInclude >>> def none_loader(href, parser, encoding=None): ... return None >>> document = ET.XML(XINCLUDE["C1.xml"]) >>> ElementInclude.include(document, loader=none_loader) Traceback (most recent call last): FatalIncludeError: cannot load 'disclaimer.xml' as 'xml' Test failure to locate included text file. >>> document = ET.XML(XINCLUDE["C2.xml"]) >>> ElementInclude.include(document, loader=none_loader) Traceback (most recent call last): FatalIncludeError: cannot load 'count.txt' as 'text' Test bad parse type. >>> document = ET.XML(XINCLUDE_BAD["B1.xml"]) >>> ElementInclude.include(document, loader=none_loader) Traceback (most recent call last): FatalIncludeError: unknown parse type in xi:include tag ('BAD_TYPE') Test xi:fallback outside xi:include. 
>>> document = ET.XML(XINCLUDE_BAD["B2.xml"]) >>> ElementInclude.include(document, loader=none_loader) Traceback (most recent call last): FatalIncludeError: xi:fallback tag must be child of xi:include ('{http://www.w3.org/2001/XInclude}fallback') """ # -------------------------------------------------------------------- # reported bugs def bug_xmltoolkit21(): """ marshaller gives obscure errors for non-string values >>> elem = ET.Element(123) >>> serialize(elem) # tag Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ET.Element("elem") >>> elem.text = 123 >>> serialize(elem) # text Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ET.Element("elem") >>> elem.tail = 123 >>> serialize(elem) # tail Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ET.Element("elem") >>> elem.set(123, "123") >>> serialize(elem) # attribute key Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ET.Element("elem") >>> elem.set("123", 123) >>> serialize(elem) # attribute value Traceback (most recent call last): TypeError: cannot serialize 123 (type int) """ def bug_xmltoolkit25(): """ typo in ElementTree.findtext >>> elem = ET.XML(SAMPLE_XML) >>> tree = ET.ElementTree(elem) >>> tree.findtext("tag") 'text' >>> tree.findtext("section/tag") 'subtext' """ def bug_xmltoolkit28(): """ .//tag causes exceptions >>> tree = ET.XML("<doc><table><tbody/></table></doc>") >>> summarize_list(tree.findall(".//thead")) [] >>> summarize_list(tree.findall(".//tbody")) ['tbody'] """ def bug_xmltoolkitX1(): """ dump() doesn't flush the output buffer >>> tree = ET.XML("<doc><table><tbody/></table></doc>") >>> ET.dump(tree); sys.stdout.write("tail") <doc><table><tbody /></table></doc> tail """ def bug_xmltoolkit39(): """ non-ascii element and attribute names doesn't work >>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g />") >>> 
ET.tostring(tree, "utf-8") '<t\\xc3\\xa4g />' >>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><tag \xe4ttr='v&#228;lue' />") >>> tree.attrib {u'\\xe4ttr': u'v\\xe4lue'} >>> ET.tostring(tree, "utf-8") '<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />' >>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g>text</t\xe4g>") >>> ET.tostring(tree, "utf-8") '<t\\xc3\\xa4g>text</t\\xc3\\xa4g>' >>> tree = ET.Element(u"t\u00e4g") >>> ET.tostring(tree, "utf-8") '<t\\xc3\\xa4g />' >>> tree = ET.Element("tag") >>> tree.set(u"\u00e4ttr", u"v\u00e4lue") >>> ET.tostring(tree, "utf-8") '<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />' """ def bug_xmltoolkit54(): """ problems handling internally defined entities >>> e = ET.XML("<!DOCTYPE doc [<!ENTITY ldots '&#x8230;'>]><doc>&ldots;</doc>") >>> serialize(e) '<doc>&#33328;</doc>' """ def bug_xmltoolkit55(): """ make sure we're reporting the first error, not the last >>> e = ET.XML("<!DOCTYPE doc SYSTEM 'doc.dtd'><doc>&ldots;&ndots;&rdots;</doc>") Traceback (most recent call last): ParseError: undefined entity &ldots;: line 1, column 36 """ class ExceptionFile: def read(self, x): raise IOError def xmltoolkit60(): """ Handle crash in stream source. >>> tree = ET.parse(ExceptionFile()) Traceback (most recent call last): IOError """ XMLTOOLKIT62_DOC = """<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE patent-application-publication SYSTEM "pap-v15-2001-01-31.dtd" []> <patent-application-publication> <subdoc-abstract> <paragraph id="A-0001" lvl="0">A new cultivar of Begonia plant named &lsquo;BCT9801BEG&rsquo;.</paragraph> </subdoc-abstract> </patent-application-publication>""" def xmltoolkit62(): """ Don't crash when using custom entities. >>> xmltoolkit62() u'A new cultivar of Begonia plant named \u2018BCT9801BEG\u2019.' 
""" ENTITIES = {u'rsquo': u'\u2019', u'lsquo': u'\u2018'} parser = ET.XMLTreeBuilder() parser.entity.update(ENTITIES) parser.feed(XMLTOOLKIT62_DOC) t = parser.close() return t.find('.//paragraph').text def xmltoolkit63(): """ Check reference leak. >>> xmltoolkit63() >>> count = sys.getrefcount(None) >>> for i in range(1000): ... xmltoolkit63() >>> sys.getrefcount(None) - count 0 """ tree = ET.TreeBuilder() tree.start("tag", {}) tree.data("text") tree.end("tag") # -------------------------------------------------------------------- def bug_200708_newline(): r""" Preserve newlines in attributes. >>> e = ET.Element('SomeTag', text="def _f():\n return 3\n") >>> ET.tostring(e) '<SomeTag text="def _f():&#10; return 3&#10;" />' >>> ET.XML(ET.tostring(e)).get("text") 'def _f():\n return 3\n' >>> ET.tostring(ET.XML(ET.tostring(e))) '<SomeTag text="def _f():&#10; return 3&#10;" />' """ def bug_200708_close(): """ Test default builder. >>> parser = ET.XMLParser() # default >>> parser.feed("<element>some text</element>") >>> summarize(parser.close()) 'element' Test custom builder. >>> class EchoTarget: ... def close(self): ... 
return ET.Element("element") # simulate root >>> parser = ET.XMLParser(EchoTarget()) >>> parser.feed("<element>some text</element>") >>> summarize(parser.close()) 'element' """ def bug_200709_default_namespace(): """ >>> e = ET.Element("{default}elem") >>> s = ET.SubElement(e, "{default}elem") >>> serialize(e, default_namespace="default") # 1 '<elem xmlns="default"><elem /></elem>' >>> e = ET.Element("{default}elem") >>> s = ET.SubElement(e, "{default}elem") >>> s = ET.SubElement(e, "{not-default}elem") >>> serialize(e, default_namespace="default") # 2 '<elem xmlns="default" xmlns:ns1="not-default"><elem /><ns1:elem /></elem>' >>> e = ET.Element("{default}elem") >>> s = ET.SubElement(e, "{default}elem") >>> s = ET.SubElement(e, "elem") # unprefixed name >>> serialize(e, default_namespace="default") # 3 Traceback (most recent call last): ValueError: cannot use non-qualified names with default_namespace option """ def bug_200709_register_namespace(): """ >>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title")) '<ns0:title xmlns:ns0="http://namespace.invalid/does/not/exist/" />' >>> ET.register_namespace("foo", "http://namespace.invalid/does/not/exist/") >>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title")) '<foo:title xmlns:foo="http://namespace.invalid/does/not/exist/" />' And the Dublin Core namespace is in the default list: >>> ET.tostring(ET.Element("{http://purl.org/dc/elements/1.1/}title")) '<dc:title xmlns:dc="http://purl.org/dc/elements/1.1/" />' """ def bug_200709_element_comment(): """ Not sure if this can be fixed, really (since the serializer needs ET.Comment, not cET.comment). 
>>> a = ET.Element('a') >>> a.append(ET.Comment('foo')) >>> a[0].tag == ET.Comment True >>> a = ET.Element('a') >>> a.append(ET.PI('foo')) >>> a[0].tag == ET.PI True """ def bug_200709_element_insert(): """ >>> a = ET.Element('a') >>> b = ET.SubElement(a, 'b') >>> c = ET.SubElement(a, 'c') >>> d = ET.Element('d') >>> a.insert(0, d) >>> summarize_list(a) ['d', 'b', 'c'] >>> a.insert(-1, d) >>> summarize_list(a) ['d', 'b', 'd', 'c'] """ def bug_200709_iter_comment(): """ >>> a = ET.Element('a') >>> b = ET.SubElement(a, 'b') >>> comment_b = ET.Comment("TEST-b") >>> b.append(comment_b) >>> summarize_list(a.iter(ET.Comment)) ['<Comment>'] """ # -------------------------------------------------------------------- # reported on bugs.python.org def bug_1534630(): """ >>> bob = ET.TreeBuilder() >>> e = bob.data("data") >>> e = bob.start("tag", {}) >>> e = bob.end("tag") >>> e = bob.close() >>> serialize(e) '<tag />' """ def check_issue6233(): """ >>> e = ET.XML("<?xml version='1.0' encoding='utf-8'?><body>t\\xc3\\xa3g</body>") >>> ET.tostring(e, 'ascii') "<?xml version='1.0' encoding='ascii'?>\\n<body>t&#227;g</body>" >>> e = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><body>t\\xe3g</body>") >>> ET.tostring(e, 'ascii') "<?xml version='1.0' encoding='ascii'?>\\n<body>t&#227;g</body>" """ def check_issue3151(): """ >>> e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>') >>> e.tag '{${stuff}}localname' >>> t = ET.ElementTree(e) >>> ET.tostring(e) '<ns0:localname xmlns:ns0="${stuff}" />' """ def check_issue6565(): """ >>> elem = ET.XML("<body><tag/></body>") >>> summarize_list(elem) ['tag'] >>> newelem = ET.XML(SAMPLE_XML) >>> elem[:] = newelem[:] >>> summarize_list(elem) ['tag', 'tag', 'section'] """ def check_html_empty_elems_serialization(self): # issue 15970 # from http://www.w3.org/TR/html401/index/elements.html """ >>> empty_elems = ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR', ... 
'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM'] >>> elems = ''.join('<%s />' % elem for elem in empty_elems) >>> serialize(ET.XML('<html>%s</html>' % elems), method='html') '<html><AREA><BASE><BASEFONT><BR><COL><FRAME><HR><IMG><INPUT><ISINDEX><LINK><META><PARAM></html>' >>> serialize(ET.XML('<html>%s</html>' % elems.lower()), method='html') '<html><area><base><basefont><br><col><frame><hr><img><input><isindex><link><meta><param></html>' >>> elems = ''.join('<%s></%s>' % (elem, elem) for elem in empty_elems) >>> serialize(ET.XML('<html>%s</html>' % elems), method='html') '<html><AREA><BASE><BASEFONT><BR><COL><FRAME><HR><IMG><INPUT><ISINDEX><LINK><META><PARAM></html>' >>> serialize(ET.XML('<html>%s</html>' % elems.lower()), method='html') '<html><area><base><basefont><br><col><frame><hr><img><input><isindex><link><meta><param></html>' """ # -------------------------------------------------------------------- class CleanContext(object): """Provide default namespace mapping and path cache.""" checkwarnings = None def __init__(self, quiet=False): if sys.flags.optimize >= 2: # under -OO, doctests cannot be run and therefore not all warnings # will be emitted quiet = True deprecations = ( # Search behaviour is broken if search path starts with "/". ("This search is broken in 1.3 and earlier, and will be fixed " "in a future version. If you rely on the current behaviour, " "change it to '.+'", FutureWarning), # Element.getchildren() and Element.getiterator() are deprecated. ("This method will be removed in future versions. " "Use .+ instead.", DeprecationWarning), ("This method will be removed in future versions. " "Use .+ instead.", PendingDeprecationWarning), # XMLParser.doctype() is deprecated. ("This method of XMLParser is deprecated. Define doctype.. 
" "method on the TreeBuilder target.", DeprecationWarning)) self.checkwarnings = test_support.check_warnings(*deprecations, quiet=quiet) def __enter__(self): from xml.etree import ElementTree self._nsmap = ElementTree._namespace_map self._path_cache = ElementTree.ElementPath._cache # Copy the default namespace mapping ElementTree._namespace_map = self._nsmap.copy() # Copy the path cache (should be empty) ElementTree.ElementPath._cache = self._path_cache.copy() self.checkwarnings.__enter__() def __exit__(self, *args): from xml.etree import ElementTree # Restore mapping and path cache ElementTree._namespace_map = self._nsmap ElementTree.ElementPath._cache = self._path_cache self.checkwarnings.__exit__(*args) def test_main(module_name='xml.etree.ElementTree'): from test import test_xml_etree use_py_module = (module_name == 'xml.etree.ElementTree') # The same doctests are used for both the Python and the C implementations assert test_xml_etree.ET.__name__ == module_name # XXX the C module should give the same warnings as the Python module with CleanContext(quiet=not use_py_module): test_support.run_doctest(test_xml_etree, verbosity=True) # The module should not be changed by the tests assert test_xml_etree.ET.__name__ == module_name if __name__ == '__main__': test_main()
DrMeers/django
refs/heads/master
tests/file_uploads/views.py
21
from __future__ import unicode_literals import hashlib import json import os from django.core.files.uploadedfile import UploadedFile from django.http import HttpResponse, HttpResponseServerError from django.utils import six from django.utils.encoding import force_bytes, smart_str from .models import FileModel from .tests import UNICODE_FILENAME, UPLOAD_TO from .uploadhandler import QuotaUploadHandler, ErroringUploadHandler def file_upload_view(request): """ Check that a file upload can be updated into the POST dictionary without going pear-shaped. """ form_data = request.POST.copy() form_data.update(request.FILES) if isinstance(form_data.get('file_field'), UploadedFile) and isinstance(form_data['name'], six.text_type): # If a file is posted, the dummy client should only post the file name, # not the full path. if os.path.dirname(form_data['file_field'].name) != '': return HttpResponseServerError() return HttpResponse('') else: return HttpResponseServerError() def file_upload_view_verify(request): """ Use the sha digest hash to verify the uploaded contents. """ form_data = request.POST.copy() form_data.update(request.FILES) for key, value in form_data.items(): if key.endswith('_hash'): continue if key + '_hash' not in form_data: continue submitted_hash = form_data[key + '_hash'] if isinstance(value, UploadedFile): new_hash = hashlib.sha1(value.read()).hexdigest() else: new_hash = hashlib.sha1(force_bytes(value)).hexdigest() if new_hash != submitted_hash: return HttpResponseServerError() # Adding large file to the database should succeed largefile = request.FILES['file_field2'] obj = FileModel() obj.testfile.save(largefile.name, largefile) return HttpResponse('') def file_upload_unicode_name(request): # Check to see if unicode name came through properly. if not request.FILES['file_unicode'].name.endswith(UNICODE_FILENAME): return HttpResponseServerError() response = None # Check to make sure the exotic characters are preserved even # through file save. 
uni_named_file = request.FILES['file_unicode'] obj = FileModel.objects.create(testfile=uni_named_file) full_name = '%s/%s' % (UPLOAD_TO, uni_named_file.name) if not os.path.exists(full_name): response = HttpResponseServerError() # Cleanup the object with its exotic file name immediately. # (shutil.rmtree used elsewhere in the tests to clean up the # upload directory has been seen to choke on unicode # filenames on Windows.) obj.delete() os.unlink(full_name) if response: return response else: return HttpResponse('') def file_upload_echo(request): """ Simple view to echo back info about uploaded files for tests. """ r = dict((k, f.name) for k, f in request.FILES.items()) return HttpResponse(json.dumps(r)) def file_upload_echo_content(request): """ Simple view to echo back the content of uploaded files for tests. """ r = dict((k, f.read().decode('utf-8')) for k, f in request.FILES.items()) return HttpResponse(json.dumps(r)) def file_upload_quota(request): """ Dynamically add in an upload handler. """ request.upload_handlers.insert(0, QuotaUploadHandler()) return file_upload_echo(request) def file_upload_quota_broken(request): """ You can't change handlers after reading FILES; this view shouldn't work. """ response = file_upload_echo(request) request.upload_handlers.insert(0, QuotaUploadHandler()) return response def file_upload_getlist_count(request): """ Check the .getlist() function to ensure we receive the correct number of files. """ file_counts = {} for key in request.FILES.keys(): file_counts[key] = len(request.FILES.getlist(key)) return HttpResponse(json.dumps(file_counts)) def file_upload_errors(request): request.upload_handlers.insert(0, ErroringUploadHandler()) return file_upload_echo(request) def file_upload_filename_case_view(request): """ Check adding the file to the database will preserve the filename case. 
""" file = request.FILES['file_field'] obj = FileModel() obj.testfile.save(file.name, file) return HttpResponse('%d' % obj.pk) def file_upload_content_type_extra(request): """ Simple view to echo back extra content-type parameters. """ params = {} for file_name, uploadedfile in request.FILES.items(): params[file_name] = dict([ (k, smart_str(v)) for k, v in uploadedfile.content_type_extra.items() ]) return HttpResponse(json.dumps(params))
rlr/fjord
refs/heads/master
vendor/packages/Babel/babel/core.py
79
# -*- coding: utf-8 -*- """ babel.core ~~~~~~~~~~ Core locale representation and locale data access. :copyright: (c) 2013 by the Babel Team. :license: BSD, see LICENSE for more details. """ import os from babel import localedata from babel._compat import pickle, string_types __all__ = ['UnknownLocaleError', 'Locale', 'default_locale', 'negotiate_locale', 'parse_locale'] _global_data = None def _raise_no_data_error(): raise RuntimeError('The babel data files are not available. ' 'This usually happens because you are using ' 'a source checkout from Babel and you did ' 'not build the data files. Just make sure ' 'to run "python setup.py import_cldr" before ' 'installing the library.') def get_global(key): """Return the dictionary for the given key in the global data. The global data is stored in the ``babel/global.dat`` file and contains information independent of individual locales. >>> get_global('zone_aliases')['UTC'] u'Etc/GMT' >>> get_global('zone_territories')['Europe/Berlin'] u'DE' .. 
versionadded:: 0.9 :param key: the data key """ global _global_data if _global_data is None: dirname = os.path.join(os.path.dirname(__file__)) filename = os.path.join(dirname, 'global.dat') if not os.path.isfile(filename): _raise_no_data_error() fileobj = open(filename, 'rb') try: _global_data = pickle.load(fileobj) finally: fileobj.close() return _global_data.get(key, {}) LOCALE_ALIASES = { 'ar': 'ar_SY', 'bg': 'bg_BG', 'bs': 'bs_BA', 'ca': 'ca_ES', 'cs': 'cs_CZ', 'da': 'da_DK', 'de': 'de_DE', 'el': 'el_GR', 'en': 'en_US', 'es': 'es_ES', 'et': 'et_EE', 'fa': 'fa_IR', 'fi': 'fi_FI', 'fr': 'fr_FR', 'gl': 'gl_ES', 'he': 'he_IL', 'hu': 'hu_HU', 'id': 'id_ID', 'is': 'is_IS', 'it': 'it_IT', 'ja': 'ja_JP', 'km': 'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV', 'mk': 'mk_MK', 'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL', 'pt': 'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI', 'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA' } class UnknownLocaleError(Exception): """Exception thrown when a locale is requested for which no locale data is available. """ def __init__(self, identifier): """Create the exception. :param identifier: the identifier string of the unsupported locale """ Exception.__init__(self, 'unknown locale %r' % identifier) #: The identifier of the locale that could not be found. self.identifier = identifier class Locale(object): """Representation of a specific locale. >>> locale = Locale('en', 'US') >>> repr(locale) "Locale('en', territory='US')" >>> locale.display_name u'English (United States)' A `Locale` object can also be instantiated from a raw locale string: >>> locale = Locale.parse('en-US', sep='-') >>> repr(locale) "Locale('en', territory='US')" `Locale` objects provide access to a collection of locale data, such as territory and language names, number and date format patterns, and more: >>> locale.number_symbols['decimal'] u'.' 
If a locale is requested for which no locale data is available, an `UnknownLocaleError` is raised: >>> Locale.parse('en_DE') Traceback (most recent call last): ... UnknownLocaleError: unknown locale 'en_DE' For more information see :rfc:`3066`. """ def __init__(self, language, territory=None, script=None, variant=None): """Initialize the locale object from the given identifier components. >>> locale = Locale('en', 'US') >>> locale.language 'en' >>> locale.territory 'US' :param language: the language code :param territory: the territory (country or region) code :param script: the script code :param variant: the variant code :raise `UnknownLocaleError`: if no locale data is available for the requested locale """ #: the language code self.language = language #: the territory (country or region) code self.territory = territory #: the script code self.script = script #: the variant code self.variant = variant self.__data = None identifier = str(self) if not localedata.exists(identifier): raise UnknownLocaleError(identifier) @classmethod def default(cls, category=None, aliases=LOCALE_ALIASES): """Return the system default locale for the specified category. >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LC_MESSAGES']: ... os.environ[name] = '' >>> os.environ['LANG'] = 'fr_FR.UTF-8' >>> Locale.default('LC_MESSAGES') Locale('fr', territory='FR') The following fallbacks to the variable are always considered: - ``LANGUAGE`` - ``LC_ALL`` - ``LC_CTYPE`` - ``LANG`` :param category: one of the ``LC_XXX`` environment variable names :param aliases: a dictionary of aliases for locale identifiers """ # XXX: use likely subtag expansion here instead of the # aliases dictionary. locale_string = default_locale(category, aliases=aliases) return cls.parse(locale_string) @classmethod def negotiate(cls, preferred, available, sep='_', aliases=LOCALE_ALIASES): """Find the best match between available and requested locale strings. 
>>> Locale.negotiate(['de_DE', 'en_US'], ['de_DE', 'de_AT']) Locale('de', territory='DE') >>> Locale.negotiate(['de_DE', 'en_US'], ['en', 'de']) Locale('de') >>> Locale.negotiate(['de_DE', 'de'], ['en_US']) You can specify the character used in the locale identifiers to separate the differnet components. This separator is applied to both lists. Also, case is ignored in the comparison: >>> Locale.negotiate(['de-DE', 'de'], ['en-us', 'de-de'], sep='-') Locale('de', territory='DE') :param preferred: the list of locale identifers preferred by the user :param available: the list of locale identifiers available :param aliases: a dictionary of aliases for locale identifiers """ identifier = negotiate_locale(preferred, available, sep=sep, aliases=aliases) if identifier: return Locale.parse(identifier, sep=sep) @classmethod def parse(cls, identifier, sep='_', resolve_likely_subtags=True): """Create a `Locale` instance for the given locale identifier. >>> l = Locale.parse('de-DE', sep='-') >>> l.display_name u'Deutsch (Deutschland)' If the `identifier` parameter is not a string, but actually a `Locale` object, that object is returned: >>> Locale.parse(l) Locale('de', territory='DE') This also can perform resolving of likely subtags which it does by default. This is for instance useful to figure out the most likely locale for a territory you can use ``'und'`` as the language tag: >>> Locale.parse('und_AT') Locale('de', territory='AT') :param identifier: the locale identifier string :param sep: optional component separator :param resolve_likely_subtags: if this is specified then a locale will have its likely subtag resolved if the locale otherwise does not exist. For instance ``zh_TW`` by itself is not a locale that exists but Babel can automatically expand it to the full form of ``zh_hant_TW``. Note that this expansion is only taking place if no locale exists otherwise. For instance there is a locale ``en`` that can exist by itself. 
:raise `ValueError`: if the string does not appear to be a valid locale identifier :raise `UnknownLocaleError`: if no locale data is available for the requested locale """ if identifier is None: return None elif isinstance(identifier, Locale): return identifier elif not isinstance(identifier, string_types): raise TypeError('Unxpected value for identifier: %r' % (identifier,)) parts = parse_locale(identifier, sep=sep) input_id = get_locale_identifier(parts) def _try_load(parts): try: return cls(*parts) except UnknownLocaleError: return None def _try_load_reducing(parts): # Success on first hit, return it. locale = _try_load(parts) if locale is not None: return locale # Now try without script and variant locale = _try_load(parts[:2]) if locale is not None: return locale locale = _try_load(parts) if locale is not None: return locale if not resolve_likely_subtags: raise UnknownLocaleError(input_id) # From here onwards is some very bad likely subtag resolving. This # whole logic is not entirely correct but good enough (tm) for the # time being. This has been added so that zh_TW does not cause # errors for people when they upgrade. Later we should properly # implement ICU like fuzzy locale objects and provide a way to # maximize and minimize locale tags. 
language, territory, script, variant = parts language = get_global('language_aliases').get(language, language) territory = get_global('territory_aliases').get(territory, territory) script = get_global('script_aliases').get(script, script) variant = get_global('variant_aliases').get(variant, variant) if territory == 'ZZ': territory = None if script == 'Zzzz': script = None parts = language, territory, script, variant # First match: try the whole identifier new_id = get_locale_identifier(parts) likely_subtag = get_global('likely_subtags').get(new_id) if likely_subtag is not None: locale = _try_load_reducing(parse_locale(likely_subtag)) if locale is not None: return locale # If we did not find anything so far, try again with a # simplified identifier that is just the language likely_subtag = get_global('likely_subtags').get(language) if likely_subtag is not None: language2, _, script2, variant2 = parse_locale(likely_subtag) locale = _try_load_reducing((language2, territory, script2, variant2)) if locale is not None: return locale raise UnknownLocaleError(input_id) def __eq__(self, other): for key in ('language', 'territory', 'script', 'variant'): if not hasattr(other, key): return False return (self.language == other.language) and \ (self.territory == other.territory) and \ (self.script == other.script) and \ (self.variant == other.variant) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): parameters = [''] for key in ('territory', 'script', 'variant'): value = getattr(self, key) if value is not None: parameters.append('%s=%r' % (key, value)) parameter_string = '%r' % self.language + ', '.join(parameters) return 'Locale(%s)' % parameter_string def __str__(self): return get_locale_identifier((self.language, self.territory, self.script, self.variant)) @property def _data(self): if self.__data is None: self.__data = localedata.LocaleDataDict(localedata.load(str(self))) return self.__data def get_display_name(self, locale=None): """Return the 
display name of the locale using the given locale. The display name will include the language, territory, script, and variant, if those are specified. >>> Locale('zh', 'CN', script='Hans').get_display_name('en') u'Chinese (Simplified, China)' :param locale: the locale to use """ if locale is None: locale = self locale = Locale.parse(locale) retval = locale.languages.get(self.language) if self.territory or self.script or self.variant: details = [] if self.script: details.append(locale.scripts.get(self.script)) if self.territory: details.append(locale.territories.get(self.territory)) if self.variant: details.append(locale.variants.get(self.variant)) details = filter(None, details) if details: retval += ' (%s)' % u', '.join(details) return retval display_name = property(get_display_name, doc="""\ The localized display name of the locale. >>> Locale('en').display_name u'English' >>> Locale('en', 'US').display_name u'English (United States)' >>> Locale('sv').display_name u'svenska' :type: `unicode` """) def get_language_name(self, locale=None): """Return the language of this locale in the given locale. >>> Locale('zh', 'CN', script='Hans').get_language_name('de') u'Chinesisch' .. versionadded:: 1.0 :param locale: the locale to use """ if locale is None: locale = self locale = Locale.parse(locale) return locale.languages.get(self.language) language_name = property(get_language_name, doc="""\ The localized language name of the locale. >>> Locale('en', 'US').language_name u'English' """) def get_territory_name(self, locale=None): """Return the territory name in the given locale.""" if locale is None: locale = self locale = Locale.parse(locale) return locale.territories.get(self.territory) territory_name = property(get_territory_name, doc="""\ The localized territory name of the locale if available. 
>>> Locale('de', 'DE').territory_name u'Deutschland' """) def get_script_name(self, locale=None): """Return the script name in the given locale.""" if locale is None: locale = self locale = Locale.parse(locale) return locale.scripts.get(self.script) script_name = property(get_script_name, doc="""\ The localized script name of the locale if available. >>> Locale('ms', 'SG', script='Latn').script_name u'Latin' """) @property def english_name(self): """The english display name of the locale. >>> Locale('de').english_name u'German' >>> Locale('de', 'DE').english_name u'German (Germany)' :type: `unicode`""" return self.get_display_name(Locale('en')) #{ General Locale Display Names @property def languages(self): """Mapping of language codes to translated language names. >>> Locale('de', 'DE').languages['ja'] u'Japanisch' See `ISO 639 <http://www.loc.gov/standards/iso639-2/>`_ for more information. """ return self._data['languages'] @property def scripts(self): """Mapping of script codes to translated script names. >>> Locale('en', 'US').scripts['Hira'] u'Hiragana' See `ISO 15924 <http://www.evertype.com/standards/iso15924/>`_ for more information. """ return self._data['scripts'] @property def territories(self): """Mapping of script codes to translated script names. >>> Locale('es', 'CO').territories['DE'] u'Alemania' See `ISO 3166 <http://www.iso.org/iso/en/prods-services/iso3166ma/>`_ for more information. """ return self._data['territories'] @property def variants(self): """Mapping of script codes to translated script names. >>> Locale('de', 'DE').variants['1901'] u'Alte deutsche Rechtschreibung' """ return self._data['variants'] #{ Number Formatting @property def currencies(self): """Mapping of currency codes to translated currency names. This only returns the generic form of the currency name, not the count specific one. If an actual number is requested use the :func:`babel.numbers.get_currency_name` function. 
>>> Locale('en').currencies['COP'] u'Colombian Peso' >>> Locale('de', 'DE').currencies['COP'] u'Kolumbianischer Peso' """ return self._data['currency_names'] @property def currency_symbols(self): """Mapping of currency codes to symbols. >>> Locale('en', 'US').currency_symbols['USD'] u'$' >>> Locale('es', 'CO').currency_symbols['USD'] u'US$' """ return self._data['currency_symbols'] @property def number_symbols(self): """Symbols used in number formatting. >>> Locale('fr', 'FR').number_symbols['decimal'] u',' """ return self._data['number_symbols'] @property def decimal_formats(self): """Locale patterns for decimal number formatting. >>> Locale('en', 'US').decimal_formats[None] <NumberPattern u'#,##0.###'> """ return self._data['decimal_formats'] @property def currency_formats(self): """Locale patterns for currency number formatting. >>> print Locale('en', 'US').currency_formats[None] <NumberPattern u'\\xa4#,##0.00'> """ return self._data['currency_formats'] @property def percent_formats(self): """Locale patterns for percent number formatting. >>> Locale('en', 'US').percent_formats[None] <NumberPattern u'#,##0%'> """ return self._data['percent_formats'] @property def scientific_formats(self): """Locale patterns for scientific number formatting. >>> Locale('en', 'US').scientific_formats[None] <NumberPattern u'#E0'> """ return self._data['scientific_formats'] #{ Calendar Information and Date Formatting @property def periods(self): """Locale display names for day periods (AM/PM). >>> Locale('en', 'US').periods['am'] u'AM' """ return self._data['periods'] @property def days(self): """Locale display names for weekdays. >>> Locale('de', 'DE').days['format']['wide'][3] u'Donnerstag' """ return self._data['days'] @property def months(self): """Locale display names for months. >>> Locale('de', 'DE').months['format']['wide'][10] u'Oktober' """ return self._data['months'] @property def quarters(self): """Locale display names for quarters. 
>>> Locale('de', 'DE').quarters['format']['wide'][1] u'1. Quartal' """ return self._data['quarters'] @property def eras(self): """Locale display names for eras. >>> Locale('en', 'US').eras['wide'][1] u'Anno Domini' >>> Locale('en', 'US').eras['abbreviated'][0] u'BC' """ return self._data['eras'] @property def time_zones(self): """Locale display names for time zones. >>> Locale('en', 'US').time_zones['Europe/London']['long']['daylight'] u'British Summer Time' >>> Locale('en', 'US').time_zones['America/St_Johns']['city'] u'St. John\u2019s' """ return self._data['time_zones'] @property def meta_zones(self): """Locale display names for meta time zones. Meta time zones are basically groups of different Olson time zones that have the same GMT offset and daylight savings time. >>> Locale('en', 'US').meta_zones['Europe_Central']['long']['daylight'] u'Central European Summer Time' .. versionadded:: 0.9 """ return self._data['meta_zones'] @property def zone_formats(self): """Patterns related to the formatting of time zones. >>> Locale('en', 'US').zone_formats['fallback'] u'%(1)s (%(0)s)' >>> Locale('pt', 'BR').zone_formats['region'] u'Hor\\xe1rio %s' .. versionadded:: 0.9 """ return self._data['zone_formats'] @property def first_week_day(self): """The first day of a week, with 0 being Monday. >>> Locale('de', 'DE').first_week_day 0 >>> Locale('en', 'US').first_week_day 6 """ return self._data['week_data']['first_day'] @property def weekend_start(self): """The day the weekend starts, with 0 being Monday. >>> Locale('de', 'DE').weekend_start 5 """ return self._data['week_data']['weekend_start'] @property def weekend_end(self): """The day the weekend ends, with 0 being Monday. >>> Locale('de', 'DE').weekend_end 6 """ return self._data['week_data']['weekend_end'] @property def min_week_days(self): """The minimum number of days in a week so that the week is counted as the first week of a year or month. 
>>> Locale('de', 'DE').min_week_days 4 """ return self._data['week_data']['min_days'] @property def date_formats(self): """Locale patterns for date formatting. >>> Locale('en', 'US').date_formats['short'] <DateTimePattern u'M/d/yy'> >>> Locale('fr', 'FR').date_formats['long'] <DateTimePattern u'd MMMM y'> """ return self._data['date_formats'] @property def time_formats(self): """Locale patterns for time formatting. >>> Locale('en', 'US').time_formats['short'] <DateTimePattern u'h:mm a'> >>> Locale('fr', 'FR').time_formats['long'] <DateTimePattern u'HH:mm:ss z'> """ return self._data['time_formats'] @property def datetime_formats(self): """Locale patterns for datetime formatting. >>> Locale('en').datetime_formats['full'] u"{1} 'at' {0}" >>> Locale('th').datetime_formats['medium'] u'{1}, {0}' """ return self._data['datetime_formats'] @property def plural_form(self): """Plural rules for the locale. >>> Locale('en').plural_form(1) 'one' >>> Locale('en').plural_form(0) 'other' >>> Locale('fr').plural_form(0) 'one' >>> Locale('ru').plural_form(100) 'many' """ return self._data['plural_form'] def default_locale(category=None, aliases=LOCALE_ALIASES): """Returns the system default locale for a given category, based on environment variables. >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE']: ... 
os.environ[name] = '' >>> os.environ['LANG'] = 'fr_FR.UTF-8' >>> default_locale('LC_MESSAGES') 'fr_FR' The "C" or "POSIX" pseudo-locales are treated as aliases for the "en_US_POSIX" locale: >>> os.environ['LC_MESSAGES'] = 'POSIX' >>> default_locale('LC_MESSAGES') 'en_US_POSIX' The following fallbacks to the variable are always considered: - ``LANGUAGE`` - ``LC_ALL`` - ``LC_CTYPE`` - ``LANG`` :param category: one of the ``LC_XXX`` environment variable names :param aliases: a dictionary of aliases for locale identifiers """ varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG') for name in filter(None, varnames): locale = os.getenv(name) if locale: if name == 'LANGUAGE' and ':' in locale: # the LANGUAGE variable may contain a colon-separated list of # language codes; we just pick the language on the list locale = locale.split(':')[0] if locale in ('C', 'POSIX'): locale = 'en_US_POSIX' elif aliases and locale in aliases: locale = aliases[locale] try: return get_locale_identifier(parse_locale(locale)) except ValueError: pass def negotiate_locale(preferred, available, sep='_', aliases=LOCALE_ALIASES): """Find the best match between available and requested locale strings. >>> negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT']) 'de_DE' >>> negotiate_locale(['de_DE', 'en_US'], ['en', 'de']) 'de' Case is ignored by the algorithm, the result uses the case of the preferred locale identifier: >>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at']) 'de_DE' >>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at']) 'de_DE' By default, some web browsers unfortunately do not include the territory in the locale identifier for many locales, and some don't even allow the user to easily add the territory. So while you may prefer using qualified locale identifiers in your web-application, they would not normally match the language-only locale sent by such browsers. 
To workaround that, this function uses a default mapping of commonly used langauge-only locale identifiers to identifiers including the territory: >>> negotiate_locale(['ja', 'en_US'], ['ja_JP', 'en_US']) 'ja_JP' Some browsers even use an incorrect or outdated language code, such as "no" for Norwegian, where the correct locale identifier would actually be "nb_NO" (Bokmål) or "nn_NO" (Nynorsk). The aliases are intended to take care of such cases, too: >>> negotiate_locale(['no', 'sv'], ['nb_NO', 'sv_SE']) 'nb_NO' You can override this default mapping by passing a different `aliases` dictionary to this function, or you can bypass the behavior althogher by setting the `aliases` parameter to `None`. :param preferred: the list of locale strings preferred by the user :param available: the list of locale strings available :param sep: character that separates the different parts of the locale strings :param aliases: a dictionary of aliases for locale identifiers """ available = [a.lower() for a in available if a] for locale in preferred: ll = locale.lower() if ll in available: return locale if aliases: alias = aliases.get(ll) if alias: alias = alias.replace('_', sep) if alias.lower() in available: return alias parts = locale.split(sep) if len(parts) > 1 and parts[0].lower() in available: return parts[0] return None def parse_locale(identifier, sep='_'): """Parse a locale identifier into a tuple of the form ``(language, territory, script, variant)``. >>> parse_locale('zh_CN') ('zh', 'CN', None, None) >>> parse_locale('zh_Hans_CN') ('zh', 'CN', 'Hans', None) The default component separator is "_", but a different separator can be specified using the `sep` parameter: >>> parse_locale('zh-CN', sep='-') ('zh', 'CN', None, None) If the identifier cannot be parsed into a locale, a `ValueError` exception is raised: >>> parse_locale('not_a_LOCALE_String') Traceback (most recent call last): ... 
ValueError: 'not_a_LOCALE_String' is not a valid locale identifier Encoding information and locale modifiers are removed from the identifier: >>> parse_locale('it_IT@euro') ('it', 'IT', None, None) >>> parse_locale('en_US.UTF-8') ('en', 'US', None, None) >>> parse_locale('de_DE.iso885915@euro') ('de', 'DE', None, None) See :rfc:`4646` for more information. :param identifier: the locale identifier string :param sep: character that separates the different components of the locale identifier :raise `ValueError`: if the string does not appear to be a valid locale identifier """ if '.' in identifier: # this is probably the charset/encoding, which we don't care about identifier = identifier.split('.', 1)[0] if '@' in identifier: # this is a locale modifier such as @euro, which we don't care about # either identifier = identifier.split('@', 1)[0] parts = identifier.split(sep) lang = parts.pop(0).lower() if not lang.isalpha(): raise ValueError('expected only letters, got %r' % lang) script = territory = variant = None if parts: if len(parts[0]) == 4 and parts[0].isalpha(): script = parts.pop(0).title() if parts: if len(parts[0]) == 2 and parts[0].isalpha(): territory = parts.pop(0).upper() elif len(parts[0]) == 3 and parts[0].isdigit(): territory = parts.pop(0) if parts: if len(parts[0]) == 4 and parts[0][0].isdigit() or \ len(parts[0]) >= 5 and parts[0][0].isalpha(): variant = parts.pop() if parts: raise ValueError('%r is not a valid locale identifier' % identifier) return lang, territory, script, variant def get_locale_identifier(tup, sep='_'): """The reverse of :func:`parse_locale`. It creates a locale identifier out of a ``(language, territory, script, variant)`` tuple. Items can be set to ``None`` and trailing ``None``\s can also be left out of the tuple. >>> get_locale_identifier(('de', 'DE', None, '1999')) 'de_DE_1999' .. versionadded:: 1.0 :param tup: the tuple as returned by :func:`parse_locale`. :param sep: the separator for the identifier. 
""" tup = tuple(tup[:4]) lang, territory, script, variant = tup + (None,) * (4 - len(tup)) return sep.join(filter(None, (lang, script, territory, variant)))
amitu/rtime
refs/heads/master
src/pyrtime/tests/test_frame.py
1
from __future__ import absolute_import, unicode_literals

import pytest

from rtime.datastructures import Frame

from .utils import assert_similar_structure


def test_init():
    """A Frame reflects exactly the keyword arguments it was built with."""
    assert_similar_structure(Frame(), {})
    assert_similar_structure(Frame(name='name'), {'name': 'name'})
    expected = {'name': 'name', 'value': 1.0}
    assert_similar_structure(Frame(name='name', value=1.0), expected)


def test_name():
    """The name attribute is settable, readable, and rejects empty strings."""
    frame = Frame()
    frame.name = 'main_frame'
    assert frame.name == 'main_frame'
    assert_similar_structure(frame, {'name': 'main_frame'})
    # An empty name is invalid and must raise.
    with pytest.raises(ValueError):
        frame.name = ''


def test_stack():
    """A fresh frame starts with an empty child stack."""
    frame = Frame(name='name')
    assert_similar_structure(frame.stack, [])
    assert_similar_structure(frame, {'name': 'name', 'stack': []})


def test_push_frame():
    """push_frame appends a child frame onto the stack."""
    frame = Frame(name='main_frame')
    frame.push_frame(name='new_frame')
    expected = {'name': 'main_frame', 'stack': [{'name': 'new_frame'}]}
    assert_similar_structure(frame, expected)


def test_get_current_frame():
    """get_current_frame returns self when empty, else the pushed child."""
    frame = Frame()
    assert_similar_structure(frame.get_current_frame(), {})
    frame.push_frame(name='new_frame')
    assert_similar_structure(frame.get_current_frame(), {'name': 'new_frame'})


def test_add_frame_data():
    """add_frame_data attaches key/value data to the frame itself."""
    frame = Frame()
    frame.push_frame(name='new_frame')
    frame.add_frame_data(value=1.0)
    expected = {'stack': [{'name': 'new_frame'}], 'value': 1.0}
    assert_similar_structure(frame, expected)
egbertbouman/tribler-g
refs/heads/master
Tribler/Core/NATFirewall/NatCheck.py
1
# Written by Lucia D'Acunto # see LICENSE.txt for license information import socket import sys DEBUG = False def Test1(udpsock, serveraddr): """ The client sends a request to a server asking it to send the response back to the address and port the request came from """ retVal = {"resp":False, "ex_ip":None, "ex_port":None} BUFSIZ = 1024 reply = "" request = "ping1" udpsock.sendto(request, serveraddr) try: reply, rcvaddr = udpsock.recvfrom(BUFSIZ) except socket.timeout: if DEBUG: print >> sys.stderr, "NATCheck:", "Connection attempt to %s timed out" % (serveraddr,) return retVal except ValueError, (strerror): if DEBUG: print >> sys.stderr, "NATCheck:", "Could not receive data: %s" % (strerror) return retVal except socket.error, (errno, strerror): if DEBUG: print >> sys.stderr, "NATCheck:", "Could not receive data: %s" % (strerror) return retVal ex_ip, ex_port = reply.split(":") retVal["resp"] = True retVal["ex_ip"] = ex_ip retVal["ex_port"] = ex_port return retVal def Test2(udpsock, serveraddr): """ The client sends a request asking to receive an echo from a different address and a different port on the address and port the request came from """ retVal = {"resp":False} BUFSIZ = 1024 request = "ping2" udpsock.sendto(request, serveraddr) try: reply, rcvaddr = udpsock.recvfrom(BUFSIZ) except socket.timeout: #if DEBUG: print >> sys.stderr, "NATCheck:", "Connection attempt to %s timed out" % (serveraddr,) return retVal except ValueError, (strerror): if DEBUG: print >> sys.stderr, "NATCheck:", "Could not receive data: %s" % (strerror) return retVal except socket.error, (errno, strerror): if DEBUG: print >> sys.stderr, "NATCheck:", "Could not receive data: %s" % (strerror) return retVal retVal["resp"] = True return retVal def Test3(udpsock, serveraddr): """ The client sends a request asking to receive an echo from the same address but from a different port on the address and port the request came from """ retVal = {"resp":False, "ex_ip":None, "ex_port":None} BUFSIZ = 1024 
reply = "" request = "ping3" udpsock.sendto(request, serveraddr) try: reply, rcvaddr = udpsock.recvfrom(BUFSIZ) except socket.timeout: #if DEBUG: print >> sys.stderr, "NATCheck:", "Connection attempt to %s timed out" % (serveraddr,) return retVal except ValueError, (strerror): if DEBUG: print >> sys.stderr, "NATCheck:", "Could not receive data: %s" % (strerror) return retVal except socket.error, (errno, strerror): if DEBUG: print >> sys.stderr, "NATCheck:", "Could not receive data: %s" % (strerror) return retVal ex_ip, ex_port = reply.split(":") retVal["resp"] = True retVal["ex_ip"] = ex_ip retVal["ex_port"] = ex_port return retVal # Returns information about the NAT the client is behind def GetNATType(in_port, serveraddr1, serveraddr2): """ Returns the NAT type according to the STUN algorithm, as well as the external address (ip, port) and the internal address of the host """ serveraddr1 = ('stun1.tribler.org',6701) serveraddr2 = ('stun2.tribler.org',6702) nat_type, ex_ip, ex_port, in_ip = [-1, "Unknown"], "0.0.0.0", "0", "0.0.0.0" # Set up the socket udpsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) udpsock.settimeout(5) try: udpsock.bind(('',in_port)) except socket.error, err: print >> sys.stderr, "Couldn't bind a udp socket on port %d : %s" % (in_port, err) return (nat_type, ex_ip, ex_port, in_ip) try: # Get the internal IP address connectaddr = ('tribler.org',80) s = socket.socket() s.connect(connectaddr) in_ip = s.getsockname()[0] del s if DEBUG: print >> sys.stderr, "NATCheck: getting the internal ip address by connecting to tribler.org:80", in_ip except socket.error, err: print >> sys.stderr, "Couldn't connect to %s:%i" % (connectaddr[0], connectaddr[1]) return (nat_type, ex_ip, ex_port, in_ip) """ EXECUTE THE STUN ALGORITHM """ # Do Test I ret = Test1(udpsock, serveraddr1) if DEBUG: print >> sys.stderr, "NATCheck:", "Test I reported: " + str(ret) if ret["resp"] == False: nat_type[1] = "Blocked" else: ex_ip = ret["ex_ip"] ex_port = ret["ex_port"] 
if ret["ex_ip"] == in_ip: # No NAT: check for firewall if DEBUG: print >> sys.stderr, "NATCheck:", "No NAT" # Do Test II ret = Test2(udpsock, serveraddr1) if DEBUG: print >> sys.stderr, "NATCheck:", "Test II reported: " + str(ret) if ret["resp"] == True: nat_type[0] = 0 nat_type[1] = "Open Internet" else: if DEBUG: print >> sys.stderr, "NATCheck:", "There is a Firewall" # Do Test III ret = Test3(udpsock, serveraddr1) if DEBUG: print >> sys.stderr, "NATCheck:", "Test III reported: " + str(ret) if ret["resp"] == True: nat_type[0] = 2 nat_type[1] = "Restricted Cone Firewall" else: nat_type[0] = 3 nat_type[1] = "Port Restricted Cone Firewall" else: # There is a NAT if DEBUG: print >> sys.stderr, "NATCheck:", "There is a NAT" # Do Test II ret = Test2(udpsock, serveraddr1) if DEBUG: print >> sys.stderr, "NATCheck:", "Test II reported: " + str(ret) if ret["resp"] == True: nat_type[0] = 1 nat_type[1] = "Full Cone NAT" else: #Do Test I using a different echo server ret = Test1(udpsock, serveraddr2) if DEBUG: print >> sys.stderr, "NATCheck:", "Test I reported: " + str(ret) if ex_ip == ret["ex_ip"] and ex_port == ret["ex_port"]: # Public address is constant: consistent translation # Do Test III ret = Test3(udpsock, serveraddr1) if DEBUG: print >> sys.stderr, "NATCheck:", "Test III reported: " + str(ret) if ret["resp"] == True: nat_type[0] = 2 nat_type[1] = "Restricted Cone NAT" else: nat_type[0] = 3 nat_type[1] = "Port Restricted Cone NAT" else: nat_type[0] = -1 nat_type[1] = "Symmetric NAT" udpsock.close() return (nat_type, ex_ip, ex_port, in_ip)
sebadiaz/rethinkdb
refs/heads/next
test/rql_test/connections/http_support/jinja2/testsuite/__init__.py
404
# -*- coding: utf-8 -*-
"""
    jinja2.testsuite
    ~~~~~~~~~~~~~~~~

    All the unittests of Jinja2.  These tests can be executed by either
    running run-tests.py using multiple Python versions at the same time.

    :copyright: (c) 2010 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import unittest
from traceback import format_exception
from jinja2 import loaders
from jinja2._compat import PY2

# Directory holding this package; used to locate the on-disk test templates.
here = os.path.dirname(os.path.abspath(__file__))

# Shared loader fixtures used by the loader-related test modules.
dict_loader = loaders.DictLoader({
    'justdict.html': 'FOO'
})
package_loader = loaders.PackageLoader('jinja2.testsuite.res', 'templates')
filesystem_loader = loaders.FileSystemLoader(here + '/res/templates')
function_loader = loaders.FunctionLoader({'justfunction.html': 'FOO'}.get)
choice_loader = loaders.ChoiceLoader([dict_loader, package_loader])
prefix_loader = loaders.PrefixLoader({
    'a': filesystem_loader,
    'b': dict_loader
})


class JinjaTestCase(unittest.TestCase):
    """Base class for all Jinja2 test cases.

    Provides lowercase setup/teardown hooks and assertion aliases so test
    modules do not depend on unittest's camelCase API directly.
    """

    ### use only these methods for testing. If you need standard
    ### unittest method, wrap them!

    def setup(self):
        # Override in subclasses instead of setUp.
        pass

    def teardown(self):
        # Override in subclasses instead of tearDown.
        pass

    def setUp(self):
        self.setup()

    def tearDown(self):
        self.teardown()

    def assert_equal(self, a, b):
        return self.assertEqual(a, b)

    def assert_raises(self, *args, **kwargs):
        return self.assertRaises(*args, **kwargs)

    def assert_traceback_matches(self, callback, expected_tb):
        """Run *callback*, expect it to raise, and regex-match the traceback.

        *expected_tb* is treated as a regular expression searched against
        the fully formatted traceback text; failing to raise at all is
        also a test failure.
        """
        try:
            callback()
        except Exception as e:
            tb = format_exception(*sys.exc_info())
            if re.search(expected_tb.strip(), ''.join(tb)) is None:
                raise self.fail('Traceback did not match:\n\n%s\nexpected:\n%s' %
                                (''.join(tb), expected_tb))
        else:
            self.fail('Expected exception')


def find_all_tests(suite):
    """Yields all the tests and their names from a given suite."""
    # Suites can nest arbitrarily; walk them iteratively.  A leaf raises
    # TypeError on iteration, which identifies it as an actual test case.
    suites = [suite]
    while suites:
        s = suites.pop()
        try:
            suites.extend(s)
        except TypeError:
            yield s, '%s.%s.%s' % (
                s.__class__.__module__,
                s.__class__.__name__,
                s._testMethodName
            )


class BetterLoader(unittest.TestLoader):
    """A nicer loader that solves two problems.  First of all we are
    setting up tests from different sources and we're doing this
    programmatically which breaks the default loading logic so this is
    required anyways.  Secondly this loader has a nicer interpolation
    for test names than the default one so you can just do
    ``run-tests.py ViewTestCase`` and it will work.
    """

    def getRootSuite(self):
        # The whole testsuite, assembled by suite() below.
        return suite()

    def loadTestsFromName(self, name, module=None):
        # Resolve *name* against every test in the root suite, accepting
        # full dotted paths, suffixes, and bare class/method names.
        root = self.getRootSuite()
        if name == 'suite':
            return root

        all_tests = []
        for testcase, testname in find_all_tests(root):
            if testname == name or \
               testname.endswith('.' + name) or \
               ('.' + name + '.') in testname or \
               testname.startswith(name + '.'):
                all_tests.append(testcase)

        if not all_tests:
            raise LookupError('could not find test case for "%s"' % name)

        if len(all_tests) == 1:
            return all_tests[0]
        rv = unittest.TestSuite()
        for test in all_tests:
            rv.addTest(test)
        return rv


def suite():
    """Assemble and return the full Jinja2 test suite."""
    from jinja2.testsuite import ext, filters, tests, core_tags, \
        loader, inheritance, imports, lexnparse, security, api, \
        regression, debug, utils, bytecode_cache, doctests
    suite = unittest.TestSuite()
    suite.addTest(ext.suite())
    suite.addTest(filters.suite())
    suite.addTest(tests.suite())
    suite.addTest(core_tags.suite())
    suite.addTest(loader.suite())
    suite.addTest(inheritance.suite())
    suite.addTest(imports.suite())
    suite.addTest(lexnparse.suite())
    suite.addTest(security.suite())
    suite.addTest(api.suite())
    suite.addTest(regression.suite())
    suite.addTest(debug.suite())
    suite.addTest(utils.suite())
    suite.addTest(bytecode_cache.suite())

    # doctests will not run on python 3 currently.  Too many issues
    # with that, do not test that on that platform.
    if PY2:
        suite.addTest(doctests.suite())

    return suite


def main():
    """Runs the testsuite as command line application."""
    try:
        unittest.main(testLoader=BetterLoader(), defaultTest='suite')
    except Exception as e:
        # Top-level boundary: report rather than dump a traceback.
        print('Error: %s' % e)
ettm2012/MissionPlanner
refs/heads/master
Lib/uu.py
62
#! /usr/bin/env python # Copyright 1994 by Lance Ellinghouse # Cathedral City, California Republic, United States of America. # All Rights Reserved # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose and without fee is hereby granted, # provided that the above copyright notice appear in all copies and that # both that copyright notice and this permission notice appear in # supporting documentation, and that the name of Lance Ellinghouse # not be used in advertising or publicity pertaining to distribution # of the software without specific, written prior permission. # LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO # THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND # FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE # FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # # Modified by Jack Jansen, CWI, July 1995: # - Use binascii module to do the actual line-by-line conversion # between ascii and binary. This results in a 1000-fold speedup. The C # version is still 5 times faster, though. # - Arguments more compliant with python standard """Implementation of the UUencode and UUdecode functions. 
encode(in_file, out_file [,name, mode]) decode(in_file [, out_file, mode]) """ import binascii import os import sys __all__ = ["Error", "encode", "decode"] class Error(Exception): pass def encode(in_file, out_file, name=None, mode=None): """Uuencode file""" # # If in_file is a pathname open it and change defaults # opened_files = [] try: if in_file == '-': in_file = sys.stdin elif isinstance(in_file, basestring): if name is None: name = os.path.basename(in_file) if mode is None: try: mode = os.stat(in_file).st_mode except AttributeError: pass in_file = open(in_file, 'rb') opened_files.append(in_file) # # Open out_file if it is a pathname # if out_file == '-': out_file = sys.stdout elif isinstance(out_file, basestring): out_file = open(out_file, 'wb') opened_files.append(out_file) # # Set defaults for name and mode # if name is None: name = '-' if mode is None: mode = 0666 # # Write the data # out_file.write('begin %o %s\n' % ((mode&0777),name)) data = in_file.read(45) while len(data) > 0: out_file.write(binascii.b2a_uu(data)) data = in_file.read(45) out_file.write(' \nend\n') finally: for f in opened_files: f.close() def decode(in_file, out_file=None, mode=None, quiet=0): """Decode uuencoded file""" # # Open the input file, if needed. 
# opened_files = [] if in_file == '-': in_file = sys.stdin elif isinstance(in_file, basestring): in_file = open(in_file) opened_files.append(in_file) try: # # Read until a begin is encountered or we've exhausted the file # while True: hdr = in_file.readline() if not hdr: raise Error('No valid begin line found in input file') if not hdr.startswith('begin'): continue hdrfields = hdr.split(' ', 2) if len(hdrfields) == 3 and hdrfields[0] == 'begin': try: int(hdrfields[1], 8) break except ValueError: pass if out_file is None: out_file = hdrfields[2].rstrip() if os.path.exists(out_file): raise Error('Cannot overwrite existing file: %s' % out_file) if mode is None: mode = int(hdrfields[1], 8) # # Open the output file # if out_file == '-': out_file = sys.stdout elif isinstance(out_file, basestring): fp = open(out_file, 'wb') try: os.path.chmod(out_file, mode) except AttributeError: pass out_file = fp opened_files.append(out_file) # # Main decoding loop # s = in_file.readline() while s and s.strip() != 'end': try: data = binascii.a2b_uu(s) except binascii.Error, v: # Workaround for broken uuencoders by /Fredrik Lundh nbytes = (((ord(s[0])-32) & 63) * 4 + 5) // 3 data = binascii.a2b_uu(s[:nbytes]) if not quiet: sys.stderr.write("Warning: %s\n" % v) out_file.write(data) s = in_file.readline() if not s: raise Error('Truncated input file') finally: for f in opened_files: f.close() def test(): """uuencode/uudecode main program""" import optparse parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]') parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true') parser.add_option('-t', '--text', dest='text', help='data is text, encoded format unix-compatible text?', default=False, action='store_true') (options, args) = parser.parse_args() if len(args) > 2: parser.error('incorrect number of arguments') sys.exit(1) input = sys.stdin output = sys.stdout if len(args) > 0: input = args[0] if len(args) 
> 1: output = args[1] if options.decode: if options.text: if isinstance(output, basestring): output = open(output, 'w') else: print sys.argv[0], ': cannot do -t to stdout' sys.exit(1) decode(input, output) else: if options.text: if isinstance(input, basestring): input = open(input, 'r') else: print sys.argv[0], ': cannot do -t from stdin' sys.exit(1) encode(input, output) if __name__ == '__main__': test()
smrjan/seldon-server
refs/heads/master
python/seldon/pipeline/tests/test_util.py
2
import unittest
from .. import util as sutils
from .. import basic_transforms as bt
import pandas as pd
import json
import os.path
from sklearn.pipeline import Pipeline
# NOTE(review): joblib appears unused in this module; kept to avoid changing
# import side effects.
from sklearn.externals import joblib
import logging


class Test_PipelineWrapper(unittest.TestCase):
    """Tests for seldon.pipeline.util.PipelineWrapper: dataframe creation
    from JSON, dataframe round-tripping, and pipeline save/load."""

    def _create_test_json(self, fname, as_list=False):
        # Write two small JSON records to *fname*, either as a JSON array
        # (as_list=True) or as newline-delimited JSON objects.
        f = open(fname, "w")
        if as_list:
            f.write("[")
        j = {}
        j["a"] = 1
        j["b"] = 2
        f.write(json.dumps(j, sort_keys=True))
        if as_list:
            f.write(",\n")
        else:
            f.write("\n")
        j = {}
        j["a"] = 3
        j["b"] = 4
        f.write(json.dumps(j, sort_keys=True))
        if as_list:
            f.write("]\n")
        else:
            f.write("\n")
        f.close()

    def test_load_json_folders(self):
        # create_dataframe accepts a list of folders of line-JSON events.
        w = sutils.PipelineWrapper()
        data_folder = w.get_work_folder() + "/events"
        if not os.path.exists(data_folder):
            os.makedirs(data_folder)
        fname = data_folder + "/" + "test.json"
        self._create_test_json(fname)
        df = w.create_dataframe([data_folder])
        print df

    def test_load_json_file(self):
        # create_dataframe also accepts a single file holding a JSON array.
        w = sutils.PipelineWrapper()
        fname = w.get_work_folder() + "/" + "test.json"
        self._create_test_json(fname, as_list=True)
        df = w.create_dataframe(fname)
        print df

    def test_save_dataframe(self):
        # Round-trip a dataframe through CSV and check it is unchanged.
        w = sutils.PipelineWrapper()
        df = pd.DataFrame.from_dict([{"a": "a b", "b": "c d", "c": 3}, {"a": "word1", "b": "word2"}])
        fname = w.get_work_folder() + "/" + "saved.json"
        w.save_dataframe(df, fname, "csv", csv_index=False)
        df2 = w.create_dataframe(fname, df_format="csv")
        from pandas.util.testing import assert_frame_equal
        # Column order is not guaranteed by the round-trip; sort before diff.
        assert_frame_equal(df.sort(axis=1), df2.sort(axis=1), check_names=True)

    def test_save_pipeline(self):
        # A saved-and-reloaded sklearn pipeline must transform identically.
        w = sutils.PipelineWrapper()
        t = bt.IncludeFeaturesTransform(included=["a", "b"])
        transformers = [("include_features", t)]
        p = Pipeline(transformers)
        df = pd.DataFrame.from_dict([{"a": 1, "b": 2, "c": 3}])
        df2 = p.fit_transform(df)
        self.assertTrue(sorted(df2.columns) == sorted(["a", "b"]))
        dest_folder = w.get_work_folder() + "/dest_pipeline"
        w.save_pipeline(p, dest_folder)
        p2 = w.load_pipeline(dest_folder)
        df3 = p2.transform(df)
        self.assertTrue(sorted(df3.columns) == sorted(["a", "b"]))


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    unittest.main()
TaskEvolution/Task-Coach-Evolution
refs/heads/master
taskcoach/taskcoachlib/persistence/csv/__init__.py
1
''' Task Coach - Your friendly task manager Copyright (C) 2004-2013 Task Coach developers <developers@taskcoach.org> Task Coach is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Task Coach is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' # This is the csv package. This package contains classes to generate # comma separated values (cvs) format for exporting Task Coach data to csv # files. from generator import viewer2csv from writer import CSVWriter
dgoedkoop/QGIS
refs/heads/master
python/plugins/db_manager/db_manager_plugin.py
8
# -*- coding: utf-8 -*-

"""
/***************************************************************************
Name                 : DB Manager
Description          : Database manager plugin for QGIS
Date                 : May 23, 2011
copyright            : (C) 2011 by Giuseppe Sucameli
email                : brush.tyler@gmail.com

 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
"""
from builtins import object

from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QAction, QApplication
from qgis.PyQt.QtGui import QIcon
from qgis.core import QgsProject, QgsMapLayer, QgsDataSourceUri, QgsApplication

from . import resources_rc  # NOQA

# Providers whose layers can be refreshed/edited through DB Manager.
_SUPPORTED_PROVIDERS = ['postgres', 'spatialite', 'oracle']


class DBManagerPlugin(object):
    """QGIS plugin entry point for DB Manager.

    Registers the toolbar/menu actions, tracks DB-backed layers so the
    "Update SQL Layer" context action is offered for them, and manages a
    single shared DBManager dialog instance.
    """

    def __init__(self, iface):
        self.iface = iface
        self.dlg = None  # lazily created DBManager dialog (see run())

    def initGui(self):
        """Create the actions and hook them into the QGIS interface."""
        self.action = QAction(QgsApplication.getThemeIcon('dbmanager.svg'),
                              QApplication.translate("DBManagerPlugin", "DB Manager…"),
                              self.iface.mainWindow())
        self.action.setObjectName("dbManager")
        self.action.triggered.connect(self.run)
        # Add toolbar button and menu item; fall back to the generic plugin
        # APIs when the database-specific ones are unavailable.
        if hasattr(self.iface, 'addDatabaseToolBarIcon'):
            self.iface.addDatabaseToolBarIcon(self.action)
        else:
            self.iface.addToolBarIcon(self.action)
        if hasattr(self.iface, 'addPluginToDatabaseMenu'):
            # NOTE(review): translating None looks suspicious (a menu title
            # such as "DB Manager" is expected here) — confirm against the
            # addPluginToDatabaseMenu API before changing it.
            self.iface.addPluginToDatabaseMenu(QApplication.translate("DBManagerPlugin", None), self.action)
        else:
            self.iface.addPluginToMenu(QApplication.translate("DBManagerPlugin", "DB Manager"), self.action)

        self.layerAction = QAction(QgsApplication.getThemeIcon('dbmanager.svg'),
                                   QApplication.translate("DBManagerPlugin", "Update SQL Layer…"),
                                   self.iface.mainWindow())
        self.layerAction.setObjectName("dbManagerUpdateSqlLayer")
        self.layerAction.triggered.connect(self.onUpdateSqlLayer)
        self.iface.addCustomActionForLayerType(self.layerAction, "", QgsMapLayer.VectorLayer, False)
        # Attach the action to already-loaded layers and to future ones.
        for l in list(QgsProject.instance().mapLayers().values()):
            self.onLayerWasAdded(l)
        QgsProject.instance().layerWasAdded.connect(self.onLayerWasAdded)

    def unload(self):
        """Remove the plugin menu item and icon and close the dialog."""
        if hasattr(self.iface, 'databaseMenu'):
            self.iface.databaseMenu().removeAction(self.action)
        else:
            self.iface.removePluginMenu(QApplication.translate("DBManagerPlugin", "DB Manager"), self.action)
        if hasattr(self.iface, 'removeDatabaseToolBarIcon'):
            self.iface.removeDatabaseToolBarIcon(self.action)
        else:
            self.iface.removeToolBarIcon(self.action)

        self.iface.removeCustomActionForLayerType(self.layerAction)
        QgsProject.instance().layerWasAdded.disconnect(self.onLayerWasAdded)

        if self.dlg is not None:
            self.dlg.close()

    def onLayerWasAdded(self, aMapLayer):
        """Offer the update action on every DB-backed layer that appears."""
        # Be able to update every Db layer from Postgres, Spatialite and Oracle
        if hasattr(aMapLayer, 'dataProvider') and aMapLayer.dataProvider().name() in _SUPPORTED_PROVIDERS:
            self.iface.addCustomActionForLayer(self.layerAction, aMapLayer)
        # virtual has QUrl source
        # url = QUrl(QUrl.fromPercentEncoding(l.source()))
        # url.queryItemValue('query')
        # url.queryItemValue('uid')
        # url.queryItemValue('geometry')

    def onUpdateSqlLayer(self):
        """Open DB Manager's SQL window for the currently active layer."""
        l = self.iface.activeLayer()
        # BUGFIX: activeLayer() may return None (no layer selected) and
        # raster layers have no vector dataProvider — previously this
        # raised AttributeError; now it is a safe no-op.
        if l is None or not hasattr(l, 'dataProvider'):
            return
        if l.dataProvider().name() in _SUPPORTED_PROVIDERS:
            self.run()
            self.dlg.runSqlLayerWindow(l)
        # virtual has QUrl source
        # url = QUrl(QUrl.fromPercentEncoding(l.source()))
        # url.queryItemValue('query')
        # url.queryItemValue('uid')
        # url.queryItemValue('geometry')

    def run(self):
        """Show the DB Manager dialog, creating it on first use."""
        # keep opened only one instance
        if self.dlg is None:
            from .db_manager import DBManager
            self.dlg = DBManager(self.iface)
            self.dlg.destroyed.connect(self.onDestroyed)
        self.dlg.show()
        self.dlg.raise_()
        # Un-minimize and bring to front if it was already open.
        self.dlg.setWindowState(self.dlg.windowState() & ~Qt.WindowMinimized)
        self.dlg.activateWindow()

    def onDestroyed(self, obj):
        # Forget the dialog so the next run() recreates it.
        self.dlg = None
ribag/ganeti-experiments
refs/heads/topic-cli-quote
test/py/ganeti.hypervisor.hv_xen_unittest.py
2
#!/usr/bin/python # # Copyright (C) 2011, 2013 Google Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. """Script for testing ganeti.hypervisor.hv_xen""" import string # pylint: disable=W0402 import unittest import tempfile import shutil import random import os import mock from ganeti import constants from ganeti import objects from ganeti import pathutils from ganeti import hypervisor from ganeti import utils from ganeti import errors from ganeti import compat from ganeti.hypervisor import hv_base from ganeti.hypervisor import hv_xen import testutils # Map from hypervisor class to hypervisor name HVCLASS_TO_HVNAME = utils.InvertDict(hypervisor._HYPERVISOR_MAP) class TestConsole(unittest.TestCase): def test(self): hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL} for cls in [hv_xen.XenPvmHypervisor(), hv_xen.XenHvmHypervisor()]: instance = objects.Instance(name="xen.example.com", primary_node="node24828-uuid") node = objects.Node(name="node24828", uuid="node24828-uuid", ndparams={}) group = objects.NodeGroup(name="group52341", ndparams={}) cons = cls.GetInstanceConsole(instance, node, group, hvparams, {}) self.assertEqual(cons.Validate(), None) self.assertEqual(cons.kind, constants.CONS_SSH) self.assertEqual(cons.host, node.name) self.assertEqual(cons.command[-1], instance.name) class 
TestCreateConfigCpus(unittest.TestCase): def testEmpty(self): for cpu_mask in [None, ""]: self.assertEqual(hv_xen._CreateConfigCpus(cpu_mask), "cpus = [ ]") def testAll(self): self.assertEqual(hv_xen._CreateConfigCpus(constants.CPU_PINNING_ALL), None) def testOne(self): self.assertEqual(hv_xen._CreateConfigCpus("9"), "cpu = \"9\"") def testMultiple(self): self.assertEqual(hv_xen._CreateConfigCpus("0-2,4,5-5:3:all"), ("cpus = [ \"0,1,2,4,5\", \"3\", \"%s\" ]" % constants.CPU_PINNING_ALL_XEN)) class TestGetCommand(testutils.GanetiTestCase): def testCommandExplicit(self): """Test the case when the command is given as class parameter explicitly. """ expected_cmd = "xl" hv = hv_xen.XenHypervisor(_cmd=constants.XEN_CMD_XL) self.assertEqual(hv._GetCommand(None), expected_cmd) def testCommandInvalid(self): """Test the case an invalid command is given as class parameter explicitly. """ hv = hv_xen.XenHypervisor(_cmd="invalidcommand") self.assertRaises(errors.ProgrammerError, hv._GetCommand, None) def testCommandHvparams(self): expected_cmd = "xl" test_hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL} hv = hv_xen.XenHypervisor() self.assertEqual(hv._GetCommand(test_hvparams), expected_cmd) def testCommandHvparamsInvalid(self): test_hvparams = {} hv = hv_xen.XenHypervisor() self.assertRaises(errors.HypervisorError, hv._GetCommand, test_hvparams) def testCommandHvparamsCmdInvalid(self): test_hvparams = {constants.HV_XEN_CMD: "invalidcommand"} hv = hv_xen.XenHypervisor() self.assertRaises(errors.ProgrammerError, hv._GetCommand, test_hvparams) class TestParseInstanceList(testutils.GanetiTestCase): def test(self): data = testutils.ReadTestData("xen-xm-list-4.0.1-dom0-only.txt") # Exclude node self.assertEqual(hv_xen._ParseInstanceList(data.splitlines(), False), []) # Include node result = hv_xen._ParseInstanceList(data.splitlines(), True) self.assertEqual(len(result), 1) self.assertEqual(len(result[0]), 6) # Name self.assertEqual(result[0][0], hv_xen._DOM0_NAME) # ID 
self.assertEqual(result[0][1], 0) # Memory self.assertEqual(result[0][2], 1023) # VCPUs self.assertEqual(result[0][3], 1) # State self.assertEqual(result[0][4], hv_base.HvInstanceState.RUNNING) # Time self.assertAlmostEqual(result[0][5], 121152.6) def testWrongLineFormat(self): tests = [ ["three fields only"], ["name InvalidID 128 1 r----- 12345"], ] for lines in tests: try: hv_xen._ParseInstanceList(["Header would be here"] + lines, False) except errors.HypervisorError, err: self.assertTrue("Can't parse instance list" in str(err)) else: self.fail("Exception was not raised") class TestGetInstanceList(testutils.GanetiTestCase): def _Fail(self): return utils.RunResult(constants.EXIT_FAILURE, None, "stdout", "stderr", None, NotImplemented, NotImplemented) def testTimeout(self): fn = testutils.CallCounter(self._Fail) try: hv_xen._GetRunningInstanceList(fn, False, _timeout=0.1) except errors.HypervisorError, err: self.assertTrue("timeout exceeded" in str(err)) else: self.fail("Exception was not raised") self.assertTrue(fn.Count() < 10, msg="'xm list' was called too many times") def _Success(self, stdout): return utils.RunResult(constants.EXIT_SUCCESS, None, stdout, "", None, NotImplemented, NotImplemented) def testSuccess(self): data = testutils.ReadTestData("xen-xm-list-4.0.1-four-instances.txt") fn = testutils.CallCounter(compat.partial(self._Success, data)) result = hv_xen._GetRunningInstanceList(fn, True, _timeout=0.1) self.assertEqual(len(result), 4) self.assertEqual(map(compat.fst, result), [ "Domain-0", "server01.example.com", "web3106215069.example.com", "testinstance.example.com", ]) self.assertEqual(fn.Count(), 1) class TestParseNodeInfo(testutils.GanetiTestCase): def testEmpty(self): self.assertEqual(hv_xen._ParseNodeInfo(""), {}) def testUnknownInput(self): data = "\n".join([ "foo bar", "something else goes", "here", ]) self.assertEqual(hv_xen._ParseNodeInfo(data), {}) def testBasicInfo(self): data = testutils.ReadTestData("xen-xm-info-4.0.1.txt") result = 
hv_xen._ParseNodeInfo(data) self.assertEqual(result, { "cpu_nodes": 1, "cpu_sockets": 2, "cpu_total": 4, "hv_version": (4, 0), "memory_free": 8004, "memory_total": 16378, }) class TestMergeInstanceInfo(testutils.GanetiTestCase): def testEmpty(self): self.assertEqual(hv_xen._MergeInstanceInfo({}, []), {}) def _FakeXmList(self, include_node): return [ (hv_xen._DOM0_NAME, NotImplemented, 4096, 7, NotImplemented, NotImplemented), ("inst1.example.com", NotImplemented, 2048, 4, NotImplemented, NotImplemented), ] def testMissingNodeInfo(self): instance_list = self._FakeXmList(True) result = hv_xen._MergeInstanceInfo({}, instance_list) self.assertEqual(result, { "memory_dom0": 4096, "cpu_dom0": 7, }) def testWithNodeInfo(self): info = testutils.ReadTestData("xen-xm-info-4.0.1.txt") instance_list = self._FakeXmList(True) result = hv_xen._GetNodeInfo(info, instance_list) self.assertEqual(result, { "cpu_nodes": 1, "cpu_sockets": 2, "cpu_total": 4, "cpu_dom0": 7, "hv_version": (4, 0), "memory_dom0": 4096, "memory_free": 8004, "memory_hv": 2230, "memory_total": 16378, }) class TestGetConfigFileDiskData(unittest.TestCase): def testLetterCount(self): self.assertEqual(len(hv_xen._DISK_LETTERS), 26) def testNoDisks(self): self.assertEqual(hv_xen._GetConfigFileDiskData([], "hd"), []) def testManyDisks(self): for offset in [0, 1, 10]: disks = [(objects.Disk(dev_type=constants.DT_PLAIN), "/tmp/disk/%s" % idx, NotImplemented) for idx in range(len(hv_xen._DISK_LETTERS) + offset)] if offset == 0: result = hv_xen._GetConfigFileDiskData(disks, "hd") self.assertEqual(result, [ "'phy:/tmp/disk/%s,hd%s,r'" % (idx, string.ascii_lowercase[idx]) for idx in range(len(hv_xen._DISK_LETTERS) + offset) ]) else: try: hv_xen._GetConfigFileDiskData(disks, "hd") except errors.HypervisorError, err: self.assertEqual(str(err), "Too many disks") else: self.fail("Exception was not raised") def testTwoLvDisksWithMode(self): disks = [ (objects.Disk(dev_type=constants.DT_PLAIN, mode=constants.DISK_RDWR), 
"/tmp/diskFirst", NotImplemented), (objects.Disk(dev_type=constants.DT_PLAIN, mode=constants.DISK_RDONLY), "/tmp/diskLast", NotImplemented), ] result = hv_xen._GetConfigFileDiskData(disks, "hd") self.assertEqual(result, [ "'phy:/tmp/diskFirst,hda,w'", "'phy:/tmp/diskLast,hdb,r'", ]) def testFileDisks(self): disks = [ (objects.Disk(dev_type=constants.DT_FILE, mode=constants.DISK_RDWR, logical_id=[constants.FD_LOOP]), "/tmp/diskFirst", NotImplemented), (objects.Disk(dev_type=constants.DT_FILE, mode=constants.DISK_RDONLY, logical_id=[constants.FD_BLKTAP]), "/tmp/diskTwo", NotImplemented), (objects.Disk(dev_type=constants.DT_FILE, mode=constants.DISK_RDWR, logical_id=[constants.FD_LOOP]), "/tmp/diskThree", NotImplemented), (objects.Disk(dev_type=constants.DT_FILE, mode=constants.DISK_RDONLY, logical_id=[constants.FD_BLKTAP2]), "/tmp/diskFour", NotImplemented), (objects.Disk(dev_type=constants.DT_FILE, mode=constants.DISK_RDWR, logical_id=[constants.FD_BLKTAP]), "/tmp/diskLast", NotImplemented), ] result = hv_xen._GetConfigFileDiskData(disks, "sd") self.assertEqual(result, [ "'file:/tmp/diskFirst,sda,w'", "'tap:aio:/tmp/diskTwo,sdb,r'", "'file:/tmp/diskThree,sdc,w'", "'tap2:tapdisk:aio:/tmp/diskFour,sdd,r'", "'tap:aio:/tmp/diskLast,sde,w'", ]) def testInvalidFileDisk(self): disks = [ (objects.Disk(dev_type=constants.DT_FILE, mode=constants.DISK_RDWR, logical_id=["#unknown#"]), "/tmp/diskinvalid", NotImplemented), ] self.assertRaises(KeyError, hv_xen._GetConfigFileDiskData, disks, "sd") class TestXenHypervisorRunXen(unittest.TestCase): XEN_SUB_CMD = "help" def testCommandUnknown(self): cmd = "#unknown command#" self.assertFalse(cmd in constants.KNOWN_XEN_COMMANDS) hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=NotImplemented, _cmd=cmd) self.assertRaises(errors.ProgrammerError, hv._RunXen, [], None) def testCommandNoHvparams(self): hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=NotImplemented) hvparams = None 
self.assertRaises(errors.HypervisorError, hv._RunXen, [self.XEN_SUB_CMD], hvparams) def testCommandFromHvparams(self): expected_xen_cmd = "xl" hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL} mock_run_cmd = mock.Mock() hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=mock_run_cmd) hv._RunXen([self.XEN_SUB_CMD], hvparams=hvparams) mock_run_cmd.assert_called_with([expected_xen_cmd, self.XEN_SUB_CMD]) class TestXenHypervisorGetInstanceList(unittest.TestCase): RESULT_OK = utils.RunResult(0, None, "", "", "", None, None) XEN_LIST = "list" def testNoHvparams(self): expected_xen_cmd = "xm" mock_run_cmd = mock.Mock(return_value=self.RESULT_OK) hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=mock_run_cmd) self.assertRaises(errors.HypervisorError, hv._GetInstanceList, True, None) def testFromHvparams(self): expected_xen_cmd = "xl" hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL} mock_run_cmd = mock.Mock(return_value=self.RESULT_OK) hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=mock_run_cmd) hv._GetInstanceList(True, hvparams) mock_run_cmd.assert_called_with([expected_xen_cmd, self.XEN_LIST]) class TestXenHypervisorListInstances(unittest.TestCase): RESULT_OK = utils.RunResult(0, None, "", "", "", None, None) XEN_LIST = "list" def testNoHvparams(self): expected_xen_cmd = "xm" mock_run_cmd = mock.Mock(return_value=self.RESULT_OK) hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=mock_run_cmd) self.assertRaises(errors.HypervisorError, hv.ListInstances) def testHvparamsXl(self): expected_xen_cmd = "xl" hvparams = {constants.HV_XEN_CMD: constants.XEN_CMD_XL} mock_run_cmd = mock.Mock(return_value=self.RESULT_OK) hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=mock_run_cmd) hv.ListInstances(hvparams=hvparams) mock_run_cmd.assert_called_with([expected_xen_cmd, self.XEN_LIST]) class TestXenHypervisorCheckToolstack(unittest.TestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp() self.cfg_name = 
"xen_config" self.cfg_path = utils.PathJoin(self.tmpdir, self.cfg_name) self.hv = hv_xen.XenHypervisor() def tearDown(self): shutil.rmtree(self.tmpdir) def testBinaryNotFound(self): RESULT_FAILED = utils.RunResult(1, None, "", "", "", None, None) mock_run_cmd = mock.Mock(return_value=RESULT_FAILED) hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=mock_run_cmd) result = hv._CheckToolstackBinary("xl") self.assertFalse(result) def testCheckToolstackXlConfigured(self): RESULT_OK = utils.RunResult(0, None, "", "", "", None, None) mock_run_cmd = mock.Mock(return_value=RESULT_OK) hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=mock_run_cmd) result = hv._CheckToolstackXlConfigured() self.assertTrue(result) def testCheckToolstackXlNotConfigured(self): RESULT_FAILED = utils.RunResult( 1, None, "", "ERROR: A different toolstack (xm) has been selected!", "", None, None) mock_run_cmd = mock.Mock(return_value=RESULT_FAILED) hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=mock_run_cmd) result = hv._CheckToolstackXlConfigured() self.assertFalse(result) def testCheckToolstackXlFails(self): RESULT_FAILED = utils.RunResult( 1, None, "", "ERROR: The pink bunny hid the binary.", "", None, None) mock_run_cmd = mock.Mock(return_value=RESULT_FAILED) hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=mock_run_cmd) self.assertRaises(errors.HypervisorError, hv._CheckToolstackXlConfigured) class TestXenHypervisorWriteConfigFile(unittest.TestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tmpdir) def testWriteError(self): cfgdir = utils.PathJoin(self.tmpdir, "foobar") hv = hv_xen.XenHypervisor(_cfgdir=cfgdir, _run_cmd_fn=NotImplemented, _cmd=NotImplemented) self.assertFalse(os.path.exists(cfgdir)) try: hv._WriteConfigFile("name", "data") except errors.HypervisorError, err: self.assertTrue(str(err).startswith("Cannot write Xen instance")) else: self.fail("Exception was not raised") class 
TestXenHypervisorVerify(unittest.TestCase): def setUp(self): output = testutils.ReadTestData("xen-xm-info-4.0.1.txt") self._result_ok = utils.RunResult(0, None, output, "", "", None, None) def testVerify(self): hvparams = {constants.HV_XEN_CMD : constants.XEN_CMD_XL} mock_run_cmd = mock.Mock(return_value=self._result_ok) hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=mock_run_cmd) hv._CheckToolstack = mock.Mock(return_value=True) result = hv.Verify(hvparams) self.assertTrue(result is None) def testVerifyToolstackNotOk(self): hvparams = {constants.HV_XEN_CMD : constants.XEN_CMD_XL} mock_run_cmd = mock.Mock(return_value=self._result_ok) hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=mock_run_cmd) hv._CheckToolstack = mock.Mock() hv._CheckToolstack.side_effect = errors.HypervisorError("foo") result = hv.Verify(hvparams) self.assertTrue(result is not None) def testVerifyFailing(self): result_failed = utils.RunResult(1, None, "", "", "", None, None) mock_run_cmd = mock.Mock(return_value=result_failed) hv = hv_xen.XenHypervisor(_cfgdir=NotImplemented, _run_cmd_fn=mock_run_cmd) hv._CheckToolstack = mock.Mock(return_value=True) result = hv.Verify() self.assertTrue(result is not None) class _TestXenHypervisor(object): TARGET = NotImplemented CMD = NotImplemented HVNAME = NotImplemented VALID_HVPARAMS = {constants.HV_XEN_CMD: constants.XEN_CMD_XL} def setUp(self): super(_TestXenHypervisor, self).setUp() self.tmpdir = tempfile.mkdtemp() self.vncpw = "".join(random.sample(string.ascii_letters, 10)) self.vncpw_path = utils.PathJoin(self.tmpdir, "vncpw") utils.WriteFile(self.vncpw_path, data=self.vncpw) def tearDown(self): super(_TestXenHypervisor, self).tearDown() shutil.rmtree(self.tmpdir) def _GetHv(self, run_cmd=NotImplemented): return self.TARGET(_cfgdir=self.tmpdir, _run_cmd_fn=run_cmd, _cmd=self.CMD) def _SuccessCommand(self, stdout, cmd): self.assertEqual(cmd[0], self.CMD) return utils.RunResult(constants.EXIT_SUCCESS, None, stdout, "", 
None, NotImplemented, NotImplemented) def _FailingCommand(self, cmd): self.assertEqual(cmd[0], self.CMD) return utils.RunResult(constants.EXIT_FAILURE, None, "", "This command failed", None, NotImplemented, NotImplemented) def _FakeTcpPing(self, expected, result, target, port, **kwargs): self.assertEqual((target, port), expected) return result def testReadingNonExistentConfigFile(self): hv = self._GetHv() try: hv._ReadConfigFile("inst15780.example.com") except errors.HypervisorError, err: self.assertTrue(str(err).startswith("Failed to load Xen config file:")) else: self.fail("Exception was not raised") def testRemovingAutoConfigFile(self): name = "inst8206.example.com" cfgfile = utils.PathJoin(self.tmpdir, name) autodir = utils.PathJoin(self.tmpdir, "auto") autocfgfile = utils.PathJoin(autodir, name) os.mkdir(autodir) utils.WriteFile(autocfgfile, data="") hv = self._GetHv() self.assertTrue(os.path.isfile(autocfgfile)) hv._WriteConfigFile(name, "content") self.assertFalse(os.path.exists(autocfgfile)) self.assertEqual(utils.ReadFile(cfgfile), "content") def _XenList(self, cmd): self.assertEqual(cmd, [self.CMD, "list"]) # TODO: Use actual data from "xl" command output = testutils.ReadTestData("xen-xm-list-4.0.1-four-instances.txt") return self._SuccessCommand(output, cmd) def testGetInstanceInfo(self): hv = self._GetHv(run_cmd=self._XenList) (name, instid, memory, vcpus, state, runtime) = \ hv.GetInstanceInfo("server01.example.com") self.assertEqual(name, "server01.example.com") self.assertEqual(instid, 1) self.assertEqual(memory, 1024) self.assertEqual(vcpus, 1) self.assertEqual(state, hv_base.HvInstanceState.RUNNING) self.assertAlmostEqual(runtime, 167643.2) def testGetInstanceInfoDom0(self): hv = self._GetHv(run_cmd=self._XenList) # TODO: Not sure if this is actually used anywhere (can't find it), but the # code supports querying for Dom0 (name, instid, memory, vcpus, state, runtime) = \ hv.GetInstanceInfo(hv_xen._DOM0_NAME) self.assertEqual(name, "Domain-0") 
self.assertEqual(instid, 0) self.assertEqual(memory, 1023) self.assertEqual(vcpus, 1) self.assertEqual(state, hv_base.HvInstanceState.RUNNING) self.assertAlmostEqual(runtime, 154706.1) def testGetInstanceInfoUnknown(self): hv = self._GetHv(run_cmd=self._XenList) result = hv.GetInstanceInfo("unknown.example.com") self.assertTrue(result is None) def testGetAllInstancesInfo(self): hv = self._GetHv(run_cmd=self._XenList) result = hv.GetAllInstancesInfo() self.assertEqual(map(compat.fst, result), [ "server01.example.com", "web3106215069.example.com", "testinstance.example.com", ]) def testListInstances(self): hv = self._GetHv(run_cmd=self._XenList) self.assertEqual(hv.ListInstances(), [ "server01.example.com", "web3106215069.example.com", "testinstance.example.com", ]) def _StartInstanceCommand(self, inst, paused, failcreate, cmd): if cmd == [self.CMD, "info"]: output = testutils.ReadTestData("xen-xm-info-4.0.1.txt") elif cmd == [self.CMD, "list"]: output = testutils.ReadTestData("xen-xm-list-4.0.1-dom0-only.txt") elif cmd[:2] == [self.CMD, "create"]: args = cmd[2:] cfgfile = utils.PathJoin(self.tmpdir, inst.name) if paused: self.assertEqual(args, ["-p", cfgfile]) else: self.assertEqual(args, [cfgfile]) if failcreate: return self._FailingCommand(cmd) output = "" else: self.fail("Unhandled command: %s" % (cmd, )) return self._SuccessCommand(output, cmd) def _MakeInstance(self): # Copy default parameters bep = objects.FillDict(constants.BEC_DEFAULTS, {}) hvp = objects.FillDict(constants.HVC_DEFAULTS[self.HVNAME], {}) # Override default VNC password file path if constants.HV_VNC_PASSWORD_FILE in hvp: hvp[constants.HV_VNC_PASSWORD_FILE] = self.vncpw_path disks = [ (objects.Disk(dev_type=constants.DT_PLAIN, mode=constants.DISK_RDWR), utils.PathJoin(self.tmpdir, "disk0"), NotImplemented), (objects.Disk(dev_type=constants.DT_PLAIN, mode=constants.DISK_RDONLY), utils.PathJoin(self.tmpdir, "disk1"), NotImplemented), ] inst = objects.Instance(name="server01.example.com", 
hvparams=hvp, beparams=bep, osparams={}, nics=[], os="deb1", disks=map(compat.fst, disks)) inst.UpgradeConfig() return (inst, disks) def testStartInstance(self): (inst, disks) = self._MakeInstance() pathutils.LOG_XEN_DIR = self.tmpdir for failcreate in [False, True]: for paused in [False, True]: run_cmd = compat.partial(self._StartInstanceCommand, inst, paused, failcreate) hv = self._GetHv(run_cmd=run_cmd) # Ensure instance is not listed self.assertTrue(inst.name not in hv.ListInstances()) # Remove configuration cfgfile = utils.PathJoin(self.tmpdir, inst.name) utils.RemoveFile(cfgfile) if failcreate: self.assertRaises(errors.HypervisorError, hv.StartInstance, inst, disks, paused) # Check whether a stale config file is left behind self.assertFalse(os.path.exists(cfgfile)) else: hv.StartInstance(inst, disks, paused) # Check if configuration was updated lines = utils.ReadFile(cfgfile).splitlines() if constants.HV_VNC_PASSWORD_FILE in inst.hvparams: self.assertTrue(("vncpasswd = '%s'" % self.vncpw) in lines) else: extra = inst.hvparams[constants.HV_KERNEL_ARGS] self.assertTrue(("extra = '%s'" % extra) in lines) def _StopInstanceCommand(self, instance_name, force, fail, full_cmd): # Remove the timeout (and its number of seconds) if it's there if full_cmd[:1][0] == "timeout": cmd = full_cmd[2:] else: cmd = full_cmd # Test the actual command if (cmd == [self.CMD, "list"]): output = "Name ID Mem VCPUs State Time(s)\n" \ "Domain-0 0 1023 1 r----- 142691.0\n" \ "%s 417 128 1 r----- 3.2\n" % instance_name elif cmd[:2] == [self.CMD, "destroy"]: self.assertEqual(cmd[2:], [instance_name]) output = "" elif not force and cmd[:3] == [self.CMD, "shutdown", "-w"]: self.assertEqual(cmd[3:], [instance_name]) output = "" else: self.fail("Unhandled command: %s" % (cmd, )) if fail: # Simulate a failing command return self._FailingCommand(cmd) else: return self._SuccessCommand(output, cmd) def testStopInstance(self): name = "inst4284.example.com" cfgfile = utils.PathJoin(self.tmpdir, name) 
cfgdata = "config file content\n" for force in [False, True]: for fail in [False, True]: utils.WriteFile(cfgfile, data=cfgdata) run_cmd = compat.partial(self._StopInstanceCommand, name, force, fail) hv = self._GetHv(run_cmd=run_cmd) self.assertTrue(os.path.isfile(cfgfile)) if fail: try: hv._StopInstance(name, force, None, constants.DEFAULT_SHUTDOWN_TIMEOUT) except errors.HypervisorError, err: self.assertTrue(str(err).startswith("listing instances failed"), msg=str(err)) else: self.fail("Exception was not raised") self.assertEqual(utils.ReadFile(cfgfile), cfgdata, msg=("Configuration was removed when stopping" " instance failed")) else: hv._StopInstance(name, force, None, constants.DEFAULT_SHUTDOWN_TIMEOUT) self.assertFalse(os.path.exists(cfgfile)) def _MigrateNonRunningInstCmd(self, cmd): if cmd == [self.CMD, "list"]: output = testutils.ReadTestData("xen-xm-list-4.0.1-dom0-only.txt") else: self.fail("Unhandled command: %s" % (cmd, )) return self._SuccessCommand(output, cmd) def testMigrateInstanceNotRunning(self): name = "nonexistinginstance.example.com" target = constants.IP4_ADDRESS_LOCALHOST port = 14618 hv = self._GetHv(run_cmd=self._MigrateNonRunningInstCmd) for live in [False, True]: try: hv._MigrateInstance(NotImplemented, name, target, port, live, self.VALID_HVPARAMS, _ping_fn=NotImplemented) except errors.HypervisorError, err: self.assertEqual(str(err), "Instance not running, cannot migrate") else: self.fail("Exception was not raised") def _MigrateInstTargetUnreachCmd(self, cmd): if cmd == [self.CMD, "list"]: output = testutils.ReadTestData("xen-xm-list-4.0.1-four-instances.txt") else: self.fail("Unhandled command: %s" % (cmd, )) return self._SuccessCommand(output, cmd) def testMigrateTargetUnreachable(self): name = "server01.example.com" target = constants.IP4_ADDRESS_LOCALHOST port = 28349 hv = self._GetHv(run_cmd=self._MigrateInstTargetUnreachCmd) hvparams = {constants.HV_XEN_CMD: self.CMD} for live in [False, True]: if self.CMD == constants.XEN_CMD_XL: 
# TODO: Detect unreachable targets pass else: try: hv._MigrateInstance(NotImplemented, name, target, port, live, hvparams, _ping_fn=compat.partial(self._FakeTcpPing, (target, port), False)) except errors.HypervisorError, err: wanted = "Remote host %s not" % target self.assertTrue(str(err).startswith(wanted)) else: self.fail("Exception was not raised") def _MigrateInstanceCmd(self, cluster_name, instance_name, target, port, live, fail, cmd): if cmd == [self.CMD, "list"]: output = testutils.ReadTestData("xen-xm-list-4.0.1-four-instances.txt") elif cmd[:2] == [self.CMD, "migrate"]: if self.CMD == constants.XEN_CMD_XM: args = ["-p", str(port)] if live: args.append("-l") elif self.CMD == constants.XEN_CMD_XL: args = [ "-s", constants.XL_SSH_CMD % cluster_name, "-C", utils.PathJoin(self.tmpdir, instance_name), ] else: self.fail("Unknown Xen command '%s'" % self.CMD) args.extend([instance_name, target]) self.assertEqual(cmd[2:], args) if fail: return self._FailingCommand(cmd) output = "" else: self.fail("Unhandled command: %s" % (cmd, )) return self._SuccessCommand(output, cmd) def testMigrateInstance(self): clustername = "cluster.example.com" instname = "server01.example.com" target = constants.IP4_ADDRESS_LOCALHOST port = 22364 hvparams = {constants.HV_XEN_CMD: self.CMD} for live in [False, True]: for fail in [False, True]: ping_fn = \ testutils.CallCounter(compat.partial(self._FakeTcpPing, (target, port), True)) run_cmd = \ compat.partial(self._MigrateInstanceCmd, clustername, instname, target, port, live, fail) hv = self._GetHv(run_cmd=run_cmd) if fail: try: hv._MigrateInstance(clustername, instname, target, port, live, hvparams, _ping_fn=ping_fn) except errors.HypervisorError, err: self.assertTrue(str(err).startswith("Failed to migrate instance")) else: self.fail("Exception was not raised") else: hv._MigrateInstance(clustername, instname, target, port, live, hvparams, _ping_fn=ping_fn) if self.CMD == constants.XEN_CMD_XM: expected_pings = 1 else: expected_pings = 0 
self.assertEqual(ping_fn.Count(), expected_pings) def _GetNodeInfoCmd(self, fail, cmd): if cmd == [self.CMD, "info"]: if fail: return self._FailingCommand(cmd) else: output = testutils.ReadTestData("xen-xm-info-4.0.1.txt") elif cmd == [self.CMD, "list"]: if fail: self.fail("'xm list' shouldn't be called when 'xm info' failed") else: output = testutils.ReadTestData("xen-xm-list-4.0.1-four-instances.txt") else: self.fail("Unhandled command: %s" % (cmd, )) return self._SuccessCommand(output, cmd) def testGetNodeInfo(self): run_cmd = compat.partial(self._GetNodeInfoCmd, False) hv = self._GetHv(run_cmd=run_cmd) result = hv.GetNodeInfo() self.assertEqual(result["hv_version"], (4, 0)) self.assertEqual(result["memory_free"], 8004) def testGetNodeInfoFailing(self): run_cmd = compat.partial(self._GetNodeInfoCmd, True) hv = self._GetHv(run_cmd=run_cmd) self.assertTrue(hv.GetNodeInfo() is None) def _MakeTestClass(cls, cmd): """Makes a class for testing. The returned class has structure as shown in the following pseudo code: class Test{cls.__name__}{cmd}(_TestXenHypervisor, unittest.TestCase): TARGET = {cls} CMD = {cmd} HVNAME = {Hypervisor name retrieved using class} @type cls: class @param cls: Hypervisor class to be tested @type cmd: string @param cmd: Hypervisor command @rtype: tuple @return: Class name and class object (not instance) """ name = "Test%sCmd%s" % (cls.__name__, cmd.title()) bases = (_TestXenHypervisor, unittest.TestCase) hvname = HVCLASS_TO_HVNAME[cls] return (name, type(name, bases, dict(TARGET=cls, CMD=cmd, HVNAME=hvname))) # Create test classes programmatically instead of manually to reduce the risk # of forgetting some combinations for cls in [hv_xen.XenPvmHypervisor, hv_xen.XenHvmHypervisor]: for cmd in constants.KNOWN_XEN_COMMANDS: (name, testcls) = _MakeTestClass(cls, cmd) assert name not in locals() locals()[name] = testcls if __name__ == "__main__": testutils.GanetiTestProgram()
drnextgis/QGIS
refs/heads/master
python/ext-libs/pygments/lexers/modula2.py
23
# -*- coding: utf-8 -*- """ pygments.lexers.modula2 ~~~~~~~~~~~~~~~~~~~~~~~ Multi-Dialect Lexer for Modula-2. :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include from pygments.util import get_bool_opt, get_list_opt from pygments.token import Text, Comment, Operator, Keyword, Name, \ String, Number, Punctuation, Error __all__ = ['Modula2Lexer'] # Multi-Dialect Modula-2 Lexer class Modula2Lexer(RegexLexer): """ For `Modula-2 <http://www.modula2.org/>`_ source code. The Modula-2 lexer supports several dialects. By default, it operates in fallback mode, recognising the *combined* literals, punctuation symbols and operators of all supported dialects, and the *combined* reserved words and builtins of PIM Modula-2, ISO Modula-2 and Modula-2 R10, while not differentiating between library defined identifiers. To select a specific dialect, a dialect option may be passed or a dialect tag may be embedded into a source file. Dialect Options: `m2pim` Select PIM Modula-2 dialect. `m2iso` Select ISO Modula-2 dialect. `m2r10` Select Modula-2 R10 dialect. `objm2` Select Objective Modula-2 dialect. The PIM and ISO dialect options may be qualified with a language extension. Language Extensions: `+aglet` Select Aglet Modula-2 extensions, available with m2iso. `+gm2` Select GNU Modula-2 extensions, available with m2pim. `+p1` Select p1 Modula-2 extensions, available with m2iso. `+xds` Select XDS Modula-2 extensions, available with m2iso. Passing a Dialect Option via Unix Commandline Interface Dialect options may be passed to the lexer using the `dialect` key. Only one such option should be passed. If multiple dialect options are passed, the first valid option is used, any subsequent options are ignored. 
Examples: `$ pygmentize -O full,dialect=m2iso -f html -o /path/to/output /path/to/input` Use ISO dialect to render input to HTML output `$ pygmentize -O full,dialect=m2iso+p1 -f rtf -o /path/to/output /path/to/input` Use ISO dialect with p1 extensions to render input to RTF output Embedding a Dialect Option within a source file A dialect option may be embedded in a source file in form of a dialect tag, a specially formatted comment that specifies a dialect option. Dialect Tag EBNF:: dialectTag : OpeningCommentDelim Prefix dialectOption ClosingCommentDelim ; dialectOption : 'm2pim' | 'm2iso' | 'm2r10' | 'objm2' | 'm2iso+aglet' | 'm2pim+gm2' | 'm2iso+p1' | 'm2iso+xds' ; Prefix : '!' ; OpeningCommentDelim : '(*' ; ClosingCommentDelim : '*)' ; No whitespace is permitted between the tokens of a dialect tag. In the event that a source file contains multiple dialect tags, the first tag that contains a valid dialect option will be used and any subsequent dialect tags will be ignored. Ideally, a dialect tag should be placed at the beginning of a source file. An embedded dialect tag overrides a dialect option set via command line. Examples: ``(*!m2r10*) DEFINITION MODULE Foobar; ...`` Use Modula2 R10 dialect to render this source file. ``(*!m2pim+gm2*) DEFINITION MODULE Bazbam; ...`` Use PIM dialect with GNU extensions to render this source file. Algol Publication Mode: In Algol publication mode, source text is rendered for publication of algorithms in scientific papers and academic texts, following the format of the Revised Algol-60 Language Report. It is activated by passing one of two corresponding styles as an option: `algol` render reserved words lowercase underline boldface and builtins lowercase boldface italic `algol_nu` render reserved words lowercase boldface (no underlining) and builtins lowercase boldface italic The lexer automatically performs the required lowercase conversion when this mode is activated. 
Example: ``$ pygmentize -O full,style=algol -f latex -o /path/to/output /path/to/input`` Render input file in Algol publication mode to LaTeX output. Rendering Mode of First Class ADT Identifiers: The rendering of standard library first class ADT identifiers is controlled by option flag "treat_stdlib_adts_as_builtins". When this option is turned on, standard library ADT identifiers are rendered as builtins. When it is turned off, they are rendered as ordinary library identifiers. `treat_stdlib_adts_as_builtins` (default: On) The option is useful for dialects that support ADTs as first class objects and provide ADTs in the standard library that would otherwise be built-in. At present, only Modula-2 R10 supports library ADTs as first class objects and therefore, no ADT identifiers are defined for any other dialects. Example: ``$ pygmentize -O full,dialect=m2r10,treat_stdlib_adts_as_builtins=Off ...`` Render standard library ADTs as ordinary library types. .. versionadded:: 1.3 .. versionchanged:: 2.1 Added multi-dialect support. 
""" name = 'Modula-2' aliases = ['modula2', 'm2'] filenames = ['*.def', '*.mod'] mimetypes = ['text/x-modula2'] flags = re.MULTILINE | re.DOTALL tokens = { 'whitespace': [ (r'\n+', Text), # blank lines (r'\s+', Text), # whitespace ], 'dialecttags': [ # PIM Dialect Tag (r'\(\*!m2pim\*\)', Comment.Special), # ISO Dialect Tag (r'\(\*!m2iso\*\)', Comment.Special), # M2R10 Dialect Tag (r'\(\*!m2r10\*\)', Comment.Special), # ObjM2 Dialect Tag (r'\(\*!objm2\*\)', Comment.Special), # Aglet Extensions Dialect Tag (r'\(\*!m2iso\+aglet\*\)', Comment.Special), # GNU Extensions Dialect Tag (r'\(\*!m2pim\+gm2\*\)', Comment.Special), # p1 Extensions Dialect Tag (r'\(\*!m2iso\+p1\*\)', Comment.Special), # XDS Extensions Dialect Tag (r'\(\*!m2iso\+xds\*\)', Comment.Special), ], 'identifiers': [ (r'([a-zA-Z_$][\w$]*)', Name), ], 'prefixed_number_literals': [ # # Base-2, whole number (r'0b[01]+(\'[01]+)*', Number.Bin), # # Base-16, whole number (r'0[ux][0-9A-F]+(\'[0-9A-F]+)*', Number.Hex), ], 'plain_number_literals': [ # # Base-10, real number with exponent (r'[0-9]+(\'[0-9]+)*' # integral part r'\.[0-9]+(\'[0-9]+)*' # fractional part r'[eE][+-]?[0-9]+(\'[0-9]+)*', # exponent Number.Float), # # Base-10, real number without exponent (r'[0-9]+(\'[0-9]+)*' # integral part r'\.[0-9]+(\'[0-9]+)*', # fractional part Number.Float), # # Base-10, whole number (r'[0-9]+(\'[0-9]+)*', Number.Integer), ], 'suffixed_number_literals': [ # # Base-8, whole number (r'[0-7]+B', Number.Oct), # # Base-8, character code (r'[0-7]+C', Number.Oct), # # Base-16, number (r'[0-9A-F]+H', Number.Hex), ], 'string_literals': [ (r"'(\\\\|\\'|[^'])*'", String), # single quoted string (r'"(\\\\|\\"|[^"])*"', String), # double quoted string ], 'digraph_operators': [ # Dot Product Operator (r'\*\.', Operator), # Array Concatenation Operator (r'\+>', Operator), # M2R10 + ObjM2 # Inequality Operator (r'<>', Operator), # ISO + PIM # Less-Or-Equal, Subset (r'<=', Operator), # Greater-Or-Equal, Superset (r'>=', Operator), # 
Identity Operator (r'==', Operator), # M2R10 + ObjM2 # Type Conversion Operator (r'::', Operator), # M2R10 + ObjM2 # Assignment Symbol (r':=', Operator), # Postfix Increment Mutator (r'\+\+', Operator), # M2R10 + ObjM2 # Postfix Decrement Mutator (r'--', Operator), # M2R10 + ObjM2 ], 'unigraph_operators': [ # Arithmetic Operators (r'[+-]', Operator), (r'[*/]', Operator), # ISO 80000-2 compliant Set Difference Operator (r'\\', Operator), # M2R10 + ObjM2 # Relational Operators (r'[=#<>]', Operator), # Dereferencing Operator (r'\^', Operator), # Dereferencing Operator Synonym (r'@', Operator), # ISO # Logical AND Operator Synonym (r'&', Operator), # PIM + ISO # Logical NOT Operator Synonym (r'~', Operator), # PIM + ISO # Smalltalk Message Prefix (r'`', Operator), # ObjM2 ], 'digraph_punctuation': [ # Range Constructor (r'\.\.', Punctuation), # Opening Chevron Bracket (r'<<', Punctuation), # M2R10 + ISO # Closing Chevron Bracket (r'>>', Punctuation), # M2R10 + ISO # Blueprint Punctuation (r'->', Punctuation), # M2R10 + ISO # Distinguish |# and # in M2 R10 (r'\|#', Punctuation), # Distinguish ## and # in M2 R10 (r'##', Punctuation), # Distinguish |* and * in M2 R10 (r'\|\*', Punctuation), ], 'unigraph_punctuation': [ # Common Punctuation (r'[\(\)\[\]{},.:;\|]', Punctuation), # Case Label Separator Synonym (r'!', Punctuation), # ISO # Blueprint Punctuation (r'\?', Punctuation), # M2R10 + ObjM2 ], 'comments': [ # Single Line Comment (r'^//.*?\n', Comment.Single), # M2R10 + ObjM2 # Block Comment (r'\(\*([^$].*?)\*\)', Comment.Multiline), # Template Block Comment (r'/\*(.*?)\*/', Comment.Multiline), # M2R10 + ObjM2 ], 'pragmas': [ # ISO Style Pragmas (r'<\*.*?\*>', Comment.Preproc), # ISO, M2R10 + ObjM2 # Pascal Style Pragmas (r'\(\*\$.*?\*\)', Comment.Preproc), # PIM ], 'root': [ include('whitespace'), include('dialecttags'), include('pragmas'), include('comments'), include('identifiers'), include('suffixed_number_literals'), # PIM + ISO 
include('prefixed_number_literals'), # M2R10 + ObjM2 include('plain_number_literals'), include('string_literals'), include('digraph_punctuation'), include('digraph_operators'), include('unigraph_punctuation'), include('unigraph_operators'), ] } # C o m m o n D a t a s e t s # Common Reserved Words Dataset common_reserved_words = ( # 37 common reserved words 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV', 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'FOR', 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD', 'MODULE', 'NOT', 'OF', 'OR', 'POINTER', 'PROCEDURE', 'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE', ) # Common Builtins Dataset common_builtins = ( # 16 common builtins 'ABS', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'FALSE', 'INTEGER', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NIL', 'ODD', 'ORD', 'REAL', 'TRUE', ) # Common Pseudo-Module Builtins Dataset common_pseudo_builtins = ( # 4 common pseudo builtins 'ADDRESS', 'BYTE', 'WORD', 'ADR' ) # P I M M o d u l a - 2 D a t a s e t s # Lexemes to Mark as Error Tokens for PIM Modula-2 pim_lexemes_to_reject = ( '!', '`', '@', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', '+>', '->', '<<', '>>', '|#', '##', ) # PIM Modula-2 Additional Reserved Words Dataset pim_additional_reserved_words = ( # 3 additional reserved words 'EXPORT', 'QUALIFIED', 'WITH', ) # PIM Modula-2 Additional Builtins Dataset pim_additional_builtins = ( # 16 additional builtins 'BITSET', 'CAP', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH', 'INC', 'INCL', 'NEW', 'NIL', 'PROC', 'SIZE', 'TRUNC', 'VAL', ) # PIM Modula-2 Additional Pseudo-Module Builtins Dataset pim_additional_pseudo_builtins = ( # 5 additional pseudo builtins 'SYSTEM', 'PROCESS', 'TSIZE', 'NEWPROCESS', 'TRANSFER', ) # I S O M o d u l a - 2 D a t a s e t s # Lexemes to Mark as Error Tokens for ISO Modula-2 iso_lexemes_to_reject = ( '`', '$', '%', '?', '\\', '==', '++', '--', '::', '*.', '+>', '->', '<<', '>>', '|#', '##', ) 
# ISO Modula-2 Additional Reserved Words Dataset iso_additional_reserved_words = ( # 9 additional reserved words (ISO 10514-1) 'EXCEPT', 'EXPORT', 'FINALLY', 'FORWARD', 'PACKEDSET', 'QUALIFIED', 'REM', 'RETRY', 'WITH', # 10 additional reserved words (ISO 10514-2 & ISO 10514-3) 'ABSTRACT', 'AS', 'CLASS', 'GUARD', 'INHERIT', 'OVERRIDE', 'READONLY', 'REVEAL', 'TRACED', 'UNSAFEGUARDED', ) # ISO Modula-2 Additional Builtins Dataset iso_additional_builtins = ( # 26 additional builtins (ISO 10514-1) 'BITSET', 'CAP', 'CMPLX', 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FLOAT', 'HALT', 'HIGH', 'IM', 'INC', 'INCL', 'INT', 'INTERRUPTIBLE', 'LENGTH', 'LFLOAT', 'LONGCOMPLEX', 'NEW', 'PROC', 'PROTECTION', 'RE', 'SIZE', 'TRUNC', 'UNINTERRUBTIBLE', 'VAL', # 5 additional builtins (ISO 10514-2 & ISO 10514-3) 'CREATE', 'DESTROY', 'EMPTY', 'ISMEMBER', 'SELF', ) # ISO Modula-2 Additional Pseudo-Module Builtins Dataset iso_additional_pseudo_builtins = ( # 14 additional builtins (SYSTEM) 'SYSTEM', 'BITSPERLOC', 'LOCSPERBYTE', 'LOCSPERWORD', 'LOC', 'ADDADR', 'SUBADR', 'DIFADR', 'MAKEADR', 'ADR', 'ROTATE', 'SHIFT', 'CAST', 'TSIZE', # 13 additional builtins (COROUTINES) 'COROUTINES', 'ATTACH', 'COROUTINE', 'CURRENT', 'DETACH', 'HANDLER', 'INTERRUPTSOURCE', 'IOTRANSFER', 'IsATTACHED', 'LISTEN', 'NEWCOROUTINE', 'PROT', 'TRANSFER', # 9 additional builtins (EXCEPTIONS) 'EXCEPTIONS', 'AllocateSource', 'CurrentNumber', 'ExceptionNumber', 'ExceptionSource', 'GetMessage', 'IsCurrentSource', 'IsExceptionalExecution', 'RAISE', # 3 additional builtins (TERMINATION) 'TERMINATION', 'IsTerminating', 'HasHalted', # 4 additional builtins (M2EXCEPTION) 'M2EXCEPTION', 'M2Exceptions', 'M2Exception', 'IsM2Exception', 'indexException', 'rangeException', 'caseSelectException', 'invalidLocation', 'functionException', 'wholeValueException', 'wholeDivException', 'realValueException', 'realDivException', 'complexValueException', 'complexDivException', 'protException', 'sysException', 'coException', 'exException', ) # M o 
d u l a - 2 R 1 0 D a t a s e t s # Lexemes to Mark as Error Tokens for Modula-2 R10 m2r10_lexemes_to_reject = ( '!', '`', '@', '$', '%', '&', '<>', ) # Modula-2 R10 reserved words in addition to the common set m2r10_additional_reserved_words = ( # 12 additional reserved words 'ALIAS', 'ARGLIST', 'BLUEPRINT', 'COPY', 'GENLIB', 'INDETERMINATE', 'NEW', 'NONE', 'OPAQUE', 'REFERENTIAL', 'RELEASE', 'RETAIN', # 2 additional reserved words with symbolic assembly option 'ASM', 'REG', ) # Modula-2 R10 builtins in addition to the common set m2r10_additional_builtins = ( # 26 additional builtins 'CARDINAL', 'COUNT', 'EMPTY', 'EXISTS', 'INSERT', 'LENGTH', 'LONGCARD', 'OCTET', 'PTR', 'PRED', 'READ', 'READNEW', 'REMOVE', 'RETRIEVE', 'SORT', 'STORE', 'SUBSET', 'SUCC', 'TLIMIT', 'TMAX', 'TMIN', 'TRUE', 'TSIZE', 'UNICHAR', 'WRITE', 'WRITEF', ) # Modula-2 R10 Additional Pseudo-Module Builtins Dataset m2r10_additional_pseudo_builtins = ( # 13 additional builtins (TPROPERTIES) 'TPROPERTIES', 'PROPERTY', 'LITERAL', 'TPROPERTY', 'TLITERAL', 'TBUILTIN', 'TDYN', 'TREFC', 'TNIL', 'TBASE', 'TPRECISION', 'TMAXEXP', 'TMINEXP', # 4 additional builtins (CONVERSION) 'CONVERSION', 'TSXFSIZE', 'SXF', 'VAL', # 35 additional builtins (UNSAFE) 'UNSAFE', 'CAST', 'INTRINSIC', 'AVAIL', 'ADD', 'SUB', 'ADDC', 'SUBC', 'FETCHADD', 'FETCHSUB', 'SHL', 'SHR', 'ASHR', 'ROTL', 'ROTR', 'ROTLC', 'ROTRC', 'BWNOT', 'BWAND', 'BWOR', 'BWXOR', 'BWNAND', 'BWNOR', 'SETBIT', 'TESTBIT', 'LSBIT', 'MSBIT', 'CSBITS', 'BAIL', 'HALT', 'TODO', 'FFI', 'ADDR', 'VARGLIST', 'VARGC', # 11 additional builtins (ATOMIC) 'ATOMIC', 'INTRINSIC', 'AVAIL', 'SWAP', 'CAS', 'INC', 'DEC', 'BWAND', 'BWNAND', 'BWOR', 'BWXOR', # 7 additional builtins (COMPILER) 'COMPILER', 'DEBUG', 'MODNAME', 'PROCNAME', 'LINENUM', 'DEFAULT', 'HASH', # 5 additional builtins (ASSEMBLER) 'ASSEMBLER', 'REGISTER', 'SETREG', 'GETREG', 'CODE', ) # O b j e c t i v e M o d u l a - 2 D a t a s e t s # Lexemes to Mark as Error Tokens for Objective Modula-2 
objm2_lexemes_to_reject = ( '!', '$', '%', '&', '<>', ) # Objective Modula-2 Extensions # reserved words in addition to Modula-2 R10 objm2_additional_reserved_words = ( # 16 additional reserved words 'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD', 'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC', 'SUPER', 'TRY', ) # Objective Modula-2 Extensions # builtins in addition to Modula-2 R10 objm2_additional_builtins = ( # 3 additional builtins 'OBJECT', 'NO', 'YES', ) # Objective Modula-2 Extensions # pseudo-module builtins in addition to Modula-2 R10 objm2_additional_pseudo_builtins = ( # None ) # A g l e t M o d u l a - 2 D a t a s e t s # Aglet Extensions # reserved words in addition to ISO Modula-2 aglet_additional_reserved_words = ( # None ) # Aglet Extensions # builtins in addition to ISO Modula-2 aglet_additional_builtins = ( # 9 additional builtins 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16', 'CARDINAL32', 'INTEGER8', 'INTEGER16', 'INTEGER32', ) # Aglet Modula-2 Extensions # pseudo-module builtins in addition to ISO Modula-2 aglet_additional_pseudo_builtins = ( # None ) # G N U M o d u l a - 2 D a t a s e t s # GNU Extensions # reserved words in addition to PIM Modula-2 gm2_additional_reserved_words = ( # 10 additional reserved words 'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__', '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE', ) # GNU Extensions # builtins in addition to PIM Modula-2 gm2_additional_builtins = ( # 21 additional builtins 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96', 'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64', 'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW', ) # GNU Extensions # pseudo-module builtins in addition to PIM Modula-2 gm2_additional_pseudo_builtins = ( # None ) # p 1 M o d u l a - 2 D a t a s e t s # p1 Extensions # reserved words 
in addition to ISO Modula-2 p1_additional_reserved_words = ( # None ) # p1 Extensions # builtins in addition to ISO Modula-2 p1_additional_builtins = ( # None ) # p1 Modula-2 Extensions # pseudo-module builtins in addition to ISO Modula-2 p1_additional_pseudo_builtins = ( # 1 additional builtin 'BCD', ) # X D S M o d u l a - 2 D a t a s e t s # XDS Extensions # reserved words in addition to ISO Modula-2 xds_additional_reserved_words = ( # 1 additional reserved word 'SEQ', ) # XDS Extensions # builtins in addition to ISO Modula-2 xds_additional_builtins = ( # 9 additional builtins 'ASH', 'ASSERT', 'DIFFADR_TYPE', 'ENTIER', 'INDEX', 'LEN', 'LONGCARD', 'SHORTCARD', 'SHORTINT', ) # XDS Modula-2 Extensions # pseudo-module builtins in addition to ISO Modula-2 xds_additional_pseudo_builtins = ( # 22 additional builtins (SYSTEM) 'PROCESS', 'NEWPROCESS', 'BOOL8', 'BOOL16', 'BOOL32', 'CARD8', 'CARD16', 'CARD32', 'INT8', 'INT16', 'INT32', 'REF', 'MOVE', 'FILL', 'GET', 'PUT', 'CC', 'int', 'unsigned', 'size_t', 'void' # 3 additional builtins (COMPILER) 'COMPILER', 'OPTION', 'EQUATION' ) # P I M S t a n d a r d L i b r a r y D a t a s e t s # PIM Modula-2 Standard Library Modules Dataset pim_stdlib_module_identifiers = ( 'Terminal', 'FileSystem', 'InOut', 'RealInOut', 'MathLib0', 'Storage', ) # PIM Modula-2 Standard Library Types Dataset pim_stdlib_type_identifiers = ( 'Flag', 'FlagSet', 'Response', 'Command', 'Lock', 'Permission', 'MediumType', 'File', 'FileProc', 'DirectoryProc', 'FileCommand', 'DirectoryCommand', ) # PIM Modula-2 Standard Library Procedures Dataset pim_stdlib_proc_identifiers = ( 'Read', 'BusyRead', 'ReadAgain', 'Write', 'WriteString', 'WriteLn', 'Create', 'Lookup', 'Close', 'Delete', 'Rename', 'SetRead', 'SetWrite', 'SetModify', 'SetOpen', 'Doio', 'SetPos', 'GetPos', 'Length', 'Reset', 'Again', 'ReadWord', 'WriteWord', 'ReadChar', 'WriteChar', 'CreateMedium', 'DeleteMedium', 'AssignName', 'DeassignName', 'ReadMedium', 'LookupMedium', 'OpenInput', 
        'OpenOutput', 'CloseInput', 'CloseOutput', 'ReadString', 'ReadInt',
        'ReadCard', 'ReadWrd', 'WriteInt', 'WriteCard', 'WriteOct',
        'WriteHex', 'WriteWrd', 'ReadReal', 'WriteReal', 'WriteFixPt',
        'WriteRealOct', 'sqrt', 'exp', 'ln', 'sin', 'cos', 'arctan',
        'entier', 'ALLOCATE', 'DEALLOCATE',
    )

    # PIM Modula-2 Standard Library Variables Dataset
    pim_stdlib_var_identifiers = (
        'Done', 'termCH', 'in', 'out'
    )

    # PIM Modula-2 Standard Library Constants Dataset
    pim_stdlib_const_identifiers = (
        'EOL',
    )

    # I S O   S t a n d a r d   L i b r a r y   D a t a s e t s

    # ISO Modula-2 Standard Library Modules Dataset
    iso_stdlib_module_identifiers = (
        # TO DO
    )

    # ISO Modula-2 Standard Library Types Dataset
    iso_stdlib_type_identifiers = (
        # TO DO
    )

    # ISO Modula-2 Standard Library Procedures Dataset
    iso_stdlib_proc_identifiers = (
        # TO DO
    )

    # ISO Modula-2 Standard Library Variables Dataset
    iso_stdlib_var_identifiers = (
        # TO DO
    )

    # ISO Modula-2 Standard Library Constants Dataset
    iso_stdlib_const_identifiers = (
        # TO DO
    )

    # M 2   R 1 0   S t a n d a r d   L i b r a r y   D a t a s e t s

    # Modula-2 R10 Standard Library ADTs Dataset
    m2r10_stdlib_adt_identifiers = (
        'BCD', 'LONGBCD', 'BITSET', 'SHORTBITSET', 'LONGBITSET',
        'LONGLONGBITSET', 'COMPLEX', 'LONGCOMPLEX', 'SHORTCARD',
        'LONGLONGCARD', 'SHORTINT', 'LONGLONGINT', 'POSINT', 'SHORTPOSINT',
        'LONGPOSINT', 'LONGLONGPOSINT', 'BITSET8', 'BITSET16', 'BITSET32',
        'BITSET64', 'BITSET128', 'BS8', 'BS16', 'BS32', 'BS64', 'BS128',
        'CARDINAL8', 'CARDINAL16', 'CARDINAL32', 'CARDINAL64', 'CARDINAL128',
        'CARD8', 'CARD16', 'CARD32', 'CARD64', 'CARD128', 'INTEGER8',
        'INTEGER16', 'INTEGER32', 'INTEGER64', 'INTEGER128', 'INT8', 'INT16',
        'INT32', 'INT64', 'INT128', 'STRING', 'UNISTRING',
    )

    # Modula-2 R10 Standard Library Blueprints Dataset
    m2r10_stdlib_blueprint_identifiers = (
        'ProtoRoot', 'ProtoComputational', 'ProtoNumeric', 'ProtoScalar',
        'ProtoNonScalar', 'ProtoCardinal', 'ProtoInteger', 'ProtoReal',
        'ProtoComplex', 'ProtoVector', 'ProtoTuple', 'ProtoCompArray',
        'ProtoCollection', 'ProtoStaticArray', 'ProtoStaticSet',
        'ProtoStaticString', 'ProtoArray', 'ProtoString', 'ProtoSet',
        'ProtoMultiSet', 'ProtoDictionary', 'ProtoMultiDict',
        'ProtoExtension', 'ProtoIO', 'ProtoCardMath', 'ProtoIntMath',
        'ProtoRealMath',
    )

    # Modula-2 R10 Standard Library Modules Dataset
    m2r10_stdlib_module_identifiers = (
        'ASCII', 'BooleanIO', 'CharIO', 'UnicharIO', 'OctetIO', 'CardinalIO',
        'LongCardIO', 'IntegerIO', 'LongIntIO', 'RealIO', 'LongRealIO',
        'BCDIO', 'LongBCDIO', 'CardMath', 'LongCardMath', 'IntMath',
        'LongIntMath', 'RealMath', 'LongRealMath', 'BCDMath', 'LongBCDMath',
        'FileIO', 'FileSystem', 'Storage', 'IOSupport',
    )

    # Modula-2 R10 Standard Library Types Dataset
    m2r10_stdlib_type_identifiers = (
        'File', 'Status',
        # TO BE COMPLETED
    )

    # Modula-2 R10 Standard Library Procedures Dataset
    m2r10_stdlib_proc_identifiers = (
        'ALLOCATE', 'DEALLOCATE', 'SIZE',
        # TO BE COMPLETED
    )

    # Modula-2 R10 Standard Library Variables Dataset
    m2r10_stdlib_var_identifiers = (
        'stdIn', 'stdOut', 'stdErr',
    )

    # Modula-2 R10 Standard Library Constants Dataset
    m2r10_stdlib_const_identifiers = (
        'pi', 'tau',
    )

    # D i a l e c t s

    # Dialect modes; index 0 ('unknown') is the catch-all fallback
    dialects = (
        'unknown',
        'm2pim', 'm2iso', 'm2r10', 'objm2',
        'm2iso+aglet', 'm2pim+gm2', 'm2iso+p1', 'm2iso+xds',
    )

    # D a t a b a s e s
    # Each database maps a dialect id to a tuple of identifier tuples;
    # set_dialect() merges them into flat sets.

    # Lexemes to Mark as Errors Database
    lexemes_to_reject_db = {
        # Lexemes to reject for unknown dialect
        'unknown': (
            # LEAVE THIS EMPTY
        ),
        # Lexemes to reject for PIM Modula-2
        'm2pim': (
            pim_lexemes_to_reject,
        ),
        # Lexemes to reject for ISO Modula-2
        'm2iso': (
            iso_lexemes_to_reject,
        ),
        # Lexemes to reject for Modula-2 R10
        'm2r10': (
            m2r10_lexemes_to_reject,
        ),
        # Lexemes to reject for Objective Modula-2
        'objm2': (
            objm2_lexemes_to_reject,
        ),
        # Lexemes to reject for Aglet Modula-2
        'm2iso+aglet': (
            iso_lexemes_to_reject,
        ),
        # Lexemes to reject for GNU Modula-2
        'm2pim+gm2': (
            pim_lexemes_to_reject,
        ),
        # Lexemes to reject for p1 Modula-2
        'm2iso+p1': (
            iso_lexemes_to_reject,
        ),
        # Lexemes to reject for XDS Modula-2
        'm2iso+xds': (
            iso_lexemes_to_reject,
        ),
    }

    # Reserved Words Database
    reserved_words_db = {
        # Reserved words for unknown dialect
        'unknown': (
            common_reserved_words,
            pim_additional_reserved_words,
            iso_additional_reserved_words,
            m2r10_additional_reserved_words,
        ),
        # Reserved words for PIM Modula-2
        'm2pim': (
            common_reserved_words,
            pim_additional_reserved_words,
        ),
        # Reserved words for ISO Modula-2
        # (comment corrected: was mislabelled "Modula-2 R10")
        'm2iso': (
            common_reserved_words,
            iso_additional_reserved_words,
        ),
        # Reserved words for Modula-2 R10
        # (comment corrected: was mislabelled "ISO Modula-2")
        'm2r10': (
            common_reserved_words,
            m2r10_additional_reserved_words,
        ),
        # Reserved words for Objective Modula-2
        'objm2': (
            common_reserved_words,
            m2r10_additional_reserved_words,
            objm2_additional_reserved_words,
        ),
        # Reserved words for Aglet Modula-2 Extensions
        'm2iso+aglet': (
            common_reserved_words,
            iso_additional_reserved_words,
            aglet_additional_reserved_words,
        ),
        # Reserved words for GNU Modula-2 Extensions
        'm2pim+gm2': (
            common_reserved_words,
            pim_additional_reserved_words,
            gm2_additional_reserved_words,
        ),
        # Reserved words for p1 Modula-2 Extensions
        'm2iso+p1': (
            common_reserved_words,
            iso_additional_reserved_words,
            p1_additional_reserved_words,
        ),
        # Reserved words for XDS Modula-2 Extensions
        'm2iso+xds': (
            common_reserved_words,
            iso_additional_reserved_words,
            xds_additional_reserved_words,
        ),
    }

    # Builtins Database
    builtins_db = {
        # Builtins for unknown dialect
        'unknown': (
            common_builtins,
            pim_additional_builtins,
            iso_additional_builtins,
            m2r10_additional_builtins,
        ),
        # Builtins for PIM Modula-2
        'm2pim': (
            common_builtins,
            pim_additional_builtins,
        ),
        # Builtins for ISO Modula-2
        'm2iso': (
            common_builtins,
            iso_additional_builtins,
        ),
        # Builtins for Modula-2 R10
        # (comment corrected: was a duplicate "ISO Modula-2" label)
        'm2r10': (
            common_builtins,
            m2r10_additional_builtins,
        ),
        # Builtins for Objective Modula-2
        'objm2': (
            common_builtins,
            m2r10_additional_builtins,
            objm2_additional_builtins,
        ),
        # Builtins for Aglet Modula-2 Extensions
        'm2iso+aglet': (
            common_builtins,
            iso_additional_builtins,
            aglet_additional_builtins,
        ),
        # Builtins for GNU Modula-2 Extensions
        'm2pim+gm2': (
            common_builtins,
            pim_additional_builtins,
            gm2_additional_builtins,
        ),
        # Builtins for p1 Modula-2 Extensions
        'm2iso+p1': (
            common_builtins,
            iso_additional_builtins,
            p1_additional_builtins,
        ),
        # Builtins for XDS Modula-2 Extensions
        'm2iso+xds': (
            common_builtins,
            iso_additional_builtins,
            xds_additional_builtins,
        ),
    }

    # Pseudo-Module Builtins Database
    pseudo_builtins_db = {
        # Builtins for unknown dialect
        'unknown': (
            common_pseudo_builtins,
            pim_additional_pseudo_builtins,
            iso_additional_pseudo_builtins,
            m2r10_additional_pseudo_builtins,
        ),
        # Builtins for PIM Modula-2
        'm2pim': (
            common_pseudo_builtins,
            pim_additional_pseudo_builtins,
        ),
        # Builtins for ISO Modula-2
        'm2iso': (
            common_pseudo_builtins,
            iso_additional_pseudo_builtins,
        ),
        # Builtins for Modula-2 R10
        # (comment corrected: was a duplicate "ISO Modula-2" label)
        'm2r10': (
            common_pseudo_builtins,
            m2r10_additional_pseudo_builtins,
        ),
        # Builtins for Objective Modula-2
        'objm2': (
            common_pseudo_builtins,
            m2r10_additional_pseudo_builtins,
            objm2_additional_pseudo_builtins,
        ),
        # Builtins for Aglet Modula-2 Extensions
        'm2iso+aglet': (
            common_pseudo_builtins,
            iso_additional_pseudo_builtins,
            aglet_additional_pseudo_builtins,
        ),
        # Builtins for GNU Modula-2 Extensions
        'm2pim+gm2': (
            common_pseudo_builtins,
            pim_additional_pseudo_builtins,
            gm2_additional_pseudo_builtins,
        ),
        # Builtins for p1 Modula-2 Extensions
        'm2iso+p1': (
            common_pseudo_builtins,
            iso_additional_pseudo_builtins,
            p1_additional_pseudo_builtins,
        ),
        # Builtins for XDS Modula-2 Extensions
        'm2iso+xds': (
            common_pseudo_builtins,
            iso_additional_pseudo_builtins,
            xds_additional_pseudo_builtins,
        ),
    }

    # Standard Library ADTs Database
    stdlib_adts_db = {
        # Empty entry for unknown dialect
        'unknown': (
            # LEAVE THIS EMPTY
        ),
        # Standard Library ADTs for PIM Modula-2
        'm2pim': (
            # No first class library types
        ),
        # Standard Library ADTs for ISO Modula-2
        'm2iso': (
            # No first class library types
        ),
        # Standard Library ADTs for Modula-2 R10
        'm2r10': (
            m2r10_stdlib_adt_identifiers,
        ),
        # Standard Library ADTs for Objective Modula-2
        'objm2': (
            m2r10_stdlib_adt_identifiers,
        ),
        # Standard Library ADTs for Aglet Modula-2
        'm2iso+aglet': (
            # No first class library types
        ),
        # Standard Library ADTs for GNU Modula-2
        'm2pim+gm2': (
            # No first class library types
        ),
        # Standard Library ADTs for p1 Modula-2
        'm2iso+p1': (
            # No first class library types
        ),
        # Standard Library ADTs for XDS Modula-2
        'm2iso+xds': (
            # No first class library types
        ),
    }

    # Standard Library Modules Database
    stdlib_modules_db = {
        # Empty entry for unknown dialect
        'unknown': (
            # LEAVE THIS EMPTY
        ),
        # Standard Library Modules for PIM Modula-2
        'm2pim': (
            pim_stdlib_module_identifiers,
        ),
        # Standard Library Modules for ISO Modula-2
        'm2iso': (
            iso_stdlib_module_identifiers,
        ),
        # Standard Library Modules for Modula-2 R10
        'm2r10': (
            m2r10_stdlib_blueprint_identifiers,
            m2r10_stdlib_module_identifiers,
            m2r10_stdlib_adt_identifiers,
        ),
        # Standard Library Modules for Objective Modula-2
        'objm2': (
            m2r10_stdlib_blueprint_identifiers,
            m2r10_stdlib_module_identifiers,
        ),
        # Standard Library Modules for Aglet Modula-2
        'm2iso+aglet': (
            iso_stdlib_module_identifiers,
        ),
        # Standard Library Modules for GNU Modula-2
        'm2pim+gm2': (
            pim_stdlib_module_identifiers,
        ),
        # Standard Library Modules for p1 Modula-2
        'm2iso+p1': (
            iso_stdlib_module_identifiers,
        ),
        # Standard Library Modules for XDS Modula-2
        'm2iso+xds': (
            iso_stdlib_module_identifiers,
        ),
    }

    # Standard Library Types Database
    stdlib_types_db = {
        # Empty entry for unknown dialect
        'unknown': (
            # LEAVE THIS EMPTY
        ),
        # Standard Library Types for PIM Modula-2
        'm2pim': (
            pim_stdlib_type_identifiers,
        ),
        # Standard Library Types for ISO Modula-2
        'm2iso': (
            iso_stdlib_type_identifiers,
        ),
        # Standard Library Types for Modula-2 R10
        'm2r10': (
            m2r10_stdlib_type_identifiers,
        ),
        # Standard Library Types for Objective Modula-2
        'objm2': (
            m2r10_stdlib_type_identifiers,
        ),
        # Standard Library Types for Aglet Modula-2
        'm2iso+aglet': (
            iso_stdlib_type_identifiers,
        ),
        # Standard Library Types for GNU Modula-2
        'm2pim+gm2': (
            pim_stdlib_type_identifiers,
        ),
        # Standard Library Types for p1 Modula-2
        'm2iso+p1': (
            iso_stdlib_type_identifiers,
        ),
        # Standard Library Types for XDS Modula-2
        'm2iso+xds': (
            iso_stdlib_type_identifiers,
        ),
    }

    # Standard Library Procedures Database
    stdlib_procedures_db = {
        # Empty entry for unknown dialect
        'unknown': (
            # LEAVE THIS EMPTY
        ),
        # Standard Library Procedures for PIM Modula-2
        'm2pim': (
            pim_stdlib_proc_identifiers,
        ),
        # Standard Library Procedures for ISO Modula-2
        'm2iso': (
            iso_stdlib_proc_identifiers,
        ),
        # Standard Library Procedures for Modula-2 R10
        'm2r10': (
            m2r10_stdlib_proc_identifiers,
        ),
        # Standard Library Procedures for Objective Modula-2
        'objm2': (
            m2r10_stdlib_proc_identifiers,
        ),
        # Standard Library Procedures for Aglet Modula-2
        'm2iso+aglet': (
            iso_stdlib_proc_identifiers,
        ),
        # Standard Library Procedures for GNU Modula-2
        'm2pim+gm2': (
            pim_stdlib_proc_identifiers,
        ),
        # Standard Library Procedures for p1 Modula-2
        'm2iso+p1': (
            iso_stdlib_proc_identifiers,
        ),
        # Standard Library Procedures for XDS Modula-2
        'm2iso+xds': (
            iso_stdlib_proc_identifiers,
        ),
    }

    # Standard Library Variables Database
    stdlib_variables_db = {
        # Empty entry for unknown dialect
        'unknown': (
            # LEAVE THIS EMPTY
        ),
        # Standard Library Variables for PIM Modula-2
        'm2pim': (
            pim_stdlib_var_identifiers,
        ),
        # Standard Library Variables for ISO Modula-2
        'm2iso': (
            iso_stdlib_var_identifiers,
        ),
        # Standard Library Variables for Modula-2 R10
        'm2r10': (
            m2r10_stdlib_var_identifiers,
        ),
        # Standard Library Variables for Objective Modula-2
        'objm2': (
            m2r10_stdlib_var_identifiers,
        ),
        # Standard Library Variables for Aglet Modula-2
        'm2iso+aglet': (
            iso_stdlib_var_identifiers,
        ),
        # Standard Library Variables for GNU Modula-2
        'm2pim+gm2': (
            pim_stdlib_var_identifiers,
        ),
        # Standard Library Variables for p1 Modula-2
        'm2iso+p1': (
            iso_stdlib_var_identifiers,
        ),
        # Standard Library Variables for XDS Modula-2
        'm2iso+xds': (
iso_stdlib_var_identifiers, ), } # Standard Library Constants Database stdlib_constants_db = { # Empty entry for unknown dialect 'unknown': ( # LEAVE THIS EMPTY ), # Standard Library Constants for PIM Modula-2 'm2pim': ( pim_stdlib_const_identifiers, ), # Standard Library Constants for ISO Modula-2 'm2iso': ( iso_stdlib_const_identifiers, ), # Standard Library Constants for Modula-2 R10 'm2r10': ( m2r10_stdlib_const_identifiers, ), # Standard Library Constants for Objective Modula-2 'objm2': ( m2r10_stdlib_const_identifiers, ), # Standard Library Constants for Aglet Modula-2 'm2iso+aglet': ( iso_stdlib_const_identifiers, ), # Standard Library Constants for GNU Modula-2 'm2pim+gm2': ( pim_stdlib_const_identifiers, ), # Standard Library Constants for p1 Modula-2 'm2iso+p1': ( iso_stdlib_const_identifiers, ), # Standard Library Constants for XDS Modula-2 'm2iso+xds': ( iso_stdlib_const_identifiers, ), } # M e t h o d s # initialise a lexer instance def __init__(self, **options): # # check dialect options # dialects = get_list_opt(options, 'dialect', []) # for dialect_option in dialects: if dialect_option in self.dialects[1:-1]: # valid dialect option found self.set_dialect(dialect_option) break # # Fallback Mode (DEFAULT) else: # no valid dialect option self.set_dialect('unknown') # self.dialect_set_by_tag = False # # check style options # styles = get_list_opt(options, 'style', []) # # use lowercase mode for Algol style if 'algol' in styles or 'algol_nu' in styles: self.algol_publication_mode = True else: self.algol_publication_mode = False # # Check option flags # self.treat_stdlib_adts_as_builtins = get_bool_opt( options, 'treat_stdlib_adts_as_builtins', True) # # call superclass initialiser RegexLexer.__init__(self, **options) # Set lexer to a specified dialect def set_dialect(self, dialect_id): # # if __debug__: # print 'entered set_dialect with arg: ', dialect_id # # check dialect name against known dialects if dialect_id not in self.dialects: dialect = 
'unknown' # default else: dialect = dialect_id # # compose lexemes to reject set lexemes_to_reject_set = set() # add each list of reject lexemes for this dialect for list in self.lexemes_to_reject_db[dialect]: lexemes_to_reject_set.update(set(list)) # # compose reserved words set reswords_set = set() # add each list of reserved words for this dialect for list in self.reserved_words_db[dialect]: reswords_set.update(set(list)) # # compose builtins set builtins_set = set() # add each list of builtins for this dialect excluding reserved words for list in self.builtins_db[dialect]: builtins_set.update(set(list).difference(reswords_set)) # # compose pseudo-builtins set pseudo_builtins_set = set() # add each list of builtins for this dialect excluding reserved words for list in self.pseudo_builtins_db[dialect]: pseudo_builtins_set.update(set(list).difference(reswords_set)) # # compose ADTs set adts_set = set() # add each list of ADTs for this dialect excluding reserved words for list in self.stdlib_adts_db[dialect]: adts_set.update(set(list).difference(reswords_set)) # # compose modules set modules_set = set() # add each list of builtins for this dialect excluding builtins for list in self.stdlib_modules_db[dialect]: modules_set.update(set(list).difference(builtins_set)) # # compose types set types_set = set() # add each list of types for this dialect excluding builtins for list in self.stdlib_types_db[dialect]: types_set.update(set(list).difference(builtins_set)) # # compose procedures set procedures_set = set() # add each list of procedures for this dialect excluding builtins for list in self.stdlib_procedures_db[dialect]: procedures_set.update(set(list).difference(builtins_set)) # # compose variables set variables_set = set() # add each list of variables for this dialect excluding builtins for list in self.stdlib_variables_db[dialect]: variables_set.update(set(list).difference(builtins_set)) # # compose constants set constants_set = set() # add each list of constants 
for this dialect excluding builtins for list in self.stdlib_constants_db[dialect]: constants_set.update(set(list).difference(builtins_set)) # # update lexer state self.dialect = dialect self.lexemes_to_reject = lexemes_to_reject_set self.reserved_words = reswords_set self.builtins = builtins_set self.pseudo_builtins = pseudo_builtins_set self.adts = adts_set self.modules = modules_set self.types = types_set self.procedures = procedures_set self.variables = variables_set self.constants = constants_set # # if __debug__: # print 'exiting set_dialect' # print ' self.dialect: ', self.dialect # print ' self.lexemes_to_reject: ', self.lexemes_to_reject # print ' self.reserved_words: ', self.reserved_words # print ' self.builtins: ', self.builtins # print ' self.pseudo_builtins: ', self.pseudo_builtins # print ' self.adts: ', self.adts # print ' self.modules: ', self.modules # print ' self.types: ', self.types # print ' self.procedures: ', self.procedures # print ' self.variables: ', self.variables # print ' self.types: ', self.types # print ' self.constants: ', self.constants # Extracts a dialect name from a dialect tag comment string and checks # the extracted name against known dialects. If a match is found, the # matching name is returned, otherwise dialect id 'unknown' is returned def get_dialect_from_dialect_tag(self, dialect_tag): # # if __debug__: # print 'entered get_dialect_from_dialect_tag with arg: ', dialect_tag # # constants left_tag_delim = '(*!' 
right_tag_delim = '*)' left_tag_delim_len = len(left_tag_delim) right_tag_delim_len = len(right_tag_delim) indicator_start = left_tag_delim_len indicator_end = -(right_tag_delim_len) # # check comment string for dialect indicator if len(dialect_tag) > (left_tag_delim_len + right_tag_delim_len) \ and dialect_tag.startswith(left_tag_delim) \ and dialect_tag.endswith(right_tag_delim): # # if __debug__: # print 'dialect tag found' # # extract dialect indicator indicator = dialect_tag[indicator_start:indicator_end] # # if __debug__: # print 'extracted: ', indicator # # check against known dialects for index in range(1, len(self.dialects)): # # if __debug__: # print 'dialects[', index, ']: ', self.dialects[index] # if indicator == self.dialects[index]: # # if __debug__: # print 'matching dialect found' # # indicator matches known dialect return indicator else: # indicator does not match any dialect return 'unknown' # default else: # invalid indicator string return 'unknown' # default # intercept the token stream, modify token attributes and return them def get_tokens_unprocessed(self, text): for index, token, value in RegexLexer.get_tokens_unprocessed(self, text): # # check for dialect tag if dialect has not been set by tag if not self.dialect_set_by_tag and token == Comment.Special: indicated_dialect = self.get_dialect_from_dialect_tag(value) if indicated_dialect != 'unknown': # token is a dialect indicator # reset reserved words and builtins self.set_dialect(indicated_dialect) self.dialect_set_by_tag = True # # check for reserved words, predefined and stdlib identifiers if token is Name: if value in self.reserved_words: token = Keyword.Reserved if self.algol_publication_mode: value = value.lower() # elif value in self.builtins: token = Name.Builtin if self.algol_publication_mode: value = value.lower() # elif value in self.pseudo_builtins: token = Name.Builtin.Pseudo if self.algol_publication_mode: value = value.lower() # elif value in self.adts: if not 
self.treat_stdlib_adts_as_builtins: token = Name.Namespace else: token = Name.Builtin.Pseudo if self.algol_publication_mode: value = value.lower() # elif value in self.modules: token = Name.Namespace # elif value in self.types: token = Name.Class # elif value in self.procedures: token = Name.Function # elif value in self.variables: token = Name.Variable # elif value in self.constants: token = Name.Constant # elif token in Number: # # mark prefix number literals as error for PIM and ISO dialects if self.dialect not in ('unknown', 'm2r10', 'objm2'): if "'" in value or value[0:2] in ('0b', '0x', '0u'): token = Error # elif self.dialect in ('m2r10', 'objm2'): # mark base-8 number literals as errors for M2 R10 and ObjM2 if token is Number.Oct: token = Error # mark suffix base-16 literals as errors for M2 R10 and ObjM2 elif token is Number.Hex and 'H' in value: token = Error # mark real numbers with E as errors for M2 R10 and ObjM2 elif token is Number.Float and 'E' in value: token = Error # elif token in Comment: # # mark single line comment as error for PIM and ISO dialects if token is Comment.Single: if self.dialect not in ('unknown', 'm2r10', 'objm2'): token = Error # if token is Comment.Preproc: # mark ISO pragma as error for PIM dialects if value.startswith('<*') and \ self.dialect.startswith('m2pim'): token = Error # mark PIM pragma as comment for other dialects elif value.startswith('(*$') and \ self.dialect != 'unknown' and \ not self.dialect.startswith('m2pim'): token = Comment.Multiline # else: # token is neither Name nor Comment # # mark lexemes matching the dialect's error token set as errors if value in self.lexemes_to_reject: token = Error # # substitute lexemes when in Algol mode if self.algol_publication_mode: if value == '#': value = u'≠' elif value == '<=': value = u'≤' elif value == '>=': value = u'≥' elif value == '==': value = u'≡' elif value == '*.': value = u'•' # return result yield index, token, value