repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
tungvx/deploy
refs/heads/master
.google_appengine/lib/django_1_2/django/contrib/admindocs/utils.py
314
"Misc. utility functions/classes for admin documentation generator."

import re

# NOTE: Python 2 stdlib paths (this is vendored Django 1.2 code); on Python 3
# these would be email.parser / email.errors.
from email.Parser import HeaderParser
from email.Errors import HeaderParseError

from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_str

# docutils is an optional dependency; flag its availability so callers can
# degrade gracefully instead of crashing on import.
try:
    import docutils.core
    import docutils.nodes
    import docutils.parsers.rst.roles
except ImportError:
    docutils_is_available = False
else:
    docutils_is_available = True


def trim_docstring(docstring):
    """
    Uniformly trim leading/trailing whitespace from docstrings.

    Based on http://www.python.org/peps/pep-0257.html#handling-docstring-indentation
    """
    if not docstring or not docstring.strip():
        return ''
    # Convert tabs to spaces and split into lines.
    lines = docstring.expandtabs().splitlines()
    # Per PEP 257, the first line is excluded when computing the common
    # indentation: it usually sits right after the opening quotes at column 0,
    # and including it forced indent to 0, leaving body lines un-dedented.
    body_indents = [len(line) - len(line.lstrip())
                    for line in lines[1:] if line.lstrip()]
    indent = min(body_indents) if body_indents else 0
    trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]]
    return "\n".join(trimmed).strip()


def parse_docstring(docstring):
    """
    Parse out the parts of a docstring.  Returns (title, body, metadata).

    The title is the first paragraph; a trailing RFC 822-style header block,
    if present, is returned as the metadata dict; everything in between is
    the body.
    """
    docstring = trim_docstring(docstring)
    parts = re.split(r'\n{2,}', docstring)
    title = parts[0]
    if len(parts) == 1:
        body = ''
        metadata = {}
    else:
        parser = HeaderParser()
        try:
            metadata = parser.parsestr(parts[-1])
        except HeaderParseError:
            # Last paragraph isn't a header block; treat it as body text.
            metadata = {}
            body = "\n\n".join(parts[1:])
        else:
            metadata = dict(metadata.items())
            if metadata:
                body = "\n\n".join(parts[1:-1])
            else:
                body = "\n\n".join(parts[1:])
    return title, body, metadata


def parse_rst(text, default_reference_context, thing_being_parsed=None):
    """
    Convert the string from reST to an XHTML fragment.
    """
    overrides = {
        'doctitle_xform': True,
        # Fixed typo: was 'inital_header_level', which docutils silently
        # ignored, so document headers started at <h1> instead of <h3>.
        'initial_header_level': 3,
        "default_reference_context": default_reference_context,
        "link_base": reverse('django-admindocs-docroot').rstrip('/'),
    }
    if thing_being_parsed:
        thing_being_parsed = smart_str("<%s>" % thing_being_parsed)
    parts = docutils.core.publish_parts(text, source_path=thing_being_parsed,
                                        destination_path=None,
                                        writer_name='html',
                                        settings_overrides=overrides)
    return mark_safe(parts['fragment'])

#
# reST roles
#
ROLES = {
    'model': '%s/models/%s/',
    'view': '%s/views/%s/',
    'template': '%s/templates/%s/',
    'filter': '%s/filters/#%s',
    'tag': '%s/tags/#%s',
}


def create_reference_role(rolename, urlbase):
    """Register a docutils role that links to ``urlbase`` under link_base."""
    def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
        if options is None:
            options = {}
        if content is None:
            content = []
        node = docutils.nodes.reference(
            rawtext,
            text,
            refuri=(urlbase % (inliner.document.settings.link_base,
                               text.lower())),
            **options)
        return [node], []
    docutils.parsers.rst.roles.register_canonical_role(rolename, _role)


def default_reference_role(name, rawtext, text, lineno, inliner, options=None,
                           content=None):
    """Fallback role: resolve the link via the document's reference context."""
    if options is None:
        options = {}
    if content is None:
        content = []
    context = inliner.document.settings.default_reference_context
    node = docutils.nodes.reference(
        rawtext,
        text,
        refuri=(ROLES[context] % (inliner.document.settings.link_base,
                                  text.lower())),
        **options)
    return [node], []

if docutils_is_available:
    docutils.parsers.rst.roles.register_canonical_role('cmsreference',
                                                       default_reference_role)
    docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE = 'cmsreference'

    for name, urlbase in ROLES.items():
        create_reference_role(name, urlbase)
anryko/ansible
refs/heads/devel
lib/ansible/modules/packaging/os/homebrew.py
9
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Andrew Dunham <andrew@du.nham.ca> # (c) 2013, Daniel Jaouen <dcj24@cornell.edu> # (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com> # # Based on macports (Jimmy Tang <jcftang@gmail.com>) # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: homebrew author: - "Indrajit Raychaudhuri (@indrajitr)" - "Daniel Jaouen (@danieljaouen)" - "Andrew Dunham (@andrew-d)" requirements: - "python >= 2.6" - homebrew must already be installed on the target system short_description: Package manager for Homebrew description: - Manages Homebrew packages version_added: "1.1" options: name: description: - list of names of packages to install/remove aliases: ['pkg', 'package', 'formula'] type: list elements: str path: description: - "A ':' separated list of paths to search for 'brew' executable. Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system." default: '/usr/local/bin' state: description: - state of the package choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'unlinked' ] default: present update_homebrew: description: - update homebrew itself first type: bool default: 'no' aliases: ['update-brew'] upgrade_all: description: - upgrade all homebrew packages type: bool default: 'no' aliases: ['upgrade'] install_options: description: - options flags to install a package aliases: ['options'] version_added: "1.4" notes: - When used with a `loop:` each package will be processed individually, it is much more efficient to pass the list directly to the `name` option. 
''' EXAMPLES = ''' # Install formula foo with 'brew' in default path (C(/usr/local/bin)) - homebrew: name: foo state: present # Install formula foo with 'brew' in alternate path C(/my/other/location/bin) - homebrew: name: foo path: /my/other/location/bin state: present # Update homebrew first and install formula foo with 'brew' in default path - homebrew: name: foo state: present update_homebrew: yes # Update homebrew first and upgrade formula foo to latest available with 'brew' in default path - homebrew: name: foo state: latest update_homebrew: yes # Update homebrew and upgrade all packages - homebrew: update_homebrew: yes upgrade_all: yes # Miscellaneous other examples - homebrew: name: foo state: head - homebrew: name: foo state: linked - homebrew: name: foo state: absent - homebrew: name: foo,bar state: absent - homebrew: name: foo state: present install_options: with-baz,enable-debug ''' import os.path import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import iteritems, string_types # exceptions -------------------------------------------------------------- {{{ class HomebrewException(Exception): pass # /exceptions ------------------------------------------------------------- }}} # utils ------------------------------------------------------------------- {{{ def _create_regex_group(s): lines = (line.strip() for line in s.split('\n') if line.strip()) chars = filter(None, (line.split('#')[0].strip() for line in lines)) group = r'[^' + r''.join(chars) + r']' return re.compile(group) # /utils ------------------------------------------------------------------ }}} class Homebrew(object): '''A class to manage Homebrew packages.''' # class regexes ------------------------------------------------ {{{ VALID_PATH_CHARS = r''' \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) \s # spaces : # colons {sep} # the OS-specific path separator . 
# dots - # dashes '''.format(sep=os.path.sep) VALID_BREW_PATH_CHARS = r''' \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) \s # spaces {sep} # the OS-specific path separator . # dots - # dashes '''.format(sep=os.path.sep) VALID_PACKAGE_CHARS = r''' \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) . # dots / # slash (for taps) \+ # plusses - # dashes : # colons (for URLs) @ # at-sign ''' INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS) INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS) INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS) # /class regexes ----------------------------------------------- }}} # class validations -------------------------------------------- {{{ @classmethod def valid_path(cls, path): ''' `path` must be one of: - list of paths - a string containing only: - alphanumeric characters - dashes - dots - spaces - colons - os.path.sep ''' if isinstance(path, string_types): return not cls.INVALID_PATH_REGEX.search(path) try: iter(path) except TypeError: return False else: paths = path return all(cls.valid_brew_path(path_) for path_ in paths) @classmethod def valid_brew_path(cls, brew_path): ''' `brew_path` must be one of: - None - a string containing only: - alphanumeric characters - dashes - dots - spaces - os.path.sep ''' if brew_path is None: return True return ( isinstance(brew_path, string_types) and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) ) @classmethod def valid_package(cls, package): '''A valid package is either None or alphanumeric.''' if package is None: return True return ( isinstance(package, string_types) and not cls.INVALID_PACKAGE_REGEX.search(package) ) @classmethod def valid_state(cls, state): ''' A valid state is one of: - None - installed - upgraded - head - linked - unlinked - absent ''' if state is None: return True else: return ( isinstance(state, string_types) and state.lower() in ( 'installed', 'upgraded', 'head', 'linked', 'unlinked', 'absent', ) ) @classmethod def 
valid_module(cls, module): '''A valid module is an instance of AnsibleModule.''' return isinstance(module, AnsibleModule) # /class validations ------------------------------------------- }}} # class properties --------------------------------------------- {{{ @property def module(self): return self._module @module.setter def module(self, module): if not self.valid_module(module): self._module = None self.failed = True self.message = 'Invalid module: {0}.'.format(module) raise HomebrewException(self.message) else: self._module = module return module @property def path(self): return self._path @path.setter def path(self, path): if not self.valid_path(path): self._path = [] self.failed = True self.message = 'Invalid path: {0}.'.format(path) raise HomebrewException(self.message) else: if isinstance(path, string_types): self._path = path.split(':') else: self._path = path return path @property def brew_path(self): return self._brew_path @brew_path.setter def brew_path(self, brew_path): if not self.valid_brew_path(brew_path): self._brew_path = None self.failed = True self.message = 'Invalid brew_path: {0}.'.format(brew_path) raise HomebrewException(self.message) else: self._brew_path = brew_path return brew_path @property def params(self): return self._params @params.setter def params(self, params): self._params = self.module.params return self._params @property def current_package(self): return self._current_package @current_package.setter def current_package(self, package): if not self.valid_package(package): self._current_package = None self.failed = True self.message = 'Invalid package: {0}.'.format(package) raise HomebrewException(self.message) else: self._current_package = package return package # /class properties -------------------------------------------- }}} def __init__(self, module, path, packages=None, state=None, update_homebrew=False, upgrade_all=False, install_options=None): if not install_options: install_options = list() self._setup_status_vars() 
self._setup_instance_vars(module=module, path=path, packages=packages, state=state, update_homebrew=update_homebrew, upgrade_all=upgrade_all, install_options=install_options, ) self._prep() # prep --------------------------------------------------------- {{{ def _setup_status_vars(self): self.failed = False self.changed = False self.changed_count = 0 self.unchanged_count = 0 self.message = '' def _setup_instance_vars(self, **kwargs): for key, val in iteritems(kwargs): setattr(self, key, val) def _prep(self): self._prep_brew_path() def _prep_brew_path(self): if not self.module: self.brew_path = None self.failed = True self.message = 'AnsibleModule not set.' raise HomebrewException(self.message) self.brew_path = self.module.get_bin_path( 'brew', required=True, opt_dirs=self.path, ) if not self.brew_path: self.brew_path = None self.failed = True self.message = 'Unable to locate homebrew executable.' raise HomebrewException('Unable to locate homebrew executable.') return self.brew_path def _status(self): return (self.failed, self.changed, self.message) # /prep -------------------------------------------------------- }}} def run(self): try: self._run() except HomebrewException: pass if not self.failed and (self.changed_count + self.unchanged_count > 1): self.message = "Changed: %d, Unchanged: %d" % ( self.changed_count, self.unchanged_count, ) (failed, changed, message) = self._status() return (failed, changed, message) # checks ------------------------------------------------------- {{{ def _current_package_is_installed(self): if not self.valid_package(self.current_package): self.failed = True self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) cmd = [ "{brew_path}".format(brew_path=self.brew_path), "info", self.current_package, ] rc, out, err = self.module.run_command(cmd) for line in out.split('\n'): if ( re.search(r'Built from source', line) or re.search(r'Poured from bottle', line) ): return True return False 
def _current_package_is_outdated(self): if not self.valid_package(self.current_package): return False rc, out, err = self.module.run_command([ self.brew_path, 'outdated', self.current_package, ]) return rc != 0 def _current_package_is_installed_from_head(self): if not Homebrew.valid_package(self.current_package): return False elif not self._current_package_is_installed(): return False rc, out, err = self.module.run_command([ self.brew_path, 'info', self.current_package, ]) try: version_info = [line for line in out.split('\n') if line][0] except IndexError: return False return version_info.split(' ')[-1] == 'HEAD' # /checks ------------------------------------------------------ }}} # commands ----------------------------------------------------- {{{ def _run(self): if self.update_homebrew: self._update_homebrew() if self.upgrade_all: self._upgrade_all() if self.packages: if self.state == 'installed': return self._install_packages() elif self.state == 'upgraded': return self._upgrade_packages() elif self.state == 'head': return self._install_packages() elif self.state == 'linked': return self._link_packages() elif self.state == 'unlinked': return self._unlink_packages() elif self.state == 'absent': return self._uninstall_packages() # updated -------------------------------- {{{ def _update_homebrew(self): if self.module.check_mode: self.changed = True self.message = 'Homebrew would be updated.' raise HomebrewException(self.message) rc, out, err = self.module.run_command([ self.brew_path, 'update', ]) if rc == 0: if out and isinstance(out, string_types): already_updated = any( re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) for s in out.split('\n') if s ) if not already_updated: self.changed = True self.message = 'Homebrew updated successfully.' else: self.message = 'Homebrew already up-to-date.' 
return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) # /updated ------------------------------- }}} # _upgrade_all --------------------------- {{{ def _upgrade_all(self): if self.module.check_mode: self.changed = True self.message = 'Homebrew packages would be upgraded.' raise HomebrewException(self.message) rc, out, err = self.module.run_command([ self.brew_path, 'upgrade', ]) if rc == 0: if not out: self.message = 'Homebrew packages already upgraded.' else: self.changed = True self.message = 'Homebrew upgraded.' return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) # /_upgrade_all -------------------------- }}} # installed ------------------------------ {{{ def _install_current_package(self): if not self.valid_package(self.current_package): self.failed = True self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) if self._current_package_is_installed(): self.unchanged_count += 1 self.message = 'Package already installed: {0}'.format( self.current_package, ) return True if self.module.check_mode: self.changed = True self.message = 'Package would be installed: {0}'.format( self.current_package ) raise HomebrewException(self.message) if self.state == 'head': head = '--HEAD' else: head = None opts = ( [self.brew_path, 'install'] + self.install_options + [self.current_package, head] ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if self._current_package_is_installed(): self.changed_count += 1 self.changed = True self.message = 'Package installed: {0}'.format(self.current_package) return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) def _install_packages(self): for package in self.packages: self.current_package = package self._install_current_package() return True # /installed ----------------------------- }}} # upgraded 
------------------------------- {{{ def _upgrade_current_package(self): command = 'upgrade' if not self.valid_package(self.current_package): self.failed = True self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) if not self._current_package_is_installed(): command = 'install' if self._current_package_is_installed() and not self._current_package_is_outdated(): self.message = 'Package is already upgraded: {0}'.format( self.current_package, ) self.unchanged_count += 1 return True if self.module.check_mode: self.changed = True self.message = 'Package would be upgraded: {0}'.format( self.current_package ) raise HomebrewException(self.message) opts = ( [self.brew_path, command] + self.install_options + [self.current_package] ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if self._current_package_is_installed() and not self._current_package_is_outdated(): self.changed_count += 1 self.changed = True self.message = 'Package upgraded: {0}'.format(self.current_package) return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) def _upgrade_all_packages(self): opts = ( [self.brew_path, 'upgrade'] + self.install_options ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: self.changed = True self.message = 'All packages upgraded.' 
return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) def _upgrade_packages(self): if not self.packages: self._upgrade_all_packages() else: for package in self.packages: self.current_package = package self._upgrade_current_package() return True # /upgraded ------------------------------ }}} # uninstalled ---------------------------- {{{ def _uninstall_current_package(self): if not self.valid_package(self.current_package): self.failed = True self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) if not self._current_package_is_installed(): self.unchanged_count += 1 self.message = 'Package already uninstalled: {0}'.format( self.current_package, ) return True if self.module.check_mode: self.changed = True self.message = 'Package would be uninstalled: {0}'.format( self.current_package ) raise HomebrewException(self.message) opts = ( [self.brew_path, 'uninstall', '--force'] + self.install_options + [self.current_package] ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if not self._current_package_is_installed(): self.changed_count += 1 self.changed = True self.message = 'Package uninstalled: {0}'.format(self.current_package) return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) def _uninstall_packages(self): for package in self.packages: self.current_package = package self._uninstall_current_package() return True # /uninstalled ----------------------------- }}} # linked --------------------------------- {{{ def _link_current_package(self): if not self.valid_package(self.current_package): self.failed = True self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) if not self._current_package_is_installed(): self.failed = True self.message = 'Package not installed: {0}.'.format(self.current_package) raise HomebrewException(self.message) if 
self.module.check_mode: self.changed = True self.message = 'Package would be linked: {0}'.format( self.current_package ) raise HomebrewException(self.message) opts = ( [self.brew_path, 'link'] + self.install_options + [self.current_package] ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: self.changed_count += 1 self.changed = True self.message = 'Package linked: {0}'.format(self.current_package) return True else: self.failed = True self.message = 'Package could not be linked: {0}.'.format(self.current_package) raise HomebrewException(self.message) def _link_packages(self): for package in self.packages: self.current_package = package self._link_current_package() return True # /linked -------------------------------- }}} # unlinked ------------------------------- {{{ def _unlink_current_package(self): if not self.valid_package(self.current_package): self.failed = True self.message = 'Invalid package: {0}.'.format(self.current_package) raise HomebrewException(self.message) if not self._current_package_is_installed(): self.failed = True self.message = 'Package not installed: {0}.'.format(self.current_package) raise HomebrewException(self.message) if self.module.check_mode: self.changed = True self.message = 'Package would be unlinked: {0}'.format( self.current_package ) raise HomebrewException(self.message) opts = ( [self.brew_path, 'unlink'] + self.install_options + [self.current_package] ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: self.changed_count += 1 self.changed = True self.message = 'Package unlinked: {0}'.format(self.current_package) return True else: self.failed = True self.message = 'Package could not be unlinked: {0}.'.format(self.current_package) raise HomebrewException(self.message) def _unlink_packages(self): for package in self.packages: self.current_package = package self._unlink_current_package() return True # /unlinked ------------------------------ }}} # 
/commands ---------------------------------------------------- }}} def main(): module = AnsibleModule( argument_spec=dict( name=dict( aliases=["pkg", "package", "formula"], required=False, type='list', elements='str', ), path=dict( default="/usr/local/bin", required=False, type='path', ), state=dict( default="present", choices=[ "present", "installed", "latest", "upgraded", "head", "linked", "unlinked", "absent", "removed", "uninstalled", ], ), update_homebrew=dict( default=False, aliases=["update-brew"], type='bool', ), upgrade_all=dict( default=False, aliases=["upgrade"], type='bool', ), install_options=dict( default=None, aliases=['options'], type='list', ) ), supports_check_mode=True, ) module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') p = module.params if p['name']: packages = p['name'] else: packages = None path = p['path'] if path: path = path.split(':') state = p['state'] if state in ('present', 'installed'): state = 'installed' if state in ('head', ): state = 'head' if state in ('latest', 'upgraded'): state = 'upgraded' if state == 'linked': state = 'linked' if state == 'unlinked': state = 'unlinked' if state in ('absent', 'removed', 'uninstalled'): state = 'absent' update_homebrew = p['update_homebrew'] upgrade_all = p['upgrade_all'] p['install_options'] = p['install_options'] or [] install_options = ['--{0}'.format(install_option) for install_option in p['install_options']] brew = Homebrew(module=module, path=path, packages=packages, state=state, update_homebrew=update_homebrew, upgrade_all=upgrade_all, install_options=install_options) (failed, changed, message) = brew.run() if failed: module.fail_json(msg=message) else: module.exit_json(changed=changed, msg=message) if __name__ == '__main__': main()
mhvk/astropy
refs/heads/placeholder
astropy/nddata/tests/test_ccddata.py
5
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import textwrap

import numpy as np
import pytest

from astropy.io import fits
from astropy.nddata.nduncertainty import (
    StdDevUncertainty, MissingDataAssociationException, VarianceUncertainty,
    InverseVariance)
from astropy import units as u
from astropy import log
from astropy.wcs import WCS, FITSFixedWarning
from astropy.utils import NumpyRNGContext
from astropy.utils.data import (get_pkg_data_filename, get_pkg_data_filenames,
                                get_pkg_data_contents)
from astropy.utils.exceptions import AstropyWarning
from astropy.nddata.ccddata import CCDData
from astropy.nddata import _testing as nd_testing
from astropy.table import Table

DEFAULT_DATA_SIZE = 100

# Deterministic test data: fixed seed so every test sees the same array.
with NumpyRNGContext(123):
    _random_array = np.random.normal(size=[DEFAULT_DATA_SIZE,
                                           DEFAULT_DATA_SIZE])


def create_ccd_data():
    """
    Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE
    with units of ADU.
    """
    data = _random_array.copy()
    fake_meta = {'my_key': 42, 'your_key': 'not 42'}
    ccd = CCDData(data, unit=u.adu)
    ccd.header = fake_meta
    return ccd


def test_ccddata_empty():
    with pytest.raises(TypeError):
        CCDData()  # empty initializer should fail


def test_ccddata_must_have_unit():
    with pytest.raises(ValueError):
        CCDData(np.zeros([2, 2]))


def test_ccddata_unit_cannot_be_set_to_none():
    ccd_data = create_ccd_data()
    with pytest.raises(TypeError):
        ccd_data.unit = None


def test_ccddata_meta_header_conflict():
    with pytest.raises(ValueError) as exc:
        CCDData([1, 2, 3], unit='', meta={1: 1}, header={2: 2})
    assert "can't have both header and meta." in str(exc.value)


def test_ccddata_simple():
    ccd_data = create_ccd_data()
    assert ccd_data.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE)
    assert ccd_data.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE
    assert ccd_data.dtype == np.dtype(float)


def test_ccddata_init_with_string_electron_unit():
    ccd = CCDData(np.zeros([2, 2]), unit="electron")
    assert ccd.unit is u.electron


def test_initialize_from_FITS(tmpdir):
    ccd_data = create_ccd_data()
    hdu = fits.PrimaryHDU(ccd_data)
    hdulist = fits.HDUList([hdu])
    filename = tmpdir.join('afile.fits').strpath
    hdulist.writeto(filename)
    cd = CCDData.read(filename, unit=u.electron)
    assert cd.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE)
    assert cd.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE
    assert np.issubdtype(cd.data.dtype, np.floating)
    for k, v in hdu.header.items():
        assert cd.meta[k] == v


def test_initialize_from_fits_with_unit_in_header(tmpdir):
    fake_img = np.zeros([2, 2])
    hdu = fits.PrimaryHDU(fake_img)
    hdu.header['bunit'] = u.adu.to_string()
    filename = tmpdir.join('afile.fits').strpath
    hdu.writeto(filename)
    ccd = CCDData.read(filename)
    # ccd should pick up the unit adu from the fits header...did it?
    assert ccd.unit is u.adu

    # An explicit unit in the read overrides any unit in the FITS file
    ccd2 = CCDData.read(filename, unit="photon")
    assert ccd2.unit is u.photon


def test_initialize_from_fits_with_ADU_in_header(tmpdir):
    fake_img = np.zeros([2, 2])
    hdu = fits.PrimaryHDU(fake_img)
    hdu.header['bunit'] = 'ADU'
    filename = tmpdir.join('afile.fits').strpath
    hdu.writeto(filename)
    ccd = CCDData.read(filename)
    # ccd should pick up the unit adu from the fits header...did it?
    assert ccd.unit is u.adu


def test_initialize_from_fits_with_invalid_unit_in_header(tmpdir):
    hdu = fits.PrimaryHDU(np.ones((2, 2)))
    hdu.header['bunit'] = 'definetely-not-a-unit'
    filename = tmpdir.join('afile.fits').strpath
    hdu.writeto(filename)
    with pytest.raises(ValueError):
        CCDData.read(filename)


def test_initialize_from_fits_with_technically_invalid_but_not_really(tmpdir):
    hdu = fits.PrimaryHDU(np.ones((2, 2)))
    hdu.header['bunit'] = 'ELECTRONS/S'
    filename = tmpdir.join('afile.fits').strpath
    hdu.writeto(filename)
    ccd = CCDData.read(filename)
    assert ccd.unit == u.electron/u.s


def test_initialize_from_fits_with_data_in_different_extension(tmpdir):
    fake_img = np.arange(4).reshape(2, 2)
    hdu1 = fits.PrimaryHDU()
    hdu2 = fits.ImageHDU(fake_img)
    hdus = fits.HDUList([hdu1, hdu2])
    filename = tmpdir.join('afile.fits').strpath
    hdus.writeto(filename)
    ccd = CCDData.read(filename, unit='adu')
    # ccd should pick up the unit adu from the fits header...did it?
    np.testing.assert_array_equal(ccd.data, fake_img)
    # check that the header is the combined header
    assert hdu2.header + hdu1.header == ccd.header


def test_initialize_from_fits_with_extension(tmpdir):
    fake_img1 = np.zeros([2, 2])
    fake_img2 = np.arange(4).reshape(2, 2)
    hdu0 = fits.PrimaryHDU()
    hdu1 = fits.ImageHDU(fake_img1)
    hdu2 = fits.ImageHDU(fake_img2)
    hdus = fits.HDUList([hdu0, hdu1, hdu2])
    filename = tmpdir.join('afile.fits').strpath
    hdus.writeto(filename)
    ccd = CCDData.read(filename, hdu=2, unit='adu')
    # ccd should pick up the unit adu from the fits header...did it?
    np.testing.assert_array_equal(ccd.data, fake_img2)


def test_write_unit_to_hdu():
    ccd_data = create_ccd_data()
    ccd_unit = ccd_data.unit
    hdulist = ccd_data.to_hdu()
    assert 'bunit' in hdulist[0].header
    assert hdulist[0].header['bunit'] == ccd_unit.to_string()


def test_initialize_from_FITS_bad_keyword_raises_error(tmpdir):
    # There are two fits.open keywords that are not permitted in ccdproc:
    #     do_not_scale_image_data and scale_back
    ccd_data = create_ccd_data()
    filename = tmpdir.join('test.fits').strpath
    ccd_data.write(filename)

    with pytest.raises(TypeError):
        CCDData.read(filename, unit=ccd_data.unit,
                     do_not_scale_image_data=True)
    with pytest.raises(TypeError):
        CCDData.read(filename, unit=ccd_data.unit, scale_back=True)


def test_ccddata_writer(tmpdir):
    ccd_data = create_ccd_data()
    filename = tmpdir.join('test.fits').strpath
    ccd_data.write(filename)

    ccd_disk = CCDData.read(filename, unit=ccd_data.unit)
    np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)


def test_ccddata_meta_is_case_sensitive():
    ccd_data = create_ccd_data()
    key = 'SoMeKEY'
    ccd_data.meta[key] = 10
    assert key.lower() not in ccd_data.meta
    assert key.upper() not in ccd_data.meta
    assert key in ccd_data.meta


def test_ccddata_meta_is_not_fits_header():
    ccd_data = create_ccd_data()
    ccd_data.meta = {'OBSERVER': 'Edwin Hubble'}
    assert not isinstance(ccd_data.meta, fits.Header)


def test_fromMEF(tmpdir):
    ccd_data = create_ccd_data()
    hdu = fits.PrimaryHDU(ccd_data)
    hdu2 = fits.PrimaryHDU(2 * ccd_data.data)
    hdulist = fits.HDUList(hdu)
    hdulist.append(hdu2)
    filename = tmpdir.join('afile.fits').strpath
    hdulist.writeto(filename)
    # by default, we reading from the first extension
    cd = CCDData.read(filename, unit=u.electron)
    np.testing.assert_array_equal(cd.data, ccd_data.data)
    # but reading from the second should work too
    cd = CCDData.read(filename, hdu=1, unit=u.electron)
    np.testing.assert_array_equal(cd.data, 2 * ccd_data.data)


def test_metafromheader():
    hdr = fits.header.Header()
    hdr['observer'] = 'Edwin Hubble'
    hdr['exptime'] = '3600'

    d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron)
    assert d1.meta['OBSERVER'] == 'Edwin Hubble'
    assert d1.header['OBSERVER'] == 'Edwin Hubble'


def test_metafromdict():
    dic = {'OBSERVER': 'Edwin Hubble', 'EXPTIME': 3600}
    d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron)
    assert d1.meta['OBSERVER'] == 'Edwin Hubble'


def test_header2meta():
    hdr = fits.header.Header()
    hdr['observer'] = 'Edwin Hubble'
    hdr['exptime'] = '3600'

    d1 = CCDData(np.ones((5, 5)), unit=u.electron)
    d1.header = hdr
    assert d1.meta['OBSERVER'] == 'Edwin Hubble'
    assert d1.header['OBSERVER'] == 'Edwin Hubble'


def test_metafromstring_fail():
    hdr = 'this is not a valid header'
    with pytest.raises(TypeError):
        CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu)


def test_setting_bad_uncertainty_raises_error():
    ccd_data = create_ccd_data()
    with pytest.raises(TypeError):
        # Uncertainty is supposed to be an instance of NDUncertainty
        ccd_data.uncertainty = 10


def test_setting_uncertainty_with_array():
    ccd_data = create_ccd_data()
    ccd_data.uncertainty = None
    fake_uncertainty = np.sqrt(np.abs(ccd_data.data))
    ccd_data.uncertainty = fake_uncertainty.copy()
    np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty)


def test_setting_uncertainty_wrong_shape_raises_error():
    ccd_data = create_ccd_data()
    with pytest.raises(ValueError):
        ccd_data.uncertainty = np.zeros([3, 4])


def test_to_hdu():
    ccd_data = create_ccd_data()
    ccd_data.meta = {'observer': 'Edwin Hubble'}
    fits_hdulist = ccd_data.to_hdu()
    assert isinstance(fits_hdulist, fits.HDUList)
    for k, v in ccd_data.meta.items():
        assert fits_hdulist[0].header[k] == v
    np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data)


def test_copy():
    ccd_data = create_ccd_data()
    ccd_copy = ccd_data.copy()
    np.testing.assert_array_equal(ccd_copy.data, ccd_data.data)
    assert ccd_copy.unit == ccd_data.unit
    assert ccd_copy.meta == ccd_data.meta


@pytest.mark.parametrize('operation,affects_uncertainty', [
    ("multiply", True),
    ("divide", True),
])
@pytest.mark.parametrize('operand', [
    2.0,
    2 * u.dimensionless_unscaled,
    2 * u.photon / u.adu,
])
@pytest.mark.parametrize('with_uncertainty', [
    True,
    False])
def test_mult_div_overload(operand, with_uncertainty, operation,
                           affects_uncertainty):
    ccd_data = create_ccd_data()
    if with_uncertainty:
        ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
    method = getattr(ccd_data, operation)
    np_method = getattr(np, operation)
    result = method(operand)
    assert result is not ccd_data
    assert isinstance(result, CCDData)
    assert (result.uncertainty is None or
            isinstance(result.uncertainty, StdDevUncertainty))
    try:
        op_value = operand.value
    except AttributeError:
        op_value = operand
    np.testing.assert_array_equal(result.data,
                                  np_method(ccd_data.data, op_value))
    if with_uncertainty:
        if affects_uncertainty:
            np.testing.assert_array_equal(result.uncertainty.array,
                                          np_method(ccd_data.uncertainty.array,
                                                    op_value))
        else:
            np.testing.assert_array_equal(result.uncertainty.array,
                                          ccd_data.uncertainty.array)
    else:
        assert result.uncertainty is None
    if isinstance(operand, u.Quantity):
        # Need the "1 *" below to force arguments to be Quantity to work around
        # astropy/astropy#2377
        expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit
        assert result.unit == expected_unit
    else:
        assert result.unit == ccd_data.unit


@pytest.mark.parametrize('operation,affects_uncertainty', [
    ("add", False),
    ("subtract", False),
])
@pytest.mark.parametrize('operand,expect_failure', [
    (2.0, u.UnitsError),  # fail--units don't match image
    (2 * u.dimensionless_unscaled, u.UnitsError),  # same
    (2 * u.adu, False),
])
@pytest.mark.parametrize('with_uncertainty', [
    True,
    False])
def test_add_sub_overload(operand, expect_failure, with_uncertainty,
                          operation, affects_uncertainty):
    ccd_data = create_ccd_data()
    if with_uncertainty:
        ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
    method = getattr(ccd_data, operation)
    np_method = getattr(np, operation)
    if expect_failure:
        with pytest.raises(expect_failure):
            result = method(operand)
        return
    else:
        result = method(operand)
    assert result is not ccd_data
    assert isinstance(result, CCDData)
    assert (result.uncertainty is None or
            isinstance(result.uncertainty, StdDevUncertainty))
    try:
        op_value = operand.value
    except AttributeError:
        op_value = operand
    np.testing.assert_array_equal(result.data,
                                  np_method(ccd_data.data, op_value))
    if with_uncertainty:
        if affects_uncertainty:
            np.testing.assert_array_equal(result.uncertainty.array,
                                          np_method(ccd_data.uncertainty.array,
                                                    op_value))
        else:
            np.testing.assert_array_equal(result.uncertainty.array,
                                          ccd_data.uncertainty.array)
    else:
        assert result.uncertainty is None
    if isinstance(operand, u.Quantity):
        assert (result.unit == ccd_data.unit and result.unit == operand.unit)
    else:
        assert result.unit == ccd_data.unit


def test_arithmetic_overload_fails():
    ccd_data = create_ccd_data()
    with pytest.raises(TypeError):
        ccd_data.multiply("five")

    with pytest.raises(TypeError):
        ccd_data.divide("five")

    with pytest.raises(TypeError):
        ccd_data.add("five")

    with pytest.raises(TypeError):
        ccd_data.subtract("five")


def test_arithmetic_no_wcs_compare():
    ccd = CCDData(np.ones((10, 10)), unit='')
    assert ccd.add(ccd, compare_wcs=None).wcs is None
    assert ccd.subtract(ccd, compare_wcs=None).wcs is None
    assert ccd.multiply(ccd, compare_wcs=None).wcs is None
    assert ccd.divide(ccd, compare_wcs=None).wcs is None


def test_arithmetic_with_wcs_compare():
    def return_true(_, __):
        return True

    wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=2)
    ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=wcs1)
    ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=wcs2)
    nd_testing.assert_wcs_seem_equal(
        ccd1.add(ccd2, compare_wcs=return_true).wcs,
        wcs1)
    nd_testing.assert_wcs_seem_equal(
        ccd1.subtract(ccd2, compare_wcs=return_true).wcs,
        wcs1)
    nd_testing.assert_wcs_seem_equal(
        ccd1.multiply(ccd2, compare_wcs=return_true).wcs,
        wcs1)
    nd_testing.assert_wcs_seem_equal(
        ccd1.divide(ccd2, compare_wcs=return_true).wcs,
        wcs1)
def test_arithmetic_with_wcs_compare_fail(): def return_false(_, __): return False ccd1 = CCDData(np.ones((10, 10)), unit='', wcs=WCS()) ccd2 = CCDData(np.ones((10, 10)), unit='', wcs=WCS()) with pytest.raises(ValueError): ccd1.add(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.subtract(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.multiply(ccd2, compare_wcs=return_false) with pytest.raises(ValueError): ccd1.divide(ccd2, compare_wcs=return_false) def test_arithmetic_overload_ccddata_operand(): ccd_data = create_ccd_data() ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data)) operand = ccd_data.copy() result = ccd_data.add(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 2 * ccd_data.data) np.testing.assert_array_almost_equal_nulp( result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array ) result = ccd_data.subtract(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, 0 * ccd_data.data) np.testing.assert_array_almost_equal_nulp( result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array ) result = ccd_data.multiply(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, ccd_data.data ** 2) expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) result = ccd_data.divide(operand) assert len(result.meta) == 0 np.testing.assert_array_equal(result.data, np.ones_like(ccd_data.data)) expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) * ccd_data.uncertainty.array) np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty) def test_arithmetic_overload_differing_units(): a = np.array([1, 2, 3]) * u.m b = np.array([1, 2, 3]) * u.cm ccddata = CCDData(a) # TODO: Could also be parametrized. 
res = ccddata.add(b) np.testing.assert_array_almost_equal(res.data, np.add(a, b).value) assert res.unit == np.add(a, b).unit res = ccddata.subtract(b) np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value) assert res.unit == np.subtract(a, b).unit res = ccddata.multiply(b) np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value) assert res.unit == np.multiply(a, b).unit res = ccddata.divide(b) np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value) assert res.unit == np.divide(a, b).unit def test_arithmetic_add_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.add(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.add(np.arange(3)) def test_arithmetic_subtract_with_array(): ccd = CCDData(np.ones((3, 3)), unit='') res = ccd.subtract(np.arange(3)) np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3) ccd = CCDData(np.ones((3, 3)), unit='adu') with pytest.raises(ValueError): ccd.subtract(np.arange(3)) def test_arithmetic_multiply_with_array(): ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m) res = ccd.multiply(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3) assert res.unit == ccd.unit def test_arithmetic_divide_with_array(): ccd = CCDData(np.ones((3, 3)), unit=u.m) res = ccd.divide(np.ones((3, 3)) * 2) np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3) assert res.unit == ccd.unit def test_history_preserved_if_metadata_is_fits_header(tmpdir): fake_img = np.zeros([2, 2]) hdu = fits.PrimaryHDU(fake_img) hdu.header['history'] = 'one' hdu.header['history'] = 'two' hdu.header['history'] = 'three' assert len(hdu.header['history']) == 3 tmp_file = tmpdir.join('temp.fits').strpath hdu.writeto(tmp_file) ccd_read = CCDData.read(tmp_file, unit="adu") assert ccd_read.header['history'] == hdu.header['history'] def test_infol_logged_if_unit_in_fits_header(tmpdir): ccd_data = 
create_ccd_data() tmpfile = tmpdir.join('temp.fits') ccd_data.write(tmpfile.strpath) log.setLevel('INFO') explicit_unit_name = "photon" with log.log_to_list() as log_list: _ = CCDData.read(tmpfile.strpath, unit=explicit_unit_name) assert explicit_unit_name in log_list[0].message def test_wcs_attribute(tmpdir): """ Check that WCS attribute gets added to header, and that if a CCDData object is created from a FITS file with a header, and the WCS attribute is modified, then the CCDData object is turned back into an hdu, the WCS object overwrites the old WCS information in the header. """ ccd_data = create_ccd_data() tmpfile = tmpdir.join('temp.fits') # This wcs example is taken from the astropy.wcs docs. wcs = WCS(naxis=2) wcs.wcs.crpix = np.array(ccd_data.shape) / 2 wcs.wcs.cdelt = np.array([-0.066667, 0.066667]) wcs.wcs.crval = [0, -90] wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"] wcs.wcs.set_pv([(2, 1, 45.0)]) ccd_data.header = ccd_data.to_hdu()[0].header ccd_data.header.extend(wcs.to_header(), useblanks=False) ccd_data.write(tmpfile.strpath) # Get the header length after it has been extended by the WCS keywords original_header_length = len(ccd_data.header) ccd_new = CCDData.read(tmpfile.strpath) # WCS attribute should be set for ccd_new assert ccd_new.wcs is not None # WCS attribute should be equal to wcs above. assert ccd_new.wcs.wcs == wcs.wcs # Converting CCDData object with wcs to an hdu shouldn't # create duplicate wcs-related entries in the header. ccd_new_hdu = ccd_new.to_hdu()[0] assert len(ccd_new_hdu.header) == original_header_length # Making a CCDData with WCS (but not WCS in the header) should lead to # WCS information in the header when it is converted to an HDU. ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu") hdu = ccd_wcs_not_in_header.to_hdu()[0] wcs_header = wcs.to_header() for k in wcs_header.keys(): # Skip these keywords if they are in the WCS header because they are # not WCS-specific. 
if k in ['', 'COMMENT', 'HISTORY']: continue # No keyword from the WCS should be in the header. assert k not in ccd_wcs_not_in_header.header # Every keyword in the WCS should be in the header of the HDU assert hdu.header[k] == wcs_header[k] # Now check that if WCS of a CCDData is modified, then the CCDData is # converted to an HDU, the WCS keywords in the header are overwritten # with the appropriate keywords from the header. # # ccd_new has a WCS and WCS keywords in the header, so try modifying # the WCS. ccd_new.wcs.wcs.cdelt *= 2 ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0] assert ccd_new_hdu_mod_wcs.header['CDELT1'] == ccd_new.wcs.wcs.cdelt[0] assert ccd_new_hdu_mod_wcs.header['CDELT2'] == ccd_new.wcs.wcs.cdelt[1] def test_wcs_keywords_removed_from_header(): """ Test, for the file included with the nddata tests, that WCS keywords are properly removed from header. """ from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) data_file = get_pkg_data_filename('data/sip-wcs.fits') ccd = CCDData.read(data_file) with pytest.warns(AstropyWarning, match=r'Some non-standard WCS keywords were excluded'): wcs_header = ccd.wcs.to_header() assert not (set(wcs_header) & set(ccd.meta) - keepers) # Make sure that exceptions are not raised when trying to remove missing # keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'. data_file1 = get_pkg_data_filename('../../io/fits/tests/data/o4sp040b0_raw.fits') with pytest.warns(FITSFixedWarning, match=r"'unitfix' made the change"): ccd = CCDData.read(data_file1, unit='count') def test_wcs_SIP_coefficient_keywords_removed(): # If SIP polynomials are present, check that no more polynomial # coefficients remain in the header. See #8598 # The SIP paper is ambiguous as to whether keywords like # A_0_0 can appear in the header for a 2nd order or higher # polynomial. 
The paper clearly says that the corrections # are only for quadratic or higher order, so A_0_0 and the like # should be zero if they are present, but they apparently can be # there (or at least astrometry.net produces them). # astropy WCS does not write those coefficients, so they were # not being removed from the header even though they are WCS-related. data_file = get_pkg_data_filename('data/sip-wcs.fits') test_keys = ['A_0_0', 'B_0_1'] # Make sure the keywords added to this file for testing are there with fits.open(data_file) as hdu: for key in test_keys: assert key in hdu[0].header ccd = CCDData.read(data_file) # Now the test...the two keywords above should have been removed. for key in test_keys: assert key not in ccd.header @pytest.mark.filterwarnings('ignore') def test_wcs_keyword_removal_for_wcs_test_files(): """ Test, for the WCS test files, that keyword removal works as expected. Those cover a much broader range of WCS types than test_wcs_keywords_removed_from_header. Includes regression test for #8597 """ from astropy.nddata.ccddata import _generate_wcs_and_update_header from astropy.nddata.ccddata import (_KEEP_THESE_KEYWORDS_IN_HEADER, _CDs, _PCs) keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER) wcs_headers = get_pkg_data_filenames('../../wcs/tests/data', pattern='*.hdr') for hdr in wcs_headers: # Skip the files that are expected to be bad... if ('invalid' in hdr or 'nonstandard' in hdr or 'segfault' in hdr or 'chandra-pixlist-wcs' in hdr): continue header_string = get_pkg_data_contents(hdr) header = fits.Header.fromstring(header_string) wcs = WCS(header_string) header_from_wcs = wcs.to_header(relax=True) new_header, new_wcs = _generate_wcs_and_update_header(header) new_wcs_header = new_wcs.to_header(relax=True) # Make sure all of the WCS-related keywords generated by astropy # have been removed. assert not (set(new_header) & set(new_wcs_header) - keepers) # Check that new_header contains no remaining WCS information. # Specifically, check that # 1. 
The combination of new_header and new_wcs does not contain # both PCi_j and CDi_j keywords. See #8597. # Check for 1 final_header = new_header + new_wcs_header final_header_set = set(final_header) if _PCs & final_header_set: assert not (_CDs & final_header_set) elif _CDs & final_header_set: assert not (_PCs & final_header_set) # Check that the new wcs is the same as the old. for k, v in new_wcs_header.items(): if isinstance(v, str): assert header_from_wcs[k] == v else: np.testing.assert_almost_equal(header_from_wcs[k], v) def test_read_wcs_not_creatable(tmpdir): # The following Header can't be converted to a WCS object. See also #6499. hdr_txt_example_WCS = textwrap.dedent(''' SIMPLE = T / Fits standard BITPIX = 16 / Bits per pixel NAXIS = 2 / Number of axes NAXIS1 = 1104 / Axis length NAXIS2 = 4241 / Axis length CRVAL1 = 164.98110962 / Physical value of the reference pixel X CRVAL2 = 44.34089279 / Physical value of the reference pixel Y CRPIX1 = -34.0 / Reference pixel in X (pixel) CRPIX2 = 2041.0 / Reference pixel in Y (pixel) CDELT1 = 0.10380000 / X Scale projected on detector (#/pix) CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix) CTYPE1 = 'RA---TAN' / Pixel coordinate system CTYPE2 = 'WAVELENGTH' / Pixel coordinate system CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1 CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2 CD1_1 = 0.20760000 / Pixel Coordinate translation matrix CD1_2 = 0.00000000 / Pixel Coordinate translation matrix CD2_1 = 0.00000000 / Pixel Coordinate translation matrix CD2_2 = 0.10380000 / Pixel Coordinate translation matrix C2YPE1 = 'RA---TAN' / Pixel coordinate system C2YPE2 = 'DEC--TAN' / Pixel coordinate system C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1 C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2 RADECSYS= 'FK5 ' / The equatorial coordinate system ''') hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep='\n') hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)]) 
filename = tmpdir.join('afile.fits').strpath hdul.writeto(filename) # The hdr cannot be converted to a WCS object because of an # InconsistentAxisTypesError but it should still open the file ccd = CCDData.read(filename, unit='adu') assert ccd.wcs is None def test_header(): ccd_data = create_ccd_data() a = {'Observer': 'Hubble'} ccd = CCDData(ccd_data, header=a) assert ccd.meta == a def test_wcs_arithmetic(): ccd_data = create_ccd_data() wcs = WCS(naxis=2) ccd_data.wcs = wcs result = ccd_data.multiply(1.0) nd_testing.assert_wcs_seem_equal(result.wcs, wcs) @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_wcs_arithmetic_ccd(operation): ccd_data = create_ccd_data() ccd_data2 = ccd_data.copy() ccd_data.wcs = WCS(naxis=2) method = getattr(ccd_data, operation) result = method(ccd_data2) nd_testing.assert_wcs_seem_equal(result.wcs, ccd_data.wcs) assert ccd_data2.wcs is None def test_wcs_sip_handling(): """ Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive a roundtrip unchanged. """ data_file = get_pkg_data_filename('data/sip-wcs.fits') def check_wcs_ctypes(header): expected_wcs_ctypes = { 'CTYPE1': 'RA---TAN-SIP', 'CTYPE2': 'DEC--TAN-SIP' } return [header[k] == v for k, v in expected_wcs_ctypes.items()] ccd_original = CCDData.read(data_file) # After initialization the keywords should be in the WCS, not in the # meta. 
with fits.open(data_file) as raw: good_ctype = check_wcs_ctypes(raw[0].header) assert all(good_ctype) ccd_new = ccd_original.to_hdu() good_ctype = check_wcs_ctypes(ccd_new[0].header) assert all(good_ctype) # Try converting to header with wcs_relax=False and # the header should contain the CTYPE keywords without # the -SIP ccd_no_relax = ccd_original.to_hdu(wcs_relax=False) good_ctype = check_wcs_ctypes(ccd_no_relax[0].header) assert not any(good_ctype) assert ccd_no_relax[0].header['CTYPE1'] == 'RA---TAN' assert ccd_no_relax[0].header['CTYPE2'] == 'DEC--TAN' @pytest.mark.parametrize('operation', ['multiply', 'divide', 'add', 'subtract']) def test_mask_arithmetic_ccd(operation): ccd_data = create_ccd_data() ccd_data2 = ccd_data.copy() ccd_data.mask = (ccd_data.data > 0) method = getattr(ccd_data, operation) result = method(ccd_data2) np.testing.assert_equal(result.mask, ccd_data.mask) def test_write_read_multiextensionfits_mask_default(tmpdir): # Test that if a mask is present the mask is saved and loaded by default. ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) @pytest.mark.parametrize( 'uncertainty_type', [StdDevUncertainty, VarianceUncertainty, InverseVariance]) def test_write_read_multiextensionfits_uncertainty_default( tmpdir, uncertainty_type): # Test that if a uncertainty is present it is saved and loaded by default. 
ccd_data = create_ccd_data() ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is not None assert type(ccd_after.uncertainty) is uncertainty_type np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) @pytest.mark.parametrize( 'uncertainty_type', [StdDevUncertainty, VarianceUncertainty, InverseVariance]) def test_write_read_multiextensionfits_uncertainty_different_uncertainty_key( tmpdir, uncertainty_type): # Test that if a uncertainty is present it is saved and loaded by default. ccd_data = create_ccd_data() ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, key_uncertainty_type='Blah') ccd_after = CCDData.read(filename, key_uncertainty_type='Blah') assert ccd_after.uncertainty is not None assert type(ccd_after.uncertainty) is uncertainty_type np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_write_read_multiextensionfits_not(tmpdir): # Test that writing mask and uncertainty can be disabled ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None) ccd_after = CCDData.read(filename) assert ccd_after.uncertainty is None assert ccd_after.mask is None def test_write_read_multiextensionfits_custom_ext_names(tmpdir): # Test writing mask, uncertainty in another extension than default ccd_data = create_ccd_data() ccd_data.mask = ccd_data.data > 10 ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10) filename = tmpdir.join('afile.fits').strpath ccd_data.write(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') # Try reading with defaults extension names ccd_after = CCDData.read(filename) assert 
ccd_after.uncertainty is None assert ccd_after.mask is None # Try reading with custom extension names ccd_after = CCDData.read(filename, hdu_mask='Fun', hdu_uncertainty='NoFun') assert ccd_after.uncertainty is not None assert ccd_after.mask is not None np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask) np.testing.assert_array_equal(ccd_data.uncertainty.array, ccd_after.uncertainty.array) def test_read_old_style_multiextensionfits(tmpdir): # Regression test for https://github.com/astropy/ccdproc/issues/664 # # Prior to astropy 3.1 there was no uncertainty type saved # in the multiextension fits files generated by CCDData # because the uncertainty had to be StandardDevUncertainty. # # Current version should be able to read those in. # size = 4 # Value of the variables below are not important to the test. data = np.zeros([size, size]) mask = data > 0.9 uncert = np.sqrt(data) ccd = CCDData(data=data, mask=mask, uncertainty=uncert, unit='adu') # We'll create the file manually to ensure we have the # right extension names and no uncertainty type. hdulist = ccd.to_hdu() del hdulist[2].header['UTYPE'] file_name = tmpdir.join('old_ccddata_mef.fits').strpath hdulist.writeto(file_name) ccd = CCDData.read(file_name) assert isinstance(ccd.uncertainty, StdDevUncertainty) def test_wcs(): ccd_data = create_ccd_data() wcs = WCS(naxis=2) ccd_data.wcs = wcs assert ccd_data.wcs is wcs def test_recognized_fits_formats_for_read_write(tmpdir): # These are the extensions that are supposed to be supported. 
ccd_data = create_ccd_data() supported_extensions = ['fit', 'fits', 'fts'] for ext in supported_extensions: path = tmpdir.join(f"test.{ext}") ccd_data.write(path.strpath) from_disk = CCDData.read(path.strpath) assert (ccd_data.data == from_disk.data).all() def test_stddevuncertainty_compat_descriptor_no_parent(): with pytest.raises(MissingDataAssociationException): StdDevUncertainty(np.ones((10, 10))).parent_nddata def test_stddevuncertainty_compat_descriptor_no_weakref(): # TODO: Remove this test if astropy 1.0 isn't supported anymore # This test might create a Memoryleak on purpose, so the last lines after # the assert are IMPORTANT cleanup. ccd = CCDData(np.ones((10, 10)), unit='') uncert = StdDevUncertainty(np.ones((10, 10))) uncert._parent_nddata = ccd assert uncert.parent_nddata is ccd uncert._parent_nddata = None # https://github.com/astropy/astropy/issues/7595 def test_read_returns_image(tmpdir): # Test if CCData.read returns a image when reading a fits file containing # a table and image, in that order. tbl = Table(np.ones(10).reshape(5, 2)) img = np.ones((5, 5)) hdul = fits.HDUList(hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()), fits.ImageHDU(img)]) filename = tmpdir.join('table_image.fits').strpath hdul.writeto(filename) ccd = CCDData.read(filename, unit='adu') # Expecting to get (5, 5), the size of the image assert ccd.data.shape == (5, 5) # https://github.com/astropy/astropy/issues/9664 def test_sliced_ccdata_to_hdu(): wcs = WCS(naxis=2) wcs.wcs.crpix = 10, 10 ccd = CCDData(np.ones((10, 10)), wcs=wcs, unit='pixel') trimmed = ccd[2:-2, 2:-2] hdul = trimmed.to_hdu() assert isinstance(hdul, fits.HDUList) assert hdul[0].header['CRPIX1'] == 8 assert hdul[0].header['CRPIX2'] == 8
manisandro/QGIS
refs/heads/master
python/plugins/processing/algs/gdal/PointsAlongLines.py
5
# -*- coding: utf-8 -*- """ *************************************************************************** PointsAlongLines.py --------------------- Date : Janaury 2015 Copyright : (C) 2015 by Giovanni Manghi Email : giovanni dot manghi at naturalgis dot pt *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Giovanni Manghi' __date__ = 'January 2015' __copyright__ = '(C) 2015, Giovanni Manghi' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' from qgis.core import (QgsProcessingException, QgsProcessingParameterFeatureSource, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterVectorDestination, QgsProcessingParameterDefinition, QgsProcessing) from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm from processing.algs.gdal.GdalUtils import GdalUtils from processing.tools.system import isWindows class PointsAlongLines(GdalAlgorithm): INPUT = 'INPUT' GEOMETRY = 'GEOMETRY' DISTANCE = 'DISTANCE' OPTIONS = 'OPTIONS' OUTPUT = 'OUTPUT' def __init__(self): super().__init__() def initAlgorithm(self, config=None): self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT, self.tr('Input layer'), [QgsProcessing.TypeVectorLine])) self.addParameter(QgsProcessingParameterString(self.GEOMETRY, self.tr('Geometry column name'), defaultValue='geometry')) self.addParameter(QgsProcessingParameterNumber(self.DISTANCE, self.tr('Distance from line start represented as fraction of line length'), type=QgsProcessingParameterNumber.Double, minValue=0, maxValue=1, defaultValue=0.5)) options_param = 
QgsProcessingParameterString(self.OPTIONS, self.tr('Additional creation options'), defaultValue='', optional=True) options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced) self.addParameter(options_param) self.addParameter(QgsProcessingParameterVectorDestination(self.OUTPUT, self.tr('Points along lines'), QgsProcessing.TypeVectorPoint)) def name(self): return 'pointsalonglines' def displayName(self): return self.tr('Points along lines') def group(self): return self.tr('Vector geoprocessing') def groupId(self): return 'vectorgeoprocessing' def commandName(self): return 'ogr2ogr' def getConsoleCommands(self, parameters, context, feedback, executing=True): source = self.parameterAsSource(parameters, self.INPUT, context) if source is None: raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT)) fields = source.fields() ogrLayer, layerName = self.getOgrCompatibleSource(self.INPUT, parameters, context, feedback, executing) distance = self.parameterAsDouble(parameters, self.DISTANCE, context) geometry = self.parameterAsString(parameters, self.GEOMETRY, context) outFile = self.parameterAsOutputLayer(parameters, self.OUTPUT, context) self.setOutputValue(self.OUTPUT, outFile) options = self.parameterAsString(parameters, self.OPTIONS, context) output, outputFormat = GdalUtils.ogrConnectionStringAndFormat(outFile, context) other_fields = [] for f in fields: if f.name() == geometry: continue other_fields.append(f.name()) if other_fields: other_fields = ',*' else: other_fields = '' arguments = [] arguments.append(output) arguments.append(ogrLayer) arguments.append('-dialect') arguments.append('sqlite') arguments.append('-sql') sql = "SELECT ST_Line_Interpolate_Point({}, {}) AS {}{} FROM '{}'".format(geometry, distance, geometry, other_fields, layerName) arguments.append(sql) if options: arguments.append(options) if outputFormat: arguments.append('-f {}'.format(outputFormat)) return ['ogr2ogr', 
GdalUtils.escapeAndJoin(arguments)]
xxxVxxx/troposphere
refs/heads/master
troposphere/elasticache.py
9
# Copyright (c) 2013, Mark Peek <mark@peek.org> # All rights reserved. # # See LICENSE file for full license. from . import AWSObject, Ref, GetAtt from .validators import boolean, integer, network_port class CacheCluster(AWSObject): resource_type = "AWS::ElastiCache::CacheCluster" props = { 'AutoMinorVersionUpgrade': (boolean, False), 'AZMode': (basestring, False), 'CacheNodeType': (basestring, True), 'CacheParameterGroupName': (basestring, False), 'CacheSecurityGroupNames': ([basestring, Ref], False), 'CacheSubnetGroupName': (basestring, False), 'ClusterName': (basestring, False), 'Engine': (basestring, True), 'EngineVersion': (basestring, False), 'NotificationTopicArn': (basestring, False), 'NumCacheNodes': (integer, True), 'Port': (integer, False), 'PreferredAvailabilityZone': (basestring, False), 'PreferredAvailabilityZones': ([basestring], False), 'PreferredMaintenanceWindow': (basestring, False), 'SnapshotArns': ([basestring, Ref], False), 'SnapshotName': (basestring, False), 'SnapshotRetentionLimit': (integer, False), 'SnapshotWindow': (basestring, False), 'VpcSecurityGroupIds': ([basestring, Ref, GetAtt], False), } def validate(self): # Check that AZMode is "cross-az" if more than one Availability zone # is specified in PreferredAvailabilityZones preferred_azs = self.properties.get('PreferredAvailabilityZones') if preferred_azs is not None and \ isinstance(preferred_azs, list) and \ len(preferred_azs) > 1: if self.properties.get('AZMode') != 'cross-az': raise ValueError('AZMode must be "cross-az" if more than one a' 'vailability zone is specified in PreferredAv' 'ailabilityZones: http://docs.aws.amazon.com/' 'AWSCloudFormation/latest/UserGuide/aws-prope' 'rties-elasticache-cache-cluster.html#cfn-ela' 'sticache-cachecluster-azmode') return True class ParameterGroup(AWSObject): resource_type = "AWS::ElastiCache::ParameterGroup" props = { 'CacheParameterGroupFamily': (basestring, True), 'Description': (basestring, True), 'Properties': (dict, True), } class 
SecurityGroup(AWSObject): resource_type = "AWS::ElastiCache::SecurityGroup" props = { 'Description': (basestring, False), } class SecurityGroupIngress(AWSObject): resource_type = "AWS::ElastiCache::SecurityGroupIngress" props = { 'CacheSecurityGroupName': (basestring, True), 'EC2SecurityGroupName': (basestring, True), 'EC2SecurityGroupOwnerId': (basestring, False), } class SubnetGroup(AWSObject): resource_type = "AWS::ElastiCache::SubnetGroup" props = { 'Description': (basestring, True), 'SubnetIds': (list, True), } class ReplicationGroup(AWSObject): resource_type = "AWS::ElastiCache::ReplicationGroup" props = { 'AutomaticFailoverEnabled': (boolean, False), 'AutoMinorVersionUpgrade': (boolean, False), 'CacheNodeType': (basestring, True), 'CacheParameterGroupName': (basestring, False), 'CacheSecurityGroupNames': ([basestring], False), 'CacheSubnetGroupName': (basestring, False), 'Engine': (basestring, True), 'EngineVersion': (basestring, False), 'NotificationTopicArn': ([basestring, Ref], False), 'NumCacheClusters': (integer, True), 'Port': (network_port, False), 'PreferredCacheClusterAZs': ([basestring], False), 'PreferredMaintenanceWindow': (basestring, False), 'ReplicationGroupDescription': (basestring, True), 'SecurityGroupIds': ([basestring, Ref], False), 'SnapshotArns': ([basestring], False), 'SnapshotRetentionLimit': (integer, False), 'SnapshotWindow': (basestring, False), }
lmazuel/azure-sdk-for-python
refs/heads/master
azure-servicefabric/azure/servicefabric/models/monitoring_policy_description.py
1
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class MonitoringPolicyDescription(Model): """Describes the parameters for monitoring an upgrade in Monitored mode. :param failure_action: The compensating action to perform when a Monitored upgrade encounters monitoring policy or health policy violations. Possible values include: 'Invalid', 'Rollback', 'Manual' :type failure_action: str or ~azure.servicefabric.models.FailureAction :param health_check_wait_duration_in_milliseconds: The amount of time to wait after completing an upgrade domain before applying health policies. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. :type health_check_wait_duration_in_milliseconds: str :param health_check_stable_duration_in_milliseconds: The amount of time that the application or cluster must remain healthy before the upgrade proceeds to the next upgrade domain. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. :type health_check_stable_duration_in_milliseconds: str :param health_check_retry_timeout_in_milliseconds: The amount of time to retry health evaluation when the application or cluster is unhealthy before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. 
If that fails, then it is interpreted as a number representing the total number of milliseconds. :type health_check_retry_timeout_in_milliseconds: str :param upgrade_timeout_in_milliseconds: The amount of time the overall upgrade has to complete before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. :type upgrade_timeout_in_milliseconds: str :param upgrade_domain_timeout_in_milliseconds: The amount of time each upgrade domain has to complete before FailureAction is executed. It is first interpreted as a string representing an ISO 8601 duration. If that fails, then it is interpreted as a number representing the total number of milliseconds. :type upgrade_domain_timeout_in_milliseconds: str """ _attribute_map = { 'failure_action': {'key': 'FailureAction', 'type': 'str'}, 'health_check_wait_duration_in_milliseconds': {'key': 'HealthCheckWaitDurationInMilliseconds', 'type': 'str'}, 'health_check_stable_duration_in_milliseconds': {'key': 'HealthCheckStableDurationInMilliseconds', 'type': 'str'}, 'health_check_retry_timeout_in_milliseconds': {'key': 'HealthCheckRetryTimeoutInMilliseconds', 'type': 'str'}, 'upgrade_timeout_in_milliseconds': {'key': 'UpgradeTimeoutInMilliseconds', 'type': 'str'}, 'upgrade_domain_timeout_in_milliseconds': {'key': 'UpgradeDomainTimeoutInMilliseconds', 'type': 'str'}, } def __init__(self, failure_action=None, health_check_wait_duration_in_milliseconds=None, health_check_stable_duration_in_milliseconds=None, health_check_retry_timeout_in_milliseconds=None, upgrade_timeout_in_milliseconds=None, upgrade_domain_timeout_in_milliseconds=None): super(MonitoringPolicyDescription, self).__init__() self.failure_action = failure_action self.health_check_wait_duration_in_milliseconds = health_check_wait_duration_in_milliseconds self.health_check_stable_duration_in_milliseconds = 
health_check_stable_duration_in_milliseconds self.health_check_retry_timeout_in_milliseconds = health_check_retry_timeout_in_milliseconds self.upgrade_timeout_in_milliseconds = upgrade_timeout_in_milliseconds self.upgrade_domain_timeout_in_milliseconds = upgrade_domain_timeout_in_milliseconds
vrenaville/ngo-addons-backport
refs/heads/master
addons/note/tests/test_note.py
427
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.tests import common class TestNote(common.TransactionCase): def test_bug_lp_1156215(self): """ensure any users can create new users""" cr, uid = self.cr, self.uid IMD = self.registry('ir.model.data') Users = self.registry('res.users') _, demo_user = IMD.get_object_reference(cr, uid, 'base', 'user_demo') _, group_id = IMD.get_object_reference(cr, uid, 'base', 'group_erp_manager') Users.write(cr, uid, [demo_user], { 'groups_id': [(4, group_id)], }) # must not fail Users.create(cr, demo_user, { 'name': 'test bug lp:1156215', 'login': 'lp_1156215', })
am2012/android_kernel_motorola_msm8610
refs/heads/cm-12.0
scripts/tracing/draw_functrace.py
14679
#!/usr/bin/python """ Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com> Licensed under the terms of the GNU GPL License version 2 This script parses a trace provided by the function tracer in kernel/trace/trace_functions.c The resulted trace is processed into a tree to produce a more human view of the call stack by drawing textual but hierarchical tree of calls. Only the functions's names and the the call time are provided. Usage: Be sure that you have CONFIG_FUNCTION_TRACER # mount -t debugfs nodev /sys/kernel/debug # echo function > /sys/kernel/debug/tracing/current_tracer $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func Wait some times but not too much, the script is a bit slow. Break the pipe (Ctrl + Z) $ scripts/draw_functrace.py < raw_trace_func > draw_functrace Then you have your drawn trace in draw_functrace """ import sys, re class CallTree: """ This class provides a tree representation of the functions call stack. If a function has no parent in the kernel (interrupt, syscall, kernel thread...) then it is attached to a virtual parent called ROOT. """ ROOT = None def __init__(self, func, time = None, parent = None): self._func = func self._time = time if parent is None: self._parent = CallTree.ROOT else: self._parent = parent self._children = [] def calls(self, func, calltime): """ If a function calls another one, call this method to insert it into the tree at the appropriate place. @return: A reference to the newly created child node. """ child = CallTree(func, calltime, self) self._children.append(child) return child def getParent(self, func): """ Retrieve the last parent of the current node that has the name given by func. If this function is not on a parent, then create it as new child of root @return: A reference to the parent. 
""" tree = self while tree != CallTree.ROOT and tree._func != func: tree = tree._parent if tree == CallTree.ROOT: child = CallTree.ROOT.calls(func, None) return child return tree def __repr__(self): return self.__toString("", True) def __toString(self, branch, lastChild): if self._time is not None: s = "%s----%s (%s)\n" % (branch, self._func, self._time) else: s = "%s----%s\n" % (branch, self._func) i = 0 if lastChild: branch = branch[:-1] + " " while i < len(self._children): if i != len(self._children) - 1: s += "%s" % self._children[i].__toString(branch +\ " |", False) else: s += "%s" % self._children[i].__toString(branch +\ " |", True) i += 1 return s class BrokenLineException(Exception): """If the last line is not complete because of the pipe breakage, we want to stop the processing and ignore this line. """ pass class CommentLineException(Exception): """ If the line is a comment (as in the beginning of the trace file), just ignore it. """ pass def parseLine(line): line = line.strip() if line.startswith("#"): raise CommentLineException m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) if m is None: raise BrokenLineException return (m.group(1), m.group(2), m.group(3)) def main(): CallTree.ROOT = CallTree("Root (Nowhere)", None, None) tree = CallTree.ROOT for line in sys.stdin: try: calltime, callee, caller = parseLine(line) except BrokenLineException: break except CommentLineException: continue tree = tree.getParent(caller) tree = tree.calls(callee, calltime) print CallTree.ROOT if __name__ == "__main__": main()
levkar/odoo
refs/heads/10.0
addons/hr_maintenance/models/equipment.py
31
# -*- coding: utf-8 -*- from odoo import api, fields, models, tools class MaintenanceEquipment(models.Model): _inherit = 'maintenance.equipment' employee_id = fields.Many2one('hr.employee', string='Assigned to Employee', track_visibility='onchange') department_id = fields.Many2one('hr.department', string='Assigned to Department', track_visibility='onchange') equipment_assign_to = fields.Selection( [('department', 'Department'), ('employee', 'Employee') ,('other', 'Other')], string='Used By', required=True, default='employee') owner_user_id = fields.Many2one(compute='_compute_owner', store=True) @api.one @api.depends('employee_id', 'department_id', 'equipment_assign_to') def _compute_owner(self): self.owner_user_id = self.env.user.id if self.equipment_assign_to == 'employee': self.owner_user_id = self.employee_id.user_id.id elif self.equipment_assign_to == 'department': self.owner_user_id = self.department_id.manager_id.user_id.id @api.onchange('equipment_assign_to') def _onchange_equipment_assign_to(self): if self.equipment_assign_to == 'employee': self.department_id = False if self.equipment_assign_to == 'department': self.employee_id = False self.assign_date = fields.Date.context_today(self) @api.model def create(self, vals): equipment = super(MaintenanceEquipment, self).create(vals) # subscribe employee or department manager when equipment assign to him. user_ids = [] if equipment.employee_id and equipment.employee_id.user_id: user_ids.append(equipment.employee_id.user_id.id) if equipment.department_id and equipment.department_id.manager_id and equipment.department_id.manager_id.user_id: user_ids.append(equipment.department_id.manager_id.user_id.id) if user_ids: equipment.message_subscribe_users(user_ids=user_ids) return equipment @api.multi def write(self, vals): user_ids = [] # subscribe employee or department manager when equipment assign to employee or department. 
if vals.get('employee_id'): user_id = self.env['hr.employee'].browse(vals['employee_id'])['user_id'] if user_id: user_ids.append(user_id.id) if vals.get('department_id'): department = self.env['hr.department'].browse(vals['department_id']) if department and department.manager_id and department.manager_id.user_id: user_ids.append(department.manager_id.user_id.id) if user_ids: self.message_subscribe_users(user_ids=user_ids) return super(MaintenanceEquipment, self).write(vals) @api.multi def _track_subtype(self, init_values): self.ensure_one() if ('employee_id' in init_values and self.employee_id) or ('department_id' in init_values and self.department_id): return 'maintenance.mt_mat_assign' return super(MaintenanceEquipment, self)._track_subtype(init_values) class MaintenanceRequest(models.Model): _inherit = 'maintenance.request' @api.returns('self') def _default_employee_get(self): return self.env['hr.employee'].search([('user_id', '=', self.env.uid)], limit=1) employee_id = fields.Many2one('hr.employee', string='Employee', default=_default_employee_get) department_id = fields.Many2one('hr.department', string='Department') owner_user_id = fields.Many2one(compute='_compute_owner', store=True) @api.depends('employee_id', 'department_id') def _compute_owner(self): if self.equipment_id.equipment_assign_to == 'employee': self.owner_user_id = self.employee_id.user_id.id elif self.equipment_id.equipment_assign_to == 'department': self.owner_user_id = self.department_id.manager_id.user_id.id @api.onchange('employee_id', 'department_id') def onchange_department_or_employee_id(self): domain = [] if self.department_id: domain = [('department_id', '=', self.department_id.id)] if self.employee_id and self.department_id: domain = ['|'] + domain if self.employee_id: domain = domain + ['|', ('employee_id', '=', self.employee_id.id), ('employee_id', '=', None)] equipment = self.env['maintenance.equipment'].search(domain, limit=2) if len(equipment) == 1: self.equipment_id = equipment 
return {'domain': {'equipment_id': domain}} @api.model def create(self, vals): result = super(MaintenanceRequest, self).create(vals) if result.employee_id.user_id: result.message_subscribe_users(user_ids=[result.employee_id.user_id.id]) return result @api.multi def write(self, vals): if vals.get('employee_id'): employee = self.env['hr.employee'].browse(vals['employee_id']) if employee and employee.user_id: self.message_subscribe_users(user_ids=[employee.user_id.id]) return super(MaintenanceRequest, self).write(vals) @api.model def message_new(self, msg, custom_values=None): """ Overrides mail_thread message_new that is called by the mailgateway through message_process. This override updates the document according to the email. """ if custom_values is None: custom_values = {} email = tools.email_split(msg.get('from')) and tools.email_split(msg.get('from'))[0] or False user = self.env['res.users'].search([('login', '=', email)], limit=1) if user: employee = self.env['hr.employee'].search([('user_id', '=', user.id)], limit=1) if employee: custom_values['employee_id'] = employee and employee[0].id return super(MaintenanceRequest, self).message_new(msg, custom_values=custom_values)
kenygia/xy-vsfilter
refs/heads/master
src/thirdparty/gtest/test/gtest_env_var_test.py
2408
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Verifies that Google Test correctly parses environment variables.""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import gtest_test_utils IS_WINDOWS = os.name == 'nt' IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_') environ = os.environ.copy() def AssertEq(expected, actual): if expected != actual: print 'Expected: %s' % (expected,) print ' Actual: %s' % (actual,) raise AssertionError def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] def GetFlag(flag): """Runs gtest_env_var_test_ and returns its output.""" args = [COMMAND] if flag is not None: args += [flag] return gtest_test_utils.Subprocess(args, env=environ).output def TestFlag(flag, test_val, default_val): """Verifies that the given flag is affected by the corresponding env var.""" env_var = 'GTEST_' + flag.upper() SetEnvVar(env_var, test_val) AssertEq(test_val, GetFlag(flag)) SetEnvVar(env_var, None) AssertEq(default_val, GetFlag(flag)) class GTestEnvVarTest(gtest_test_utils.TestCase): def testEnvVarAffectsFlag(self): """Tests that environment variable should affect the corresponding flag.""" TestFlag('break_on_failure', '1', '0') TestFlag('color', 'yes', 'auto') TestFlag('filter', 'FooTest.Bar', '*') TestFlag('output', 'xml:tmp/foo.xml', '') TestFlag('print_time', '0', '1') TestFlag('repeat', '999', '1') TestFlag('throw_on_failure', '1', '0') TestFlag('death_test_style', 'threadsafe', 'fast') TestFlag('catch_exceptions', '0', '1') if IS_LINUX: TestFlag('death_test_use_fork', '1', '0') TestFlag('stack_trace_depth', '0', '100') if __name__ == '__main__': gtest_test_utils.Main()
40223108/w18
refs/heads/master
static/Brython3.1.1-20150328-091302/Lib/decimal.py
623
# Copyright (c) 2004 Python Software Foundation. # All rights reserved. # Written by Eric Price <eprice at tjhsst.edu> # and Facundo Batista <facundo at taniquetil.com.ar> # and Raymond Hettinger <python at rcn.com> # and Aahz <aahz at pobox.com> # and Tim Peters # This module should be kept in sync with the latest updates of the # IBM specification as it evolves. Those updates will be treated # as bug fixes (deviation from the spec is a compatibility, usability # bug) and will be backported. At this point the spec is stabilizing # and the updates are becoming fewer, smaller, and less significant. """ This is an implementation of decimal floating point arithmetic based on the General Decimal Arithmetic Specification: http://speleotrove.com/decimal/decarith.html and IEEE standard 854-1987: http://en.wikipedia.org/wiki/IEEE_854-1987 Decimal floating point has finite precision with arbitrarily large bounds. The purpose of this module is to support arithmetic using familiar "schoolhouse" rules and to avoid some of the tricky representation issues associated with binary floating point. The package is especially useful for financial applications or for contexts where users have expectations that are at odds with binary floating point (for instance, in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected Decimal('0.00')). 
Here are some examples of using the decimal module: >>> from decimal import * >>> setcontext(ExtendedContext) >>> Decimal(0) Decimal('0') >>> Decimal('1') Decimal('1') >>> Decimal('-.0123') Decimal('-0.0123') >>> Decimal(123456) Decimal('123456') >>> Decimal('123.45e12345678') Decimal('1.2345E+12345680') >>> Decimal('1.33') + Decimal('1.27') Decimal('2.60') >>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41') Decimal('-2.20') >>> dig = Decimal(1) >>> print(dig / Decimal(3)) 0.333333333 >>> getcontext().prec = 18 >>> print(dig / Decimal(3)) 0.333333333333333333 >>> print(dig.sqrt()) 1 >>> print(Decimal(3).sqrt()) 1.73205080756887729 >>> print(Decimal(3) ** 123) 4.85192780976896427E+58 >>> inf = Decimal(1) / Decimal(0) >>> print(inf) Infinity >>> neginf = Decimal(-1) / Decimal(0) >>> print(neginf) -Infinity >>> print(neginf + inf) NaN >>> print(neginf * inf) -Infinity >>> print(dig / 0) Infinity >>> getcontext().traps[DivisionByZero] = 1 >>> print(dig / 0) Traceback (most recent call last): ... ... ... decimal.DivisionByZero: x / 0 >>> c = Context() >>> c.traps[InvalidOperation] = 0 >>> print(c.flags[InvalidOperation]) 0 >>> c.divide(Decimal(0), Decimal(0)) Decimal('NaN') >>> c.traps[InvalidOperation] = 1 >>> print(c.flags[InvalidOperation]) 1 >>> c.flags[InvalidOperation] = 0 >>> print(c.flags[InvalidOperation]) 0 >>> print(c.divide(Decimal(0), Decimal(0))) Traceback (most recent call last): ... ... ... 
decimal.InvalidOperation: 0 / 0 >>> print(c.flags[InvalidOperation]) 1 >>> c.flags[InvalidOperation] = 0 >>> c.traps[InvalidOperation] = 0 >>> print(c.divide(Decimal(0), Decimal(0))) NaN >>> print(c.flags[InvalidOperation]) 1 >>> """ __all__ = [ # Two major classes 'Decimal', 'Context', # Contexts 'DefaultContext', 'BasicContext', 'ExtendedContext', # Exceptions 'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero', 'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow', 'FloatOperation', # Constants for use in setting up contexts 'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING', 'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP', # Functions for manipulating contexts 'setcontext', 'getcontext', 'localcontext', # Limits for the C version for compatibility 'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY', # C version: compile time choice that enables the thread local context 'HAVE_THREADS' ] __version__ = '1.70' # Highest version of the spec this complies with # See http://speleotrove.com/decimal/ import copy as _copy import math as _math import numbers as _numbers import sys try: from collections import namedtuple as _namedtuple DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent') except ImportError: DecimalTuple = lambda *args: args # Rounding ROUND_DOWN = 'ROUND_DOWN' ROUND_HALF_UP = 'ROUND_HALF_UP' ROUND_HALF_EVEN = 'ROUND_HALF_EVEN' ROUND_CEILING = 'ROUND_CEILING' ROUND_FLOOR = 'ROUND_FLOOR' ROUND_UP = 'ROUND_UP' ROUND_HALF_DOWN = 'ROUND_HALF_DOWN' ROUND_05UP = 'ROUND_05UP' # Compatibility with the C version HAVE_THREADS = True if sys.maxsize == 2**63-1: MAX_PREC = 999999999999999999 MAX_EMAX = 999999999999999999 MIN_EMIN = -999999999999999999 else: MAX_PREC = 425000000 MAX_EMAX = 425000000 MIN_EMIN = -425000000 MIN_ETINY = MIN_EMIN - (MAX_PREC-1) # Errors class DecimalException(ArithmeticError): """Base exception class. Used exceptions derive from this. 
If an exception derives from another exception besides this (such as Underflow (Inexact, Rounded, Subnormal) that indicates that it is only called if the others are present. This isn't actually used for anything, though. handle -- Called when context._raise_error is called and the trap_enabler is not set. First argument is self, second is the context. More arguments can be given, those being after the explanation in _raise_error (For example, context._raise_error(NewError, '(-x)!', self._sign) would call NewError().handle(context, self._sign).) To define a new exception, it should be sufficient to have it derive from DecimalException. """ def handle(self, context, *args): pass class Clamped(DecimalException): """Exponent of a 0 changed to fit bounds. This occurs and signals clamped if the exponent of a result has been altered in order to fit the constraints of a specific concrete representation. This may occur when the exponent of a zero result would be outside the bounds of a representation, or when a large normal number would have an encoded exponent that cannot be represented. In this latter case, the exponent is reduced to fit and the corresponding number of zero digits are appended to the coefficient ("fold-down"). """ #brython fixme pass class InvalidOperation(DecimalException): """An invalid operation was performed. Various bad things cause this: Something creates a signaling NaN -INF + INF 0 * (+-)INF (+-)INF / (+-)INF x % 0 (+-)INF % x x._rescale( non-integer ) sqrt(-x) , x > 0 0 ** 0 x ** (non-integer) x ** (+-)INF An operand is invalid The result of the operation after these is a quiet positive NaN, except when the cause is a signaling NaN, in which case the result is also a quiet NaN, but with the original sign, and an optional diagnostic information. 
""" def handle(self, context, *args): if args: ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True) return ans._fix_nan(context) return _NaN class ConversionSyntax(InvalidOperation): """Trying to convert badly formed string. This occurs and signals invalid-operation if an string is being converted to a number and it does not conform to the numeric string syntax. The result is [0,qNaN]. """ def handle(self, context, *args): return _NaN class DivisionByZero(DecimalException, ZeroDivisionError): """Division by 0. This occurs and signals division-by-zero if division of a finite number by zero was attempted (during a divide-integer or divide operation, or a power operation with negative right-hand operand), and the dividend was not zero. The result of the operation is [sign,inf], where sign is the exclusive or of the signs of the operands for divide, or is 1 for an odd power of -0, for power. """ def handle(self, context, sign, *args): return _SignedInfinity[sign] class DivisionImpossible(InvalidOperation): """Cannot perform the division adequately. This occurs and signals invalid-operation if the integer result of a divide-integer or remainder operation had too many digits (would be longer than precision). The result is [0,qNaN]. """ def handle(self, context, *args): return _NaN class DivisionUndefined(InvalidOperation, ZeroDivisionError): """Undefined result of division. This occurs and signals invalid-operation if division by zero was attempted (during a divide-integer, divide, or remainder operation), and the dividend is also zero. The result is [0,qNaN]. """ def handle(self, context, *args): return _NaN class Inexact(DecimalException): """Had to round, losing information. This occurs and signals inexact whenever the result of an operation is not exact (that is, it needed to be rounded and any discarded digits were non-zero), or if an overflow or underflow condition occurs. The result in all cases is unchanged. 
The inexact signal may be tested (or trapped) to determine if a given operation (or sequence of operations) was inexact. """ #brython fix me pass class InvalidContext(InvalidOperation): """Invalid context. Unknown rounding, for example. This occurs and signals invalid-operation if an invalid context was detected during an operation. This can occur if contexts are not checked on creation and either the precision exceeds the capability of the underlying concrete representation or an unknown or unsupported rounding was specified. These aspects of the context need only be checked when the values are required to be used. The result is [0,qNaN]. """ def handle(self, context, *args): return _NaN class Rounded(DecimalException): """Number got rounded (not necessarily changed during rounding). This occurs and signals rounded whenever the result of an operation is rounded (that is, some zero or non-zero digits were discarded from the coefficient), or if an overflow or underflow condition occurs. The result in all cases is unchanged. The rounded signal may be tested (or trapped) to determine if a given operation (or sequence of operations) caused a loss of precision. """ #brython fix me pass class Subnormal(DecimalException): """Exponent < Emin before rounding. This occurs and signals subnormal whenever the result of a conversion or operation is subnormal (that is, its adjusted exponent is less than Emin, before any rounding). The result in all cases is unchanged. The subnormal signal may be tested (or trapped) to determine if a given or operation (or sequence of operations) yielded a subnormal result. """ #brython fix me pass class Overflow(Inexact, Rounded): """Numerical overflow. This occurs and signals overflow if the adjusted exponent of a result (from a conversion or from an operation that is not an attempt to divide by zero), after rounding, would be greater than the largest value that can be handled by the implementation (the value Emax). 
The result depends on the rounding mode: For round-half-up and round-half-even (and for round-half-down and round-up, if implemented), the result of the operation is [sign,inf], where sign is the sign of the intermediate result. For round-down, the result is the largest finite number that can be represented in the current precision, with the sign of the intermediate result. For round-ceiling, the result is the same as for round-down if the sign of the intermediate result is 1, or is [0,inf] otherwise. For round-floor, the result is the same as for round-down if the sign of the intermediate result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded will also be raised. """ def handle(self, context, sign, *args): if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_HALF_DOWN, ROUND_UP): return _SignedInfinity[sign] if sign == 0: if context.rounding == ROUND_CEILING: return _SignedInfinity[sign] return _dec_from_triple(sign, '9'*context.prec, context.Emax-context.prec+1) if sign == 1: if context.rounding == ROUND_FLOOR: return _SignedInfinity[sign] return _dec_from_triple(sign, '9'*context.prec, context.Emax-context.prec+1) class Underflow(Inexact, Rounded, Subnormal): """Numerical underflow with result rounded to 0. This occurs and signals underflow if a result is inexact and the adjusted exponent of the result would be smaller (more negative) than the smallest value that can be handled by the implementation (the value Emin). That is, the result is both inexact and subnormal. The result after an underflow will be a subnormal number rounded, if necessary, so that its exponent is not less than Etiny. This may result in 0 with the sign of the intermediate result and an exponent of Etiny. In all cases, Inexact, Rounded, and Subnormal will also be raised. """ #brython fix me pass class FloatOperation(DecimalException, TypeError): """Enable stricter semantics for mixing floats and Decimals. 
If the signal is not trapped (default), mixing floats and Decimals is permitted in the Decimal() constructor, context.create_decimal() and all comparison operators. Both conversion and comparisons are exact. Any occurrence of a mixed operation is silently recorded by setting FloatOperation in the context flags. Explicit conversions with Decimal.from_float() or context.create_decimal_from_float() do not set the flag. Otherwise (the signal is trapped), only equality comparisons and explicit conversions are silent. All other mixed operations raise FloatOperation. """ #brython fix me pass # List of public traps and flags _signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded, Underflow, InvalidOperation, Subnormal, FloatOperation] # Map conditions (per the spec) to signals _condition_map = {ConversionSyntax:InvalidOperation, DivisionImpossible:InvalidOperation, DivisionUndefined:InvalidOperation, InvalidContext:InvalidOperation} # Valid rounding modes _rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING, ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP) ##### Context Functions ################################################## # The getcontext() and setcontext() function manage access to a thread-local # current context. Py2.4 offers direct support for thread locals. If that # is not available, use threading.current_thread() which is slower but will # work for older Pythons. If threads are not part of the build, create a # mock threading object with threading.local() returning the module namespace. try: import threading except ImportError: # Python was compiled without threads; create a mock object instead class MockThreading(object): def local(self, sys=sys): return sys.modules[__name__] threading = MockThreading() del MockThreading try: threading.local except AttributeError: # To fix reloading, force it to create a new context # Old contexts have different exceptions in their dicts, making problems. 
if hasattr(threading.current_thread(), '__decimal_context__'): del threading.current_thread().__decimal_context__ def setcontext(context): """Set this thread's context to context.""" if context in (DefaultContext, BasicContext, ExtendedContext): context = context.copy() context.clear_flags() threading.current_thread().__decimal_context__ = context def getcontext(): """Returns this thread's context. If this thread does not yet have a context, returns a new context and sets this thread's context. New contexts are copies of DefaultContext. """ try: return threading.current_thread().__decimal_context__ except AttributeError: context = Context() threading.current_thread().__decimal_context__ = context return context else: local = threading.local() if hasattr(local, '__decimal_context__'): del local.__decimal_context__ def getcontext(_local=local): """Returns this thread's context. If this thread does not yet have a context, returns a new context and sets this thread's context. New contexts are copies of DefaultContext. 
""" try: return _local.__decimal_context__ except AttributeError: context = Context() _local.__decimal_context__ = context return context def setcontext(context, _local=local): """Set this thread's context to context.""" if context in (DefaultContext, BasicContext, ExtendedContext): context = context.copy() context.clear_flags() _local.__decimal_context__ = context del threading, local # Don't contaminate the namespace def localcontext(ctx=None): """Return a context manager for a copy of the supplied context Uses a copy of the current context if no context is specified The returned context manager creates a local decimal context in a with statement: def sin(x): with localcontext() as ctx: ctx.prec += 2 # Rest of sin calculation algorithm # uses a precision 2 greater than normal return +s # Convert result to normal precision def sin(x): with localcontext(ExtendedContext): # Rest of sin calculation algorithm # uses the Extended Context from the # General Decimal Arithmetic Specification return +s # Convert result to normal context >>> setcontext(DefaultContext) >>> print(getcontext().prec) 28 >>> with localcontext(): ... ctx = getcontext() ... ctx.prec += 2 ... print(ctx.prec) ... 30 >>> with localcontext(ExtendedContext): ... print(getcontext().prec) ... 9 >>> print(getcontext().prec) 28 """ if ctx is None: ctx = getcontext() return _ContextManager(ctx) ##### Decimal class ####################################################### # Do not subclass Decimal from numbers.Real and do not register it as such # (because Decimals are not interoperable with floats). See the notes in # numbers.py for more detail. 
class Decimal(object):
    """Floating point class for decimal arithmetic."""

    __slots__ = ('_exp','_int','_sign', '_is_special')
    # Generally, the value of the Decimal instance is given by
    #  (-1)**_sign * _int * 10**_exp
    # Special values are signified by _is_special == True

    # We're immutable, so use __new__ not __init__
    def __new__(cls, value="0", context=None):
        """Create a decimal point instance.

        >>> Decimal('3.14')              # string input
        Decimal('3.14')
        >>> Decimal((0, (3, 1, 4), -2))  # tuple (sign, digit_tuple, exponent)
        Decimal('3.14')
        >>> Decimal(314)                 # int
        Decimal('314')
        >>> Decimal(Decimal(314))        # another decimal instance
        Decimal('314')
        >>> Decimal('  3.14  \\n')        # leading and trailing whitespace okay
        Decimal('3.14')
        """

        # Note that the coefficient, self._int, is actually stored as
        # a string rather than as a tuple of digits.  This speeds up
        # the "digits to integer" and "integer to digits" conversions
        # that are used in almost every arithmetic operation on
        # Decimals.  This is an internal detail: the as_tuple function
        # and the Decimal constructor still deal with tuples of
        # digits.

        self = object.__new__(cls)

        # From a string
        # REs insist on real strings, so we can too.
        if isinstance(value, str):
            # Sign is stripped here; the remainder is matched case-insensitively
            # because of the .lower() call.
            value = value.strip().lower()
            if value.startswith("-"):
                self._sign = 1
                value = value[1:]
            else:
                self._sign = 0
            if value in ('', 'nan'):
                # Empty string and 'nan' both become a quiet NaN with an
                # empty payload.
                self._is_special = True
                self._int = ''
                #if m.group('signal'):
                #figure out what a signaling NaN is later
                #    self._exp = 'N'
                #else:
                #    self._exp = 'n'
                self._exp = 'n'
                return self
            if value in ('inf', 'infinity'):
                self._int = '0'
                self._exp = 'F'
                self._is_special = True
                return self
            # NOTE(review): this port replaces the standard _parser regex with
            # a simplified pattern via the JS regex bridge.  The pattern only
            # accepts an optional 'e' followed by an optional '+' and digits,
            # so negative exponents such as '1e-3' do not match and fall into
            # the quiet-NaN branch below — confirm this is intended.
            import _jsre as re
            _m = re.match("^\d*\.?\d*(e\+?\d*)?$", value)
            if not _m:
                self._is_special = True
                self._int = ''
                self._exp = 'n'
                return self
            if '.' in value:
                intpart, fracpart = value.split('.')
                if 'e' in fracpart:
                    fracpart, exp = fracpart.split('e')
                    exp = int(exp)
                else:
                    exp = 0
                #self._int = str(int(intpart+fracpart))
                # Coefficient keeps any leading zeros (unlike CPython, which
                # normalizes through int()).
                self._int = intpart+fracpart
                self._exp = exp - len(fracpart)
                self._is_special = False
                return self
            else:
                #is this a pure int?
                self._is_special = False
                if 'e' in value:
                    self._int, _exp = value.split('e')
                    self._exp = int(_exp)
                    #print(self._int, self._exp)
                else:
                    self._int = value
                    self._exp = 0
                return self
            # NOTE(review): everything from here to the end of the string
            # branch is unreachable — both arms of the if/else above return.
            # It is the original error path / parser code kept for reference.
            #m = _parser(value.strip())
            #if m is None:
            if context is None:
                context = getcontext()
            return context._raise_error(ConversionSyntax,
                                        "Invalid literal for Decimal: %r" % value)
            #if m.group('sign') == "-":
            #    self._sign = 1
            #else:
            #    self._sign = 0
            #intpart = m.group('int')
            #if intpart is not None:
            #    # finite number
            #    fracpart = m.group('frac') or ''
            #    exp = int(m.group('exp') or '0')
            #    self._int = str(int(intpart+fracpart))
            #    self._exp = exp - len(fracpart)
            #    self._is_special = False
            #else:
            #    diag = m.group('diag')
            #    if diag is not None:
            #        # NaN
            #        self._int = str(int(diag or '0')).lstrip('0')
            #        if m.group('signal'):
            #            self._exp = 'N'
            #        else:
            #            self._exp = 'n'
            #    else:
            #        # infinity
            #        self._int = '0'
            #        self._exp = 'F'
            #    self._is_special = True
            #return self

        # From an integer
        if isinstance(value, int):
            if value >= 0:
                self._sign = 0
            else:
                self._sign = 1
            self._exp = 0
            self._int = str(abs(value))
            self._is_special = False
            return self

        # From another decimal
        if isinstance(value, Decimal):
            self._exp = value._exp
            self._sign = value._sign
            self._int = value._int
            self._is_special = value._is_special
            return self

        # From an internal working value
        if isinstance(value, _WorkRep):
            self._sign = value.sign
            self._int = str(value.int)
            self._exp = int(value.exp)
            self._is_special = False
            return self

        # tuple/list conversion (possibly from as_tuple())
        if isinstance(value, (list,tuple)):
            if len(value) != 3:
                raise ValueError('Invalid tuple size in creation of Decimal '
                                 'from list or tuple.  The list or tuple '
                                 'should have exactly three elements.')
            # process sign.  The isinstance test rejects floats
            if not (isinstance(value[0], int) and value[0] in (0,1)):
                raise ValueError("Invalid sign.  The first value in the tuple "
                                 "should be an integer; either 0 for a "
                                 "positive number or 1 for a negative number.")
            self._sign = value[0]
            if value[2] == 'F':
                # infinity: value[1] is ignored
                self._int = '0'
                self._exp = value[2]
                self._is_special = True
            else:
                # process and validate the digits in value[1]
                digits = []
                for digit in value[1]:
                    if isinstance(digit, int) and 0 <= digit <= 9:
                        # skip leading zeros
                        if digits or digit != 0:
                            digits.append(digit)
                    else:
                        raise ValueError("The second value in the tuple must "
                                         "be composed of integers in the range "
                                         "0 through 9.")
                if value[2] in ('n', 'N'):
                    # NaN: digits form the diagnostic
                    self._int = ''.join(map(str, digits))
                    self._exp = value[2]
                    self._is_special = True
                elif isinstance(value[2], int):
                    # finite number: digits give the coefficient
                    self._int = ''.join(map(str, digits or [0]))
                    self._exp = value[2]
                    self._is_special = False
                else:
                    raise ValueError("The third value in the tuple must "
                                     "be an integer, or one of the "
                                     "strings 'F', 'n', 'N'.")
            return self

        if isinstance(value, float):
            if context is None:
                context = getcontext()
            # FloatOperation is raised (or just recorded, if untrapped)
            # before the exact conversion below.
            context._raise_error(FloatOperation,
                "strict semantics for mixing floats and Decimals are "
                "enabled")
            value = Decimal.from_float(value)
            self._exp = value._exp
            self._sign = value._sign
            self._int = value._int
            self._is_special = value._is_special
            return self

        raise TypeError("Cannot convert %r to Decimal" % value)

    # @classmethod, but @decorator is not valid Python 2.3 syntax, so
    # don't use it (see notes on Py2.3 compatibility at top of file)
    def from_float(cls, f):
        """Converts a float to a decimal number, exactly.

        Note that Decimal.from_float(0.1) is not the same as
        Decimal('0.1'). Since 0.1 is not exactly representable in
        binary floating point, the value is stored as the nearest
        representable value which is 0x1.999999999999ap-4.  The exact
        equivalent of the value in decimal is
        0.1000000000000000055511151231257827021181583404541015625.

        >>> Decimal.from_float(0.1)
        Decimal('0.1000000000000000055511151231257827021181583404541015625')
        >>> Decimal.from_float(float('nan'))
        Decimal('NaN')
        >>> Decimal.from_float(float('inf'))
        Decimal('Infinity')
        >>> Decimal.from_float(-float('inf'))
        Decimal('-Infinity')
        >>> Decimal.from_float(-0.0)
        Decimal('-0')

        """
        if isinstance(f, int):                # handle integer inputs
            return cls(f)
        if not isinstance(f, float):
            raise TypeError("argument must be int or float.")
        if _math.isinf(f) or _math.isnan(f):
            return cls(repr(f))
        if _math.copysign(1.0, f) == 1.0:
            sign = 0
        else:
            sign = 1
        n, d = abs(f).as_integer_ratio()
        # d is a power of 2 for any finite float, so n/d == n*5**k / 10**k.
        k = d.bit_length() - 1
        result = _dec_from_triple(sign, str(n*5**k), -k)
        if cls is Decimal:
            return result
        else:
            return cls(result)
    from_float = classmethod(from_float)

    def _isnan(self):
        """Returns whether the number is not actually one.

        0 if a number
        1 if NaN
        2 if sNaN
        """
        if self._is_special:
            exp = self._exp
            if exp == 'n':
                return 1
            elif exp == 'N':
                return 2
        return 0

    def _isinfinity(self):
        """Returns whether the number is infinite

        0 if finite or not a number
        1 if +INF
        -1 if -INF
        """
        if self._exp == 'F':
            if self._sign:
                return -1
            return 1
        return 0

    def _check_nans(self, other=None, context=None):
        """Returns whether the number is not actually one.

        if self, other are sNaN, signal
        if self, other are NaN return nan
        return 0

        Done before operations.
        """

        self_is_nan = self._isnan()
        if other is None:
            other_is_nan = False
        else:
            other_is_nan = other._isnan()

        if self_is_nan or other_is_nan:
            if context is None:
                context = getcontext()

            # sNaN takes precedence over quiet NaN, and self over other.
            if self_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN', self)
            if other_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN', other)
            if self_is_nan:
                return self._fix_nan(context)

            return other._fix_nan(context)
        return 0

    def _compare_check_nans(self, other, context):
        """Version of _check_nans used for the signaling comparisons
        compare_signal, __le__, __lt__, __ge__, __gt__.

        Signal InvalidOperation if either self or other is a (quiet
        or signaling) NaN.  Signaling NaNs take precedence over quiet
        NaNs.

        Return 0 if neither operand is a NaN.

        """
        if context is None:
            context = getcontext()

        if self._is_special or other._is_special:
            if self.is_snan():
                return context._raise_error(InvalidOperation,
                                            'comparison involving sNaN', self)
            elif other.is_snan():
                return context._raise_error(InvalidOperation,
                                            'comparison involving sNaN', other)
            elif self.is_qnan():
                return context._raise_error(InvalidOperation,
                                            'comparison involving NaN', self)
            elif other.is_qnan():
                return context._raise_error(InvalidOperation,
                                            'comparison involving NaN', other)
        return 0

    def __bool__(self):
        """Return True if self is nonzero; otherwise return False.

        NaNs and infinities are considered nonzero.
        """
        return self._is_special or self._int != '0'

    def _cmp(self, other):
        """Compare the two non-NaN decimal instances self and other.

        Returns -1 if self < other, 0 if self == other and 1
        if self > other.  This routine is for internal use only."""

        if self._is_special or other._is_special:
            # _isinfinity() yields -1/0/1, so comparing those values
            # directly orders -INF < finite < +INF.
            self_inf = self._isinfinity()
            other_inf = other._isinfinity()
            if self_inf == other_inf:
                return 0
            elif self_inf < other_inf:
                return -1
            else:
                return 1

        # check for zeros;  Decimal('0') == Decimal('-0')
        if not self:
            if not other:
                return 0
            else:
                return -((-1)**other._sign)
        if not other:
            return (-1)**self._sign

        # If different signs, neg one is less
        if other._sign < self._sign:
            return -1
        if self._sign < other._sign:
            return 1

        self_adjusted = self.adjusted()
        other_adjusted = other.adjusted()
        if self_adjusted == other_adjusted:
            # Same adjusted exponent: right-pad the coefficients with
            # zeros so a plain string comparison orders the magnitudes.
            self_padded = self._int + '0'*(self._exp - other._exp)
            other_padded = other._int + '0'*(other._exp - self._exp)
            if self_padded == other_padded:
                return 0
            elif self_padded < other_padded:
                return -(-1)**self._sign
            else:
                return (-1)**self._sign
        elif self_adjusted > other_adjusted:
            return (-1)**self._sign
        else: # self_adjusted < other_adjusted
            return -((-1)**self._sign)

    # Note: The Decimal standard doesn't cover rich comparisons for
    # Decimals.  In particular, the specification is silent on the
    # subject of what should happen for a comparison involving a NaN.
    # We take the following approach:
    #
    #   == comparisons involving a quiet NaN always return False
    #   != comparisons involving a quiet NaN always return True
    #   == or != comparisons involving a signaling NaN signal
    #      InvalidOperation, and return False or True as above if the
    #      InvalidOperation is not trapped.
    #   <, >, <= and >= comparisons involving a (quiet or signaling)
    #      NaN signal InvalidOperation, and return False if the
    #      InvalidOperation is not trapped.
    #
    # This behavior is designed to conform as closely as possible to
    # that specified by IEEE 754.
def __eq__(self, other, context=None): self, other = _convert_for_comparison(self, other, equality_op=True) if other is NotImplemented: return other if self._check_nans(other, context): return False return self._cmp(other) == 0 def __ne__(self, other, context=None): self, other = _convert_for_comparison(self, other, equality_op=True) if other is NotImplemented: return other if self._check_nans(other, context): return True return self._cmp(other) != 0 def __lt__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other ans = self._compare_check_nans(other, context) if ans: return False return self._cmp(other) < 0 def __le__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other ans = self._compare_check_nans(other, context) if ans: return False return self._cmp(other) <= 0 def __gt__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other ans = self._compare_check_nans(other, context) if ans: return False return self._cmp(other) > 0 def __ge__(self, other, context=None): self, other = _convert_for_comparison(self, other) if other is NotImplemented: return other ans = self._compare_check_nans(other, context) if ans: return False return self._cmp(other) >= 0 def compare(self, other, context=None): """Compares one to another. -1 => a < b 0 => a = b 1 => a > b NaN => one is NaN Like __cmp__, but returns Decimal instances. 
""" other = _convert_other(other, raiseit=True) # Compare(NaN, NaN) = NaN if (self._is_special or other and other._is_special): ans = self._check_nans(other, context) if ans: return ans return Decimal(self._cmp(other)) def __hash__(self): """x.__hash__() <==> hash(x)""" # In order to make sure that the hash of a Decimal instance # agrees with the hash of a numerically equal integer, float # or Fraction, we follow the rules for numeric hashes outlined # in the documentation. (See library docs, 'Built-in Types'). if self._is_special: if self.is_snan(): raise TypeError('Cannot hash a signaling NaN value.') elif self.is_nan(): return _PyHASH_NAN else: if self._sign: return -_PyHASH_INF else: return _PyHASH_INF if self._exp >= 0: exp_hash = pow(10, self._exp, _PyHASH_MODULUS) else: exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS) hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS ans = hash_ if self >= 0 else -hash_ return -2 if ans == -1 else ans def as_tuple(self): """Represents the number as a triple tuple. To show the internals exactly as they are. """ return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp) def __repr__(self): """Represents the number as an instance of Decimal.""" # Invariant: eval(repr(d)) == d return "Decimal('%s')" % str(self) def __str__(self, eng=False, context=None): """Return string representation of the number in scientific notation. Captures all of the information in the underlying representation. 
""" sign = ['', '-'][self._sign] if self._is_special: if self._exp == 'F': return sign + 'Infinity' elif self._exp == 'n': return sign + 'NaN' + self._int else: # self._exp == 'N' return sign + 'sNaN' + self._int # number of digits of self._int to left of decimal point leftdigits = self._exp + len(self._int) # dotplace is number of digits of self._int to the left of the # decimal point in the mantissa of the output string (that is, # after adjusting the exponent) if self._exp <= 0 and leftdigits > -6: # no exponent required dotplace = leftdigits elif not eng: # usual scientific notation: 1 digit on left of the point dotplace = 1 elif self._int == '0': # engineering notation, zero dotplace = (leftdigits + 1) % 3 - 1 else: # engineering notation, nonzero dotplace = (leftdigits - 1) % 3 + 1 if dotplace <= 0: intpart = '0' fracpart = '.' + '0'*(-dotplace) + self._int elif dotplace >= len(self._int): intpart = self._int+'0'*(dotplace-len(self._int)) fracpart = '' else: intpart = self._int[:dotplace] fracpart = '.' + self._int[dotplace:] if leftdigits == dotplace: exp = '' else: if context is None: context = getcontext() exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace) return sign + intpart + fracpart + exp def to_eng_string(self, context=None): """Convert to engineering-type string. Engineering notation has an exponent which is a multiple of 3, so there are up to 3 digits left of the decimal place. Same rules for when in exponential and when as a value as in __str__. """ return self.__str__(eng=True, context=context) def __neg__(self, context=None): """Returns a copy with the sign switched. Rounds, if it has reason. """ if self._is_special: ans = self._check_nans(context=context) if ans: return ans if context is None: context = getcontext() if not self and context.rounding != ROUND_FLOOR: # -Decimal('0') is Decimal('0'), not Decimal('-0'), except # in ROUND_FLOOR rounding mode. 
ans = self.copy_abs() else: ans = self.copy_negate() return ans._fix(context) def __pos__(self, context=None): """Returns a copy, unless it is a sNaN. Rounds the number (if more then precision digits) """ if self._is_special: ans = self._check_nans(context=context) if ans: return ans if context is None: context = getcontext() if not self and context.rounding != ROUND_FLOOR: # + (-0) = 0, except in ROUND_FLOOR rounding mode. ans = self.copy_abs() else: ans = Decimal(self) return ans._fix(context) def __abs__(self, round=True, context=None): """Returns the absolute value of self. If the keyword argument 'round' is false, do not round. The expression self.__abs__(round=False) is equivalent to self.copy_abs(). """ if not round: return self.copy_abs() if self._is_special: ans = self._check_nans(context=context) if ans: return ans if self._sign: ans = self.__neg__(context=context) else: ans = self.__pos__(context=context) return ans def __add__(self, other, context=None): """Returns self + other. -INF + INF (or the reverse) cause InvalidOperation errors. """ other = _convert_other(other) if other is NotImplemented: return other if context is None: context = getcontext() if self._is_special or other._is_special: ans = self._check_nans(other, context) if ans: return ans if self._isinfinity(): # If both INF, same sign => same as both, opposite => error. if self._sign != other._sign and other._isinfinity(): return context._raise_error(InvalidOperation, '-INF + INF') return Decimal(self) if other._isinfinity(): return Decimal(other) # Can't both be infinity here exp = min(self._exp, other._exp) negativezero = 0 if context.rounding == ROUND_FLOOR and self._sign != other._sign: # If the answer is 0, the sign should be negative, in this case. 
negativezero = 1 if not self and not other: sign = min(self._sign, other._sign) if negativezero: sign = 1 ans = _dec_from_triple(sign, '0', exp) ans = ans._fix(context) return ans if not self: exp = max(exp, other._exp - context.prec-1) ans = other._rescale(exp, context.rounding) ans = ans._fix(context) return ans if not other: exp = max(exp, self._exp - context.prec-1) ans = self._rescale(exp, context.rounding) ans = ans._fix(context) return ans op1 = _WorkRep(self) op2 = _WorkRep(other) op1, op2 = _normalize(op1, op2, context.prec) result = _WorkRep() if op1.sign != op2.sign: # Equal and opposite if op1.int == op2.int: ans = _dec_from_triple(negativezero, '0', exp) ans = ans._fix(context) return ans if op1.int < op2.int: op1, op2 = op2, op1 # OK, now abs(op1) > abs(op2) if op1.sign == 1: result.sign = 1 op1.sign, op2.sign = op2.sign, op1.sign else: result.sign = 0 # So we know the sign, and op1 > 0. elif op1.sign == 1: result.sign = 1 op1.sign, op2.sign = (0, 0) else: result.sign = 0 # Now, op1 > abs(op2) > 0 if op2.sign == 0: result.int = op1.int + op2.int else: result.int = op1.int - op2.int result.exp = op1.exp ans = Decimal(result) ans = ans._fix(context) return ans __radd__ = __add__ def __sub__(self, other, context=None): """Return self - other""" other = _convert_other(other) if other is NotImplemented: return other if self._is_special or other._is_special: ans = self._check_nans(other, context=context) if ans: return ans # self - other is computed as self + other.copy_negate() return self.__add__(other.copy_negate(), context=context) def __rsub__(self, other, context=None): """Return other - self""" other = _convert_other(other) if other is NotImplemented: return other return other.__sub__(self, context=context) def __mul__(self, other, context=None): """Return self * other. (+-) INF * 0 (or its reverse) raise InvalidOperation. 
""" other = _convert_other(other) if other is NotImplemented: return other if context is None: context = getcontext() resultsign = self._sign ^ other._sign if self._is_special or other._is_special: ans = self._check_nans(other, context) if ans: return ans if self._isinfinity(): if not other: return context._raise_error(InvalidOperation, '(+-)INF * 0') return _SignedInfinity[resultsign] if other._isinfinity(): if not self: return context._raise_error(InvalidOperation, '0 * (+-)INF') return _SignedInfinity[resultsign] resultexp = self._exp + other._exp # Special case for multiplying by zero if not self or not other: ans = _dec_from_triple(resultsign, '0', resultexp) # Fixing in case the exponent is out of bounds ans = ans._fix(context) return ans # Special case for multiplying by power of 10 if self._int == '1': ans = _dec_from_triple(resultsign, other._int, resultexp) ans = ans._fix(context) return ans if other._int == '1': ans = _dec_from_triple(resultsign, self._int, resultexp) ans = ans._fix(context) return ans op1 = _WorkRep(self) op2 = _WorkRep(other) ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp) ans = ans._fix(context) return ans __rmul__ = __mul__ def __truediv__(self, other, context=None): """Return self / other.""" other = _convert_other(other) if other is NotImplemented: return NotImplemented if context is None: context = getcontext() sign = self._sign ^ other._sign if self._is_special or other._is_special: ans = self._check_nans(other, context) if ans: return ans if self._isinfinity() and other._isinfinity(): return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF') if self._isinfinity(): return _SignedInfinity[sign] if other._isinfinity(): context._raise_error(Clamped, 'Division by infinity') return _dec_from_triple(sign, '0', context.Etiny()) # Special cases for zeroes if not other: if not self: return context._raise_error(DivisionUndefined, '0 / 0') return context._raise_error(DivisionByZero, 'x / 0', sign) if not self: 
exp = self._exp - other._exp coeff = 0 else: # OK, so neither = 0, INF or NaN shift = len(other._int) - len(self._int) + context.prec + 1 exp = self._exp - other._exp - shift op1 = _WorkRep(self) op2 = _WorkRep(other) if shift >= 0: coeff, remainder = divmod(op1.int * 10**shift, op2.int) else: coeff, remainder = divmod(op1.int, op2.int * 10**-shift) if remainder: # result is not exact; adjust to ensure correct rounding if coeff % 5 == 0: coeff += 1 else: # result is exact; get as close to ideal exponent as possible ideal_exp = self._exp - other._exp while exp < ideal_exp and coeff % 10 == 0: coeff //= 10 exp += 1 ans = _dec_from_triple(sign, str(coeff), exp) return ans._fix(context) def _divide(self, other, context): """Return (self // other, self % other), to context.prec precision. Assumes that neither self nor other is a NaN, that self is not infinite and that other is nonzero. """ sign = self._sign ^ other._sign if other._isinfinity(): ideal_exp = self._exp else: ideal_exp = min(self._exp, other._exp) expdiff = self.adjusted() - other.adjusted() if not self or other._isinfinity() or expdiff <= -2: return (_dec_from_triple(sign, '0', 0), self._rescale(ideal_exp, context.rounding)) if expdiff <= context.prec: op1 = _WorkRep(self) op2 = _WorkRep(other) if op1.exp >= op2.exp: op1.int *= 10**(op1.exp - op2.exp) else: op2.int *= 10**(op2.exp - op1.exp) q, r = divmod(op1.int, op2.int) if q < 10**context.prec: return (_dec_from_triple(sign, str(q), 0), _dec_from_triple(self._sign, str(r), ideal_exp)) # Here the quotient is too large to be representable ans = context._raise_error(DivisionImpossible, 'quotient too large in //, % or divmod') return ans, ans def __rtruediv__(self, other, context=None): """Swaps self/other and returns __truediv__.""" other = _convert_other(other) if other is NotImplemented: return other return other.__truediv__(self, context=context) def __divmod__(self, other, context=None): """ Return (self // other, self % other) """ other = 
_convert_other(other) if other is NotImplemented: return other if context is None: context = getcontext() ans = self._check_nans(other, context) if ans: return (ans, ans) sign = self._sign ^ other._sign if self._isinfinity(): if other._isinfinity(): ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)') return ans, ans else: return (_SignedInfinity[sign], context._raise_error(InvalidOperation, 'INF % x')) if not other: if not self: ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)') return ans, ans else: return (context._raise_error(DivisionByZero, 'x // 0', sign), context._raise_error(InvalidOperation, 'x % 0')) quotient, remainder = self._divide(other, context) remainder = remainder._fix(context) return quotient, remainder def __rdivmod__(self, other, context=None): """Swaps self/other and returns __divmod__.""" other = _convert_other(other) if other is NotImplemented: return other return other.__divmod__(self, context=context) def __mod__(self, other, context=None): """ self % other """ other = _convert_other(other) if other is NotImplemented: return other if context is None: context = getcontext() ans = self._check_nans(other, context) if ans: return ans if self._isinfinity(): return context._raise_error(InvalidOperation, 'INF % x') elif not other: if self: return context._raise_error(InvalidOperation, 'x % 0') else: return context._raise_error(DivisionUndefined, '0 % 0') remainder = self._divide(other, context)[1] remainder = remainder._fix(context) return remainder def __rmod__(self, other, context=None): """Swaps self/other and returns __mod__.""" other = _convert_other(other) if other is NotImplemented: return other return other.__mod__(self, context=context) def remainder_near(self, other, context=None): """ Remainder nearest to 0- abs(remainder-near) <= other/2 """ if context is None: context = getcontext() other = _convert_other(other, raiseit=True) ans = self._check_nans(other, context) if ans: return ans # self == +/-infinity -> 
InvalidOperation if self._isinfinity(): return context._raise_error(InvalidOperation, 'remainder_near(infinity, x)') # other == 0 -> either InvalidOperation or DivisionUndefined if not other: if self: return context._raise_error(InvalidOperation, 'remainder_near(x, 0)') else: return context._raise_error(DivisionUndefined, 'remainder_near(0, 0)') # other = +/-infinity -> remainder = self if other._isinfinity(): ans = Decimal(self) return ans._fix(context) # self = 0 -> remainder = self, with ideal exponent ideal_exponent = min(self._exp, other._exp) if not self: ans = _dec_from_triple(self._sign, '0', ideal_exponent) return ans._fix(context) # catch most cases of large or small quotient expdiff = self.adjusted() - other.adjusted() if expdiff >= context.prec + 1: # expdiff >= prec+1 => abs(self/other) > 10**prec return context._raise_error(DivisionImpossible) if expdiff <= -2: # expdiff <= -2 => abs(self/other) < 0.1 ans = self._rescale(ideal_exponent, context.rounding) return ans._fix(context) # adjust both arguments to have the same exponent, then divide op1 = _WorkRep(self) op2 = _WorkRep(other) if op1.exp >= op2.exp: op1.int *= 10**(op1.exp - op2.exp) else: op2.int *= 10**(op2.exp - op1.exp) q, r = divmod(op1.int, op2.int) # remainder is r*10**ideal_exponent; other is +/-op2.int * # 10**ideal_exponent. 
Apply correction to ensure that # abs(remainder) <= abs(other)/2 if 2*r + (q&1) > op2.int: r -= op2.int q += 1 if q >= 10**context.prec: return context._raise_error(DivisionImpossible) # result has same sign as self unless r is negative sign = self._sign if r < 0: sign = 1-sign r = -r ans = _dec_from_triple(sign, str(r), ideal_exponent) return ans._fix(context) def __floordiv__(self, other, context=None): """self // other""" other = _convert_other(other) if other is NotImplemented: return other if context is None: context = getcontext() ans = self._check_nans(other, context) if ans: return ans if self._isinfinity(): if other._isinfinity(): return context._raise_error(InvalidOperation, 'INF // INF') else: return _SignedInfinity[self._sign ^ other._sign] if not other: if self: return context._raise_error(DivisionByZero, 'x // 0', self._sign ^ other._sign) else: return context._raise_error(DivisionUndefined, '0 // 0') return self._divide(other, context)[0] def __rfloordiv__(self, other, context=None): """Swaps self/other and returns __floordiv__.""" other = _convert_other(other) if other is NotImplemented: return other return other.__floordiv__(self, context=context) def __float__(self): """Float representation.""" if self._isnan(): if self.is_snan(): raise ValueError("Cannot convert signaling NaN to float") s = "-nan" if self._sign else "nan" else: s = str(self) return float(s) def __int__(self): """Converts self to an int, truncating if necessary.""" if self._is_special: if self._isnan(): raise ValueError("Cannot convert NaN to integer") elif self._isinfinity(): raise OverflowError("Cannot convert infinity to integer") s = (-1)**self._sign if self._exp >= 0: return s*int(self._int)*10**self._exp else: return s*int(self._int[:self._exp] or '0') __trunc__ = __int__ def real(self): return self real = property(real) def imag(self): return Decimal(0) imag = property(imag) def conjugate(self): return self def __complex__(self): return complex(float(self)) def 
_fix_nan(self, context):
        """Decapitate the payload of a NaN to fit the context"""
        payload = self._int

        # maximum length of payload is precision if clamp=0,
        # precision-1 if clamp=1.
        max_payload_len = context.prec - context.clamp
        if len(payload) > max_payload_len:
            payload = payload[len(payload)-max_payload_len:].lstrip('0')
            return _dec_from_triple(self._sign, payload, self._exp, True)
        return Decimal(self)

    def _fix(self, context):
        """Round if it is necessary to keep self within prec precision.

        Rounds and fixes the exponent.  Does not raise on a sNaN.

        Arguments:
        self - Decimal instance
        context - context used.
        """

        if self._is_special:
            if self._isnan():
                # decapitate payload if necessary
                return self._fix_nan(context)
            else:
                # self is +/-Infinity; return unaltered
                return Decimal(self)

        # if self is zero then exponent should be between Etiny and
        # Emax if clamp==0, and between Etiny and Etop if clamp==1.
        Etiny = context.Etiny()
        Etop = context.Etop()
        if not self:
            exp_max = [context.Emax, Etop][context.clamp]
            new_exp = min(max(self._exp, Etiny), exp_max)
            if new_exp != self._exp:
                context._raise_error(Clamped)
                return _dec_from_triple(self._sign, '0', new_exp)
            else:
                return Decimal(self)

        # exp_min is the smallest allowable exponent of the result,
        # equal to max(self.adjusted()-context.prec+1, Etiny)
        exp_min = len(self._int) + self._exp - context.prec
        if exp_min > Etop:
            # overflow: exp_min > Etop iff self.adjusted() > Emax
            ans = context._raise_error(Overflow, 'above Emax', self._sign)
            context._raise_error(Inexact)
            context._raise_error(Rounded)
            return ans

        self_is_subnormal = exp_min < Etiny
        if self_is_subnormal:
            exp_min = Etiny

        # round if self has too many digits
        if self._exp < exp_min:
            digits = len(self._int) + self._exp - exp_min
            if digits < 0:
                # entire coefficient is being rounded away; substitute
                # 10**(exp_min-1) so the rounding helpers see one digit
                self = _dec_from_triple(self._sign, '1', exp_min-1)
                digits = 0
            rounding_method = self._pick_rounding_function[context.rounding]
            changed = rounding_method(self, digits)
            coeff = self._int[:digits] or '0'
            if changed > 0:
                coeff = str(int(coeff)+1)
                if len(coeff) > context.prec:
                    # rounding up carried into an extra digit (e.g. 999 -> 1000);
                    # drop the trailing zero and bump the exponent
                    coeff = coeff[:-1]
                    exp_min += 1

            # check whether the rounding pushed the exponent out of range
            if exp_min > Etop:
                ans = context._raise_error(Overflow, 'above Emax', self._sign)
            else:
                ans = _dec_from_triple(self._sign, coeff, exp_min)

            # raise the appropriate signals, taking care to respect
            # the precedence described in the specification
            if changed and self_is_subnormal:
                context._raise_error(Underflow)
            if self_is_subnormal:
                context._raise_error(Subnormal)
            if changed:
                context._raise_error(Inexact)
            context._raise_error(Rounded)
            if not ans:
                # raise Clamped on underflow to 0
                context._raise_error(Clamped)
            return ans

        if self_is_subnormal:
            context._raise_error(Subnormal)

        # fold down if clamp == 1 and self has too few digits
        if context.clamp == 1 and self._exp > Etop:
            context._raise_error(Clamped)
            self_padded = self._int + '0'*(self._exp - Etop)
            return _dec_from_triple(self._sign, self_padded, Etop)

        # here self was representable to begin with; return unchanged
        return Decimal(self)

    # for each of the rounding functions below:
    #   self is a finite, nonzero Decimal
    #   prec is an integer satisfying 0 <= prec < len(self._int)
    #
    # each function returns either -1, 0, or 1, as follows:
    #   1 indicates that self should be rounded up (away from zero)
    #   0 indicates that self should be truncated, and that all the
    #     digits to be truncated are zeros (so the value is unchanged)
    #  -1 indicates that there are nonzero digits to be truncated

    def _round_down(self, prec):
        """Also known as round-towards-0, truncate."""
        if _all_zeros(self._int, prec):
            return 0
        else:
            return -1

    def _round_up(self, prec):
        """Rounds away from 0."""
        return -self._round_down(prec)

    def _round_half_up(self, prec):
        """Rounds 5 up (away from 0)"""
        if self._int[prec] in '56789':
            return 1
        elif _all_zeros(self._int, prec):
            return 0
        else:
            return -1

    def _round_half_down(self, prec):
        """Round 5 down"""
        if _exact_half(self._int, prec):
            return -1
        else:
            return self._round_half_up(prec)

    def _round_half_even(self, prec):
        """Round 5 to even, rest to nearest."""
        if _exact_half(self._int, prec) and \
                (prec == 0 or self._int[prec-1] in '02468'):
            return -1
        else:
            return self._round_half_up(prec)

    def _round_ceiling(self, prec):
        """Rounds up (not away from 0 if negative.)"""
        if self._sign:
            return self._round_down(prec)
        else:
            return -self._round_down(prec)

    def _round_floor(self, prec):
        """Rounds down (not towards 0 if negative)"""
        if not self._sign:
            return self._round_down(prec)
        else:
            return -self._round_down(prec)

    def _round_05up(self, prec):
        """Round down unless digit prec-1 is 0 or 5."""
        if prec and self._int[prec-1] not in '05':
            return self._round_down(prec)
        else:
            return -self._round_down(prec)

    # map each rounding-mode name to the bound rounding method above;
    # looked up by _fix and _rescale via context.rounding / rounding keys
    _pick_rounding_function = dict(
        ROUND_DOWN = _round_down,
        ROUND_UP = _round_up,
        ROUND_HALF_UP = _round_half_up,
        ROUND_HALF_DOWN = _round_half_down,
        ROUND_HALF_EVEN = _round_half_even,
        ROUND_CEILING = _round_ceiling,
        ROUND_FLOOR = _round_floor,
        ROUND_05UP = _round_05up,
    )

    def __round__(self, n=None):
        """Round self to the nearest integer, or to a given precision.

        If only one argument is supplied, round a finite Decimal
        instance self to the nearest integer.  If self is infinite or
        a NaN then a Python exception is raised.  If self is finite
        and lies exactly halfway between two integers then it is
        rounded to the integer with even last digit.

        >>> round(Decimal('123.456'))
        123
        >>> round(Decimal('-456.789'))
        -457
        >>> round(Decimal('-3.0'))
        -3
        >>> round(Decimal('2.5'))
        2
        >>> round(Decimal('3.5'))
        4
        >>> round(Decimal('Inf'))
        Traceback (most recent call last):
          ...
        OverflowError: cannot round an infinity
        >>> round(Decimal('NaN'))
        Traceback (most recent call last):
          ...
        ValueError: cannot round a NaN

        If a second argument n is supplied, self is rounded to n
        decimal places using the rounding mode for the current
        context.  For an integer n, round(self, -n) is exactly
        equivalent to self.quantize(Decimal('1En')).
        >>> round(Decimal('123.456'), 0)
        Decimal('123')
        >>> round(Decimal('123.456'), 2)
        Decimal('123.46')
        >>> round(Decimal('123.456'), -2)
        Decimal('1E+2')
        >>> round(Decimal('-Infinity'), 37)
        Decimal('NaN')
        >>> round(Decimal('sNaN123'), 0)
        Decimal('NaN123')

        """
        if n is not None:
            # two-argument form: use the equivalent quantize call
            if not isinstance(n, int):
                raise TypeError('Second argument to round should be integral')
            exp = _dec_from_triple(0, '1', -n)
            return self.quantize(exp)

        # one-argument form
        if self._is_special:
            if self.is_nan():
                raise ValueError("cannot round a NaN")
            else:
                raise OverflowError("cannot round an infinity")
        return int(self._rescale(0, ROUND_HALF_EVEN))

    def __floor__(self):
        """Return the floor of self, as an integer.

        For a finite Decimal instance self, return the greatest
        integer n such that n <= self.  If self is infinite or a NaN
        then a Python exception is raised.
        """
        if self._is_special:
            if self.is_nan():
                raise ValueError("cannot round a NaN")
            else:
                raise OverflowError("cannot round an infinity")
        return int(self._rescale(0, ROUND_FLOOR))

    def __ceil__(self):
        """Return the ceiling of self, as an integer.

        For a finite Decimal instance self, return the least integer n
        such that n >= self.  If self is infinite or a NaN then a
        Python exception is raised.
        """
        if self._is_special:
            if self.is_nan():
                raise ValueError("cannot round a NaN")
            else:
                raise OverflowError("cannot round an infinity")
        return int(self._rescale(0, ROUND_CEILING))

    def fma(self, other, third, context=None):
        """Fused multiply-add.

        Returns self*other+third with no rounding of the intermediate
        product self*other.

        self and other are multiplied together, with no rounding of
        the result.  The third operand is then added to the result,
        and a single final rounding is performed.
        """

        other = _convert_other(other, raiseit=True)
        third = _convert_other(third, raiseit=True)

        # compute product; raise InvalidOperation if either operand is
        # a signaling NaN or if the product is zero times infinity.
        if self._is_special or other._is_special:
            if context is None:
                context = getcontext()
            if self._exp == 'N':
                return context._raise_error(InvalidOperation, 'sNaN', self)
            if other._exp == 'N':
                return context._raise_error(InvalidOperation, 'sNaN', other)
            if self._exp == 'n':
                product = self
            elif other._exp == 'n':
                product = other
            elif self._exp == 'F':
                if not other:
                    return context._raise_error(InvalidOperation,
                                                'INF * 0 in fma')
                product = _SignedInfinity[self._sign ^ other._sign]
            elif other._exp == 'F':
                if not self:
                    return context._raise_error(InvalidOperation,
                                                '0 * INF in fma')
                product = _SignedInfinity[self._sign ^ other._sign]
        else:
            # exact product of the two finite operands
            product = _dec_from_triple(self._sign ^ other._sign,
                                       str(int(self._int) * int(other._int)),
                                       self._exp + other._exp)

        # the single rounding happens inside this addition
        return product.__add__(third, context)

    def _power_modulo(self, other, modulo, context=None):
        """Three argument version of __pow__"""

        other = _convert_other(other)
        if other is NotImplemented:
            return other
        modulo = _convert_other(modulo)
        if modulo is NotImplemented:
            return modulo

        if context is None:
            context = getcontext()

        # deal with NaNs: if there are any sNaNs then first one wins,
        # (i.e. behaviour for NaNs is identical to that of fma)
        self_is_nan = self._isnan()
        other_is_nan = other._isnan()
        modulo_is_nan = modulo._isnan()
        if self_is_nan or other_is_nan or modulo_is_nan:
            if self_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN',
                                            self)
            if other_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN',
                                            other)
            if modulo_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN',
                                            modulo)
            if self_is_nan:
                return self._fix_nan(context)
            if other_is_nan:
                return other._fix_nan(context)
            return modulo._fix_nan(context)

        # check inputs: we apply same restrictions as Python's pow()
        if not (self._isinteger() and
                other._isinteger() and
                modulo._isinteger()):
            return context._raise_error(InvalidOperation,
                                        'pow() 3rd argument not allowed '
                                        'unless all arguments are integers')
        if other < 0:
            return context._raise_error(InvalidOperation,
                                        'pow() 2nd argument cannot be '
                                        'negative when 3rd argument specified')
        if not modulo:
            return context._raise_error(InvalidOperation,
                                        'pow() 3rd argument cannot be 0')

        # additional restriction for decimal: the modulus must be less
        # than 10**prec in absolute value
        if modulo.adjusted() >= context.prec:
            return context._raise_error(InvalidOperation,
                                        'insufficient precision: pow() 3rd '
                                        'argument must not have more than '
                                        'precision digits')

        # define 0**0 == NaN, for consistency with two-argument pow
        # (even though it hurts!)
        if not other and not self:
            return context._raise_error(InvalidOperation,
                                        'at least one of pow() 1st argument '
                                        'and 2nd argument must be nonzero ;'
                                        '0**0 is not defined')

        # compute sign of result
        if other._iseven():
            sign = 0
        else:
            sign = self._sign

        # convert modulo to a Python integer, and self and other to
        # Decimal integers (i.e. force their exponents to be >= 0)
        modulo = abs(int(modulo))
        base = _WorkRep(self.to_integral_value())
        exponent = _WorkRep(other.to_integral_value())

        # compute result using integer pow(); first raise base to the
        # power 10**exponent.exp, one factor of 10 at a time
        base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
        for i in range(exponent.exp):
            base = pow(base, 10, modulo)
        base = pow(base, exponent.int, modulo)

        return _dec_from_triple(sign, str(base), 0)

    def _power_exact(self, other, p):
        """Attempt to compute self**other exactly.

        Given Decimals self and other and an integer p, attempt to
        compute an exact result for the power self**other, with p
        digits of precision.  Return None if self**other is not
        exactly representable in p digits.

        Assumes that elimination of special cases has already been
        performed: self and other must both be nonspecial; self must
        be positive and not numerically equal to 1; other must be
        nonzero.  For efficiency, other._exp should not be too large,
        so that 10**abs(other._exp) is a feasible calculation."""

        # In the comments below, we write x for the value of self and y for the
        # value of other.  Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
        # and yc positive integers not divisible by 10.

        # The main purpose of this method is to identify the *failure*
        # of x**y to be exactly representable with as little effort as
        # possible.  So we look for cheap and easy tests that
        # eliminate the possibility of x**y being exact.  Only if all
        # these tests are passed do we go on to actually compute x**y.

        # Here's the main idea.  Express y as a rational number m/n, with m and
        # n relatively prime and n>0.  Then for x**y to be exactly
        # representable (at *any* precision), xc must be the nth power of a
        # positive integer and xe must be divisible by n.  If y is negative
        # then additionally xc must be a power of either 2 or 5, hence a power
        # of 2**n or 5**n.
        #
        # There's a limit to how small |y| can be: if y=m/n as above
        # then:
        #
        #  (1) if xc != 1 then for the result to be representable we
        #      need xc**(1/n) >= 2, and hence also xc**|y| >= 2.  So
        #      if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
        #      2**(1/|y|), hence xc**|y| < 2 and the result is not
        #      representable.
        #
        #  (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1.  Hence if
        #      |y| < 1/|xe| then the result is not representable.
        #
        # Note that since x is not equal to 1, at least one of (1) and
        # (2) must apply.  Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
        # 10**-ye iff len(str(|yc|*nbits(xc)) <= -ye.
        #
        # There's also a limit to how large y can be, at least if it's
        # positive: the normalized result will have coefficient xc**y,
        # so if it's representable then xc**y < 10**p, and y <
        # p/log10(xc).  Hence if y*log10(xc) >= p then the result is
        # not exactly representable.

        # if len(str(abs(yc*xe)) <= -ye then abs(yc*xe) < 10**-ye,
        # so |y| < 1/xe and the result is not representable.
        # Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
        # < 1/nbits(xc).

        x = _WorkRep(self)
        xc, xe = x.int, x.exp
        while xc % 10 == 0:
            xc //= 10
            xe += 1

        y = _WorkRep(other)
        yc, ye = y.int, y.exp
        while yc % 10 == 0:
            yc //= 10
            ye += 1

        # case where xc == 1: result is 10**(xe*y), with xe*y
        # required to be an integer
        if xc == 1:
            xe *= yc
            # result is now 10**(xe * 10**ye);  xe * 10**ye must be integral
            while xe % 10 == 0:
                xe //= 10
                ye += 1
            if ye < 0:
                return None
            exponent = xe * 10**ye
            if y.sign == 1:
                exponent = -exponent
            # if other is a nonnegative integer, use ideal exponent
            if other._isinteger() and other._sign == 0:
                ideal_exponent = self._exp*int(other)
                zeros = min(exponent-ideal_exponent, p-1)
            else:
                zeros = 0
            return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)

        # case where y is negative: xc must be either a power
        # of 2 or a power of 5.
        if y.sign == 1:
            last_digit = xc % 10
            if last_digit in (2,4,6,8):
                # quick test for power of 2
                if xc & -xc != xc:
                    return None
                # now xc is a power of 2; e is its exponent
                e = _nbits(xc)-1

                # We now have:
                #
                #   x = 2**e * 10**xe, e > 0, and y < 0.
                #
                # The exact result is:
                #
                #   x**y = 5**(-e*y) * 10**(e*y + xe*y)
                #
                # provided that both e*y and xe*y are integers.  Note that if
                # 5**(-e*y) >= 10**p, then the result can't be expressed
                # exactly with p digits of precision.
                #
                # Using the above, we can guard against large values of ye.
                # 93/65 is an upper bound for log(10)/log(5), so if
                #
                #   ye >= len(str(93*p//65))
                #
                # then
                #
                #   -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
                #
                # so 5**(-e*y) >= 10**p, and the coefficient of the result
                # can't be expressed in p digits.

                # emax >= largest e such that 5**e < 10**p.
                emax = p*93//65
                if ye >= len(str(emax)):
                    return None

                # Find -e*y and -xe*y; both must be integers
                e = _decimal_lshift_exact(e * yc, ye)
                xe = _decimal_lshift_exact(xe * yc, ye)
                if e is None or xe is None:
                    return None

                if e > emax:
                    return None
                xc = 5**e

            elif last_digit == 5:
                # e >= log_5(xc) if xc is a power of 5; we have
                # equality all the way up to xc=5**2658
                e = _nbits(xc)*28//65
                xc, remainder = divmod(5**e, xc)
                if remainder:
                    return None
                while xc % 5 == 0:
                    xc //= 5
                    e -= 1

                # Guard against large values of ye, using the same logic as in
                # the 'xc is a power of 2' branch.  10/3 is an upper bound for
                # log(10)/log(2).
                emax = p*10//3
                if ye >= len(str(emax)):
                    return None

                e = _decimal_lshift_exact(e * yc, ye)
                xe = _decimal_lshift_exact(xe * yc, ye)
                if e is None or xe is None:
                    return None

                if e > emax:
                    return None
                xc = 2**e
            else:
                return None

            if xc >= 10**p:
                return None
            xe = -e-xe
            return _dec_from_triple(0, str(xc), xe)

        # now y is positive; find m and n such that y = m/n
        if ye >= 0:
            m, n = yc*10**ye, 1
        else:
            if xe != 0 and len(str(abs(yc*xe))) <= -ye:
                return None
            xc_bits = _nbits(xc)
            if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
                return None
            m, n = yc, 10**(-ye)
            # reduce m/n to lowest terms; n = 10**(-ye), so only the
            # prime factors 2 and 5 can be shared
            while m % 2 == n % 2 == 0:
                m //= 2
                n //= 2
            while m % 5 == n % 5 == 0:
                m //= 5
                n //= 5

        # compute nth root of xc*10**xe
        if n > 1:
            # if 1 < xc < 2**n then xc isn't an nth power
            if xc != 1 and xc_bits <= n:
                return None

            xe, rem = divmod(xe, n)
            if rem != 0:
                return None

            # compute nth root of xc using Newton's method
            a = 1 << -(-_nbits(xc)//n) # initial estimate
            while True:
                q, r = divmod(xc, a**(n-1))
                if a <= q:
                    break
                else:
                    a = (a*(n-1) + q)//n
            if not (a == q and r == 0):
                return None
            xc = a

        # now xc*10**xe is the nth root of the original xc*10**xe
        # compute mth power of xc*10**xe

        # if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
        # 10**p and the result is not representable.
        if xc > 1 and m > p*100//_log10_lb(xc):
            return None
        xc = xc**m
        xe *= m
        if xc > 10**p:
            return None

        # by this point the result *is* exactly representable
        # adjust the exponent to get as close as possible to the ideal
        # exponent, if necessary
        str_xc = str(xc)
        if other._isinteger() and other._sign == 0:
            ideal_exponent = self._exp*int(other)
            zeros = min(xe-ideal_exponent, p-len(str_xc))
        else:
            zeros = 0
        return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)

    def __pow__(self, other, modulo=None, context=None):
        """Return self ** other [ % modulo].

        With two arguments, compute self**other.

        With three arguments, compute (self**other) % modulo.
        For the three argument form, the following restrictions on the
        arguments hold:

         - all three arguments must be integral
         - other must be nonnegative
         - either self or other (or both) must be nonzero
         - modulo must be nonzero and must have at most p digits,
           where p is the context precision.

        If any of these restrictions is violated the InvalidOperation
        flag is raised.

        The result of pow(self, other, modulo) is identical to the
        result that would be obtained by computing (self**other) %
        modulo with unbounded precision, but is computed more
        efficiently.  It is always exact.
        """

        if modulo is not None:
            return self._power_modulo(other, modulo, context)

        other = _convert_other(other)
        if other is NotImplemented:
            return other

        if context is None:
            context = getcontext()

        # either argument is a NaN => result is NaN
        ans = self._check_nans(other, context)
        if ans:
            return ans

        # 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
        if not other:
            if not self:
                return context._raise_error(InvalidOperation, '0 ** 0')
            else:
                return _One

        # result has sign 1 iff self._sign is 1 and other is an odd integer
        result_sign = 0
        if self._sign == 1:
            if other._isinteger():
                if not other._iseven():
                    result_sign = 1
            else:
                # -ve**noninteger = NaN
                # (-0)**noninteger = 0**noninteger
                if self:
                    return context._raise_error(InvalidOperation,
                        'x ** y with x negative and y not an integer')
            # negate self, without doing any unwanted rounding
            self = self.copy_negate()

        # 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
        if not self:
            if other._sign == 0:
                return _dec_from_triple(result_sign, '0', 0)
            else:
                return _SignedInfinity[result_sign]

        # Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
        if self._isinfinity():
            if other._sign == 0:
                return _SignedInfinity[result_sign]
            else:
                return _dec_from_triple(result_sign, '0', 0)

        # 1**other = 1, but the choice of exponent and the flags
        # depend on the exponent of self, and on whether other is a
        # positive integer, a negative integer, or neither
        if self == _One:
            if other._isinteger():
                # exp = max(self._exp*max(int(other), 0),
                # 1-context.prec) but evaluating int(other) directly
                # is dangerous until we know other is small (other
                # could be 1e999999999)
                if other._sign == 1:
                    multiplier = 0
                elif other > context.prec:
                    multiplier = context.prec
                else:
                    multiplier = int(other)

                exp = self._exp * multiplier
                if exp < 1-context.prec:
                    exp = 1-context.prec
                    context._raise_error(Rounded)
            else:
                context._raise_error(Inexact)
                context._raise_error(Rounded)
                exp = 1-context.prec

            return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)

        # compute adjusted exponent of self
        self_adj = self.adjusted()

        # self ** infinity is infinity if self > 1, 0 if self < 1
        # self ** -infinity is infinity if self < 1, 0 if self > 1
        if other._isinfinity():
            if (other._sign == 0) == (self_adj < 0):
                return _dec_from_triple(result_sign, '0', 0)
            else:
                return _SignedInfinity[result_sign]

        # from here on, the result always goes through the call
        # to _fix at the end of this function.
        ans = None
        exact = False

        # crude test to catch cases of extreme overflow/underflow.  If
        # log10(self)*other >= 10**bound and bound >= len(str(Emax))
        # then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
        # self**other >= 10**(Emax+1), so overflow occurs.  The test
        # for underflow is similar.
        bound = self._log10_exp_bound() + other.adjusted()
        if (self_adj >= 0) == (other._sign == 0):
            # self > 1 and other +ve, or self < 1 and other -ve
            # possibility of overflow
            if bound >= len(str(context.Emax)):
                ans = _dec_from_triple(result_sign, '1', context.Emax+1)
        else:
            # self > 1 and other -ve, or self < 1 and other +ve
            # possibility of underflow to 0
            Etiny = context.Etiny()
            if bound >= len(str(-Etiny)):
                ans = _dec_from_triple(result_sign, '1', Etiny-1)

        # try for an exact result with precision +1
        if ans is None:
            ans = self._power_exact(other, context.prec + 1)
            if ans is not None:
                if result_sign == 1:
                    ans = _dec_from_triple(1, ans._int, ans._exp)
                exact = True

        # usual case: inexact result, x**y computed directly as exp(y*log(x))
        if ans is None:
            p = context.prec
            x = _WorkRep(self)
            xc, xe = x.int, x.exp
            y = _WorkRep(other)
            yc, ye = y.int, y.exp
            if y.sign == 1:
                yc = -yc

            # compute correctly rounded result:  start with precision +3,
            # then increase precision until result is unambiguously roundable
            extra = 3
            while True:
                coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
                if coeff % (5*10**(len(str(coeff))-p-1)):
                    break
                extra += 3

            ans = _dec_from_triple(result_sign, str(coeff), exp)

        # unlike exp, ln and log10, the power function respects the
        # rounding mode; no need to switch to ROUND_HALF_EVEN here

        # There's a difficulty here when 'other' is not an integer and
        # the result is exact.  In this case, the specification
        # requires that the Inexact flag be raised (in spite of
        # exactness), but since the result is exact _fix won't do this
        # for us.  (Correspondingly, the Underflow signal should also
        # be raised for subnormal results.)  We can't directly raise
        # these signals either before or after calling _fix, since
        # that would violate the precedence for signals.  So we wrap
        # the ._fix call in a temporary context, and reraise
        # afterwards.
        if exact and not other._isinteger():
            # pad with zeros up to length context.prec+1 if necessary; this
            # ensures that the Rounded signal will be raised.
            if len(ans._int) <= context.prec:
                expdiff = context.prec + 1 - len(ans._int)
                ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
                                       ans._exp-expdiff)

            # create a copy of the current context, with cleared flags/traps
            newcontext = context.copy()
            newcontext.clear_flags()
            for exception in _signals:
                newcontext.traps[exception] = 0

            # round in the new context
            ans = ans._fix(newcontext)

            # raise Inexact, and if necessary, Underflow
            newcontext._raise_error(Inexact)
            if newcontext.flags[Subnormal]:
                newcontext._raise_error(Underflow)

            # propagate signals to the original context; _fix could
            # have raised any of Overflow, Underflow, Subnormal,
            # Inexact, Rounded, Clamped.  Overflow needs the correct
            # arguments.  Note that the order of the exceptions is
            # important here.
            if newcontext.flags[Overflow]:
                context._raise_error(Overflow, 'above Emax', ans._sign)
            for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
                if newcontext.flags[exception]:
                    context._raise_error(exception)

        else:
            ans = ans._fix(context)

        return ans

    def __rpow__(self, other, context=None):
        """Swaps self/other and returns __pow__."""
        other = _convert_other(other)
        if other is NotImplemented:
            return other
        return other.__pow__(self, context=context)

    def normalize(self, context=None):
        """Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""

        if context is None:
            context = getcontext()

        if self._is_special:
            ans = self._check_nans(context=context)
            if ans:
                return ans

        dup = self._fix(context)
        if dup._isinfinity():
            return dup

        if not dup:
            return _dec_from_triple(dup._sign, '0', 0)
        exp_max = [context.Emax, context.Etop()][context.clamp]
        end = len(dup._int)
        exp = dup._exp
        # strip trailing zeros, but never push the exponent past exp_max
        while dup._int[end-1] == '0' and exp < exp_max:
            exp += 1
            end -= 1
        return _dec_from_triple(dup._sign, dup._int[:end], exp)

    def quantize(self, exp, rounding=None, context=None, watchexp=True):
        """Quantize self so its exponent is the same as that of exp.

        Similar to self._rescale(exp._exp) but with error checking.
""" exp = _convert_other(exp, raiseit=True) if context is None: context = getcontext() if rounding is None: rounding = context.rounding if self._is_special or exp._is_special: ans = self._check_nans(exp, context) if ans: return ans if exp._isinfinity() or self._isinfinity(): if exp._isinfinity() and self._isinfinity(): return Decimal(self) # if both are inf, it is OK return context._raise_error(InvalidOperation, 'quantize with one INF') # if we're not watching exponents, do a simple rescale if not watchexp: ans = self._rescale(exp._exp, rounding) # raise Inexact and Rounded where appropriate if ans._exp > self._exp: context._raise_error(Rounded) if ans != self: context._raise_error(Inexact) return ans # exp._exp should be between Etiny and Emax if not (context.Etiny() <= exp._exp <= context.Emax): return context._raise_error(InvalidOperation, 'target exponent out of bounds in quantize') if not self: ans = _dec_from_triple(self._sign, '0', exp._exp) return ans._fix(context) self_adjusted = self.adjusted() if self_adjusted > context.Emax: return context._raise_error(InvalidOperation, 'exponent of quantize result too large for current context') if self_adjusted - exp._exp + 1 > context.prec: return context._raise_error(InvalidOperation, 'quantize result has too many digits for current context') ans = self._rescale(exp._exp, rounding) if ans.adjusted() > context.Emax: return context._raise_error(InvalidOperation, 'exponent of quantize result too large for current context') if len(ans._int) > context.prec: return context._raise_error(InvalidOperation, 'quantize result has too many digits for current context') # raise appropriate flags if ans and ans.adjusted() < context.Emin: context._raise_error(Subnormal) if ans._exp > self._exp: if ans != self: context._raise_error(Inexact) context._raise_error(Rounded) # call to fix takes care of any necessary folddown, and # signals Clamped if necessary ans = ans._fix(context) return ans def same_quantum(self, other, context=None): 
"""Return True if self and other have the same exponent; otherwise return False. If either operand is a special value, the following rules are used: * return True if both operands are infinities * return True if both operands are NaNs * otherwise, return False. """ other = _convert_other(other, raiseit=True) if self._is_special or other._is_special: return (self.is_nan() and other.is_nan() or self.is_infinite() and other.is_infinite()) return self._exp == other._exp def _rescale(self, exp, rounding): """Rescale self so that the exponent is exp, either by padding with zeros or by truncating digits, using the given rounding mode. Specials are returned without change. This operation is quiet: it raises no flags, and uses no information from the context. exp = exp to scale to (an integer) rounding = rounding mode """ if self._is_special: return Decimal(self) if not self: return _dec_from_triple(self._sign, '0', exp) if self._exp >= exp: # pad answer with zeros if necessary return _dec_from_triple(self._sign, self._int + '0'*(self._exp - exp), exp) # too many digits; round and lose data. If self.adjusted() < # exp-1, replace self by 10**(exp-1) before rounding digits = len(self._int) + self._exp - exp if digits < 0: self = _dec_from_triple(self._sign, '1', exp-1) digits = 0 this_function = self._pick_rounding_function[rounding] changed = this_function(self, digits) coeff = self._int[:digits] or '0' if changed == 1: coeff = str(int(coeff)+1) return _dec_from_triple(self._sign, coeff, exp) def _round(self, places, rounding): """Round a nonzero, nonspecial Decimal to a fixed number of significant figures, using the given rounding mode. Infinities, NaNs and zeros are returned unaltered. This operation is quiet: it raises no flags, and uses no information from the context. 
""" if places <= 0: raise ValueError("argument should be at least 1 in _round") if self._is_special or not self: return Decimal(self) ans = self._rescale(self.adjusted()+1-places, rounding) # it can happen that the rescale alters the adjusted exponent; # for example when rounding 99.97 to 3 significant figures. # When this happens we end up with an extra 0 at the end of # the number; a second rescale fixes this. if ans.adjusted() != self.adjusted(): ans = ans._rescale(ans.adjusted()+1-places, rounding) return ans def to_integral_exact(self, rounding=None, context=None): """Rounds to a nearby integer. If no rounding mode is specified, take the rounding mode from the context. This method raises the Rounded and Inexact flags when appropriate. See also: to_integral_value, which does exactly the same as this method except that it doesn't raise Inexact or Rounded. """ if self._is_special: ans = self._check_nans(context=context) if ans: return ans return Decimal(self) if self._exp >= 0: return Decimal(self) if not self: return _dec_from_triple(self._sign, '0', 0) if context is None: context = getcontext() if rounding is None: rounding = context.rounding ans = self._rescale(0, rounding) if ans != self: context._raise_error(Inexact) context._raise_error(Rounded) return ans def to_integral_value(self, rounding=None, context=None): """Rounds to the nearest integer, without raising inexact, rounded.""" if context is None: context = getcontext() if rounding is None: rounding = context.rounding if self._is_special: ans = self._check_nans(context=context) if ans: return ans return Decimal(self) if self._exp >= 0: return Decimal(self) else: return self._rescale(0, rounding) # the method name changed, but we provide also the old one, for compatibility to_integral = to_integral_value def sqrt(self, context=None): """Return the square root of self.""" if context is None: context = getcontext() if self._is_special: ans = self._check_nans(context=context) if ans: return ans if 
self._isinfinity() and self._sign == 0: return Decimal(self) if not self: # exponent = self._exp // 2. sqrt(-0) = -0 ans = _dec_from_triple(self._sign, '0', self._exp // 2) return ans._fix(context) if self._sign == 1: return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0') # At this point self represents a positive number. Let p be # the desired precision and express self in the form c*100**e # with c a positive real number and e an integer, c and e # being chosen so that 100**(p-1) <= c < 100**p. Then the # (exact) square root of self is sqrt(c)*10**e, and 10**(p-1) # <= sqrt(c) < 10**p, so the closest representable Decimal at # precision p is n*10**e where n = round_half_even(sqrt(c)), # the closest integer to sqrt(c) with the even integer chosen # in the case of a tie. # # To ensure correct rounding in all cases, we use the # following trick: we compute the square root to an extra # place (precision p+1 instead of precision p), rounding down. # Then, if the result is inexact and its last digit is 0 or 5, # we increase the last digit to 1 or 6 respectively; if it's # exact we leave the last digit alone. Now the final round to # p places (or fewer in the case of underflow) will round # correctly and raise the appropriate flags. # use an extra digit of precision prec = context.prec+1 # write argument in the form c*100**e where e = self._exp//2 # is the 'ideal' exponent, to be used if the square root is # exactly representable. l is the number of 'digits' of c in # base 100, so that 100**(l-1) <= c < 100**l. 
op = _WorkRep(self) e = op.exp >> 1 if op.exp & 1: c = op.int * 10 l = (len(self._int) >> 1) + 1 else: c = op.int l = len(self._int)+1 >> 1 # rescale so that c has exactly prec base 100 'digits' shift = prec-l if shift >= 0: c *= 100**shift exact = True else: c, remainder = divmod(c, 100**-shift) exact = not remainder e -= shift # find n = floor(sqrt(c)) using Newton's method n = 10**prec while True: q = c//n if n <= q: break else: n = n + q >> 1 exact = exact and n*n == c if exact: # result is exact; rescale to use ideal exponent e if shift >= 0: # assert n % 10**shift == 0 n //= 10**shift else: n *= 10**-shift e += shift else: # result is not exact; fix last digit as described above if n % 5 == 0: n += 1 ans = _dec_from_triple(0, str(n), e) # round, and fit to current context context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) ans = ans._fix(context) context.rounding = rounding return ans def max(self, other, context=None): """Returns the larger value. Like max(self, other) except if one is not a number, returns NaN (and signals if one is sNaN). Also rounds. """ other = _convert_other(other, raiseit=True) if context is None: context = getcontext() if self._is_special or other._is_special: # If one operand is a quiet NaN and the other is number, then the # number is always returned sn = self._isnan() on = other._isnan() if sn or on: if on == 1 and sn == 0: return self._fix(context) if sn == 1 and on == 0: return other._fix(context) return self._check_nans(other, context) c = self._cmp(other) if c == 0: # If both operands are finite and equal in numerical value # then an ordering is applied: # # If the signs differ then max returns the operand with the # positive sign and min returns the operand with the negative sign # # If the signs are the same then the exponent is used to select # the result. This is exactly the ordering used in compare_total. 
c = self.compare_total(other)

        if c == -1:
            ans = other
        else:
            ans = self

        return ans._fix(context)

    def min(self, other, context=None):
        """Returns the smaller value.

        Like min(self, other) except if one is not a number, returns
        NaN (and signals if one is sNaN).  Also rounds.
        """
        other = _convert_other(other, raiseit=True)

        if context is None:
            context = getcontext()

        if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is number, then the
            # number is always returned
            sn = self._isnan()
            on = other._isnan()
            if sn or on:
                if on == 1 and sn == 0:
                    return self._fix(context)
                if sn == 1 and on == 0:
                    return other._fix(context)
                return self._check_nans(other, context)

        c = self._cmp(other)
        if c == 0:
            # equal numerical values: break the tie with the total ordering
            c = self.compare_total(other)

        if c == -1:
            ans = self
        else:
            ans = other

        return ans._fix(context)

    def _isinteger(self):
        """Returns whether self is an integer"""
        if self._is_special:
            return False
        if self._exp >= 0:
            return True
        rest = self._int[self._exp:]
        return rest == '0'*len(rest)

    def _iseven(self):
        """Returns True if self is even.  Assumes self is an integer."""
        if not self or self._exp > 0:
            return True
        # self._exp <= 0 here, so index -1+self._exp picks the units digit
        return self._int[-1+self._exp] in '02468'

    def adjusted(self):
        """Return the adjusted exponent of self"""
        try:
            return self._exp + len(self._int) - 1
        # If NaN or Infinity, self._exp is string
        except TypeError:
            return 0

    def canonical(self):
        """Returns the same Decimal object.

        As we do not have different encodings for the same number, the
        received object already is in its canonical form.
        """
        return self

    def compare_signal(self, other, context=None):
        """Compares self to the other operand numerically.

        It's pretty much like compare(), but all NaNs signal, with signaling
        NaNs taking precedence over quiet NaNs.
        """
        other = _convert_other(other, raiseit = True)
        ans = self._compare_check_nans(other, context)
        if ans:
            return ans
        return self.compare(other, context=context)

    def compare_total(self, other, context=None):
        """Compares self to other using the abstract representations.

        This is not like the standard compare, which use their numerical
        value. Note that a total ordering is defined for all possible abstract
        representations.
        """
        other = _convert_other(other, raiseit=True)

        # if one is negative and the other is positive, it's easy
        if self._sign and not other._sign:
            return _NegativeOne
        if not self._sign and other._sign:
            return _One
        sign = self._sign

        # let's handle both NaN types
        self_nan = self._isnan()
        other_nan = other._isnan()
        if self_nan or other_nan:
            if self_nan == other_nan:
                # compare payloads as though they're integers
                self_key = len(self._int), self._int
                other_key = len(other._int), other._int
                if self_key < other_key:
                    if sign:
                        return _One
                    else:
                        return _NegativeOne
                if self_key > other_key:
                    if sign:
                        return _NegativeOne
                    else:
                        return _One
                return _Zero

            if sign:
                if self_nan == 1:
                    return _NegativeOne
                if other_nan == 1:
                    return _One
                if self_nan == 2:
                    return _NegativeOne
                if other_nan == 2:
                    return _One
            else:
                if self_nan == 1:
                    return _One
                if other_nan == 1:
                    return _NegativeOne
                if self_nan == 2:
                    return _One
                if other_nan == 2:
                    return _NegativeOne

        if self < other:
            return _NegativeOne
        if self > other:
            return _One

        # equal values: order by exponent (smaller exponent sorts first
        # for positive numbers, last for negative)
        if self._exp < other._exp:
            if sign:
                return _One
            else:
                return _NegativeOne
        if self._exp > other._exp:
            if sign:
                return _NegativeOne
            else:
                return _One
        return _Zero

    def compare_total_mag(self, other, context=None):
        """Compares self to other using abstract repr., ignoring sign.

        Like compare_total, but with operand's sign ignored and assumed to be 0.
        """
        other = _convert_other(other, raiseit=True)

        s = self.copy_abs()
        o = other.copy_abs()
        return s.compare_total(o)

    def copy_abs(self):
        """Returns a copy with the sign set to 0.
        """
        return _dec_from_triple(0, self._int, self._exp, self._is_special)

    def copy_negate(self):
        """Returns a copy with the sign inverted."""
        if self._sign:
            return _dec_from_triple(0, self._int, self._exp, self._is_special)
        else:
            return _dec_from_triple(1, self._int, self._exp, self._is_special)

    def copy_sign(self, other, context=None):
        """Returns self with the sign of other."""
        other = _convert_other(other, raiseit=True)
        return _dec_from_triple(other._sign, self._int,
                                self._exp, self._is_special)

    def exp(self, context=None):
        """Returns e ** self."""

        if context is None:
            context = getcontext()

        # exp(NaN) = NaN
        ans = self._check_nans(context=context)
        if ans:
            return ans

        # exp(-Infinity) = 0
        if self._isinfinity() == -1:
            return _Zero

        # exp(0) = 1
        if not self:
            return _One

        # exp(Infinity) = Infinity
        if self._isinfinity() == 1:
            return Decimal(self)

        # the result is now guaranteed to be inexact (the true
        # mathematical result is transcendental).  There's no need to
        # raise Rounded and Inexact here---they'll always be raised as
        # a result of the call to _fix.
        p = context.prec
        adj = self.adjusted()

        # we only need to do any computation for quite a small range
        # of adjusted exponents---for example, -29 <= adj <= 10 for
        # the default context.  For smaller exponent the result is
        # indistinguishable from 1 at the given precision, while for
        # larger exponent the result either overflows or underflows.
        if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
            # overflow
            ans = _dec_from_triple(0, '1', context.Emax+1)
        elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
            # underflow to 0
            ans = _dec_from_triple(0, '1', context.Etiny()-1)
        elif self._sign == 0 and adj < -p:
            # p+1 digits; final round will raise correct flags
            ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
        elif self._sign == 1 and adj < -p-1:
            # p+1 digits; final round will raise correct flags
            ans = _dec_from_triple(0, '9'*(p+1), -p-1)
        # general case
        else:
            op = _WorkRep(self)
            c, e = op.int, op.exp
            if op.sign == 1:
                c = -c

            # compute correctly rounded result: increase precision by
            # 3 digits at a time until we get an unambiguously
            # roundable result
            extra = 3
            while True:
                coeff, exp = _dexp(c, e, p+extra)
                if coeff % (5*10**(len(str(coeff))-p-1)):
                    break
                extra += 3

            ans = _dec_from_triple(0, str(coeff), exp)

        # at this stage, ans should round correctly with *any*
        # rounding mode, not just with ROUND_HALF_EVEN
        context = context._shallow_copy()
        rounding = context._set_rounding(ROUND_HALF_EVEN)
        ans = ans._fix(context)
        context.rounding = rounding

        return ans

    def is_canonical(self):
        """Return True if self is canonical; otherwise return False.

        Currently, the encoding of a Decimal instance is always
        canonical, so this method returns True for any Decimal.
        """
        return True

    def is_finite(self):
        """Return True if self is finite; otherwise return False.

        A Decimal instance is considered finite if it is neither
        infinite nor a NaN.
""" return not self._is_special def is_infinite(self): """Return True if self is infinite; otherwise return False.""" return self._exp == 'F' def is_nan(self): """Return True if self is a qNaN or sNaN; otherwise return False.""" return self._exp in ('n', 'N') def is_normal(self, context=None): """Return True if self is a normal number; otherwise return False.""" if self._is_special or not self: return False if context is None: context = getcontext() return context.Emin <= self.adjusted() def is_qnan(self): """Return True if self is a quiet NaN; otherwise return False.""" return self._exp == 'n' def is_signed(self): """Return True if self is negative; otherwise return False.""" return self._sign == 1 def is_snan(self): """Return True if self is a signaling NaN; otherwise return False.""" return self._exp == 'N' def is_subnormal(self, context=None): """Return True if self is subnormal; otherwise return False.""" if self._is_special or not self: return False if context is None: context = getcontext() return self.adjusted() < context.Emin def is_zero(self): """Return True if self is a zero; otherwise return False.""" return not self._is_special and self._int == '0' def _ln_exp_bound(self): """Compute a lower bound for the adjusted exponent of self.ln(). In other words, compute r such that self.ln() >= 10**r. Assumes that self is finite and positive and that self != 1. 
""" # for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1 adj = self._exp + len(self._int) - 1 if adj >= 1: # argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10) return len(str(adj*23//10)) - 1 if adj <= -2: # argument <= 0.1 return len(str((-1-adj)*23//10)) - 1 op = _WorkRep(self) c, e = op.int, op.exp if adj == 0: # 1 < self < 10 num = str(c-10**-e) den = str(c) return len(num) - len(den) - (num < den) # adj == -1, 0.1 <= self < 1 return e + len(str(10**-e - c)) - 1 def ln(self, context=None): """Returns the natural (base e) logarithm of self.""" if context is None: context = getcontext() # ln(NaN) = NaN ans = self._check_nans(context=context) if ans: return ans # ln(0.0) == -Infinity if not self: return _NegativeInfinity # ln(Infinity) = Infinity if self._isinfinity() == 1: return _Infinity # ln(1.0) == 0.0 if self == _One: return _Zero # ln(negative) raises InvalidOperation if self._sign == 1: return context._raise_error(InvalidOperation, 'ln of a negative value') # result is irrational, so necessarily inexact op = _WorkRep(self) c, e = op.int, op.exp p = context.prec # correctly rounded result: repeatedly increase precision by 3 # until we get an unambiguously roundable result places = p - self._ln_exp_bound() + 2 # at least p+3 places while True: coeff = _dlog(c, e, places) # assert len(str(abs(coeff)))-p >= 1 if coeff % (5*10**(len(str(abs(coeff)))-p-1)): break places += 3 ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places) context = context._shallow_copy() rounding = context._set_rounding(ROUND_HALF_EVEN) ans = ans._fix(context) context.rounding = rounding return ans def _log10_exp_bound(self): """Compute a lower bound for the adjusted exponent of self.log10(). In other words, find r such that self.log10() >= 10**r. Assumes that self is finite and positive and that self != 1. """ # For x >= 10 or x < 0.1 we only need a bound on the integer # part of log10(self), and this comes directly from the # exponent of x. 
        # For 0.1 <= x <= 10 we use the inequalities
        # 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
        # (1-1/x)/2.31 > 0.  If x < 1 then |log10(x)| > (1-x)/2.31 > 0

        adj = self._exp + len(self._int) - 1
        if adj >= 1:
            # self >= 10
            return len(str(adj))-1
        if adj <= -2:
            # self < 0.1
            return len(str(-1-adj))-1
        op = _WorkRep(self)
        c, e = op.int, op.exp
        if adj == 0:
            # 1 < self < 10
            num = str(c-10**-e)
            den = str(231*c)
            return len(num) - len(den) - (num < den) + 2
        # adj == -1, 0.1 <= self < 1
        num = str(10**-e-c)
        return len(num) + e - (num < "231") - 1

    def log10(self, context=None):
        """Returns the base 10 logarithm of self."""

        if context is None:
            context = getcontext()

        # log10(NaN) = NaN
        ans = self._check_nans(context=context)
        if ans:
            return ans

        # log10(0.0) == -Infinity
        if not self:
            return _NegativeInfinity

        # log10(Infinity) = Infinity
        if self._isinfinity() == 1:
            return _Infinity

        # log10(negative or -Infinity) raises InvalidOperation
        if self._sign == 1:
            return context._raise_error(InvalidOperation,
                                        'log10 of a negative value')

        # log10(10**n) = n
        if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
            # answer may need rounding
            ans = Decimal(self._exp + len(self._int) - 1)
        else:
            # result is irrational, so necessarily inexact
            op = _WorkRep(self)
            c, e = op.int, op.exp
            p = context.prec

            # correctly rounded result: repeatedly increase precision
            # until result is unambiguously roundable
            places = p-self._log10_exp_bound()+2
            while True:
                coeff = _dlog10(c, e, places)
                # assert len(str(abs(coeff)))-p >= 1
                if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
                    break
                places += 3
            ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)

        # round with HALF_EVEN; result is unambiguous under any mode
        context = context._shallow_copy()
        rounding = context._set_rounding(ROUND_HALF_EVEN)
        ans = ans._fix(context)
        context.rounding = rounding
        return ans

    def logb(self, context=None):
        """ Returns the exponent of the magnitude of self's MSD.
        The result is the integer which is the exponent of the magnitude
        of the most significant digit of self (as though it were truncated
        to a single digit while maintaining the value of that digit and
        without limiting the resulting exponent).
        """
        # logb(NaN) = NaN
        ans = self._check_nans(context=context)
        if ans:
            return ans

        if context is None:
            context = getcontext()

        # logb(+/-Inf) = +Inf
        if self._isinfinity():
            return _Infinity

        # logb(0) = -Inf, DivisionByZero
        if not self:
            return context._raise_error(DivisionByZero, 'logb(0)', 1)

        # otherwise, simply return the adjusted exponent of self, as a
        # Decimal.  Note that no attempt is made to fit the result
        # into the current context.
        ans = Decimal(self.adjusted())
        return ans._fix(context)

    def _islogical(self):
        """Return True if self is a logical operand.

        For being logical, it must be a finite number with a sign of 0,
        an exponent of 0, and a coefficient whose digits must all be
        either 0 or 1.
        """
        if self._sign != 0 or self._exp != 0:
            return False
        for dig in self._int:
            if dig not in '01':
                return False
        return True

    def _fill_logical(self, context, opa, opb):
        """Pad (with '0') or truncate both coefficient strings to
        exactly context.prec digits, keeping the rightmost digits."""
        dif = context.prec - len(opa)
        if dif > 0:
            opa = '0'*dif + opa
        elif dif < 0:
            opa = opa[-context.prec:]
        dif = context.prec - len(opb)
        if dif > 0:
            opb = '0'*dif + opb
        elif dif < 0:
            opb = opb[-context.prec:]
        return opa, opb

    def logical_and(self, other, context=None):
        """Applies an 'and' operation between self and other's digits."""
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        if not self._islogical() or not other._islogical():
            return context._raise_error(InvalidOperation)

        # fill to context.prec
        (opa, opb) = self._fill_logical(context, self._int, other._int)

        # make the operation, and clean starting zeroes
        result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
        return _dec_from_triple(0, result.lstrip('0') or '0', 0)

    def logical_invert(self, context=None):
        """Invert all its digits."""
        if context is None:
            context = getcontext()
        # inverting is xor with a coefficient of all ones
        return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
                                context)

    def logical_or(self, other, context=None):
        """Applies an 'or' operation between self and other's digits."""
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        if not self._islogical() or not other._islogical():
            return context._raise_error(InvalidOperation)

        # fill to context.prec
        (opa, opb) = self._fill_logical(context, self._int, other._int)

        # make the operation, and clean starting zeroes
        result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
        return _dec_from_triple(0, result.lstrip('0') or '0', 0)

    def logical_xor(self, other, context=None):
        """Applies an 'xor' operation between self and other's digits."""
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        if not self._islogical() or not other._islogical():
            return context._raise_error(InvalidOperation)

        # fill to context.prec
        (opa, opb) = self._fill_logical(context, self._int, other._int)

        # make the operation, and clean starting zeroes
        result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
        return _dec_from_triple(0, result.lstrip('0') or '0', 0)

    def max_mag(self, other, context=None):
        """Compares the values numerically with their sign ignored."""
        other = _convert_other(other, raiseit=True)

        if context is None:
            context = getcontext()

        if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is number, then the
            # number is always returned
            sn = self._isnan()
            on = other._isnan()
            if sn or on:
                if on == 1 and sn == 0:
                    return self._fix(context)
                if sn == 1 and on == 0:
                    return other._fix(context)
                return self._check_nans(other, context)

        c = self.copy_abs()._cmp(other.copy_abs())
        if c == 0:
            # break the tie using the abstract representation
            c = self.compare_total(other)

        if c == -1:
            ans = other
        else:
            ans = self

        return ans._fix(context)

    def min_mag(self, other, context=None):
        """Compares the values numerically with their sign ignored."""
        other = _convert_other(other, raiseit=True)

        if context is None:
            context = getcontext()

        if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is number, then the
            # number is always returned
            sn = self._isnan()
            on = other._isnan()
            if sn or on:
                if on == 1 and sn == 0:
                    return self._fix(context)
                if sn == 1 and on == 0:
                    return other._fix(context)
                return self._check_nans(other, context)

        c = self.copy_abs()._cmp(other.copy_abs())
        if c == 0:
            # break the tie using the abstract representation
            c = self.compare_total(other)

        if c == -1:
            ans = self
        else:
            ans = other

        return ans._fix(context)

    def next_minus(self, context=None):
        """Returns the largest representable number smaller than itself."""
        if context is None:
            context = getcontext()

        ans = self._check_nans(context=context)
        if ans:
            return ans

        if self._isinfinity() == -1:
            return _NegativeInfinity
        if self._isinfinity() == 1:
            # largest finite number in the context
            return _dec_from_triple(0, '9'*context.prec, context.Etop())

        # round self downwards (ROUND_FLOOR) with all flags suppressed;
        # if rounding changed the value, that rounded value is the answer
        context = context.copy()
        context._set_rounding(ROUND_FLOOR)
        context._ignore_all_flags()
        new_self = self._fix(context)
        if new_self != self:
            return new_self
        # otherwise step down by a sub-Etiny ulp and let __sub__ round
        return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
                            context)

    def next_plus(self, context=None):
        """Returns the smallest representable number larger than itself."""
        if context is None:
            context = getcontext()

        ans = self._check_nans(context=context)
        if ans:
            return ans

        if self._isinfinity() == 1:
            return _Infinity
        if self._isinfinity() == -1:
            # most negative finite number in the context
            return _dec_from_triple(1, '9'*context.prec, context.Etop())

        # round self upwards (ROUND_CEILING) with all flags suppressed;
        # if rounding changed the value, that rounded value is the answer
        context = context.copy()
        context._set_rounding(ROUND_CEILING)
        context._ignore_all_flags()
        new_self = self._fix(context)
        if new_self != self:
            return new_self
        # otherwise step up by a sub-Etiny ulp and let __add__ round
        return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
                            context)

    def next_toward(self, other, context=None):
        """Returns the number closest to self, in the direction towards other.

        The result is the closest representable number to self
        (excluding self) that is in the direction towards other,
        unless both have the same value.
        If the two operands are numerically equal, then the result is a copy of
        self with the sign set to be the same as the sign of other.
        """
        other = _convert_other(other, raiseit=True)

        if context is None:
            context = getcontext()

        ans = self._check_nans(other, context)
        if ans:
            return ans

        comparison = self._cmp(other)
        if comparison == 0:
            return self.copy_sign(other)

        if comparison == -1:
            ans = self.next_plus(context)
        else: # comparison == 1
            ans = self.next_minus(context)

        # decide which flags to raise using value of ans
        if ans._isinfinity():
            context._raise_error(Overflow,
                                 'Infinite result from next_toward',
                                 ans._sign)
            context._raise_error(Inexact)
            context._raise_error(Rounded)
        elif ans.adjusted() < context.Emin:
            context._raise_error(Underflow)
            context._raise_error(Subnormal)
            context._raise_error(Inexact)
            context._raise_error(Rounded)
            # if precision == 1 then we don't raise Clamped for a
            # result 0E-Etiny.
            if not ans:
                context._raise_error(Clamped)

        return ans

    def number_class(self, context=None):
        """Returns an indication of the class of self.
        The class is one of the following strings:
          sNaN
          NaN
          -Infinity
          -Normal
          -Subnormal
          -Zero
          +Zero
          +Subnormal
          +Normal
          +Infinity
        """
        if self.is_snan():
            return "sNaN"
        if self.is_qnan():
            return "NaN"
        inf = self._isinfinity()
        if inf == 1:
            return "+Infinity"
        if inf == -1:
            return "-Infinity"
        if self.is_zero():
            if self._sign:
                return "-Zero"
            else:
                return "+Zero"
        if context is None:
            context = getcontext()
        if self.is_subnormal(context=context):
            if self._sign:
                return "-Subnormal"
            else:
                return "+Subnormal"
        # just a normal, regular, boring number, :)
        if self._sign:
            return "-Normal"
        else:
            return "+Normal"

    def radix(self):
        """Just returns 10, as this is Decimal, :)"""
        return Decimal(10)

    def rotate(self, other, context=None):
        """Returns a rotated copy of self, value-of-other times."""
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        ans = self._check_nans(other, context)
        if ans:
            return ans

        # the rotation count must be an integer within [-prec, prec]
        if other._exp != 0:
            return context._raise_error(InvalidOperation)
        if not (-context.prec <= int(other) <= context.prec):
            return context._raise_error(InvalidOperation)

        if self._isinfinity():
            return Decimal(self)

        # get values, pad if necessary
        torot = int(other)
        rotdig = self._int
        topad = context.prec - len(rotdig)
        if topad > 0:
            rotdig = '0'*topad + rotdig
        elif topad < 0:
            rotdig = rotdig[-topad:]

        # let's rotate!
        rotated = rotdig[torot:] + rotdig[:torot]
        return _dec_from_triple(self._sign,
                                rotated.lstrip('0') or '0', self._exp)

    def scaleb(self, other, context=None):
        """Returns self operand after adding the second value to its exp."""
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        ans = self._check_nans(other, context)
        if ans:
            return ans

        # the scale must be an integer within [liminf, limsup]
        if other._exp != 0:
            return context._raise_error(InvalidOperation)
        liminf = -2 * (context.Emax + context.prec)
        limsup =  2 * (context.Emax + context.prec)
        if not (liminf <= int(other) <= limsup):
            return context._raise_error(InvalidOperation)

        if self._isinfinity():
            return Decimal(self)

        d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
        d = d._fix(context)
        return d

    def shift(self, other, context=None):
        """Returns a shifted copy of self, value-of-other times."""
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        ans = self._check_nans(other, context)
        if ans:
            return ans

        # the shift count must be an integer within [-prec, prec]
        if other._exp != 0:
            return context._raise_error(InvalidOperation)
        if not (-context.prec <= int(other) <= context.prec):
            return context._raise_error(InvalidOperation)

        if self._isinfinity():
            return Decimal(self)

        # get values, pad if necessary
        torot = int(other)
        rotdig = self._int
        topad = context.prec - len(rotdig)
        if topad > 0:
            rotdig = '0'*topad + rotdig
        elif topad < 0:
            rotdig = rotdig[-topad:]

        # let's shift!
        if torot < 0:
            # negative count: drop digits from the right
            shifted = rotdig[:torot]
        else:
            # positive count: append zeros, keep rightmost prec digits
            shifted = rotdig + '0'*torot
            shifted = shifted[-context.prec:]

        return _dec_from_triple(self._sign,
                                shifted.lstrip('0') or '0', self._exp)

    # Support for pickling, copy, and deepcopy
    def __reduce__(self):
        return (self.__class__, (str(self),))

    def __copy__(self):
        if type(self) is Decimal:
            return self     # I'm immutable; therefore I am my own clone
        return self.__class__(str(self))

    def __deepcopy__(self, memo):
        if type(self) is Decimal:
            return self     # My components are also immutable
        return self.__class__(str(self))

    # PEP 3101 support.
    # The _localeconv keyword argument should be
    # considered private: it's provided for ease of testing only.
    def __format__(self, specifier, context=None, _localeconv=None):
        """Format a Decimal instance according to the given specifier.

        The specifier should be a standard format specifier, with the
        form described in PEP 3101.  Formatting types 'e', 'E', 'f',
        'F', 'g', 'G', 'n' and '%' are supported.  If the formatting
        type is omitted it defaults to 'g' or 'G', depending on the
        value of context.capitals.
        """
        # Note: PEP 3101 says that if the type is not present then
        # there should be at least one digit after the decimal point.
        # We take the liberty of ignoring this requirement for
        # Decimal---it's presumably there to make sure that
        # format(float, '') behaves similarly to str(float).
        if context is None:
            context = getcontext()

        spec = _parse_format_specifier(specifier, _localeconv=_localeconv)

        # special values don't care about the type or precision
        if self._is_special:
            sign = _format_sign(self._sign, spec)
            body = str(self.copy_abs())
            return _format_align(sign, body, spec)

        # a type of None defaults to 'g' or 'G', depending on context
        if spec['type'] is None:
            spec['type'] = ['g', 'G'][context.capitals]

        # if type is '%', adjust exponent of self accordingly
        if spec['type'] == '%':
            self = _dec_from_triple(self._sign, self._int, self._exp+2)

        # round if necessary, taking rounding mode from the context
        rounding = context.rounding
        precision = spec['precision']
        if precision is not None:
            if spec['type'] in 'eE':
                self = self._round(precision+1, rounding)
            elif spec['type'] in 'fF%':
                self = self._rescale(-precision, rounding)
            elif spec['type'] in 'gG' and len(self._int) > precision:
                self = self._round(precision, rounding)
        # special case: zeros with a positive exponent can't be
        # represented in fixed point; rescale them to 0e0.
        if not self and self._exp > 0 and spec['type'] in 'fF%':
            self = self._rescale(0, rounding)

        # figure out placement of the decimal point
        leftdigits = self._exp + len(self._int)
        if spec['type'] in 'eE':
            if not self and precision is not None:
                dotplace = 1 - precision
            else:
                dotplace = 1
        elif spec['type'] in 'fF%':
            dotplace = leftdigits
        elif spec['type'] in 'gG':
            if self._exp <= 0 and leftdigits > -6:
                dotplace = leftdigits
            else:
                dotplace = 1

        # find digits before and after decimal point, and get exponent
        if dotplace < 0:
            intpart = '0'
            fracpart = '0'*(-dotplace) + self._int
        elif dotplace > len(self._int):
            intpart = self._int + '0'*(dotplace-len(self._int))
            fracpart = ''
        else:
            intpart = self._int[:dotplace] or '0'
            fracpart = self._int[dotplace:]
        exp = leftdigits-dotplace

        # done with the decimal-specific stuff;  hand over the rest
        # of the formatting to the _format_number function
        return _format_number(self._sign, intpart, fracpart, exp, spec)

def _dec_from_triple(sign, coefficient, exponent, special=False):
    """Create a decimal instance directly, without any validation,
    normalization (e.g. removal of leading zeros) or argument
    conversion.

    This function is for *internal use only*.
    """

    # bypass Decimal.__new__ so no parsing/validation happens
    self = object.__new__(Decimal)
    self._sign = sign
    self._int = coefficient
    self._exp = exponent
    self._is_special = special

    return self

# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
_numbers.Number.register(Decimal)

##### Context class #######################################################

class _ContextManager(object):
    """Context manager class to support localcontext().
    Sets a copy of the supplied context in __enter__() and restores
    the previous decimal context in __exit__()
    """
    def __init__(self, new_context):
        # copy so the caller's context object is never mutated
        self.new_context = new_context.copy()
    def __enter__(self):
        self.saved_context = getcontext()
        setcontext(self.new_context)
        return self.new_context
    def __exit__(self, t, v, tb):
        setcontext(self.saved_context)

class Context(object):
    """Contains the context for a Decimal instance.

    Contains:
    prec - precision (for use in rounding, division, square roots..)
    rounding - rounding type (how you round)
    traps - If traps[exception] = 1, then the exception is
                    raised when it is caused.  Otherwise, a value is
                    substituted in.
    flags  - When an exception is caused, flags[exception] is set.
             (Whether or not the trap_enabler is set)
             Should be reset by user of Decimal instance.
    Emin -   Minimum exponent
    Emax -   Maximum exponent
    capitals -      If 1, 1*10^1 is printed as 1E+1.
                    If 0, printed as 1e1
    clamp -  If 1, change exponents if too high (Default 0)
    """

    def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
                       capitals=None, clamp=None, flags=None, traps=None,
                       _ignored_flags=None):
        # Set defaults; for everything except flags and _ignored_flags,
        # inherit from DefaultContext.
        try:
            dc = DefaultContext
        except NameError:
            # DefaultContext doesn't exist yet -- presumably this is the
            # module-level construction of DefaultContext itself, which
            # passes every argument explicitly, so dc is never consulted.
            pass

        self.prec = prec if prec is not None else dc.prec
        self.rounding = rounding if rounding is not None else dc.rounding
        self.Emin = Emin if Emin is not None else dc.Emin
        self.Emax = Emax if Emax is not None else dc.Emax
        self.capitals = capitals if capitals is not None else dc.capitals
        self.clamp = clamp if clamp is not None else dc.clamp
        if _ignored_flags is None:
            self._ignored_flags = []
        else:
            self._ignored_flags = _ignored_flags
        # traps/flags may be given as a dict, or as a list of the
        # signals that should be set
        if traps is None:
            self.traps = dc.traps.copy()
        elif not isinstance(traps, dict):
            self.traps = dict((s, int(s in traps)) for s in _signals + traps)
        else:
            self.traps = traps
        if flags is None:
            self.flags = dict.fromkeys(_signals, 0)
        elif not isinstance(flags, dict):
            self.flags = dict((s, int(s in flags)) for s in _signals + flags)
        else:
            self.flags = flags

    def _set_integer_check(self, name, value, vmin, vmax):
        """Validate that value is an int within [vmin, vmax] (either
        bound may be the string '-inf'/'inf' for unbounded) and store it."""
        if not isinstance(value, int):
            raise TypeError("%s must be an integer" % name)
        if vmin == '-inf':
            if value > vmax:
                raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
        elif vmax == 'inf':
            if value < vmin:
                raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
        else:
            if value < vmin or value > vmax:
                raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value))
        # bypass __setattr__ to avoid re-validating
        return object.__setattr__(self, name, value)

    def _set_signal_dict(self, name, d):
        """Validate that d is a dict whose keys are exactly _signals
        and store it."""
        if not isinstance(d, dict):
            raise TypeError("%s must be a signal dict" % d)
        for key in d:
            if not key in _signals:
                raise KeyError("%s is not a valid signal dict" % d)
        for key in _signals:
            if not key in d:
                raise KeyError("%s is not a valid signal dict" % d)
        return object.__setattr__(self, name, d)

    def __setattr__(self, name, value):
        # every public attribute is validated; unknown names are rejected
        if name == 'prec':
            return self._set_integer_check(name, value, 1, 'inf')
        elif name == 'Emin':
            return self._set_integer_check(name, value, '-inf', 0)
        elif name == 'Emax':
            return self._set_integer_check(name, value, 0, 'inf')
        elif name == 'capitals':
            return self._set_integer_check(name, value, 0, 1)
        elif name == 'clamp':
            return self._set_integer_check(name, value, 0, 1)
        elif name == 'rounding':
            if not value in _rounding_modes:
                # raise TypeError even for strings to have consistency
                # among various implementations.
                raise TypeError("%s: invalid rounding mode" % value)
            return object.__setattr__(self, name, value)
        elif name == 'flags' or name == 'traps':
            return self._set_signal_dict(name, value)
        elif name == '_ignored_flags':
            return object.__setattr__(self, name, value)
        else:
            raise AttributeError(
                "'decimal.Context' object has no attribute '%s'" % name)

    def __delattr__(self, name):
        raise AttributeError("%s cannot be deleted" % name)

    # Support for pickling, copy, and deepcopy
    def __reduce__(self):
        # reconstruct from the set flags/traps only
        flags = [sig for sig, v in self.flags.items() if v]
        traps = [sig for sig, v in self.traps.items() if v]
        return (self.__class__,
                (self.prec, self.rounding, self.Emin, self.Emax,
                 self.capitals, self.clamp, flags, traps))

    def __repr__(self):
        """Show the current context."""
        s = []
        s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
                 'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
                 'clamp=%(clamp)d'
                 % vars(self))
        names = [f.__name__ for f, v in self.flags.items() if v]
        s.append('flags=[' + ', '.join(names) + ']')
        names = [t.__name__ for t, v in self.traps.items() if v]
        s.append('traps=[' + ', '.join(names) + ']')
        return ', '.join(s) + ')'

    def clear_flags(self):
        """Reset all flags to zero"""
        for flag in self.flags:
            self.flags[flag] = 0

    def clear_traps(self):
        """Reset all traps to zero"""
        for flag in self.traps:
            self.traps[flag] = 0

    def _shallow_copy(self):
        """Returns a shallow copy from self."""
        nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
                     self.capitals, self.clamp, self.flags, self.traps,
                     self._ignored_flags)
        return nc

    def copy(self):
        """Returns a deep copy from self."""
        nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
                     self.capitals, self.clamp,
                     self.flags.copy(), self.traps.copy(),
                     self._ignored_flags)
        return nc
    __copy__ = copy

    def _raise_error(self, condition, explanation = None, *args):
        """Handles an error

        If the flag is in _ignored_flags, returns the default response.
        Otherwise, it sets the flag, then, if the corresponding
        trap_enabler is set, it reraises the exception.  Otherwise, it returns
        the default value after setting the flag.
        """
        error = _condition_map.get(condition, condition)
        if error in self._ignored_flags:
            # Don't touch the flag
            return error().handle(self, *args)

        self.flags[error] = 1
        if not self.traps[error]:
            # The errors define how to handle themselves.
            return condition().handle(self, *args)

        # Errors should only be risked on copies of the context
        # self._ignored_flags = []
        raise error(explanation)

    def _ignore_all_flags(self):
        """Ignore all flags, if they are raised"""
        return self._ignore_flags(*_signals)

    def _ignore_flags(self, *flags):
        """Ignore the flags, if they are raised"""
        # Do not mutate-- This way, copies of a context leave the original
        # alone.
        self._ignored_flags = (self._ignored_flags + list(flags))
        return list(flags)

    def _regard_flags(self, *flags):
        """Stop ignoring the flags, if they are raised"""
        if flags and isinstance(flags[0], (tuple,list)):
            flags = flags[0]
        for flag in flags:
            self._ignored_flags.remove(flag)

    # We inherit object.__hash__, so we must deny this explicitly
    __hash__ = None

    def Etiny(self):
        """Returns Etiny (= Emin - prec + 1)"""
        return int(self.Emin - self.prec + 1)

    def Etop(self):
        """Returns maximum exponent (= Emax - prec + 1)"""
        return int(self.Emax - self.prec + 1)

    def _set_rounding(self, type):
        """Sets the rounding type.

        Sets the rounding type, and returns the current (previous)
        rounding type.  Often used like:

        context = context.copy()
        # so you don't change the calling context
        # if an error occurs in the middle.
        rounding = context._set_rounding(ROUND_UP)
        val = self.__sub__(other, context=context)
        context._set_rounding(rounding)

        This will make it round up for that operation.
        """
        rounding = self.rounding
        self.rounding= type
        return rounding

    def create_decimal(self, num='0'):
        """Creates a new Decimal instance but using self as context.

        This method implements the to-number operation of the
        IBM Decimal specification."""

        if isinstance(num, str) and num != num.strip():
            return self._raise_error(ConversionSyntax,
                                     "no trailing or leading whitespace is "
                                     "permitted.")

        d = Decimal(num, context=self)
        if d._isnan() and len(d._int) > self.prec - self.clamp:
            # NaN payload must fit in the context's precision
            return self._raise_error(ConversionSyntax,
                                     "diagnostic info too long in NaN")
        return d._fix(self)

    def create_decimal_from_float(self, f):
        """Creates a new Decimal instance from a float but rounding using self
        as the context.

        >>> context = Context(prec=5, rounding=ROUND_DOWN)
        >>> context.create_decimal_from_float(3.1415926535897932)
        Decimal('3.1415')
        >>> context = Context(prec=5, traps=[Inexact])
        >>> context.create_decimal_from_float(3.1415926535897932)
        Traceback (most recent call last):
            ...
        decimal.Inexact: None

        """
        d = Decimal.from_float(f)       # An exact conversion
        return d._fix(self)             # Apply the context rounding

    # Methods
    def abs(self, a):
        """Returns the absolute value of the operand.

        If the operand is negative, the result is the same as using the minus
        operation on the operand.  Otherwise, the result is the same as using
        the plus operation on the operand.

        >>> ExtendedContext.abs(Decimal('2.1'))
        Decimal('2.1')
        >>> ExtendedContext.abs(Decimal('-100'))
        Decimal('100')
        >>> ExtendedContext.abs(Decimal('101.5'))
        Decimal('101.5')
        >>> ExtendedContext.abs(Decimal('-101.5'))
        Decimal('101.5')
        >>> ExtendedContext.abs(-1)
        Decimal('1')
        """
        a = _convert_other(a, raiseit=True)
        return a.__abs__(context=self)

    def add(self, a, b):
        """Return the sum of the two operands.

        >>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
        Decimal('19.00')
        >>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
        Decimal('1.02E+4')
        >>> ExtendedContext.add(1, Decimal(2))
        Decimal('3')
        >>> ExtendedContext.add(Decimal(8), 5)
        Decimal('13')
        >>> ExtendedContext.add(5, 5)
        Decimal('10')
        """
        a = _convert_other(a, raiseit=True)
        r = a.__add__(b, context=self)
        if r is NotImplemented:
            # b could not be converted to Decimal by __add__
            raise TypeError("Unable to convert %s to Decimal" % b)
        else:
            return r

    def _apply(self, a):
        # round a to this context and return its string form
        return str(a._fix(self))

    def canonical(self, a):
        """Returns the same Decimal object.

        As we do not have different encodings for the same number, the
        received object already is in its canonical form.

        >>> ExtendedContext.canonical(Decimal('2.50'))
        Decimal('2.50')
        """
        if not isinstance(a, Decimal):
            raise TypeError("canonical requires a Decimal as an argument.")
        return a.canonical()

    def compare(self, a, b):
        """Compares values numerically.

        If the signs of the operands differ, a value representing each operand
        ('-1' if the operand is less than zero, '0' if the operand is zero or
        negative zero, or '1' if the operand is greater than zero) is used in
        place of that operand for the comparison instead of the actual
        operand.
The comparison is then effected by subtracting the second operand from the first and then returning a value according to the result of the subtraction: '-1' if the result is less than zero, '0' if the result is zero or negative zero, or '1' if the result is greater than zero. >>> ExtendedContext.compare(Decimal('2.1'), Decimal('3')) Decimal('-1') >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1')) Decimal('0') >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10')) Decimal('0') >>> ExtendedContext.compare(Decimal('3'), Decimal('2.1')) Decimal('1') >>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3')) Decimal('1') >>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1')) Decimal('-1') >>> ExtendedContext.compare(1, 2) Decimal('-1') >>> ExtendedContext.compare(Decimal(1), 2) Decimal('-1') >>> ExtendedContext.compare(1, Decimal(2)) Decimal('-1') """ a = _convert_other(a, raiseit=True) return a.compare(b, context=self) def compare_signal(self, a, b): """Compares the values of the two operands numerically. It's pretty much like compare(), but all NaNs signal, with signaling NaNs taking precedence over quiet NaNs. >>> c = ExtendedContext >>> c.compare_signal(Decimal('2.1'), Decimal('3')) Decimal('-1') >>> c.compare_signal(Decimal('2.1'), Decimal('2.1')) Decimal('0') >>> c.flags[InvalidOperation] = 0 >>> print(c.flags[InvalidOperation]) 0 >>> c.compare_signal(Decimal('NaN'), Decimal('2.1')) Decimal('NaN') >>> print(c.flags[InvalidOperation]) 1 >>> c.flags[InvalidOperation] = 0 >>> print(c.flags[InvalidOperation]) 0 >>> c.compare_signal(Decimal('sNaN'), Decimal('2.1')) Decimal('NaN') >>> print(c.flags[InvalidOperation]) 1 >>> c.compare_signal(-1, 2) Decimal('-1') >>> c.compare_signal(Decimal(-1), 2) Decimal('-1') >>> c.compare_signal(-1, Decimal(2)) Decimal('-1') """ a = _convert_other(a, raiseit=True) return a.compare_signal(b, context=self) def compare_total(self, a, b): """Compares two operands using their abstract representation. 
This is not like the standard compare, which use their numerical value. Note that a total ordering is defined for all possible abstract representations. >>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9')) Decimal('-1') >>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12')) Decimal('-1') >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3')) Decimal('-1') >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30')) Decimal('0') >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300')) Decimal('1') >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN')) Decimal('-1') >>> ExtendedContext.compare_total(1, 2) Decimal('-1') >>> ExtendedContext.compare_total(Decimal(1), 2) Decimal('-1') >>> ExtendedContext.compare_total(1, Decimal(2)) Decimal('-1') """ a = _convert_other(a, raiseit=True) return a.compare_total(b) def compare_total_mag(self, a, b): """Compares two operands using their abstract representation ignoring sign. Like compare_total, but with operand's sign ignored and assumed to be 0. """ a = _convert_other(a, raiseit=True) return a.compare_total_mag(b) def copy_abs(self, a): """Returns a copy of the operand with the sign set to 0. >>> ExtendedContext.copy_abs(Decimal('2.1')) Decimal('2.1') >>> ExtendedContext.copy_abs(Decimal('-100')) Decimal('100') >>> ExtendedContext.copy_abs(-1) Decimal('1') """ a = _convert_other(a, raiseit=True) return a.copy_abs() def copy_decimal(self, a): """Returns a copy of the decimal object. >>> ExtendedContext.copy_decimal(Decimal('2.1')) Decimal('2.1') >>> ExtendedContext.copy_decimal(Decimal('-1.00')) Decimal('-1.00') >>> ExtendedContext.copy_decimal(1) Decimal('1') """ a = _convert_other(a, raiseit=True) return Decimal(a) def copy_negate(self, a): """Returns a copy of the operand with the sign inverted. 
        >>> ExtendedContext.copy_negate(Decimal('101.5'))
        Decimal('-101.5')
        >>> ExtendedContext.copy_negate(Decimal('-101.5'))
        Decimal('101.5')
        >>> ExtendedContext.copy_negate(1)
        Decimal('-1')
        """
        a = _convert_other(a, raiseit=True)
        # copy_negate is a quiet operation: note that no context is passed,
        # so no rounding occurs and no flags are raised.
        return a.copy_negate()

    def copy_sign(self, a, b):
        """Copies the second operand's sign to the first one.

        In detail, it returns a copy of the first operand with the sign
        equal to the sign of the second operand.

        >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
        Decimal('1.50')
        >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
        Decimal('1.50')
        >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
        Decimal('-1.50')
        >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
        Decimal('-1.50')
        >>> ExtendedContext.copy_sign(1, -2)
        Decimal('-1')
        >>> ExtendedContext.copy_sign(Decimal(1), -2)
        Decimal('-1')
        >>> ExtendedContext.copy_sign(1, Decimal(-2))
        Decimal('-1')
        """
        # Only 'a' needs explicit conversion here; Decimal.copy_sign
        # handles converting 'b' itself.  Like the other copy_* methods,
        # this is quiet (no context argument, no flags).
        a = _convert_other(a, raiseit=True)
        return a.copy_sign(b)

    def divide(self, a, b):
        """Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3')) Decimal('0.333333333') >>> ExtendedContext.divide(Decimal('2'), Decimal('3')) Decimal('0.666666667') >>> ExtendedContext.divide(Decimal('5'), Decimal('2')) Decimal('2.5') >>> ExtendedContext.divide(Decimal('1'), Decimal('10')) Decimal('0.1') >>> ExtendedContext.divide(Decimal('12'), Decimal('12')) Decimal('1') >>> ExtendedContext.divide(Decimal('8.00'), Decimal('2')) Decimal('4.00') >>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0')) Decimal('1.20') >>> ExtendedContext.divide(Decimal('1000'), Decimal('100')) Decimal('10') >>> ExtendedContext.divide(Decimal('1000'), Decimal('1')) Decimal('1000') >>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2')) Decimal('1.20E+6') >>> ExtendedContext.divide(5, 5) Decimal('1') >>> ExtendedContext.divide(Decimal(5), 5) Decimal('1') >>> ExtendedContext.divide(5, Decimal(5)) Decimal('1') """ a = _convert_other(a, raiseit=True) r = a.__truediv__(b, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r def divide_int(self, a, b): """Divides two numbers and returns the integer part of the result. >>> ExtendedContext.divide_int(Decimal('2'), Decimal('3')) Decimal('0') >>> ExtendedContext.divide_int(Decimal('10'), Decimal('3')) Decimal('3') >>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3')) Decimal('3') >>> ExtendedContext.divide_int(10, 3) Decimal('3') >>> ExtendedContext.divide_int(Decimal(10), 3) Decimal('3') >>> ExtendedContext.divide_int(10, Decimal(3)) Decimal('3') """ a = _convert_other(a, raiseit=True) r = a.__floordiv__(b, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r def divmod(self, a, b): """Return (a // b, a % b). 
>>> ExtendedContext.divmod(Decimal(8), Decimal(3)) (Decimal('2'), Decimal('2')) >>> ExtendedContext.divmod(Decimal(8), Decimal(4)) (Decimal('2'), Decimal('0')) >>> ExtendedContext.divmod(8, 4) (Decimal('2'), Decimal('0')) >>> ExtendedContext.divmod(Decimal(8), 4) (Decimal('2'), Decimal('0')) >>> ExtendedContext.divmod(8, Decimal(4)) (Decimal('2'), Decimal('0')) """ a = _convert_other(a, raiseit=True) r = a.__divmod__(b, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r def exp(self, a): """Returns e ** a. >>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.exp(Decimal('-Infinity')) Decimal('0') >>> c.exp(Decimal('-1')) Decimal('0.367879441') >>> c.exp(Decimal('0')) Decimal('1') >>> c.exp(Decimal('1')) Decimal('2.71828183') >>> c.exp(Decimal('0.693147181')) Decimal('2.00000000') >>> c.exp(Decimal('+Infinity')) Decimal('Infinity') >>> c.exp(10) Decimal('22026.4658') """ a =_convert_other(a, raiseit=True) return a.exp(context=self) def fma(self, a, b, c): """Returns a multiplied by b, plus c. The first two operands are multiplied together, using multiply, the third operand is then added to the result of that multiplication, using add, all with only one final rounding. >>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7')) Decimal('22') >>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7')) Decimal('-8') >>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578')) Decimal('1.38435736E+12') >>> ExtendedContext.fma(1, 3, 4) Decimal('7') >>> ExtendedContext.fma(1, Decimal(3), 4) Decimal('7') >>> ExtendedContext.fma(1, 3, Decimal(4)) Decimal('7') """ a = _convert_other(a, raiseit=True) return a.fma(b, c, context=self) def is_canonical(self, a): """Return True if the operand is canonical; otherwise return False. Currently, the encoding of a Decimal instance is always canonical, so this method returns True for any Decimal. 
>>> ExtendedContext.is_canonical(Decimal('2.50')) True """ if not isinstance(a, Decimal): raise TypeError("is_canonical requires a Decimal as an argument.") return a.is_canonical() def is_finite(self, a): """Return True if the operand is finite; otherwise return False. A Decimal instance is considered finite if it is neither infinite nor a NaN. >>> ExtendedContext.is_finite(Decimal('2.50')) True >>> ExtendedContext.is_finite(Decimal('-0.3')) True >>> ExtendedContext.is_finite(Decimal('0')) True >>> ExtendedContext.is_finite(Decimal('Inf')) False >>> ExtendedContext.is_finite(Decimal('NaN')) False >>> ExtendedContext.is_finite(1) True """ a = _convert_other(a, raiseit=True) return a.is_finite() def is_infinite(self, a): """Return True if the operand is infinite; otherwise return False. >>> ExtendedContext.is_infinite(Decimal('2.50')) False >>> ExtendedContext.is_infinite(Decimal('-Inf')) True >>> ExtendedContext.is_infinite(Decimal('NaN')) False >>> ExtendedContext.is_infinite(1) False """ a = _convert_other(a, raiseit=True) return a.is_infinite() def is_nan(self, a): """Return True if the operand is a qNaN or sNaN; otherwise return False. >>> ExtendedContext.is_nan(Decimal('2.50')) False >>> ExtendedContext.is_nan(Decimal('NaN')) True >>> ExtendedContext.is_nan(Decimal('-sNaN')) True >>> ExtendedContext.is_nan(1) False """ a = _convert_other(a, raiseit=True) return a.is_nan() def is_normal(self, a): """Return True if the operand is a normal number; otherwise return False. >>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.is_normal(Decimal('2.50')) True >>> c.is_normal(Decimal('0.1E-999')) False >>> c.is_normal(Decimal('0.00')) False >>> c.is_normal(Decimal('-Inf')) False >>> c.is_normal(Decimal('NaN')) False >>> c.is_normal(1) True """ a = _convert_other(a, raiseit=True) return a.is_normal(context=self) def is_qnan(self, a): """Return True if the operand is a quiet NaN; otherwise return False. 
>>> ExtendedContext.is_qnan(Decimal('2.50')) False >>> ExtendedContext.is_qnan(Decimal('NaN')) True >>> ExtendedContext.is_qnan(Decimal('sNaN')) False >>> ExtendedContext.is_qnan(1) False """ a = _convert_other(a, raiseit=True) return a.is_qnan() def is_signed(self, a): """Return True if the operand is negative; otherwise return False. >>> ExtendedContext.is_signed(Decimal('2.50')) False >>> ExtendedContext.is_signed(Decimal('-12')) True >>> ExtendedContext.is_signed(Decimal('-0')) True >>> ExtendedContext.is_signed(8) False >>> ExtendedContext.is_signed(-8) True """ a = _convert_other(a, raiseit=True) return a.is_signed() def is_snan(self, a): """Return True if the operand is a signaling NaN; otherwise return False. >>> ExtendedContext.is_snan(Decimal('2.50')) False >>> ExtendedContext.is_snan(Decimal('NaN')) False >>> ExtendedContext.is_snan(Decimal('sNaN')) True >>> ExtendedContext.is_snan(1) False """ a = _convert_other(a, raiseit=True) return a.is_snan() def is_subnormal(self, a): """Return True if the operand is subnormal; otherwise return False. >>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.is_subnormal(Decimal('2.50')) False >>> c.is_subnormal(Decimal('0.1E-999')) True >>> c.is_subnormal(Decimal('0.00')) False >>> c.is_subnormal(Decimal('-Inf')) False >>> c.is_subnormal(Decimal('NaN')) False >>> c.is_subnormal(1) False """ a = _convert_other(a, raiseit=True) return a.is_subnormal(context=self) def is_zero(self, a): """Return True if the operand is a zero; otherwise return False. >>> ExtendedContext.is_zero(Decimal('0')) True >>> ExtendedContext.is_zero(Decimal('2.50')) False >>> ExtendedContext.is_zero(Decimal('-0E+2')) True >>> ExtendedContext.is_zero(1) False >>> ExtendedContext.is_zero(0) True """ a = _convert_other(a, raiseit=True) return a.is_zero() def ln(self, a): """Returns the natural (base e) logarithm of the operand. 
>>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.ln(Decimal('0')) Decimal('-Infinity') >>> c.ln(Decimal('1.000')) Decimal('0') >>> c.ln(Decimal('2.71828183')) Decimal('1.00000000') >>> c.ln(Decimal('10')) Decimal('2.30258509') >>> c.ln(Decimal('+Infinity')) Decimal('Infinity') >>> c.ln(1) Decimal('0') """ a = _convert_other(a, raiseit=True) return a.ln(context=self) def log10(self, a): """Returns the base 10 logarithm of the operand. >>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.log10(Decimal('0')) Decimal('-Infinity') >>> c.log10(Decimal('0.001')) Decimal('-3') >>> c.log10(Decimal('1.000')) Decimal('0') >>> c.log10(Decimal('2')) Decimal('0.301029996') >>> c.log10(Decimal('10')) Decimal('1') >>> c.log10(Decimal('70')) Decimal('1.84509804') >>> c.log10(Decimal('+Infinity')) Decimal('Infinity') >>> c.log10(0) Decimal('-Infinity') >>> c.log10(1) Decimal('0') """ a = _convert_other(a, raiseit=True) return a.log10(context=self) def logb(self, a): """ Returns the exponent of the magnitude of the operand's MSD. The result is the integer which is the exponent of the magnitude of the most significant digit of the operand (as though the operand were truncated to a single digit while maintaining the value of that digit and without limiting the resulting exponent). >>> ExtendedContext.logb(Decimal('250')) Decimal('2') >>> ExtendedContext.logb(Decimal('2.50')) Decimal('0') >>> ExtendedContext.logb(Decimal('0.03')) Decimal('-2') >>> ExtendedContext.logb(Decimal('0')) Decimal('-Infinity') >>> ExtendedContext.logb(1) Decimal('0') >>> ExtendedContext.logb(10) Decimal('1') >>> ExtendedContext.logb(100) Decimal('2') """ a = _convert_other(a, raiseit=True) return a.logb(context=self) def logical_and(self, a, b): """Applies the logical operation 'and' between each operand's digits. The operands must be both logical numbers. 
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('0'), Decimal('1')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('1'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_and(Decimal('1'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010')) Decimal('1000') >>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10')) Decimal('10') >>> ExtendedContext.logical_and(110, 1101) Decimal('100') >>> ExtendedContext.logical_and(Decimal(110), 1101) Decimal('100') >>> ExtendedContext.logical_and(110, Decimal(1101)) Decimal('100') """ a = _convert_other(a, raiseit=True) return a.logical_and(b, context=self) def logical_invert(self, a): """Invert all the digits in the operand. The operand must be a logical number. >>> ExtendedContext.logical_invert(Decimal('0')) Decimal('111111111') >>> ExtendedContext.logical_invert(Decimal('1')) Decimal('111111110') >>> ExtendedContext.logical_invert(Decimal('111111111')) Decimal('0') >>> ExtendedContext.logical_invert(Decimal('101010101')) Decimal('10101010') >>> ExtendedContext.logical_invert(1101) Decimal('111110010') """ a = _convert_other(a, raiseit=True) return a.logical_invert(context=self) def logical_or(self, a, b): """Applies the logical operation 'or' between each operand's digits. The operands must be both logical numbers. 
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_or(Decimal('0'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_or(Decimal('1'), Decimal('0')) Decimal('1') >>> ExtendedContext.logical_or(Decimal('1'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010')) Decimal('1110') >>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10')) Decimal('1110') >>> ExtendedContext.logical_or(110, 1101) Decimal('1111') >>> ExtendedContext.logical_or(Decimal(110), 1101) Decimal('1111') >>> ExtendedContext.logical_or(110, Decimal(1101)) Decimal('1111') """ a = _convert_other(a, raiseit=True) return a.logical_or(b, context=self) def logical_xor(self, a, b): """Applies the logical operation 'xor' between each operand's digits. The operands must be both logical numbers. >>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0')) Decimal('1') >>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1')) Decimal('0') >>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010')) Decimal('110') >>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10')) Decimal('1101') >>> ExtendedContext.logical_xor(110, 1101) Decimal('1011') >>> ExtendedContext.logical_xor(Decimal(110), 1101) Decimal('1011') >>> ExtendedContext.logical_xor(110, Decimal(1101)) Decimal('1011') """ a = _convert_other(a, raiseit=True) return a.logical_xor(b, context=self) def max(self, a, b): """max compares two values numerically and returns the maximum. If either operand is a NaN then the general rules apply. Otherwise, the operands are compared as though by the compare operation. If they are numerically equal then the left-hand operand is chosen as the result. Otherwise the maximum (closer to positive infinity) of the two operands is chosen as the result. 
>>> ExtendedContext.max(Decimal('3'), Decimal('2')) Decimal('3') >>> ExtendedContext.max(Decimal('-10'), Decimal('3')) Decimal('3') >>> ExtendedContext.max(Decimal('1.0'), Decimal('1')) Decimal('1') >>> ExtendedContext.max(Decimal('7'), Decimal('NaN')) Decimal('7') >>> ExtendedContext.max(1, 2) Decimal('2') >>> ExtendedContext.max(Decimal(1), 2) Decimal('2') >>> ExtendedContext.max(1, Decimal(2)) Decimal('2') """ a = _convert_other(a, raiseit=True) return a.max(b, context=self) def max_mag(self, a, b): """Compares the values numerically with their sign ignored. >>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN')) Decimal('7') >>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10')) Decimal('-10') >>> ExtendedContext.max_mag(1, -2) Decimal('-2') >>> ExtendedContext.max_mag(Decimal(1), -2) Decimal('-2') >>> ExtendedContext.max_mag(1, Decimal(-2)) Decimal('-2') """ a = _convert_other(a, raiseit=True) return a.max_mag(b, context=self) def min(self, a, b): """min compares two values numerically and returns the minimum. If either operand is a NaN then the general rules apply. Otherwise, the operands are compared as though by the compare operation. If they are numerically equal then the left-hand operand is chosen as the result. Otherwise the minimum (closer to negative infinity) of the two operands is chosen as the result. >>> ExtendedContext.min(Decimal('3'), Decimal('2')) Decimal('2') >>> ExtendedContext.min(Decimal('-10'), Decimal('3')) Decimal('-10') >>> ExtendedContext.min(Decimal('1.0'), Decimal('1')) Decimal('1.0') >>> ExtendedContext.min(Decimal('7'), Decimal('NaN')) Decimal('7') >>> ExtendedContext.min(1, 2) Decimal('1') >>> ExtendedContext.min(Decimal(1), 2) Decimal('1') >>> ExtendedContext.min(1, Decimal(29)) Decimal('1') """ a = _convert_other(a, raiseit=True) return a.min(b, context=self) def min_mag(self, a, b): """Compares the values numerically with their sign ignored. 
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2')) Decimal('-2') >>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN')) Decimal('-3') >>> ExtendedContext.min_mag(1, -2) Decimal('1') >>> ExtendedContext.min_mag(Decimal(1), -2) Decimal('1') >>> ExtendedContext.min_mag(1, Decimal(-2)) Decimal('1') """ a = _convert_other(a, raiseit=True) return a.min_mag(b, context=self) def minus(self, a): """Minus corresponds to unary prefix minus in Python. The operation is evaluated using the same rules as subtract; the operation minus(a) is calculated as subtract('0', a) where the '0' has the same exponent as the operand. >>> ExtendedContext.minus(Decimal('1.3')) Decimal('-1.3') >>> ExtendedContext.minus(Decimal('-1.3')) Decimal('1.3') >>> ExtendedContext.minus(1) Decimal('-1') """ a = _convert_other(a, raiseit=True) return a.__neg__(context=self) def multiply(self, a, b): """multiply multiplies two operands. If either operand is a special value then the general rules apply. Otherwise, the operands are multiplied together ('long multiplication'), resulting in a number which may be as long as the sum of the lengths of the two operands. >>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3')) Decimal('3.60') >>> ExtendedContext.multiply(Decimal('7'), Decimal('3')) Decimal('21') >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8')) Decimal('0.72') >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0')) Decimal('-0.0') >>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321')) Decimal('4.28135971E+11') >>> ExtendedContext.multiply(7, 7) Decimal('49') >>> ExtendedContext.multiply(Decimal(7), 7) Decimal('49') >>> ExtendedContext.multiply(7, Decimal(7)) Decimal('49') """ a = _convert_other(a, raiseit=True) r = a.__mul__(b, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r def next_minus(self, a): """Returns the largest representable number smaller than a. 
        >>> c = ExtendedContext.copy()
        >>> c.Emin = -999
        >>> c.Emax = 999
        >>> ExtendedContext.next_minus(Decimal('1'))
        Decimal('0.999999999')
        >>> c.next_minus(Decimal('1E-1007'))
        Decimal('0E-1007')
        >>> ExtendedContext.next_minus(Decimal('-1.00000003'))
        Decimal('-1.00000004')
        >>> c.next_minus(Decimal('Infinity'))
        Decimal('9.99999999E+999')
        >>> c.next_minus(1)
        Decimal('0.999999999')
        """
        a = _convert_other(a, raiseit=True)
        # Delegate to Decimal.next_minus; the context supplies Emin/Emax/prec
        # which determine the neighbouring representable value.
        return a.next_minus(context=self)

    def next_plus(self, a):
        """Returns the smallest representable number larger than a.

        >>> c = ExtendedContext.copy()
        >>> c.Emin = -999
        >>> c.Emax = 999
        >>> ExtendedContext.next_plus(Decimal('1'))
        Decimal('1.00000001')
        >>> c.next_plus(Decimal('-1E-1007'))
        Decimal('-0E-1007')
        >>> ExtendedContext.next_plus(Decimal('-1.00000003'))
        Decimal('-1.00000002')
        >>> c.next_plus(Decimal('-Infinity'))
        Decimal('-9.99999999E+999')
        >>> c.next_plus(1)
        Decimal('1.00000001')
        """
        a = _convert_other(a, raiseit=True)
        # Mirror image of next_minus: step towards +Infinity by one ulp
        # of this context.
        return a.next_plus(context=self)

    def next_toward(self, a, b):
        """Returns the number closest to a, in direction towards b.

        The result is the closest representable number from the first
        operand (but not the first operand) that is in the direction
        towards the second operand, unless the operands have the same
        value.
>>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.next_toward(Decimal('1'), Decimal('2')) Decimal('1.00000001') >>> c.next_toward(Decimal('-1E-1007'), Decimal('1')) Decimal('-0E-1007') >>> c.next_toward(Decimal('-1.00000003'), Decimal('0')) Decimal('-1.00000002') >>> c.next_toward(Decimal('1'), Decimal('0')) Decimal('0.999999999') >>> c.next_toward(Decimal('1E-1007'), Decimal('-100')) Decimal('0E-1007') >>> c.next_toward(Decimal('-1.00000003'), Decimal('-10')) Decimal('-1.00000004') >>> c.next_toward(Decimal('0.00'), Decimal('-0.0000')) Decimal('-0.00') >>> c.next_toward(0, 1) Decimal('1E-1007') >>> c.next_toward(Decimal(0), 1) Decimal('1E-1007') >>> c.next_toward(0, Decimal(1)) Decimal('1E-1007') """ a = _convert_other(a, raiseit=True) return a.next_toward(b, context=self) def normalize(self, a): """normalize reduces an operand to its simplest form. Essentially a plus operation with all trailing zeros removed from the result. >>> ExtendedContext.normalize(Decimal('2.1')) Decimal('2.1') >>> ExtendedContext.normalize(Decimal('-2.0')) Decimal('-2') >>> ExtendedContext.normalize(Decimal('1.200')) Decimal('1.2') >>> ExtendedContext.normalize(Decimal('-120')) Decimal('-1.2E+2') >>> ExtendedContext.normalize(Decimal('120.00')) Decimal('1.2E+2') >>> ExtendedContext.normalize(Decimal('0.00')) Decimal('0') >>> ExtendedContext.normalize(6) Decimal('6') """ a = _convert_other(a, raiseit=True) return a.normalize(context=self) def number_class(self, a): """Returns an indication of the class of the operand. 
The class is one of the following strings: -sNaN -NaN -Infinity -Normal -Subnormal -Zero +Zero +Subnormal +Normal +Infinity >>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.number_class(Decimal('Infinity')) '+Infinity' >>> c.number_class(Decimal('1E-10')) '+Normal' >>> c.number_class(Decimal('2.50')) '+Normal' >>> c.number_class(Decimal('0.1E-999')) '+Subnormal' >>> c.number_class(Decimal('0')) '+Zero' >>> c.number_class(Decimal('-0')) '-Zero' >>> c.number_class(Decimal('-0.1E-999')) '-Subnormal' >>> c.number_class(Decimal('-1E-10')) '-Normal' >>> c.number_class(Decimal('-2.50')) '-Normal' >>> c.number_class(Decimal('-Infinity')) '-Infinity' >>> c.number_class(Decimal('NaN')) 'NaN' >>> c.number_class(Decimal('-NaN')) 'NaN' >>> c.number_class(Decimal('sNaN')) 'sNaN' >>> c.number_class(123) '+Normal' """ a = _convert_other(a, raiseit=True) return a.number_class(context=self) def plus(self, a): """Plus corresponds to unary prefix plus in Python. The operation is evaluated using the same rules as add; the operation plus(a) is calculated as add('0', a) where the '0' has the same exponent as the operand. >>> ExtendedContext.plus(Decimal('1.3')) Decimal('1.3') >>> ExtendedContext.plus(Decimal('-1.3')) Decimal('-1.3') >>> ExtendedContext.plus(-1) Decimal('-1') """ a = _convert_other(a, raiseit=True) return a.__pos__(context=self) def power(self, a, b, modulo=None): """Raises a to the power of b, to modulo if given. With two arguments, compute a**b. If a is negative then b must be integral. The result will be inexact unless b is integral and the result is finite and can be expressed exactly in 'precision' digits. With three arguments, compute (a**b) % modulo. 
For the three argument form, the following restrictions on the arguments hold: - all three arguments must be integral - b must be nonnegative - at least one of a or b must be nonzero - modulo must be nonzero and have at most 'precision' digits The result of pow(a, b, modulo) is identical to the result that would be obtained by computing (a**b) % modulo with unbounded precision, but is computed more efficiently. It is always exact. >>> c = ExtendedContext.copy() >>> c.Emin = -999 >>> c.Emax = 999 >>> c.power(Decimal('2'), Decimal('3')) Decimal('8') >>> c.power(Decimal('-2'), Decimal('3')) Decimal('-8') >>> c.power(Decimal('2'), Decimal('-3')) Decimal('0.125') >>> c.power(Decimal('1.7'), Decimal('8')) Decimal('69.7575744') >>> c.power(Decimal('10'), Decimal('0.301029996')) Decimal('2.00000000') >>> c.power(Decimal('Infinity'), Decimal('-1')) Decimal('0') >>> c.power(Decimal('Infinity'), Decimal('0')) Decimal('1') >>> c.power(Decimal('Infinity'), Decimal('1')) Decimal('Infinity') >>> c.power(Decimal('-Infinity'), Decimal('-1')) Decimal('-0') >>> c.power(Decimal('-Infinity'), Decimal('0')) Decimal('1') >>> c.power(Decimal('-Infinity'), Decimal('1')) Decimal('-Infinity') >>> c.power(Decimal('-Infinity'), Decimal('2')) Decimal('Infinity') >>> c.power(Decimal('0'), Decimal('0')) Decimal('NaN') >>> c.power(Decimal('3'), Decimal('7'), Decimal('16')) Decimal('11') >>> c.power(Decimal('-3'), Decimal('7'), Decimal('16')) Decimal('-11') >>> c.power(Decimal('-3'), Decimal('8'), Decimal('16')) Decimal('1') >>> c.power(Decimal('3'), Decimal('7'), Decimal('-16')) Decimal('11') >>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789')) Decimal('11729830') >>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729')) Decimal('-0') >>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537')) Decimal('1') >>> ExtendedContext.power(7, 7) Decimal('823543') >>> ExtendedContext.power(Decimal(7), 7) Decimal('823543') >>> ExtendedContext.power(7, Decimal(7), 2) Decimal('1') """ a 
= _convert_other(a, raiseit=True) r = a.__pow__(b, modulo, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r def quantize(self, a, b): """Returns a value equal to 'a' (rounded), having the exponent of 'b'. The coefficient of the result is derived from that of the left-hand operand. It may be rounded using the current rounding setting (if the exponent is being increased), multiplied by a positive power of ten (if the exponent is being decreased), or is unchanged (if the exponent is already equal to that of the right-hand operand). Unlike other operations, if the length of the coefficient after the quantize operation would be greater than precision then an Invalid operation condition is raised. This guarantees that, unless there is an error condition, the exponent of the result of a quantize is always equal to that of the right-hand operand. Also unlike other operations, quantize will never raise Underflow, even if the result is subnormal and inexact. 
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001')) Decimal('2.170') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01')) Decimal('2.17') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1')) Decimal('2.2') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0')) Decimal('2') >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1')) Decimal('0E+1') >>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity')) Decimal('-Infinity') >>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity')) Decimal('NaN') >>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1')) Decimal('-0') >>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5')) Decimal('-0E+5') >>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2')) Decimal('NaN') >>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2')) Decimal('NaN') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1')) Decimal('217.0') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0')) Decimal('217') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1')) Decimal('2.2E+2') >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2')) Decimal('2E+2') >>> ExtendedContext.quantize(1, 2) Decimal('1') >>> ExtendedContext.quantize(Decimal(1), 2) Decimal('1') >>> ExtendedContext.quantize(1, Decimal(2)) Decimal('1') """ a = _convert_other(a, raiseit=True) return a.quantize(b, context=self) def radix(self): """Just returns 10, as this is Decimal, :) >>> ExtendedContext.radix() Decimal('10') """ return Decimal(10) def remainder(self, a, b): """Returns the remainder from integer division. The result is the residue of the dividend after the operation of calculating integer division as described for divide-integer, rounded to precision digits if necessary. The sign of the result, if non-zero, is the same as that of the original dividend. 
This operation will fail under the same conditions as integer division (that is, if integer division on the same two operands would fail, the remainder cannot be calculated). >>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3')) Decimal('2.1') >>> ExtendedContext.remainder(Decimal('10'), Decimal('3')) Decimal('1') >>> ExtendedContext.remainder(Decimal('-10'), Decimal('3')) Decimal('-1') >>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1')) Decimal('0.2') >>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3')) Decimal('0.1') >>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3')) Decimal('1.0') >>> ExtendedContext.remainder(22, 6) Decimal('4') >>> ExtendedContext.remainder(Decimal(22), 6) Decimal('4') >>> ExtendedContext.remainder(22, Decimal(6)) Decimal('4') """ a = _convert_other(a, raiseit=True) r = a.__mod__(b, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r def remainder_near(self, a, b): """Returns to be "a - b * n", where n is the integer nearest the exact value of "x / b" (if two integers are equally near then the even one is chosen). If the result is equal to 0 then its sign will be the sign of a. This operation will fail under the same conditions as integer division (that is, if integer division on the same two operands would fail, the remainder cannot be calculated). 
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3')) Decimal('-0.9') >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6')) Decimal('-2') >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3')) Decimal('1') >>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3')) Decimal('-1') >>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1')) Decimal('0.2') >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3')) Decimal('0.1') >>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3')) Decimal('-0.3') >>> ExtendedContext.remainder_near(3, 11) Decimal('3') >>> ExtendedContext.remainder_near(Decimal(3), 11) Decimal('3') >>> ExtendedContext.remainder_near(3, Decimal(11)) Decimal('3') """ a = _convert_other(a, raiseit=True) return a.remainder_near(b, context=self) def rotate(self, a, b): """Returns a rotated copy of a, b times. The coefficient of the result is a rotated copy of the digits in the coefficient of the first operand. The number of places of rotation is taken from the absolute value of the second operand, with the rotation being to the left if the second operand is positive or to the right otherwise. >>> ExtendedContext.rotate(Decimal('34'), Decimal('8')) Decimal('400000003') >>> ExtendedContext.rotate(Decimal('12'), Decimal('9')) Decimal('12') >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2')) Decimal('891234567') >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0')) Decimal('123456789') >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2')) Decimal('345678912') >>> ExtendedContext.rotate(1333333, 1) Decimal('13333330') >>> ExtendedContext.rotate(Decimal(1333333), 1) Decimal('13333330') >>> ExtendedContext.rotate(1333333, Decimal(1)) Decimal('13333330') """ a = _convert_other(a, raiseit=True) return a.rotate(b, context=self) def same_quantum(self, a, b): """Returns True if the two operands have the same exponent. 
The result is never affected by either the sign or the coefficient of either operand. >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001')) False >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01')) True >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1')) False >>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf')) True >>> ExtendedContext.same_quantum(10000, -1) True >>> ExtendedContext.same_quantum(Decimal(10000), -1) True >>> ExtendedContext.same_quantum(10000, Decimal(-1)) True """ a = _convert_other(a, raiseit=True) return a.same_quantum(b) def scaleb (self, a, b): """Returns the first operand after adding the second value its exp. >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2')) Decimal('0.0750') >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0')) Decimal('7.50') >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3')) Decimal('7.50E+3') >>> ExtendedContext.scaleb(1, 4) Decimal('1E+4') >>> ExtendedContext.scaleb(Decimal(1), 4) Decimal('1E+4') >>> ExtendedContext.scaleb(1, Decimal(4)) Decimal('1E+4') """ a = _convert_other(a, raiseit=True) return a.scaleb(b, context=self) def shift(self, a, b): """Returns a shifted copy of a, b times. The coefficient of the result is a shifted copy of the digits in the coefficient of the first operand. The number of places to shift is taken from the absolute value of the second operand, with the shift being to the left if the second operand is positive or to the right otherwise. Digits shifted into the coefficient are zeros. 
>>> ExtendedContext.shift(Decimal('34'), Decimal('8')) Decimal('400000000') >>> ExtendedContext.shift(Decimal('12'), Decimal('9')) Decimal('0') >>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2')) Decimal('1234567') >>> ExtendedContext.shift(Decimal('123456789'), Decimal('0')) Decimal('123456789') >>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2')) Decimal('345678900') >>> ExtendedContext.shift(88888888, 2) Decimal('888888800') >>> ExtendedContext.shift(Decimal(88888888), 2) Decimal('888888800') >>> ExtendedContext.shift(88888888, Decimal(2)) Decimal('888888800') """ a = _convert_other(a, raiseit=True) return a.shift(b, context=self) def sqrt(self, a): """Square root of a non-negative number to context precision. If the result must be inexact, it is rounded using the round-half-even algorithm. >>> ExtendedContext.sqrt(Decimal('0')) Decimal('0') >>> ExtendedContext.sqrt(Decimal('-0')) Decimal('-0') >>> ExtendedContext.sqrt(Decimal('0.39')) Decimal('0.624499800') >>> ExtendedContext.sqrt(Decimal('100')) Decimal('10') >>> ExtendedContext.sqrt(Decimal('1')) Decimal('1') >>> ExtendedContext.sqrt(Decimal('1.0')) Decimal('1.0') >>> ExtendedContext.sqrt(Decimal('1.00')) Decimal('1.0') >>> ExtendedContext.sqrt(Decimal('7')) Decimal('2.64575131') >>> ExtendedContext.sqrt(Decimal('10')) Decimal('3.16227766') >>> ExtendedContext.sqrt(2) Decimal('1.41421356') >>> ExtendedContext.prec 9 """ a = _convert_other(a, raiseit=True) return a.sqrt(context=self) def subtract(self, a, b): """Return the difference between the two operands. 
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07')) Decimal('0.23') >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30')) Decimal('0.00') >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07')) Decimal('-0.77') >>> ExtendedContext.subtract(8, 5) Decimal('3') >>> ExtendedContext.subtract(Decimal(8), 5) Decimal('3') >>> ExtendedContext.subtract(8, Decimal(5)) Decimal('3') """ a = _convert_other(a, raiseit=True) r = a.__sub__(b, context=self) if r is NotImplemented: raise TypeError("Unable to convert %s to Decimal" % b) else: return r def to_eng_string(self, a): """Converts a number to a string, using scientific notation. The operation is not affected by the context. """ a = _convert_other(a, raiseit=True) return a.to_eng_string(context=self) def to_sci_string(self, a): """Converts a number to a string, using scientific notation. The operation is not affected by the context. """ a = _convert_other(a, raiseit=True) return a.__str__(context=self) def to_integral_exact(self, a): """Rounds to an integer. When the operand has a negative exponent, the result is the same as using the quantize() operation using the given operand as the left-hand-operand, 1E+0 as the right-hand-operand, and the precision of the operand as the precision setting; Inexact and Rounded flags are allowed in this operation. The rounding mode is taken from the context. 
>>> ExtendedContext.to_integral_exact(Decimal('2.1')) Decimal('2') >>> ExtendedContext.to_integral_exact(Decimal('100')) Decimal('100') >>> ExtendedContext.to_integral_exact(Decimal('100.0')) Decimal('100') >>> ExtendedContext.to_integral_exact(Decimal('101.5')) Decimal('102') >>> ExtendedContext.to_integral_exact(Decimal('-101.5')) Decimal('-102') >>> ExtendedContext.to_integral_exact(Decimal('10E+5')) Decimal('1.0E+6') >>> ExtendedContext.to_integral_exact(Decimal('7.89E+77')) Decimal('7.89E+77') >>> ExtendedContext.to_integral_exact(Decimal('-Inf')) Decimal('-Infinity') """ a = _convert_other(a, raiseit=True) return a.to_integral_exact(context=self) def to_integral_value(self, a): """Rounds to an integer. When the operand has a negative exponent, the result is the same as using the quantize() operation using the given operand as the left-hand-operand, 1E+0 as the right-hand-operand, and the precision of the operand as the precision setting, except that no flags will be set. The rounding mode is taken from the context. 
>>> ExtendedContext.to_integral_value(Decimal('2.1')) Decimal('2') >>> ExtendedContext.to_integral_value(Decimal('100')) Decimal('100') >>> ExtendedContext.to_integral_value(Decimal('100.0')) Decimal('100') >>> ExtendedContext.to_integral_value(Decimal('101.5')) Decimal('102') >>> ExtendedContext.to_integral_value(Decimal('-101.5')) Decimal('-102') >>> ExtendedContext.to_integral_value(Decimal('10E+5')) Decimal('1.0E+6') >>> ExtendedContext.to_integral_value(Decimal('7.89E+77')) Decimal('7.89E+77') >>> ExtendedContext.to_integral_value(Decimal('-Inf')) Decimal('-Infinity') """ a = _convert_other(a, raiseit=True) return a.to_integral_value(context=self) # the method name changed, but we provide also the old one, for compatibility to_integral = to_integral_value class _WorkRep(object): __slots__ = ('sign','int','exp') # sign: 0 or 1 # int: int # exp: None, int, or string def __init__(self, value=None): if value is None: self.sign = None self.int = 0 self.exp = None elif isinstance(value, Decimal): self.sign = value._sign self.int = int(value._int) self.exp = value._exp else: # assert isinstance(value, tuple) self.sign = value[0] self.int = value[1] self.exp = value[2] def __repr__(self): return "(%r, %r, %r)" % (self.sign, self.int, self.exp) __str__ = __repr__ def _normalize(op1, op2, prec = 0): """Normalizes op1, op2 to have the same exp and length of coefficient. Done during addition. """ if op1.exp < op2.exp: tmp = op2 other = op1 else: tmp = op1 other = op2 # Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1). # Then adding 10**exp to tmp has the same effect (after rounding) # as adding any positive quantity smaller than 10**exp; similarly # for subtraction. So if other is smaller than 10**exp we replace # it with 10**exp. This avoids tmp.exp - other.exp getting too large. 
tmp_len = len(str(tmp.int)) other_len = len(str(other.int)) exp = tmp.exp + min(-1, tmp_len - prec - 2) if other_len + other.exp - 1 < exp: other.int = 1 other.exp = exp tmp.int *= 10 ** (tmp.exp - other.exp) tmp.exp = other.exp return op1, op2 ##### Integer arithmetic functions used by ln, log10, exp and __pow__ ##### _nbits = int.bit_length def _decimal_lshift_exact(n, e): """ Given integers n and e, return n * 10**e if it's an integer, else None. The computation is designed to avoid computing large powers of 10 unnecessarily. >>> _decimal_lshift_exact(3, 4) 30000 >>> _decimal_lshift_exact(300, -999999999) # returns None """ if n == 0: return 0 elif e >= 0: return n * 10**e else: # val_n = largest power of 10 dividing n. str_n = str(abs(n)) val_n = len(str_n) - len(str_n.rstrip('0')) return None if val_n < -e else n // 10**-e def _sqrt_nearest(n, a): """Closest integer to the square root of the positive integer n. a is an initial approximation to the square root. Any positive integer will do for a, but the closer a is to the square root of n the faster convergence will be. """ if n <= 0 or a <= 0: raise ValueError("Both arguments to _sqrt_nearest should be positive.") b=0 while a != b: b, a = a, a--n//a>>1 return a def _rshift_nearest(x, shift): """Given an integer x and a nonnegative integer shift, return closest integer to x / 2**shift; use round-to-even in case of a tie. """ b, q = 1 << shift, x >> shift return q + (2*(x & (b-1)) + (q&1) > b) def _div_nearest(a, b): """Closest integer to a/b, a and b positive integers; rounds to even in the case of a tie. """ q, r = divmod(a, b) return q + (2*r + (q&1) > b) def _ilog(x, M, L = 8): """Integer approximation to M*log(x/M), with absolute error boundable in terms only of x/M. Given positive integers x and M, return an integer approximation to M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference between the approximation and the exact result is at most 22. 
For L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In both cases these are upper bounds on the error; it will usually be much smaller.""" # The basic algorithm is the following: let log1p be the function # log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use # the reduction # # log1p(y) = 2*log1p(y/(1+sqrt(1+y))) # # repeatedly until the argument to log1p is small (< 2**-L in # absolute value). For small y we can use the Taylor series # expansion # # log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T # # truncating at T such that y**T is small enough. The whole # computation is carried out in a form of fixed-point arithmetic, # with a real number z being represented by an integer # approximation to z*M. To avoid loss of precision, the y below # is actually an integer approximation to 2**R*y*M, where R is the # number of reductions performed so far. y = x-M # argument reduction; R = number of reductions performed R = 0 while (R <= L and abs(y) << L-R >= M or R > L and abs(y) >> R-L >= M): y = _div_nearest((M*y) << 1, M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M)) R += 1 # Taylor series with T terms T = -int(-10*len(str(M))//(3*L)) yshift = _rshift_nearest(y, R) w = _div_nearest(M, T) for k in range(T-1, 0, -1): w = _div_nearest(M, k) - _div_nearest(yshift*w, M) return _div_nearest(w*y, M) def _dlog10(c, e, p): """Given integers c, e and p with c > 0, p >= 0, compute an integer approximation to 10**p * log10(c*10**e), with an absolute error of at most 1. Assumes that c*10**e is not exactly 1.""" # increase precision by 2; compensate for this by dividing # final result by 100 p += 2 # write c*10**e as d*10**f with either: # f >= 0 and 1 <= d <= 10, or # f <= 0 and 0.1 <= d <= 1. 
# Thus for c*10**e close to 1, f = 0 l = len(str(c)) f = e+l - (e+l >= 1) if p > 0: M = 10**p k = e+p-f if k >= 0: c *= 10**k else: c = _div_nearest(c, 10**-k) log_d = _ilog(c, M) # error < 5 + 22 = 27 log_10 = _log10_digits(p) # error < 1 log_d = _div_nearest(log_d*M, log_10) log_tenpower = f*M # exact else: log_d = 0 # error < 2.31 log_tenpower = _div_nearest(f, 10**-p) # error < 0.5 return _div_nearest(log_tenpower+log_d, 100) def _dlog(c, e, p): """Given integers c, e and p with c > 0, compute an integer approximation to 10**p * log(c*10**e), with an absolute error of at most 1. Assumes that c*10**e is not exactly 1.""" # Increase precision by 2. The precision increase is compensated # for at the end with a division by 100. p += 2 # rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10, # or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e) # as 10**p * log(d) + 10**p*f * log(10). l = len(str(c)) f = e+l - (e+l >= 1) # compute approximation to 10**p*log(d), with error < 27 if p > 0: k = e+p-f if k >= 0: c *= 10**k else: c = _div_nearest(c, 10**-k) # error of <= 0.5 in c # _ilog magnifies existing error in c by a factor of at most 10 log_d = _ilog(c, 10**p) # error < 5 + 22 = 27 else: # p <= 0: just approximate the whole thing by 0; error < 2.31 log_d = 0 # compute approximation to f*10**p*log(10), with error < 11. if f: extra = len(str(abs(f)))-1 if p + extra >= 0: # error in f * _log10_digits(p+extra) < |f| * 1 = |f| # after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11 f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra) else: f_log_ten = 0 else: f_log_ten = 0 # error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1 return _div_nearest(f_log_ten + log_d, 100) class _Log10Memoize(object): """Class to compute, store, and allow retrieval of, digits of the constant log(10) = 2.302585.... 
This constant is needed by Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__.""" def __init__(self): self.digits = "23025850929940456840179914546843642076011014886" def getdigits(self, p): """Given an integer p >= 0, return floor(10**p)*log(10). For example, self.getdigits(3) returns 2302. """ # digits are stored as a string, for quick conversion to # integer in the case that we've already computed enough # digits; the stored digits should always be correct # (truncated, not rounded to nearest). if p < 0: raise ValueError("p should be nonnegative") if p >= len(self.digits): # compute p+3, p+6, p+9, ... digits; continue until at # least one of the extra digits is nonzero extra = 3 while True: # compute p+extra digits, correct to within 1ulp M = 10**(p+extra+2) digits = str(_div_nearest(_ilog(10*M, M), 100)) if digits[-extra:] != '0'*extra: break extra += 3 # keep all reliable digits so far; remove trailing zeros # and next nonzero digit self.digits = digits.rstrip('0')[:-1] return int(self.digits[:p+1]) _log10_digits = _Log10Memoize().getdigits def _iexp(x, M, L=8): """Given integers x and M, M > 0, such that x/M is small in absolute value, compute an integer approximation to M*exp(x/M). For 0 <= x/M <= 2.4, the absolute error in the result is bounded by 60 (and is usually much smaller).""" # Algorithm: to compute exp(z) for a real number z, first divide z # by a suitable power R of 2 so that |z/2**R| < 2**-L. Then # compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor # series # # expm1(x) = x + x**2/2! + x**3/3! + ... # # Now use the identity # # expm1(2x) = expm1(x)*(expm1(x)+2) # # R times to compute the sequence expm1(z/2**R), # expm1(z/2**(R-1)), ... , exp(z/2), exp(z). # Find R such that x/2**R/M <= 2**-L R = _nbits((x<<L)//M) # Taylor series. 
(2**L)**T > M T = -int(-10*len(str(M))//(3*L)) y = _div_nearest(x, T) Mshift = M<<R for i in range(T-1, 0, -1): y = _div_nearest(x*(Mshift + y), Mshift * i) # Expansion for k in range(R-1, -1, -1): Mshift = M<<(k+2) y = _div_nearest(y*(y+Mshift), Mshift) return M+y def _dexp(c, e, p): """Compute an approximation to exp(c*10**e), with p decimal places of precision. Returns integers d, f such that: 10**(p-1) <= d <= 10**p, and (d-1)*10**f < exp(c*10**e) < (d+1)*10**f In other words, d*10**f is an approximation to exp(c*10**e) with p digits of precision, and with an error in d of at most 1. This is almost, but not quite, the same as the error being < 1ulp: when d = 10**(p-1) the error could be up to 10 ulp.""" # we'll call iexp with M = 10**(p+2), giving p+3 digits of precision p += 2 # compute log(10) with extra precision = adjusted exponent of c*10**e extra = max(0, e + len(str(c)) - 1) q = p + extra # compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q), # rounding down shift = e+q if shift >= 0: cshift = c*10**shift else: cshift = c//10**-shift quot, rem = divmod(cshift, _log10_digits(q)) # reduce remainder back to original precision rem = _div_nearest(rem, 10**extra) # error in result of _iexp < 120; error after division < 0.62 return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3 def _dpower(xc, xe, yc, ye, p): """Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that: 10**(p-1) <= c <= 10**p, and (c-1)*10**e < x**y < (c+1)*10**e in other words, c*10**e is an approximation to x**y with p digits of precision, and with an error in c of at most 1. (This is almost, but not quite, the same as the error being < 1ulp: when c == 10**(p-1) we can only guarantee error < 10ulp.) We assume that: x is positive and not equal to 1, and y is nonzero. 
""" # Find b such that 10**(b-1) <= |y| <= 10**b b = len(str(abs(yc))) + ye # log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point lxc = _dlog(xc, xe, p+b+1) # compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1) shift = ye-b if shift >= 0: pc = lxc*yc*10**shift else: pc = _div_nearest(lxc*yc, 10**-shift) if pc == 0: # we prefer a result that isn't exactly 1; this makes it # easier to compute a correctly rounded result in __pow__ if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1: coeff, exp = 10**(p-1)+1, 1-p else: coeff, exp = 10**p-1, -p else: coeff, exp = _dexp(pc, -(p+1), p+1) coeff = _div_nearest(coeff, 10) exp += 1 return coeff, exp def _log10_lb(c, correction = { '1': 100, '2': 70, '3': 53, '4': 40, '5': 31, '6': 23, '7': 16, '8': 10, '9': 5}): """Compute a lower bound for 100*log10(c) for a positive integer c.""" if c <= 0: raise ValueError("The argument to _log10_lb should be nonnegative.") str_c = str(c) return 100*len(str_c) - correction[str_c[0]] ##### Helper Functions #################################################### def _convert_other(other, raiseit=False, allow_float=False): """Convert other to Decimal. Verifies that it's ok to use in an implicit construction. If allow_float is true, allow conversion from float; this is used in the comparison methods (__eq__ and friends). """ if isinstance(other, Decimal): return other if isinstance(other, int): return Decimal(other) if allow_float and isinstance(other, float): return Decimal.from_float(other) if raiseit: raise TypeError("Unable to convert %s to Decimal" % other) return NotImplemented def _convert_for_comparison(self, other, equality_op=False): """Given a Decimal instance self and a Python object other, return a pair (s, o) of Decimal instances such that "s op o" is equivalent to "self op other" for any of the 6 comparison operators "op". 
""" if isinstance(other, Decimal): return self, other # Comparison with a Rational instance (also includes integers): # self op n/d <=> self*d op n (for n and d integers, d positive). # A NaN or infinity can be left unchanged without affecting the # comparison result. if isinstance(other, _numbers.Rational): if not self._is_special: self = _dec_from_triple(self._sign, str(int(self._int) * other.denominator), self._exp) return self, Decimal(other.numerator) # Comparisons with float and complex types. == and != comparisons # with complex numbers should succeed, returning either True or False # as appropriate. Other comparisons return NotImplemented. if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0: other = other.real if isinstance(other, float): context = getcontext() if equality_op: context.flags[FloatOperation] = 1 else: context._raise_error(FloatOperation, "strict semantics for mixing floats and Decimals are enabled") return self, Decimal.from_float(other) return NotImplemented, NotImplemented ##### Setup Specific Contexts ############################################ # The default context prototype used by Context() # Is mutable, so that new contexts can have different default values DefaultContext = Context( prec=17, rounding=ROUND_HALF_EVEN, traps=[DivisionByZero, Overflow, InvalidOperation], flags=[], Emax=308, Emin=-324, capitals=1, clamp=0 ) # Pre-made alternate contexts offered by the specification # Don't change these; the user should be able to select these # contexts and be able to reproduce results from other implementations # of the spec. BasicContext = Context( prec=9, rounding=ROUND_HALF_UP, traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow], flags=[], ) ExtendedContext = Context( prec=9, rounding=ROUND_HALF_EVEN, traps=[], flags=[], ) ##### crud for parsing strings ############################################# # # Regular expression used for parsing numeric strings. Additional # comments: # # 1. 
Uncomment the two '\s*' lines to allow leading and/or trailing # whitespace. But note that the specification disallows whitespace in # a numeric string. # # 2. For finite numbers (not infinities and NaNs) the body of the # number between the optional sign and the optional exponent must have # at least one decimal digit, possibly after the decimal point. The # lookahead expression '(?=\d|\.\d)' checks this. #import re #_parser = re.compile(r""" # A numeric string consists of: # \s* # (?P<sign>[-+])? # an optional sign, followed by either... # ( # (?=\d|\.\d) # ...a number (with at least one digit) # (?P<int>\d*) # having a (possibly empty) integer part # (\.(?P<frac>\d*))? # followed by an optional fractional part # (E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or... # | # Inf(inity)? # ...an infinity, or... # | # (?P<signal>s)? # ...an (optionally signaling) # NaN # NaN # (?P<diag>\d*) # with (possibly empty) diagnostic info. # ) # \s* # \Z #""", re.VERBOSE | re.IGNORECASE).match import _jsre as re _all_zeros = re.compile('0*$').match _exact_half = re.compile('50*$').match ##### PEP3101 support functions ############################################## # The functions in this section have little to do with the Decimal # class, and could potentially be reused or adapted for other pure # Python numeric classes that want to implement __format__ # # A format specifier for Decimal looks like: # # [[fill]align][sign][#][0][minimumwidth][,][.precision][type] #_parse_format_specifier_regex = re.compile(r"""\A #(?: # (?P<fill>.)? # (?P<align>[<>=^]) #)? #(?P<sign>[-+ ])? #(?P<alt>\#)? #(?P<zeropad>0)? #(?P<minimumwidth>(?!0)\d+)? #(?P<thousands_sep>,)? #(?:\.(?P<precision>0|(?!0)\d+))? #(?P<type>[eEfFgGn%])? #\Z #""", re.VERBOSE|re.DOTALL) del re # The locale module is only needed for the 'n' format specifier. The # rest of the PEP 3101 code functions quite happily without it, so we # don't care too much if locale isn't present. 
# The locale module is only needed for the 'n' format specifier; everything
# else in the PEP 3101 support below works without it.
try:
    import locale as _locale
except ImportError:
    pass

def _parse_format_specifier(format_spec, _localeconv=None):
    """Parse and validate a format specifier.

    Turns a standard numeric format specifier into a dict, with the
    following entries:

      fill: fill character to pad field to minimum width
      align: alignment type, either '<', '>', '=' or '^'
      sign: either '+', '-' or ' '
      minimumwidth: nonnegative integer giving minimum width
      zeropad: boolean, indicating whether to pad with zeros
      thousands_sep: string to use as thousands separator, or ''
      grouping: grouping for thousands separators, in format
        used by localeconv
      decimal_point: string to use for decimal point
      precision: nonnegative integer giving precision, or None
      type: one of the characters 'eEfFgG%', or None

    The optional _localeconv argument lets tests inject a fake
    localeconv() dict instead of consulting the real locale.
    """
    # NOTE(review): _parse_format_specifier_regex must be defined earlier in
    # this module; in this port the regex definitions appear to be commented
    # out (and `re` deleted) — verify this name actually resolves at runtime.
    m = _parse_format_specifier_regex.match(format_spec)
    if m is None:
        raise ValueError("Invalid format specifier: " + format_spec)

    # get the dictionary
    format_dict = m.groupdict()

    # zeropad; defaults for fill and alignment.  If zero padding
    # is requested, the fill and align fields should be absent.
    fill = format_dict['fill']
    align = format_dict['align']
    format_dict['zeropad'] = (format_dict['zeropad'] is not None)
    if format_dict['zeropad']:
        if fill is not None:
            raise ValueError("Fill character conflicts with '0'"
                             " in format specifier: " + format_spec)
        if align is not None:
            raise ValueError("Alignment conflicts with '0' in "
                             "format specifier: " + format_spec)
    format_dict['fill'] = fill or ' '
    # PEP 3101 originally specified that the default alignment should
    # be left; it was later agreed that right-aligned makes more sense
    # for numeric types.  See http://bugs.python.org/issue6857.
    format_dict['align'] = align or '>'

    # default sign handling: '-' for negative, '' for positive
    if format_dict['sign'] is None:
        format_dict['sign'] = '-'

    # minimumwidth defaults to 0; precision remains None if not given
    format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
    if format_dict['precision'] is not None:
        format_dict['precision'] = int(format_dict['precision'])

    # if format type is 'g' or 'G' then a precision of 0 makes little
    # sense; convert it to 1.  Same if format type is unspecified.
    if format_dict['precision'] == 0:
        if format_dict['type'] is None or format_dict['type'] in 'gGn':
            format_dict['precision'] = 1

    # determine thousands separator, grouping, and decimal separator, and
    # add appropriate entries to format_dict
    if format_dict['type'] == 'n':
        # apart from separators, 'n' behaves just like 'g'
        format_dict['type'] = 'g'
        if _localeconv is None:
            _localeconv = _locale.localeconv()
        if format_dict['thousands_sep'] is not None:
            raise ValueError("Explicit thousands separator conflicts with "
                             "'n' type in format specifier: " + format_spec)
        format_dict['thousands_sep'] = _localeconv['thousands_sep']
        format_dict['grouping'] = _localeconv['grouping']
        format_dict['decimal_point'] = _localeconv['decimal_point']
    else:
        if format_dict['thousands_sep'] is None:
            format_dict['thousands_sep'] = ''
        format_dict['grouping'] = [3, 0]
        format_dict['decimal_point'] = '.'

    return format_dict

def _format_align(sign, body, spec):
    """Given an unpadded, non-aligned numeric string 'body' and sign
    string 'sign', add padding and alignment conforming to the given
    format specifier dictionary 'spec' (as produced by
    parse_format_specifier).

    Returns the fully padded string; raises ValueError on an
    unrecognised alignment character.
    """
    # how much extra space do we have to play with?
    minimumwidth = spec['minimumwidth']
    fill = spec['fill']
    padding = fill*(minimumwidth - len(sign) - len(body))

    align = spec['align']
    if align == '<':
        result = sign + body + padding
    elif align == '>':
        result = padding + sign + body
    elif align == '=':
        # '=' puts padding between sign and digits (used for zero padding)
        result = sign + padding + body
    elif align == '^':
        half = len(padding)//2
        result = padding[:half] + sign + body + padding[half:]
    else:
        raise ValueError('Unrecognised alignment field')

    return result

def _group_lengths(grouping):
    """Convert a localeconv-style grouping into a (possibly infinite)
    iterable of integers representing group lengths.

    """
    # The result from localeconv()['grouping'], and the input to this
    # function, should be a list of integers in one of the
    # following three forms:
    #
    #   (1) an empty list, or
    #   (2) nonempty list of positive integers + [0]
    #   (3) list of positive integers + [locale.CHAR_MAX], or

    from itertools import chain, repeat
    if not grouping:
        return []
    elif grouping[-1] == 0 and len(grouping) >= 2:
        # trailing 0 means "repeat the last group size indefinitely"
        return chain(grouping[:-1], repeat(grouping[-2]))
    elif grouping[-1] == _locale.CHAR_MAX:
        # CHAR_MAX means "no further grouping to the left"
        return grouping[:-1]
    else:
        raise ValueError('unrecognised format for grouping')

def _insert_thousands_sep(digits, spec, min_width=1):
    """Insert thousands separators into a digit string.

    spec is a dictionary whose keys should include 'thousands_sep' and
    'grouping'; typically it's the result of parsing the format
    specifier using _parse_format_specifier.

    The min_width keyword argument gives the minimum length of the
    result, which will be padded on the left with zeros if necessary.

    If necessary, the zero padding adds an extra '0' on the left to
    avoid a leading thousands separator.  For example, inserting
    commas every three digits in '123456', with min_width=8, gives
    '0,123,456', even though that has length 9.

    """

    sep = spec['thousands_sep']
    grouping = spec['grouping']

    groups = []
    for l in _group_lengths(grouping):
        if l <= 0:
            raise ValueError("group length should be positive")
        # max(..., 1) forces at least 1 digit to the left of a separator
        l = min(max(len(digits), min_width, 1), l)
        groups.append('0'*(l - len(digits)) + digits[-l:])
        digits = digits[:-l]
        min_width -= l
        if not digits and min_width <= 0:
            break
        min_width -= len(sep)
    else:
        # grouping iterable exhausted: everything left goes in one group
        l = max(len(digits), min_width, 1)
        groups.append('0'*(l - len(digits)) + digits[-l:])
    return sep.join(reversed(groups))

def _format_sign(is_negative, spec):
    """Determine sign character."""

    if is_negative:
        return '-'
    elif spec['sign'] in ' +':
        return spec['sign']
    else:
        return ''

def _format_number(is_negative, intpart, fracpart, exp, spec):
    """Format a number, given the following data:

    is_negative: true if the number is negative, else false
    intpart: string of digits that must appear before the decimal point
    fracpart: string of digits that must come after the point
    exp: exponent, as an integer
    spec: dictionary resulting from parsing the format specifier

    This function uses the information in spec to:
      insert separators (decimal separator and thousands separators)
      format the sign
      format the exponent
      add trailing '%' for the '%' type
      zero-pad if necessary
      fill and align if necessary
    """

    sign = _format_sign(is_negative, spec)

    # 'alt' (the '#' flag) forces a decimal point even with no fraction
    if fracpart or spec['alt']:
        fracpart = spec['decimal_point'] + fracpart

    if exp != 0 or spec['type'] in 'eE':
        echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
        fracpart += "{0}{1:+}".format(echar, exp)
    if spec['type'] == '%':
        fracpart += '%'

    if spec['zeropad']:
        min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
    else:
        min_width = 0
    intpart = _insert_thousands_sep(intpart, spec, min_width)

    return _format_align(sign, intpart+fracpart, spec)


##### Useful Constants (internal use only) ################################

# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)

# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)

# Constants related to the hash implementation;  hash(x) is based
# on the reduction of x modulo _PyHASH_MODULUS
_PyHASH_MODULUS = sys.hash_info.modulus
# hash values to use for positive and negative infinities, and nans
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan

# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS
_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
del sys

# If the C-accelerated _decimal module is available, replace every name in
# this module that _decimal also provides, and drop the pure-Python-only
# internals, so callers transparently get the fast implementation.
try:
    import _decimal
except ImportError:
    pass
else:
    s1 = set(dir())
    s2 = set(dir(_decimal))
    for name in s1 - s2:
        del globals()[name]
    del s1, s2, name
    from _decimal import *

if __name__ == '__main__':
    import doctest, decimal
    doctest.testmod(decimal)
madj4ck/ansible
refs/heads/devel
lib/ansible/playbook/vars_file.py
7690
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type
FHannes/intellij-community
refs/heads/master
python/helpers/python-skeletons/py/error.py
36
from errno import *
bolster/django-bloom
refs/heads/master
bloom/share/urls.py
4
# Share URLs for Bloom # # Kevin Tom # Copyright 2008 Handi Mobility # www.handimobility.ca from django.conf.urls.defaults import * urlpatterns = patterns('bloom.share.views', (r'^lookup/(?P<slug>[A-Za-z0-9]+)/$', 'lookup_view'), # (r'^email/$', 'share_by_email_view'), # (r'^sms/$', 'share_by_sms_view'), )
tuedtran/blox
refs/heads/dev
deploy/demo-cli/blox-create-deployment.py
1
#!/usr/bin/env python import json, os, sys import common def main(argv): # Command Line Arguments args = [{'arg':'--apigateway', 'dest':'apigateway', 'default':None, 'type':'boolean', 'help':'Call API Gateway endpoint'}] if '--apigateway' in argv: args.extend([{'arg':'--stack', 'dest':'stack', 'default':None, 'help':'CloudFormation stack name'}]) else: args.extend([{'arg':'--host', 'dest':'host', 'default':'localhost:2000', 'help':'Blox Scheduler <Host>:<Port>'}]) args.extend([{'arg':'--environment', 'dest':'environment', 'default':None, 'help':'Blox environment name'}]) args.extend([{'arg':'--deployment-token', 'dest':'token', 'default':None, 'help':'Blox deployment token'}]) # Parse Command Line Arguments params = common.parse_cli_args('Create Blox Deployment', args) if params.apigateway: run_apigateway(params) else: run_local(params) # Call Blox Scheduler API Gateway Endpoint def run_apigateway(params): command = ["cloudformation", "describe-stack-resource", "--stack-name", params.stack, "--logical-resource-id", "RestApi"] restApi = common.run_shell_command(params.region, command) command = ["cloudformation", "describe-stack-resource", "--stack-name", params.stack, "--logical-resource-id", "ApiResource"] restResource = common.run_shell_command(params.region, command) command = ["apigateway", "test-invoke-method", "--rest-api-id", restApi['StackResourceDetail']['PhysicalResourceId'], "--resource-id", restResource['StackResourceDetail']['PhysicalResourceId'], "--http-method", "POST", "--headers", "{}", "--path-with-query-string", "/v1/environments/%s/deployments?deploymentToken=%s" % (params.environment, params.token), "--body", ""] response = common.run_shell_command(params.region, command) print "HTTP Response Code: %d" % response['status'] try: obj = json.loads(response['body']) print json.dumps(obj, indent=2) except Exception as e: print "Error: Could not parse response - %s" % e print json.dumps(response, indent=2) sys.exit(1) # Call Blox Scheduler Local 
Endpoint def run_local(params): api = common.Object() api.method = 'POST' api.headers = {} api.host = params.host api.uri = '/v1/environments/%s/deployments?deploymentToken=%s' % (params.environment, params.token) api.data = None response = common.call_api(api) print "HTTP Response Code: %d" % response.status try: obj = json.loads(response.body) print json.dumps(obj, indent=2) except Exception as e: print "Error: Could not parse response - %s" % e print response.body sys.exit(1) if __name__ == "__main__": main(sys.argv[1:])
ChenJunor/hue
refs/heads/master
desktop/core/src/desktop/settings.py
1
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Django settings for Hue. # # Local customizations are done by symlinking a file # as local_settings.py. import logging import os import pkg_resources import sys from guppy import hpy import desktop.conf import desktop.log import desktop.redaction from desktop.lib.paths import get_desktop_root from desktop.lib.python_util import force_dict_to_strings from django.utils.translation import ugettext_lazy as _ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), '..', '..', '..')) HUE_DESKTOP_VERSION = pkg_resources.get_distribution("desktop").version or "Unknown" NICE_NAME = "Hue" ENV_HUE_PROCESS_NAME = "HUE_PROCESS_NAME" ENV_DESKTOP_DEBUG = "DESKTOP_DEBUG" ############################################################ # Part 1: Logging and imports. ############################################################ # Configure debug mode DEBUG = True TEMPLATE_DEBUG = DEBUG # Start basic logging as soon as possible. 
if ENV_HUE_PROCESS_NAME not in os.environ: _proc = os.path.basename(len(sys.argv) > 1 and sys.argv[1] or sys.argv[0]) os.environ[ENV_HUE_PROCESS_NAME] = _proc desktop.log.basic_logging(os.environ[ENV_HUE_PROCESS_NAME]) logging.info("Welcome to Hue " + HUE_DESKTOP_VERSION) # Then we can safely import some more stuff from desktop import appmanager from desktop.lib import conf # Add fancy logging desktop.log.fancy_logging() ############################################################ # Part 2: Generic Configuration ############################################################ # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = False # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' ############################################################ # Part 3: Django configuration ############################################################ # Additional locations of static files STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'desktop', 'libs', 'indexer', 'src', 'indexer', 'static'), os.path.join(BASE_DIR, 'desktop', 'libs', 'liboauth', 'src', 'liboauth', 'static'), ) STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.CachedStaticFilesStorage' # For Django admin interface STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'build', 'static') # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader' ) MIDDLEWARE_CLASSES = [ # The order matters 'desktop.middleware.MetricsMiddleware', 'desktop.middleware.EnsureSafeMethodMiddleware', 'desktop.middleware.AuditLoggingMiddleware', 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'desktop.middleware.SpnegoMiddleware', 'desktop.middleware.HueRemoteUserMiddleware', 'django.middleware.locale.LocaleMiddleware', 'babeldjango.middleware.LocaleMiddleware', 'desktop.middleware.AjaxMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # Must be after Session, Auth, and Ajax. Before everything else. 'desktop.middleware.LoginAndPermissionMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'desktop.middleware.NotificationMiddleware', 'desktop.middleware.ExceptionMiddleware', 'desktop.middleware.ClusterMiddleware', # 'debug_toolbar.middleware.DebugToolbarMiddleware' 'django.middleware.csrf.CsrfViewMiddleware', 'django.middleware.http.ConditionalGetMiddleware', 'axes.middleware.FailedLoginMiddleware', ] if os.environ.get(ENV_DESKTOP_DEBUG): MIDDLEWARE_CLASSES.append('desktop.middleware.HtmlValidationMiddleware') logging.debug("Will try to validate generated HTML.") ROOT_URLCONF = 'desktop.urls' # Hue runs its own wsgi applications WSGI_APPLICATION = None TEMPLATE_DIRS = ( get_desktop_root("core/templates"), ) INSTALLED_APPS = [ 'django.contrib.auth', 'django_openid_auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.staticfiles', 'django.contrib.admin', 'django_extensions', # 'debug_toolbar', 'south', # database migration tool # i18n support 'babeldjango', # Desktop injects all the other installed apps into here magically. 'desktop', # App that keeps track of failed logins. 
'axes', ] LOCALE_PATHS = [ get_desktop_root('core/src/desktop/locale') ] # Keep default values up to date TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.request', 'django.contrib.messages.context_processors.messages', # Not default 'desktop.context_processors.app_name', ) # Desktop doesn't use an auth profile module, because # because it doesn't mesh very well with the notion # of having multiple apps. If your app needs # to store data related to users, it should # manage its own table with an appropriate foreign key. AUTH_PROFILE_MODULE=None LOGIN_REDIRECT_URL = "/" LOGOUT_REDIRECT_URL = "/" # For djangosaml2 bug. PYLINTRC = get_desktop_root('.pylintrc') # Insert our HDFS upload handler FILE_UPLOAD_HANDLERS = ( 'hadoop.fs.upload.HDFSfileUploadHandler', 'django.core.files.uploadhandler.MemoryFileUploadHandler', 'django.core.files.uploadhandler.TemporaryFileUploadHandler', ) # Custom CSRF Failure View CSRF_FAILURE_VIEW = 'desktop.views.csrf_failure' ############################################################ # Part 4: Installation of apps ############################################################ _config_dir = os.getenv("HUE_CONF_DIR", get_desktop_root("conf")) # Libraries are loaded and configured before the apps appmanager.load_libs() _lib_conf_modules = [dict(module=app.conf, config_key=None) for app in appmanager.DESKTOP_LIBS if app.conf is not None] LOCALE_PATHS.extend([app.locale_path for app in appmanager.DESKTOP_LIBS]) # Load desktop config _desktop_conf_modules = [dict(module=desktop.conf, config_key=None)] conf.initialize(_desktop_conf_modules, _config_dir) # Register the redaction filters into the root logger as soon as possible. 
desktop.redaction.register_log_filtering(desktop.conf.get_redaction_policy()) # Activate l10n # Install apps appmanager.load_apps(desktop.conf.APP_BLACKLIST.get()) for app in appmanager.DESKTOP_APPS: INSTALLED_APPS.extend(app.django_apps) LOCALE_PATHS.append(app.locale_path) logging.debug("Installed Django modules: %s" % ",".join(map(str, appmanager.DESKTOP_MODULES))) # Load app configuration _app_conf_modules = [dict(module=app.conf, config_key=app.config_key) for app in appmanager.DESKTOP_APPS if app.conf is not None] conf.initialize(_lib_conf_modules, _config_dir) conf.initialize(_app_conf_modules, _config_dir) # Now that we've loaded the desktop conf, set the django DEBUG mode based on the conf. DEBUG = desktop.conf.DJANGO_DEBUG_MODE.get() TEMPLATE_DEBUG = DEBUG ############################################################ # Part 4a: Django configuration that requires bound Desktop # configs. ############################################################ # Configure allowed hosts ALLOWED_HOSTS = desktop.conf.ALLOWED_HOSTS.get() # Configure hue admins ADMINS = [] for admin in desktop.conf.DJANGO_ADMINS.get(): admin_conf = desktop.conf.DJANGO_ADMINS[admin] if 'name' in admin_conf.bind_to and 'email' in admin_conf.bind_to: ADMINS.append(((admin_conf.NAME.get(), admin_conf.EMAIL.get()))) ADMINS = tuple(ADMINS) MANAGERS = ADMINS # Server Email Address SERVER_EMAIL = desktop.conf.DJANGO_SERVER_EMAIL.get() # Email backend EMAIL_BACKEND = desktop.conf.DJANGO_EMAIL_BACKEND.get() # Configure database if os.getenv('DESKTOP_DB_CONFIG'): conn_string = os.getenv('DESKTOP_DB_CONFIG') logging.debug("DESKTOP_DB_CONFIG SET: %s" % (conn_string)) default_db = dict(zip( ["ENGINE", "NAME", "TEST_NAME", "USER", "PASSWORD", "HOST", "PORT"], conn_string.split(':'))) else: test_name = os.environ.get('DESKTOP_DB_TEST_NAME', get_desktop_root('desktop-test.db')) logging.debug("DESKTOP_DB_TEST_NAME SET: %s" % test_name) test_user = os.environ.get('DESKTOP_DB_TEST_USER', 'hue_test') 
logging.debug("DESKTOP_DB_TEST_USER SET: %s" % test_user) default_db = { "ENGINE" : desktop.conf.DATABASE.ENGINE.get(), "NAME" : desktop.conf.DATABASE.NAME.get(), "USER" : desktop.conf.DATABASE.USER.get(), "PASSWORD" : desktop.conf.get_database_password(), "HOST" : desktop.conf.DATABASE.HOST.get(), "PORT" : str(desktop.conf.DATABASE.PORT.get()), "OPTIONS": force_dict_to_strings(desktop.conf.DATABASE.OPTIONS.get()), # DB used for tests "TEST_NAME" : test_name, "TEST_USER" : test_user, # Wrap each request in a transaction. "ATOMIC_REQUESTS" : True, } DATABASES = { 'default': default_db } CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'unique-hue' } } # Configure sessions SESSION_COOKIE_AGE = desktop.conf.SESSION.TTL.get() SESSION_COOKIE_SECURE = desktop.conf.SESSION.SECURE.get() SESSION_EXPIRE_AT_BROWSER_CLOSE = desktop.conf.SESSION.EXPIRE_AT_BROWSER_CLOSE.get() # HTTP only SESSION_COOKIE_HTTPONLY = desktop.conf.SESSION.HTTP_ONLY.get() # django-nose test specifics TEST_RUNNER = 'desktop.lib.test_runners.HueTestRunner' # Turn off cache middleware if 'test' in sys.argv: CACHE_MIDDLEWARE_SECONDS = 0 # Limit Nose coverage to Hue apps NOSE_ARGS = [ '--cover-package=%s' % ','.join([app.name for app in appmanager.DESKTOP_APPS + appmanager.DESKTOP_LIBS]), '--no-path-adjustment', '--traverse-namespace' ] TIME_ZONE = desktop.conf.TIME_ZONE.get() if desktop.conf.DEMO_ENABLED.get(): AUTHENTICATION_BACKENDS = ('desktop.auth.backend.DemoBackend',) else: AUTHENTICATION_BACKENDS = tuple(desktop.conf.AUTH.BACKEND.get()) EMAIL_HOST = desktop.conf.SMTP.HOST.get() EMAIL_PORT = desktop.conf.SMTP.PORT.get() EMAIL_HOST_USER = desktop.conf.SMTP.USER.get() EMAIL_HOST_PASSWORD = desktop.conf.get_smtp_password() EMAIL_USE_TLS = desktop.conf.SMTP.USE_TLS.get() DEFAULT_FROM_EMAIL = desktop.conf.SMTP.DEFAULT_FROM.get() # Used for securely creating sessions. Should be unique and not shared with anybody. 
Changing auth backends will invalidate all open sessions. SECRET_KEY = desktop.conf.get_secret_key() if SECRET_KEY: SECRET_KEY += str(AUTHENTICATION_BACKENDS) else: import uuid SECRET_KEY = str(uuid.uuid4()) # Axes AXES_LOGIN_FAILURE_LIMIT = desktop.conf.AUTH.LOGIN_FAILURE_LIMIT.get() AXES_LOCK_OUT_AT_FAILURE = desktop.conf.AUTH.LOGIN_LOCK_OUT_AT_FAILURE.get() AXES_COOLOFF_TIME = desktop.conf.AUTH.LOGIN_COOLOFF_TIME.get() AXES_USE_USER_AGENT = desktop.conf.AUTH.LOGIN_LOCK_OUT_BY_COMBINATION_BROWSER_USER_AGENT_AND_IP.get() AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = desktop.conf.AUTH.LOGIN_LOCK_OUT_BY_COMBINATION_USER_AND_IP.get() # SAML SAML_AUTHENTICATION = 'libsaml.backend.SAML2Backend' in AUTHENTICATION_BACKENDS if SAML_AUTHENTICATION: from libsaml.saml_settings import * INSTALLED_APPS.append('libsaml') LOGIN_URL = '/saml2/login/' SESSION_EXPIRE_AT_BROWSER_CLOSE = True # Middleware classes. for middleware in desktop.conf.MIDDLEWARE.get(): MIDDLEWARE_CLASSES.append(middleware) # OpenId OPENID_AUTHENTICATION = 'libopenid.backend.OpenIDBackend' in AUTHENTICATION_BACKENDS if OPENID_AUTHENTICATION: from libopenid.openid_settings import * INSTALLED_APPS.append('libopenid') LOGIN_URL = '/openid/login' SESSION_EXPIRE_AT_BROWSER_CLOSE = True # OAuth OAUTH_AUTHENTICATION='liboauth.backend.OAuthBackend' in AUTHENTICATION_BACKENDS if OAUTH_AUTHENTICATION: INSTALLED_APPS.append('liboauth') LOGIN_URL = '/oauth/accounts/login' SESSION_EXPIRE_AT_BROWSER_CLOSE = True # URL Redirection white list. if desktop.conf.REDIRECT_WHITELIST.get(): MIDDLEWARE_CLASSES.append('desktop.middleware.EnsureSafeRedirectURLMiddleware') # Enable X-Forwarded-Host header if the load balancer requires it USE_X_FORWARDED_HOST = desktop.conf.USE_X_FORWARDED_HOST.get() # Support HTTPS load-balancing if desktop.conf.SECURE_PROXY_SSL_HEADER.get(): SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https') # Add last activity tracking. 
if 'useradmin' in appmanager.DESKTOP_APPS: MIDDLEWARE_CLASSES.append('useradmin.middleware.UpdateLastActivityMiddleware') ############################################################ # Necessary for South to not fuzz with tests. Fixed in South 0.7.1 SKIP_SOUTH_TESTS = True # Set up environment variable so Kerberos libraries look at our private # ticket cache os.environ['KRB5CCNAME'] = desktop.conf.KERBEROS.CCACHE_PATH.get() # If Hue is configured to use a CACERTS truststore, make sure that the # REQUESTS_CA_BUNDLE is set so that we can use it when we make external requests. # This is for the REST calls made by Hue with the requests library. if desktop.conf.SSL_CACERTS.get() and os.environ.get('REQUESTS_CA_BUNDLE') is None: os.environ['REQUESTS_CA_BUNDLE'] = desktop.conf.SSL_CACERTS.get() # Preventing local build failure by not validating the default value of REQUESTS_CA_BUNDLE if os.environ.get('REQUESTS_CA_BUNDLE') and os.environ.get('REQUESTS_CA_BUNDLE') != desktop.conf.SSL_CACERTS.config.default and not os.path.isfile(os.environ['REQUESTS_CA_BUNDLE']): raise Exception(_('SSL Certificate pointed by REQUESTS_CA_BUNDLE does not exist: %s') % os.environ['REQUESTS_CA_BUNDLE']) # Memory if desktop.conf.MEMORY_PROFILER.get(): MEMORY_PROFILER = hpy() MEMORY_PROFILER.setrelheap() if not desktop.conf.DATABASE_LOGGING.get(): def disable_database_logging(): from django.db.backends import BaseDatabaseWrapper from django.db.backends.util import CursorWrapper BaseDatabaseWrapper.make_debug_cursor = lambda self, cursor: CursorWrapper(cursor, self) disable_database_logging()
IlyaLab/CombiningDependentPvaluesUsingEBM
refs/heads/master
Python/PathwayParser.py
1
# -*- coding: utf-8 -*- """ Copyright 2015, Institute for Systems Biology. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Author: William Poole Email: william.poole@systemsbiology.org / tknijnen@systemsbiology.org Created: June 2015 """ from numpy import * from scipy.spatial.distance import squareform from scipy.cluster.hierarchy import linkage from scipy.cluster.hierarchy import leaves_list #import RandomPathwayParser as RPP print "parsing pathways" PathwayGeneDict = {} GenePathwayDict = {} f = open( "../Data/only_NCI_Nature_ver4.tab") for line in f: ind1 = line.index("\t") pathway = line[:ind1] ind2 = line[ind1+1:].index("\t")+ind1+1 genes = [] iterate = True ind1 = ind2 while (iterate): ind1 = ind2 try: ind2 = line[ind1+1:].index(",")+ind1+1 gene = line[ind1+1:ind2] genes.append(gene) if gene not in GenePathwayDict: GenePathwayDict[gene] = [pathway] else: GenePathwayDict[gene].append(pathway) if ind2 == ind1: iterate = False except ValueError: iterate = False PathwayGeneDict[pathway]=genes f.close() #Sort pathway list using hierarchical clustering global_pathway_list = [p for p in PathwayGeneDict.keys() if len(PathwayGeneDict[p])>=1] L = len(global_pathway_list) DM = zeros((L, L)) for i in range(L): for j in range(L): p1 = global_pathway_list[i] p2 = global_pathway_list[j] intersection = len([g for g in PathwayGeneDict[p1] if g in PathwayGeneDict[p2]]) union = len(set(PathwayGeneDict[p1]+PathwayGeneDict[p2])) DM[i, j] = 1.0-1.0*intersection/union DM_c = squareform(DM) linkageMatrix = linkage(DM_c) 
pathway_list_clustering = leaves_list(linkageMatrix) sorted_pathway_list = array(global_pathway_list)[pathway_list_clustering] pw_ontology = open( "../Data/pathway_ontology_annotations.txt") PathwayParentDict = {} PathwayChildDict = {} parent_list = [] child_list = [] leaf_list = [] all_pathway_list = [] for line in pw_ontology: L = line.replace("\n", "").replace("\r", "").split("\t") pathway = str.upper(L[3]) all_pathway_list.append(pathway) parent = str.upper(L[4]) if pathway in PathwayParentDict: PathwayParentDict[pathway].append(parent) else: PathwayParentDict[pathway]=[parent] if parent != '': child_list.append(pathway) if pathway in parent_list and parent != '': parent_list.remove(pathway) if parent not in child_list and parent not in parent_list: parent_list.append(parent) if parent not in PathwayChildDict: PathwayChildDict[parent] = [pathway] else: PathwayChildDict[parent].append(pathway) AllPathwayGeneDict = {} AllGenePathwayDict = {} def recursive_gene_pathway_merger(pathway_list): genes = [] for pathway in pathway_list: if pathway in PathwayGeneDict: sub_genes = list(set(PathwayGeneDict[pathway])) if pathway in PathwayChildDict: recursive_gene_pathway_merger(PathwayChildDict[pathway]) elif pathway in PathwayChildDict: sub_genes = recursive_gene_pathway_merger(PathwayChildDict[pathway]) else: sub_genes = [] genes += sub_genes AllPathwayGeneDict[pathway]=sub_genes return list(set(genes)) leaf_list = [p for p in PathwayParentDict if p not in PathwayChildDict] recursive_gene_pathway_merger(['']) for pathway in [p for p in AllPathwayGeneDict if p != ""]: genes = AllPathwayGeneDict[pathway] for gene in genes: if gene in AllGenePathwayDict: #print "can't be?" AllGenePathwayDict[gene].append(pathway) else: AllGenePathwayDict[gene]=[pathway] #print "could be?" 
original_leaves = [p for p in PathwayGeneDict if p in leaf_list and len(PathwayGeneDict[p])>0] def pathway_gene_overlap(pathway_list): overlap_matrix = zeros((len(pathway_list), len(pathway_list))) for i in range(len(pathway_list)): for j in range(i+1, len(pathway_list)): p1 = pathway_list[i] p2 = pathway_list[j] gl1 = AllPathwayGeneDict[p1] gl2 = AllPathwayGeneDict[p2] if len(gl1+gl2)>0: overlap_dist = 1.0*len([g for g in gl1 if g in gl2])/(len(gl1)+len(gl2)) overlap_matrix[i, j] = overlap_dist print pathway_list return overlap_matrix #Create Random Pathway original pathway mapping """ PRL = [(len(RPP.PathwayGeneDict[p]), p) for p in RPP.PathwayGeneDict if len(RPP.PathwayGeneDict[p])>0] PL = [(len(PathwayGeneDict[p]), p) for p in PathwayGeneDict if len(PathwayGeneDict[p]) > 0] PRL.sort() PL.sort() RandomPathwayMapping = {} for i in range(len(PL)): pathway = PL[i][1] random_pathway = PRL[i][1] RandomPathwayMapping[pathway] = random_pathway RandomPathwayMapping[random_pathway] = pathway """
pridemusvaire/youtube-dl
refs/heads/master
youtube_dl/extractor/aol.py
145
from __future__ import unicode_literals import re from .common import InfoExtractor class AolIE(InfoExtractor): IE_NAME = 'on.aol.com' _VALID_URL = r'''(?x) (?: aol-video:| http://on\.aol\.com/ (?: video/.*-| playlist/(?P<playlist_display_id>[^/?#]+?)-(?P<playlist_id>[0-9]+)[?#].*_videoid= ) ) (?P<id>[0-9]+) (?:$|\?) ''' _TESTS = [{ 'url': 'http://on.aol.com/video/u-s--official-warns-of-largest-ever-irs-phone-scam-518167793?icid=OnHomepageC2Wide_MustSee_Img', 'md5': '18ef68f48740e86ae94b98da815eec42', 'info_dict': { 'id': '518167793', 'ext': 'mp4', 'title': 'U.S. Official Warns Of \'Largest Ever\' IRS Phone Scam', }, 'add_ie': ['FiveMin'], }, { 'url': 'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316', 'info_dict': { 'id': '152147', 'title': 'Brace Yourself - Today\'s Weirdest News', }, 'playlist_mincount': 10, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') playlist_id = mobj.group('playlist_id') if not playlist_id or self._downloader.params.get('noplaylist'): return self.url_result('5min:%s' % video_id) self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id)) webpage = self._download_webpage(url, playlist_id) title = self._html_search_regex( r'<h1 class="video-title[^"]*">(.+?)</h1>', webpage, 'title') playlist_html = self._search_regex( r"(?s)<ul\s+class='video-related[^']*'>(.*?)</ul>", webpage, 'playlist HTML') entries = [{ '_type': 'url', 'url': 'aol-video:%s' % m.group('id'), 'ie_key': 'Aol', } for m in re.finditer( r"<a\s+href='.*videoid=(?P<id>[0-9]+)'\s+class='video-thumb'>", playlist_html)] return { '_type': 'playlist', 'id': playlist_id, 'display_id': mobj.group('playlist_display_id'), 'title': title, 'entries': entries, }
JakeLowey/HackRPI2
refs/heads/master
django/contrib/staticfiles/handlers.py
85
import urllib from urlparse import urlparse from django.conf import settings from django.core.handlers.wsgi import WSGIHandler from django.contrib.staticfiles import utils from django.contrib.staticfiles.views import serve class StaticFilesHandler(WSGIHandler): """ WSGI middleware that intercepts calls to the static files directory, as defined by the STATIC_URL setting, and serves those files. """ def __init__(self, application, base_dir=None): self.application = application if base_dir: self.base_dir = base_dir else: self.base_dir = self.get_base_dir() self.base_url = urlparse(self.get_base_url()) super(StaticFilesHandler, self).__init__() def get_base_dir(self): return settings.STATIC_ROOT def get_base_url(self): utils.check_settings() return settings.STATIC_URL def _should_handle(self, path): """ Checks if the path should be handled. Ignores the path if: * the host is provided as part of the base_url * the request's path isn't under the media path (or equal) """ return path.startswith(self.base_url[2]) and not self.base_url[1] def file_path(self, url): """ Returns the relative path to the media file on disk for the given URL. """ relative_url = url[len(self.base_url[2]):] return urllib.url2pathname(relative_url) def serve(self, request): """ Actually serves the request path. """ return serve(request, self.file_path(request.path), insecure=True) def get_response(self, request): from django.http import Http404 if self._should_handle(request.path): try: return self.serve(request) except Http404, e: if settings.DEBUG: from django.views import debug return debug.technical_404_response(request, e) return super(StaticFilesHandler, self).get_response(request) def __call__(self, environ, start_response): if not self._should_handle(environ['PATH_INFO']): return self.application(environ, start_response) return super(StaticFilesHandler, self).__call__(environ, start_response)
StefanRijnhart/odoomrp-wip
refs/heads/8.0
mrp_bom_component_change/__openerp__.py
2
# -*- encoding: utf-8 -*- ############################################################################## # # Daniel Campos (danielcampos@avanzosc.es) Date: 02/10/2014 # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # ############################################################################## { "name": "MRP BoM component massive change", "version": "1.0", "description": """ This module allows to change massively one component by another on a list of BoMs. """, 'author': 'OdooMRP team', 'website': "http://www.odoomrp.com", "depends": ['mrp'], "category": "Manufacturing", "data": ['views/mrp_bom_change_view.xml', 'security/ir.model.access.csv' ], "installable": True }
ReganBell/QReview
refs/heads/master
build/lib/networkx/algorithms/cluster.py
29
# -*- coding: utf-8 -*- """Algorithms to characterize the number of triangles in a graph.""" from itertools import combinations import networkx as nx from networkx import NetworkXError __author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>', 'Dan Schult (dschult@colgate.edu)', 'Pieter Swart (swart@lanl.gov)', 'Jordi Torrents <jtorrents@milnou.net>']) # Copyright (C) 2004-2011 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. __all__= ['triangles', 'average_clustering', 'clustering', 'transitivity', 'square_clustering'] def triangles(G, nodes=None): """Compute the number of triangles. Finds the number of triangles that include a node as one vertex. Parameters ---------- G : graph A networkx graph nodes : container of nodes, optional (default= all nodes in G) Compute triangles for nodes in this container. Returns ------- out : dictionary Number of triangles keyed by node label. Examples -------- >>> G=nx.complete_graph(5) >>> print(nx.triangles(G,0)) 6 >>> print(nx.triangles(G)) {0: 6, 1: 6, 2: 6, 3: 6, 4: 6} >>> print(list(nx.triangles(G,(0,1)).values())) [6, 6] Notes ----- When computing triangles for the entire graph each triangle is counted three times, once at each node. Self loops are ignored. """ if G.is_directed(): raise NetworkXError("triangles() is not defined for directed graphs.") if nodes in G: # return single value return next(_triangles_and_degree_iter(G,nodes))[2] // 2 return dict( (v,t // 2) for v,d,t in _triangles_and_degree_iter(G,nodes)) def _triangles_and_degree_iter(G,nodes=None): """ Return an iterator of (node, degree, triangles). This double counts triangles so you may want to divide by 2. See degree() and triangles() for definitions and details. 
""" if G.is_multigraph(): raise NetworkXError("Not defined for multigraphs.") if nodes is None: nodes_nbrs = G.adj.items() else: nodes_nbrs= ( (n,G[n]) for n in G.nbunch_iter(nodes) ) for v,v_nbrs in nodes_nbrs: vs=set(v_nbrs)-set([v]) ntriangles=0 for w in vs: ws=set(G[w])-set([w]) ntriangles+=len(vs.intersection(ws)) yield (v,len(vs),ntriangles) def _weighted_triangles_and_degree_iter(G, nodes=None, weight='weight'): """ Return an iterator of (node, degree, weighted_triangles). Used for weighted clustering. """ if G.is_multigraph(): raise NetworkXError("Not defined for multigraphs.") if weight is None or G.edges()==[]: max_weight=1.0 else: max_weight=float(max(d.get(weight,1.0) for u,v,d in G.edges(data=True))) if nodes is None: nodes_nbrs = G.adj.items() else: nodes_nbrs= ( (n,G[n]) for n in G.nbunch_iter(nodes) ) for i,nbrs in nodes_nbrs: inbrs=set(nbrs)-set([i]) weighted_triangles=0.0 seen=set() for j in inbrs: wij=G[i][j].get(weight,1.0)/max_weight seen.add(j) jnbrs=set(G[j])-seen # this keeps from double counting for k in inbrs&jnbrs: wjk=G[j][k].get(weight,1.0)/max_weight wki=G[i][k].get(weight,1.0)/max_weight weighted_triangles+=(wij*wjk*wki)**(1.0/3.0) yield (i,len(inbrs),weighted_triangles*2) def average_clustering(G, nodes=None, weight=None, count_zeros=True): r"""Compute the average clustering coefficient for the graph G. The clustering coefficient for the graph is the average, .. math:: C = \frac{1}{n}\sum_{v \in G} c_v, where `n` is the number of nodes in `G`. Parameters ---------- G : graph nodes : container of nodes, optional (default=all nodes in G) Compute average clustering for nodes in this container. weight : string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. count_zeros : bool (default=False) If False include only the nodes with nonzero clustering in the average. 
Returns ------- avg : float Average clustering Examples -------- >>> G=nx.complete_graph(5) >>> print(nx.average_clustering(G)) 1.0 Notes ----- This is a space saving routine; it might be faster to use the clustering function to get a list and then take the average. Self loops are ignored. References ---------- .. [1] Generalizations of the clustering coefficient to weighted complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela, K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007). http://jponnela.com/web_documents/a9.pdf .. [2] Marcus Kaiser, Mean clustering coefficients: the role of isolated nodes and leafs on clustering measures for small-world networks. http://arxiv.org/abs/0802.2512 """ c=clustering(G,nodes,weight=weight).values() if not count_zeros: c = [v for v in c if v > 0] return sum(c)/float(len(c)) def clustering(G, nodes=None, weight=None): r"""Compute the clustering coefficient for nodes. For unweighted graphs, the clustering of a node `u` is the fraction of possible triangles through that node that exist, .. math:: c_u = \frac{2 T(u)}{deg(u)(deg(u)-1)}, where `T(u)` is the number of triangles through node `u` and `deg(u)` is the degree of `u`. For weighted graphs, the clustering is defined as the geometric average of the subgraph edge weights [1]_, .. math:: c_u = \frac{1}{deg(u)(deg(u)-1))} \sum_{uv} (\hat{w}_{uv} \hat{w}_{uw} \hat{w}_{vw})^{1/3}. The edge weights `\hat{w}_{uv}` are normalized by the maximum weight in the network `\hat{w}_{uv} = w_{uv}/\max(w)`. The value of `c_u` is assigned to 0 if `deg(u) < 2`. Parameters ---------- G : graph nodes : container of nodes, optional (default=all nodes in G) Compute clustering for nodes in this container. weight : string or None, optional (default=None) The edge attribute that holds the numerical value used as a weight. If None, then each edge has weight 1. 
Returns ------- out : float, or dictionary Clustering coefficient at specified nodes Examples -------- >>> G=nx.complete_graph(5) >>> print(nx.clustering(G,0)) 1.0 >>> print(nx.clustering(G)) {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0} Notes ----- Self loops are ignored. References ---------- .. [1] Generalizations of the clustering coefficient to weighted complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela, K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007). http://jponnela.com/web_documents/a9.pdf """ if G.is_directed(): raise NetworkXError('Clustering algorithms are not defined ', 'for directed graphs.') if weight is not None: td_iter=_weighted_triangles_and_degree_iter(G,nodes,weight) else: td_iter=_triangles_and_degree_iter(G,nodes) clusterc={} for v,d,t in td_iter: if t==0: clusterc[v]=0.0 else: clusterc[v]=t/float(d*(d-1)) if nodes in G: return list(clusterc.values())[0] # return single value return clusterc def transitivity(G): r"""Compute graph transitivity, the fraction of all possible triangles present in G. Possible triangles are identified by the number of "triads" (two edges with a shared vertex). The transitivity is .. math:: T = 3\frac{\#triangles}{\#triads}. Parameters ---------- G : graph Returns ------- out : float Transitivity Examples -------- >>> G = nx.complete_graph(5) >>> print(nx.transitivity(G)) 1.0 """ triangles=0 # 6 times number of triangles contri=0 # 2 times number of connected triples for v,d,t in _triangles_and_degree_iter(G): contri += d*(d-1) triangles += t if triangles==0: # we had no triangles or possible triangles return 0.0 else: return triangles/float(contri) def square_clustering(G, nodes=None): r""" Compute the squares clustering coefficient for nodes. For each node return the fraction of possible squares that exist at the node [1]_ .. 
math:: C_4(v) = \frac{ \sum_{u=1}^{k_v} \sum_{w=u+1}^{k_v} q_v(u,w) }{ \sum_{u=1}^{k_v} \sum_{w=u+1}^{k_v} [a_v(u,w) + q_v(u,w)]}, where `q_v(u,w)` are the number of common neighbors of `u` and `w` other than `v` (ie squares), and `a_v(u,w) = (k_u - (1+q_v(u,w)+\theta_{uv}))(k_w - (1+q_v(u,w)+\theta_{uw}))`, where `\theta_{uw} = 1` if `u` and `w` are connected and 0 otherwise. Parameters ---------- G : graph nodes : container of nodes, optional (default=all nodes in G) Compute clustering for nodes in this container. Returns ------- c4 : dictionary A dictionary keyed by node with the square clustering coefficient value. Examples -------- >>> G=nx.complete_graph(5) >>> print(nx.square_clustering(G,0)) 1.0 >>> print(nx.square_clustering(G)) {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0} Notes ----- While `C_3(v)` (triangle clustering) gives the probability that two neighbors of node v are connected with each other, `C_4(v)` is the probability that two neighbors of node v share a common neighbor different from v. This algorithm can be applied to both bipartite and unipartite networks. References ---------- .. [1] Pedro G. Lind, Marta C. González, and Hans J. Herrmann. 2005 Cycles and clustering in bipartite networks. Physical Review E (72) 056127. """ if nodes is None: node_iter = G else: node_iter = G.nbunch_iter(nodes) clustering = {} for v in node_iter: clustering[v] = 0.0 potential=0 for u,w in combinations(G[v], 2): squares = len((set(G[u]) & set(G[w])) - set([v])) clustering[v] += squares degm = squares + 1.0 if w in G[u]: degm += 1 potential += (len(G[u]) - degm) * (len(G[w]) - degm) + squares if potential > 0: clustering[v] /= potential if nodes in G: return list(clustering.values())[0] # return single value return clustering
pizzapanther/Django-Pizza
refs/heads/master
pizza/calendar/migrations/0001_initial.py
1
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Category' db.create_table(u'calendar_category', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=200)), )) db.send_create_signal(u'calendar', ['Category']) # Adding model 'Series' db.create_table(u'calendar_series', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=255)), )) db.send_create_signal(u'calendar', ['Series']) # Adding model 'Event' db.create_table(u'calendar_event', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=255)), ('start_dt', self.gf('django.db.models.fields.DateTimeField')()), ('end_dt', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), ('all_day', self.gf('django.db.models.fields.BooleanField')(default=False)), ('body', self.gf('django.db.models.fields.TextField')()), ('image', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['kitchen_sink.Image'], null=True, blank=True)), ('imageset', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['kitchen_sink.ImageSet'], null=True, blank=True)), ('series', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['calendar.Series'], null=True, blank=True)), )) db.send_create_signal(u'calendar', ['Event']) # Adding M2M table for field categories on 'Event' m2m_table_name = db.shorten_name(u'calendar_event_categories') db.create_table(m2m_table_name, ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('event', models.ForeignKey(orm[u'calendar.event'], null=False)), 
('category', models.ForeignKey(orm[u'calendar.category'], null=False)) )) db.create_unique(m2m_table_name, ['event_id', 'category_id']) def backwards(self, orm): # Deleting model 'Category' db.delete_table(u'calendar_category') # Deleting model 'Series' db.delete_table(u'calendar_series') # Deleting model 'Event' db.delete_table(u'calendar_event') # Removing M2M table for field categories on 'Event' db.delete_table(db.shorten_name(u'calendar_event_categories')) models = { u'calendar.category': { 'Meta': {'ordering': "('slug',)", 'object_name': 'Category'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'calendar.event': { 'Meta': {'ordering': "('start_dt', '-all_day', 'end_dt')", 'object_name': 'Event'}, 'all_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'body': ('django.db.models.fields.TextField', [], {}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['calendar.Category']", 'null': 'True', 'blank': 'True'}), 'end_dt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['kitchen_sink.Image']", 'null': 'True', 'blank': 'True'}), 'imageset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['kitchen_sink.ImageSet']", 'null': 'True', 'blank': 'True'}), 'series': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['calendar.Series']", 'null': 'True', 'blank': 'True'}), 'start_dt': ('django.db.models.fields.DateTimeField', [], {}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'calendar.series': { 'Meta': {'object_name': 'Series'}, u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'kitchen_sink.image': { 'Meta': {'ordering': "('title',)", 'object_name': 'Image'}, 'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'caption_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'credit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'credit_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'kitchen_sink.imageset': { 'Meta': {'ordering': "('title',)", 'object_name': 'ImageSet'}, 'captype': ('django.db.models.fields.CharField', [], {'default': "'override'", 'max_length': '10'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) } } complete_apps = ['calendar']
idea4bsd/idea4bsd
refs/heads/idea4bsd-master
python/testData/refactoring/rename/renameInheritors_after.py
83
class A: def qu(self): pass class B(A): def qu(self): pass
absoludity/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/html5lib/html5lib/__init__.py
426
""" HTML parsing library based on the WHATWG "HTML5" specification. The parser is designed to be compatible with existing HTML found in the wild and implements well-defined error recovery that is largely compatible with modern desktop web browsers. Example usage: import html5lib f = open("my_document.html") tree = html5lib.parse(f) """ from __future__ import absolute_import, division, unicode_literals from .html5parser import HTMLParser, parse, parseFragment from .treebuilders import getTreeBuilder from .treewalkers import getTreeWalker from .serializer import serialize __all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder", "getTreeWalker", "serialize"] __version__ = "0.9999-dev"
FX31337/FX-BT-Scripts
refs/heads/master
bstruct.py
1
# -*- coding: utf-8 -*- import struct import datetime import binascii def get_fields_size(spec): # Prepend an endianness mark to prevent calcsize to insert padding bytes fmt_str = "=" + "".join(x[1] for x in spec) return struct.calcsize(fmt_str) class BStruct: def __init__(self, buf, offset=0): for (name, fmt, *rest) in self._fields: field_size = struct.calcsize(fmt) val = struct.unpack_from(self._endianness + fmt, buf, offset) # Flatten the single-element arrays if type(val) is tuple and len(val) == 1: val = val[0] setattr(self, name, val) offset += field_size def __str__(self): ret = "" for (name, _, *fmt) in self._fields: val_repr = getattr(self, name) # Pretty print the value using the custom formatter. if len(fmt): (pp,) = fmt val_repr = pp(self, getattr(self, name)) ret += "{} = {}\n".format(name, val_repr) return ret def repack(self): blob_size = get_fields_size(self._fields) if blob_size == 0: return b"" offset = 0 blob = bytearray(b"\x00" * blob_size) for (name, fmt, *_) in self._fields: field_size = struct.calcsize(fmt) v = getattr(self, name) if fmt[-1] == "s" or len(fmt) == 1: v = [v] struct.pack_into(self._endianness + fmt, blob, offset, *v) offset += field_size return blob # # Pretty printers # def pretty_print_time(obj, x): return datetime.datetime.fromtimestamp(x) def pretty_print_string(obj, x): return x.decode("utf-8").rstrip("\0") def pretty_print_wstring(obj, x): return x.decode("utf-16").rstrip("\0") def pretty_print_bstring(obj, x): return binascii.hexlify(x) def pretty_print_ignore(obj, x): return "<...>" def pretty_print_hex(obj, x): return "{:08x}".format(x) def pretty_print_compact(obj, x): if any(x): return x return "[\\x00] * {}".format(len(x))
inklesspen/endpoints-management-python
refs/heads/master
test/test_label_descriptor.py
3
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import base64 import datetime import unittest2 from expects import be_none, be_true, expect, equal, raise_error from endpoints_management.control import (label_descriptor, sm_messages, report_request) _KNOWN = label_descriptor.KnownLabels ValueType = label_descriptor.ValueType class KnownLabelsBase(object): SUBJECT = None GIVEN_INFO = report_request.Info( api_method = u'dummy_method', api_version = u'dummy_version', location = u'dummy_location', referer = u'dummy_referer', consumer_project_number=1234) WANTED_LABEL_DICT = {} def _matching_descriptor(self, hide_default=False): res = sm_messages.LabelDescriptor( key=self.SUBJECT.label_name, valueType=self.SUBJECT.value_type) if res.valueType == ValueType.STRING and hide_default: res.valueType = None return res def _not_matched(self): d = self._matching_descriptor() d.valueType = ValueType.INT64 # no known labels have this type return d def test_should_be_supported(self): expect(_KNOWN.is_supported(self._matching_descriptor())).to(be_true) expect(_KNOWN.is_supported( self._matching_descriptor(hide_default=True))).to(be_true) expect(_KNOWN.is_supported(self._not_matched())).not_to(be_true) def test_should_be_matched_correctly(self): expect(self.SUBJECT.matches(self._matching_descriptor())).to(be_true) expect(self.SUBJECT.matches( self._matching_descriptor(hide_default=True))).to(be_true) 
expect(self.SUBJECT.matches(self._not_matched())).not_to(be_true) def test_should_update_request_info(self): given_dict = {} self.SUBJECT.do_labels_update(self.GIVEN_INFO, given_dict) expect(given_dict).to(equal(self.WANTED_LABEL_DICT)) class TestCredentialIdWithNoCreds(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.CREDENTIAL_ID class TestCredentialIdWithApiKey(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.CREDENTIAL_ID GIVEN_INFO = report_request.Info( api_key = u'dummy_api_key', ) WANTED_LABEL_DICT = {SUBJECT.label_name: b'apiKey:dummy_api_key'} class TestCredentialIdWithAuthIssuer(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.CREDENTIAL_ID GIVEN_INFO = report_request.Info( auth_issuer = u'dummy_issuer', auth_audience = u'dummy_audience') WANTED_VALUE = b'jwtAuth:issuer=' + base64.urlsafe_b64encode(b'dummy_issuer') WANTED_VALUE += b'&audience=' + base64.urlsafe_b64encode(b'dummy_audience') WANTED_LABEL_DICT = {SUBJECT.label_name: WANTED_VALUE} class EndUser(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.END_USER class EndUserCountry(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.END_USER_COUNTRY class ErrorType(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.ERROR_TYPE WANTED_LABEL_DICT = {SUBJECT.label_name: u'2xx'} class Protocol(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.PROTOCOL WANTED_LABEL_DICT = { SUBJECT.label_name: report_request.ReportedProtocols.UNKNOWN.name } class Referer(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.REFERER WANTED_LABEL_DICT = {SUBJECT.label_name: u'dummy_referer'} class ResponseCode(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.RESPONSE_CODE WANTED_LABEL_DICT = {SUBJECT.label_name: u'200'} class ResponseCodeClass(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.RESPONSE_CODE_CLASS WANTED_LABEL_DICT = {SUBJECT.label_name: u'2xx'} class StatusCodeWithOkStatus(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.STATUS_CODE WANTED_LABEL_DICT = 
{SUBJECT.label_name: u'0'} class StatusCodeWithKnown4XXStatus(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.STATUS_CODE GIVEN_INFO = report_request.Info( response_code = 401, ) WANTED_LABEL_DICT = {SUBJECT.label_name: u'16'} class StatusCodeWithUnknown4XXStatus(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.STATUS_CODE GIVEN_INFO = report_request.Info( response_code = 477, ) WANTED_LABEL_DICT = {SUBJECT.label_name: u'9'} class StatusCodeWithKnown5XXStatus(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.STATUS_CODE GIVEN_INFO = report_request.Info( response_code = 501, ) WANTED_LABEL_DICT = {SUBJECT.label_name: u'12'} class StatusCodeWithUnknown5XXStatus(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.STATUS_CODE GIVEN_INFO = report_request.Info( response_code = 577, ) WANTED_LABEL_DICT = {SUBJECT.label_name: u'13'} class StatusCodeWithUnknownStatus(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.STATUS_CODE GIVEN_INFO = report_request.Info( response_code = 777, ) WANTED_LABEL_DICT = {SUBJECT.label_name: u'2'} class GaeCloneId(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GAE_CLONE_ID class GaeModuleId(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GAE_MODULE_ID class GaeReplicaIndex(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GAE_REPLICA_INDEX class GaeVersionId(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GAE_VERSION_ID class GcpLocation(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GCP_LOCATION WANTED_LABEL_DICT = {SUBJECT.label_name: u'dummy_location'} class GcpProject(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GCP_PROJECT class GcpRegion(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GCP_REGION class GcpResourceId(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GCP_RESOURCE_ID class GcpResourceType(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GCP_RESOURCE_TYPE class GcpService(KnownLabelsBase, unittest2.TestCase): SUBJECT = 
_KNOWN.GCP_SERVICE class GcpZone(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GCP_ZONE class GcpUid(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GCP_UID class GcpApiMethod(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GCP_API_METHOD WANTED_LABEL_DICT = {SUBJECT.label_name: u'dummy_method'} class GcpApiVersion(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.GCP_API_VERSION WANTED_LABEL_DICT = {SUBJECT.label_name: u'dummy_version'} class SccAndroidCertFingerprint(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.SCC_ANDROID_CERT_FINGERPRINT class SccAndroidPackageName(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.SCC_ANDROID_PACKAGE_NAME class SccCallerIp(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.SCC_CALLER_IP class SccIosBundleId(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.SCC_IOS_BUNDLE_ID class SccPlatform(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.SCC_PLATFORM WANTED_LABEL_DICT = { SUBJECT.label_name: report_request.ReportedPlatforms.UNKNOWN.name } class SccReferer(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.SCC_REFERER class SccServiceAgent(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.SCC_SERVICE_AGENT WANTED_LABEL_DICT = {SUBJECT.label_name: label_descriptor.SERVICE_AGENT} class SccUserAgent(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.SCC_USER_AGENT WANTED_LABEL_DICT = {SUBJECT.label_name: label_descriptor.USER_AGENT} class SccConsumerProject(KnownLabelsBase, unittest2.TestCase): SUBJECT = _KNOWN.SCC_CONSUMER_PROJECT WANTED_LABEL_DICT = {SUBJECT.label_name: "1234"}
glorotxa/SME
refs/heads/master
FB15k_Tri.py
5
from FB15k_exp import * from FB15k_evaluation import * savepath='FB15k_Tri' datapath='data/' launch(op='Tri', ndim=50, marge=0.25, rhoL=5, lremb=0.01, nbatches=150, dataset='FB', totepochs=500, test_all=500, neval=1000, savepath=savepath, datapath=datapath) print "\n##### EVALUATION #####\n" MR, T10 = RankingEvalFil(datapath=datapath, dataset='FB', op='Tri', loadmodel=savepath+'/best_valid_model.pkl', Nrel=1345, Nsyn=14951) print "\n##### MEAN RANK: %s #####\n" % (MR) print "\n##### HITS@10: %s #####\n" % (T10)
elancom/elasticsearch
refs/heads/master
dev-tools/create_bwc_index_with_some_ancient_segments.py
217
import create_bwc_index import logging import os import random import shutil import subprocess import sys import tempfile def fetch_version(version): logging.info('fetching ES version %s' % version) if subprocess.call([sys.executable, os.path.join(os.path.split(sys.argv[0])[0], 'get-bwc-version.py'), version]) != 0: raise RuntimeError('failed to download ES version %s' % version) def main(): ''' Creates a static back compat index (.zip) with mixed 0.20 (Lucene 3.x) and 0.90 (Lucene 4.x) segments. ''' logging.basicConfig(format='[%(levelname)s] [%(asctime)s] %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %I:%M:%S %p') logging.getLogger('elasticsearch').setLevel(logging.ERROR) logging.getLogger('urllib3').setLevel(logging.WARN) tmp_dir = tempfile.mkdtemp() try: data_dir = os.path.join(tmp_dir, 'data') repo_dir = os.path.join(tmp_dir, 'repo') logging.info('Temp data dir: %s' % data_dir) logging.info('Temp repo dir: %s' % repo_dir) first_version = '0.20.6' second_version = '0.90.6' index_name = 'index-%s-and-%s' % (first_version, second_version) # Download old ES releases if necessary: release_dir = os.path.join('backwards', 'elasticsearch-%s' % first_version) if not os.path.exists(release_dir): fetch_version(first_version) node = create_bwc_index.start_node(first_version, release_dir, data_dir, repo_dir, cluster_name=index_name) client = create_bwc_index.create_client() # Creates the index & indexes docs w/ first_version: create_bwc_index.generate_index(client, first_version, index_name) # Make sure we write segments: flush_result = client.indices.flush(index=index_name) if not flush_result['ok']: raise RuntimeError('flush failed: %s' % str(flush_result)) segs = client.indices.segments(index=index_name) shards = segs['indices'][index_name]['shards'] if len(shards) != 1: raise RuntimeError('index should have 1 shard but got %s' % len(shards)) first_version_segs = shards['0'][0]['segments'].keys() create_bwc_index.shutdown_node(node) print('%s server output:\n%s' % 
(first_version, node.stdout.read().decode('utf-8'))) node = None release_dir = os.path.join('backwards', 'elasticsearch-%s' % second_version) if not os.path.exists(release_dir): fetch_version(second_version) # Now also index docs with second_version: node = create_bwc_index.start_node(second_version, release_dir, data_dir, repo_dir, cluster_name=index_name) client = create_bwc_index.create_client() # If we index too many docs, the random refresh/flush causes the ancient segments to be merged away: num_docs = 10 create_bwc_index.index_documents(client, index_name, 'doc', num_docs) # Make sure we get a segment: flush_result = client.indices.flush(index=index_name) if not flush_result['ok']: raise RuntimeError('flush failed: %s' % str(flush_result)) # Make sure we see mixed segments (it's possible Lucene could have "accidentally" merged away the first_version segments): segs = client.indices.segments(index=index_name) shards = segs['indices'][index_name]['shards'] if len(shards) != 1: raise RuntimeError('index should have 1 shard but got %s' % len(shards)) second_version_segs = shards['0'][0]['segments'].keys() #print("first: %s" % first_version_segs) #print("second: %s" % second_version_segs) for segment_name in first_version_segs: if segment_name in second_version_segs: # Good: an ancient version seg "survived": break else: raise RuntimeError('index has no first_version segs left') for segment_name in second_version_segs: if segment_name not in first_version_segs: # Good: a second_version segment was written break else: raise RuntimeError('index has no second_version segs left') create_bwc_index.shutdown_node(node) print('%s server output:\n%s' % (second_version, node.stdout.read().decode('utf-8'))) node = None create_bwc_index.compress_index('%s-and-%s' % (first_version, second_version), tmp_dir, 'core/src/test/resources/org/elasticsearch/action/admin/indices/upgrade') finally: if node is not None: create_bwc_index.shutdown_node(node) shutil.rmtree(tmp_dir) if 
__name__ == '__main__': main()
CodeForPhilly/chime
refs/heads/dependabot/pip/urllib3-1.26.5
src/chime_dash/app/pages/sidebar.py
1
"""components/sidebar Initializes the side bar containing the various inputs for the model #! _SIDEBAR_ELEMENTS should be considered for moving else where """ from collections import OrderedDict from datetime import date, datetime from typing import List from dash.development.base_component import ComponentMeta from dash_core_components import Store from dash_html_components import Nav, Div from chime_dash.app.components.base import Page from chime_dash.app.services.callbacks import SidebarCallbacks from chime_dash.app.utils import ReadOnlyDict from chime_dash.app.utils.templates import ( create_switch_input, create_number_input, create_date_input, create_header, create_line_break, ) FLOAT_INPUT_MIN = 0.001 FLOAT_INPUT_STEP = "any" _SIDEBAR_ELEMENTS = ReadOnlyDict(OrderedDict( ### hospital_parameters={"type": "header", "size": "h3"}, population={"type": "number", "min": 1, "step": 1}, market_share={ "type": "number", "min": FLOAT_INPUT_MIN, "step": FLOAT_INPUT_STEP, "max": 100.0, "percent": True, }, current_hospitalized={"type": "number", "min": 0, "step": 1}, ### line_break_1={"type": "linebreak"}, spread_parameters={"type": "header", "size": "h4"}, spread_parameters_checkbox={"type": "switch", "on": False}, date_first_hospitalized={ "type": "date", "min_date_allowed": datetime(2019, 10, 1), "max_date_allowed": datetime(2021, 12, 31) }, doubling_time={"type": "number", "min": FLOAT_INPUT_MIN, "step": FLOAT_INPUT_STEP}, social_distancing_checkbox={"type": "switch", "on": False}, social_distancing_start_date={ "type": "date", "min_date_allowed": datetime(2019, 10, 1), "max_date_allowed": datetime(2021, 12, 31), }, relative_contact_rate={ "type": "number", "min": 0.0, "step": FLOAT_INPUT_STEP, "max": 100.0, "percent": True, }, ### line_break_2={"type": "linebreak"}, severity_parameters={"type": "header", "size": "h4"}, hospitalized_rate={ "type": "number", "min": 0.0, "step": FLOAT_INPUT_STEP, "max": 100.0, "percent": True, }, icu_rate={ "type": "number", "min": 0.0, 
"step": FLOAT_INPUT_STEP, "max": 100.0, "percent": True, }, ventilated_rate={ "type": "number", "min": 0.0, "step": FLOAT_INPUT_STEP, "max": 100.0, "percent": True, }, infectious_days={"type": "number", "min": 0, "step": 1}, hospitalized_los={"type": "number", "min": 0, "step": 1}, icu_los={"type": "number", "min": 0, "step": 1}, ventilated_los={"type": "number", "min": 0, "step": 1}, ### line_break_3={"type": "linebreak"}, display_parameters={"type": "header", "size": "h4"}, n_days={"type": "number", "min": 30, "step": 1}, current_date={ "type": "date", "min_date_allowed": datetime(2019, 10, 1), "max_date_allowed": datetime(2021, 12, 31), "initial_visible_month": date.today(), "date": date.today(), }, show_tables={"type": "switch", "value": False} )) class Sidebar(Page): """Sidebar to the left of the screen contains the various inputs used to interact with the model. """ callbacks_cls = SidebarCallbacks # localization temp. for widget descriptions localization_file = "sidebar.yml" # Different kind of inputs store different kind of "values" # This tells the callback output for which field to look input_type_map = ReadOnlyDict(OrderedDict( (key, value["type"]) for key, value in _SIDEBAR_ELEMENTS.items() if value["type"] not in ("header", "linebreak") )) input_value_map = ReadOnlyDict(OrderedDict( (key, {"number": "value", "date": "date", "switch": "on"}.get(value, "value")) for key, value in input_type_map.items() )) input_state_map = ReadOnlyDict(OrderedDict( [ ('group_date_first_hospitalized', 'style'), ('group_doubling_time', 'style'), ('group_social_distancing_start_date', 'style'), ('group_relative_contact_rate', 'style'), ] )) def get_html(self) -> List[ComponentMeta]: """Initializes the view """ elements = [ Store(id="sidebar-store") ] for idx, data in _SIDEBAR_ELEMENTS.items(): if data["type"] == "number": element = create_number_input(idx, data, self.content, self.defaults) elif data["type"] == "switch": element = create_switch_input(idx, data, 
self.content) elif data["type"] == "date": element = create_date_input(idx, data, self.content, self.defaults) elif data["type"] == "header": element = create_header(idx, self.content) elif data["type"] == "linebreak": element = create_line_break(idx) else: raise ValueError( "Failed to parse input '{idx}' with data '{data}'".format( idx=idx, data=data ) ) elements.append(element) sidebar = Nav( className="bg-light border-right", children=Div( children=elements, className="px-3 pb-5", ), style={ "bottom": 0, "left": 0, "overflowY": "scroll", "position": "fixed", "top": "56px", "width": "320px", "zIndex": 1, "padding-top": "1rem", }, ) return [sidebar]
msdgwzhy6/enjarify
refs/heads/master
enjarify/jvm/writeir.py
2
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections, struct
from functools import partial

from . import ir
from .. import flags, dalvik
from .jvmops import *
from . import arraytypes as arrays
from . import scalartypes as scalars
from . import typeinference, mathops
from .optimization import stack
from .. import util

# Code for converting dalvik bytecode to intermediate representation
# effectively this is just Java bytecode instructions with some abstractions for
# later optimization

# Maps a scalar type to its 0-4 ordinal, used to offset base opcodes such as
# IRETURN (the int/long/float/double/object instruction families are contiguous).
_ilfdaOrd = [scalars.INT, scalars.LONG, scalars.FLOAT, scalars.DOUBLE, scalars.OBJ].index
# Primitive array descriptors ([Z, [C, ...) -> NEWARRAY atype operand (4..11)
_newArrayCodes = {('['+t).encode(): v for t, v in zip('ZCFDBSIJ', range(4, 12))}
# Element descriptor -> array store/load opcode. The literal space in 'IJFD BCS'
# lines the descriptors up with the IASTORE..SASTORE / IALOAD..SALOAD opcode
# ranges (the slot for AASTORE/AALOAD is handled via the .get() default).
_arrStoreOps = {t.encode(): v for t, v in zip('IJFD BCS', range(IASTORE, SASTORE+1))}
_arrLoadOps = {t.encode(): v for t, v in zip('IJFD BCS', range(IALOAD, SALOAD+1))}

# For generating IR instructions corresponding to a single Dalvik instruction
class IRBlock:
    """Accumulates the JVM IR instructions emitted for one Dalvik instruction.

    Starts with an ir.Label at the instruction's position; helper methods
    append raw bytecode (u8/u8u8/...), register accesses, constants, and
    control-flow IR nodes.
    """

    def __init__(self, parent, pos):
        # parent is the IRWriter; pos is the Dalvik bytecode position
        self.type_data = parent.types[pos]
        self.pool = parent.pool
        self.delay_consts = parent.opts.delay_consts
        self.pos = pos
        self.instructions = [ir.Label(pos)]
        self.except_labels = None

    def add(self, jvm_instr): self.instructions.append(jvm_instr)

    def _other(self, bytecode): self.add(ir.Other(bytecode=bytecode))

    # Raw bytecode emitters named after their operand layout
    def u8(self, op): self._other(struct.pack('>B', op))
    def u8u8(self, op, x): self._other(struct.pack('>BB', op, x))
    def u8u16(self, op, x): self._other(struct.pack('>BH', op, x))
    # wide non iinc
    def u8u8u16(self, op, op2, x): self._other(struct.pack('>BBH', op, op2, x))
    # invokeinterface
    def u8u16u8u8(self, op, x, y, z): self._other(struct.pack('>BHBB', op, x, y, z))

    def ldc(self, index):
        # LDC takes a one-byte constant pool index; fall back to LDC_W otherwise
        if index < 256:
            self.add(ir.OtherConstant(bytecode=struct.pack('>BB', LDC, index)))
        else:
            self.add(ir.OtherConstant(bytecode=struct.pack('>BH', LDC_W, index)))

    def load(self, reg, stype, desc=None, clsname=None):
        # if we know the register to be 0/null, don't bother loading
        if self.type_data.arrs[reg] == arrays.NULL:
            self.const(0, stype)
        else:
            self.add(ir.RegAccess(reg, stype, store=False))
            # cast to appropriate type if tainted
            if stype == scalars.OBJ and self.type_data.tainted[reg]:
                # at most one of desc/clsname may be supplied
                assert(desc is None or clsname is None)
                if clsname is None:
                    # remember to handle arrays - also fallthrough if desc is None
                    clsname = desc[1:-1] if (desc and desc.startswith(b'L')) else desc
                if clsname is not None and clsname != b'java/lang/Object':
                    self.u8u16(CHECKCAST, self.pool.class_(clsname))

    def loadAsArray(self, reg):
        """Load register reg, ensuring the value on the stack has an array type."""
        at = self.type_data.arrs[reg]
        if at == arrays.NULL:
            self.const_null()
        else:
            self.add(ir.RegAccess(reg, scalars.OBJ, store=False))
            if self.type_data.tainted[reg]:
                if at == arrays.INVALID:
                    # needs to be some type of object array, so just cast to Object[]
                    self.u8u16(CHECKCAST, self.pool.class_(b'[Ljava/lang/Object;'))
                else:
                    # note - will throw if actual type is boolean[] but there's not
                    # much we can do in this case
                    self.u8u16(CHECKCAST, self.pool.class_(at))

    def store(self, reg, stype):
        self.add(ir.RegAccess(reg, stype, store=True))

    def return_(self, stype=None):
        # stype=None means a void return
        if stype is None:
            self.u8(RETURN)
        else:
            self.u8(IRETURN + _ilfdaOrd(stype))

    def const(self, val, stype):
        """Push the (unsigned, already wrapped) constant val with scalar type stype."""
        assert((1<<64) > val >= 0)
        if stype == scalars.OBJ:
            # only the null object constant can be materialized this way
            assert(val == 0)
            self.const_null()
        else:
            # If constant pool is simple, assume we're in non-opt mode and only use
            # the constant pool for generating constants instead of calculating
            # bytecode sequences for them. If we're in opt mode, pass None for pool
            # to generate bytecode instead
            pool = None if self.delay_consts else self.pool
            self.add(ir.PrimConstant(stype, val, pool=pool))

    def const_null(self):
        self.add(ir.OtherConstant(bytecode=bytes([ACONST_NULL])))

    def fillarraysub(self, op, cbs, pop=True):
        """Emit per-element array stores: for each callback, dup the array ref,
        push the index, invoke the callback (which pushes the value), then op.

        pop=False leaves one copy of the array ref on the stack afterwards.
        """
        # stack.genDups yields the dup sequences needed before each store
        # (and a final sequence for cleanup) - see optimization.stack
        gen = stack.genDups(len(cbs), 0 if pop else 1)
        for i, cb in enumerate(cbs):
            for bytecode in next(gen):
                self._other(bytecode)
            self.const(i, scalars.INT)
            cb()
            self.u8(op)
        # may need to pop at end
        for bytecode in next(gen):
            self._other(bytecode)

    def new(self, desc):
        self.u8u16(NEW, self.pool.class_(desc))

    def newarray(self, desc):
        if desc in _newArrayCodes:
            self.u8u8(NEWARRAY, _newArrayCodes[desc])
        else:
            # can be either multidim array or object array descriptor
            desc = desc[1:]
            if desc.startswith(b'L'):
                desc = desc[1:-1]
            self.u8u16(ANEWARRAY, self.pool.class_(desc))

    def fillarraydata(self, op, stype, vals):
        self.fillarraysub(op, [partial(self.const, val, stype) for val in vals])

    def cast(self, dex, reg, index):
        # checkcast register reg to the class at dex type index, in place
        self.load(reg, scalars.OBJ)
        self.u8u16(CHECKCAST, self.pool.class_(dex.clsType(index)))
        self.store(reg, scalars.OBJ)

    def goto(self, target):
        self.add(ir.Goto(target))

    def if_(self, op, target):
        self.add(ir.If(op, target))

    def switch(self, default, jumps):
        # normalize keys to signed 32-bit and drop cases that just fall through
        # to the default; degenerate switches become a plain goto
        jumps = {util.s32(k):v for k,v in jumps.items() if v != default}
        if jumps:
            self.add(ir.Switch(default, jumps))
        else:
            self.goto(default)

    def addExceptLabels(self):
        """Insert (once) and return start/end labels bracketing the throwing
        instructions of this block, for use as an exception table range."""
        if self.except_labels is None:
            s_ind = 0
            e_ind = len(self.instructions)
            # assume only Other instructions can throw
            while s_ind < e_ind and not isinstance(self.instructions[s_ind], ir.Other):
                s_ind += 1
            while s_ind < e_ind and not isinstance(self.instructions[e_ind-1], ir.Other):
                e_ind -= 1
            assert(s_ind < e_ind)
            if s_ind < e_ind:
                self.except_labels = start_lbl, end_lbl = ir.Label(), ir.Label()
                self.instructions.insert(s_ind, start_lbl)
                self.instructions.insert(e_ind+1, end_lbl)
        return self.except_labels

class IRWriter:
    """Holds the in-progress IR translation of one method: the per-position
    instruction blocks, exception table entries, labels, and jump-target
    bookkeeping consumed by the later optimization/assembly passes."""

    def __init__(self, pool, method, types, opts):
        self.pool = pool
        self.method = method
        self.types = types          # inferred type data per bytecode position
        self.opts = opts
        self.iblocks = {}           # pos -> IRBlock (cleared by flatten())
        self.flat_instructions = None
        self.excepts = []           # (start, end, target, jctype) exception entries
        self.labels = {}            # pos -> ir.Label at the head of that block
        self.initial_args = None
        self.exception_redirects = {}  # handler pos -> label of synthetic POP stub
        self.except_starts = set()
        self.except_ends = set()
        self.jump_targets = set()
        # used to detect jump targets with a unique predecessor
        self.target_pred_counts = collections.defaultdict(int)

        self.numregs = None # will be set once registers are allocated (see registers.py)
        self.upper_bound = None # upper bound on code length

    def calcInitialArgs(self, nregs, scalar_ptypes):
        # Dalvik passes parameters in the *last* registers of the frame;
        # record (register, scalar type) pairs, None for long/double tops.
        self.initial_args = args = []
        regoff = nregs - len(scalar_ptypes)
        for i, st in enumerate(scalar_ptypes):
            if st == scalars.INVALID:
                args.append(None)
            else:
                args.append((i + regoff, st))

    def addExceptionRedirect(self, target):
        return self.exception_redirects.setdefault(target, ir.Label())

    def createBlock(self, instr):
        block = IRBlock(self, instr.pos)
        self.iblocks[block.pos] = block
        self.labels[block.pos] = block.instructions[0]
        return block

    def flatten(self):
        """Concatenate all blocks in position order into flat_instructions,
        placing exception-redirect POP stubs inline where possible."""
        instructions = []
        for pos in sorted(self.iblocks):
            if pos in self.exception_redirects:
                # check if we can put handler pop in front of block
                if instructions and not instructions[-1].fallsthrough():
                    instructions.append(self.exception_redirects.pop(pos))
                    instructions.append(ir.Other(bytecode=bytes([POP])))
                # if not, leave it in dict to be redirected later
            # now add instructions for actual block
            instructions += self.iblocks[pos].instructions

        # exception handler pops that couldn't be placed inline
        # in this case, just put them at the end with a goto back to the handler
        for target in sorted(self.exception_redirects):
            instructions.append(self.exception_redirects[target])
            instructions.append(ir.Other(bytecode=bytes([POP])))
            instructions.append(ir.Goto(target))

        self.flat_instructions = instructions
        self.iblocks = self.exception_redirects = None

    def replaceInstrs(self, replace):
        # replace maps an existing instruction to its replacement list
        if replace:
            instructions = []
            for instr in self.flat_instructions:
                instructions.extend(replace.get(instr, [instr]))
            self.flat_instructions = instructions
            # instructions must stay unique objects (they are used as dict keys)
            assert(len(set(instructions)) == len(instructions))

    def calcUpperBound(self):
        # Get an uppper bound on the size of the bytecode
        size = 0
        for ins in self.flat_instructions:
            if ins.bytecode is None:
                size += ins.max
            else:
                size += len(ins.bytecode)
        self.upper_bound = size
        return size

################################################################################
# One visit function per Dalvik instruction kind. All share the signature
# (method, dex, instr_d, type_data, block, instr) and append IR to block.

def visitNop(method, dex, instr_d, type_data, block, instr):
    pass

def visitMoveResult(method, dex, instr_d, type_data, block, instr):
    st = scalars.fromDesc(instr.prev_result)
    block.store(instr.args[0], st)

def visitMove(method, dex, instr_d, type_data, block, instr):
    # copy under every 32-bit interpretation the source register may hold
    for st in (scalars.INT, scalars.OBJ, scalars.FLOAT):
        if st & type_data.prims[instr.args[1]]:
            block.load(instr.args[1], st)
            block.store(instr.args[0], st)

def visitMoveWide(method, dex, instr_d, type_data, block, instr):
    for st in (scalars.LONG, scalars.DOUBLE):
        if st & type_data.prims[instr.args[1]]:
            block.load(instr.args[1], st)
            block.store(instr.args[0], st)

def visitReturn(method, dex, instr_d, type_data, block, instr):
    if method.id.return_type == b'V':
        block.return_()
    else:
        st = scalars.fromDesc(method.id.return_type)
        block.load(instr.args[0], st, desc=method.id.return_type)
        block.return_(st)

def visitConst32(method, dex, instr_d, type_data, block, instr):
    # Dalvik 32-bit constants are untyped: store the value under both the int
    # and float interpretations, and as null when it is zero
    val = instr.args[1] % (1<<32)
    block.const(val, scalars.INT)
    block.store(instr.args[0], scalars.INT)
    block.const(val, scalars.FLOAT)
    block.store(instr.args[0], scalars.FLOAT)
    if not val:
        block.const(val, scalars.OBJ)
        block.store(instr.args[0], scalars.OBJ)

def visitConst64(method, dex, instr_d, type_data, block, instr):
    # same as visitConst32, but for the long/double interpretations
    val = instr.args[1] % (1<<64)
    block.const(val, scalars.LONG)
    block.store(instr.args[0], scalars.LONG)
    block.const(val, scalars.DOUBLE)
    block.store(instr.args[0], scalars.DOUBLE)

def visitConstString(method, dex, instr_d, type_data, block, instr):
    val = dex.string(instr.args[1])
    block.ldc(block.pool.string(val))
    block.store(instr.args[0], scalars.OBJ)

def visitConstClass(method, dex, instr_d, type_data, block, instr):
    # Could use dex.type here since the JVM doesn't care, but this is cleaner
    val = dex.clsType(instr.args[1])
    block.ldc(block.pool.class_(val))
    block.store(instr.args[0], scalars.OBJ)

def visitMonitorEnter(method, dex, instr_d, type_data, block, instr):
    block.load(instr.args[0], scalars.OBJ)
    block.u8(MONITORENTER)

def visitMonitorExit(method, dex, instr_d, type_data, block, instr):
    block.load(instr.args[0], scalars.OBJ)
    block.u8(MONITOREXIT)

def visitCheckCast(method, dex, instr_d, type_data, block, instr):
    block.cast(dex, instr.args[0], instr.args[1])

def visitInstanceOf(method, dex, instr_d, type_data, block, instr):
    block.load(instr.args[1], scalars.OBJ)
    block.u8u16(INSTANCEOF, block.pool.class_(dex.clsType(instr.args[2])))
    block.store(instr.args[0], scalars.INT)

def visitArrayLen(method, dex, instr_d, type_data, block, instr):
    block.loadAsArray(instr.args[1])
    block.u8(ARRAYLENGTH)
    block.store(instr.args[0], scalars.INT)

def visitNewInstance(method, dex, instr_d, type_data, block, instr):
    block.new(dex.clsType(instr.args[1]))
    block.store(instr.args[0], scalars.OBJ)

def visitNewArray(method, dex, instr_d, type_data, block, instr):
    block.load(instr.args[1], scalars.INT)
    block.newarray(dex.type(instr.args[2]))
    block.store(instr.args[0], scalars.OBJ)

def visitFilledNewArray(method, dex, instr_d, type_data, block, instr):
    regs = instr.args[1]
    block.const(len(regs), scalars.INT)
    block.newarray(dex.type(instr.args[0]))
    st, elet = arrays.eletPair(arrays.fromDesc(dex.type(instr.args[0])))
    op = _arrStoreOps.get(elet, AASTORE)
    cbs = [partial(block.load, reg, st) for reg in regs]
    # if not followed by move-result, don't leave it on the stack
    mustpop = not isinstance(instr_d.get(instr.pos2), dalvik.MoveResult)
    block.fillarraysub(op, cbs, pop=mustpop)

def visitFillArrayData(method, dex, instr_d, type_data, block, instr):
    width, arrdata = instr_d[instr.args[1]].fillarrdata
    at = type_data.arrs[instr.args[0]]

    block.loadAsArray(instr.args[0])
    if at is arrays.NULL:
        # array known to be null - the loaded null itself is thrown
        block.u8(ATHROW)
    else:
        if len(arrdata) == 0:
            # fill-array-data throws a NPE if array is null even when
            # there is 0 data, so we need to add an instruction that
            # throws a NPE in this case
            block.u8(ARRAYLENGTH)
            block.u8(POP)
        else:
            st, elet = arrays.eletPair(at)
            # check if we need to sign extend
            if elet == b'B':
                arrdata = [util.signExtend(x, 8) & 0xFFFFFFFF for x in arrdata]
            elif elet == b'S':
                arrdata = [util.signExtend(x, 16) & 0xFFFFFFFF for x in arrdata]
            block.fillarraydata(_arrStoreOps.get(elet, AASTORE), st, arrdata)

def visitThrow(method, dex, instr_d, type_data, block, instr):
    block.load(instr.args[0], scalars.OBJ, clsname=b'java/lang/Throwable')
    block.u8(ATHROW)

def visitGoto(method, dex, instr_d, type_data, block, instr):
    block.goto(instr.args[0])

def visitSwitch(method, dex, instr_d, type_data, block, instr):
    block.load(instr.args[0], scalars.INT)
    switchdata = instr_d[instr.args[1]].switchdata
    default = instr.pos2
    # switch payload offsets are relative to this instruction's position
    jumps = {k:(offset + instr.pos) % (1<<32) for k, offset in switchdata.items()}
    block.switch(default, jumps)

def visitCmp(method, dex, instr_d, type_data, block, instr):
    # opcodes 0x2d-0x31: cmpl-float, cmpg-float, cmpl-double, cmpg-double, cmp-long
    op = [FCMPL, FCMPG, DCMPL, DCMPG, LCMP][instr.opcode - 0x2d]
    st = [scalars.FLOAT, scalars.FLOAT, scalars.DOUBLE, scalars.DOUBLE, scalars.LONG][instr.opcode - 0x2d]
    block.load(instr.args[1], st)
    block.load(instr.args[2], st)
    block.u8(op)
    block.store(instr.args[0], scalars.INT)

def visitIf(method, dex, instr_d, type_data, block, instr):
    # choose int vs object comparison from the intersection of inferred types
    st = type_data.prims[instr.args[0]] & type_data.prims[instr.args[1]]
    if st & scalars.INT:
        block.load(instr.args[0], scalars.INT)
        block.load(instr.args[1], scalars.INT)
        op = [IF_ICMPEQ, IF_ICMPNE, IF_ICMPLT, IF_ICMPGE, IF_ICMPGT, IF_ICMPLE][instr.opcode - 0x32]
    else:
        block.load(instr.args[0], scalars.OBJ)
        block.load(instr.args[1], scalars.OBJ)
        op = [IF_ACMPEQ, IF_ACMPNE][instr.opcode - 0x32]
    block.if_(op, instr.args[2])

def visitIfZ(method, dex, instr_d, type_data, block, instr):
    if type_data.prims[instr.args[0]] & scalars.INT:
        block.load(instr.args[0], scalars.INT)
        op = [IFEQ, IFNE, IFLT, IFGE, IFGT, IFLE][instr.opcode - 0x38]
    else:
        block.load(instr.args[0], scalars.OBJ)
        op = [IFNULL, IFNONNULL][instr.opcode - 0x38]
    block.if_(op, instr.args[1])

def visitArrayGet(method, dex, instr_d, type_data, block, instr):
    at = type_data.arrs[instr.args[1]]
    if at is arrays.NULL:
        # array is known null - accessing it can only throw a NPE
        block.const_null()
        block.u8(ATHROW)
    else:
        block.loadAsArray(instr.args[1])
        block.load(instr.args[2], scalars.INT)
        st, elet = arrays.eletPair(at)
        block.u8(_arrLoadOps.get(elet, AALOAD))
        block.store(instr.args[0], st)

def visitArrayPut(method, dex, instr_d, type_data, block, instr):
    at = type_data.arrs[instr.args[1]]
    if at is arrays.NULL:
        block.const_null()
        block.u8(ATHROW)
    else:
        block.loadAsArray(instr.args[1])
        block.load(instr.args[2], scalars.INT)
        st, elet = arrays.eletPair(at)
        block.load(instr.args[0], st)
        block.u8(_arrStoreOps.get(elet, AASTORE))

def visitInstanceGet(method, dex, instr_d, type_data, block, instr):
    field_id = dex.field_id(instr.args[2])
    st = scalars.fromDesc(field_id.desc)
    block.load(instr.args[1], scalars.OBJ, clsname=field_id.cname)
    block.u8u16(GETFIELD, block.pool.field(field_id.triple()))
    block.store(instr.args[0], st)

def visitInstancePut(method, dex, instr_d, type_data, block, instr):
    field_id = dex.field_id(instr.args[2])
    st = scalars.fromDesc(field_id.desc)
    block.load(instr.args[1], scalars.OBJ, clsname=field_id.cname)
    block.load(instr.args[0], st, desc=field_id.desc)
    block.u8u16(PUTFIELD, block.pool.field(field_id.triple()))

def visitStaticGet(method, dex, instr_d, type_data, block, instr):
    field_id = dex.field_id(instr.args[1])
    st = scalars.fromDesc(field_id.desc)
    block.u8u16(GETSTATIC, block.pool.field(field_id.triple()))
    block.store(instr.args[0], st)

def visitStaticPut(method, dex, instr_d, type_data, block, instr):
    field_id = dex.field_id(instr.args[1])
    st = scalars.fromDesc(field_id.desc)
    block.load(instr.args[0], st, desc=field_id.desc)
    block.u8u16(PUTSTATIC, block.pool.field(field_id.triple()))

def visitInvoke(method, dex, instr_d, type_data, block, instr):
    """Shared handler for all five invoke-* instruction kinds."""
    isstatic = isinstance(instr, dalvik.InvokeStatic)

    called_id = dex.method_id(instr.args[0])
    sts = scalars.paramTypes(called_id, static=isstatic)
    descs = called_id.getSpacedParamTypes(isstatic=isstatic)
    assert(len(sts) == len(instr.args[1]) == len(descs))

    for st, desc, reg in zip(sts, descs, instr.args[1]):
        if st != scalars.INVALID: # skip long/double tops
            block.load(reg, st, desc=desc)

    op = {
        dalvik.InvokeVirtual: INVOKEVIRTUAL,
        dalvik.InvokeSuper: INVOKESPECIAL,
        dalvik.InvokeDirect: INVOKESPECIAL,
        dalvik.InvokeStatic: INVOKESTATIC,
        dalvik.InvokeInterface: INVOKEINTERFACE,
    }[type(instr)]

    if isinstance(instr, dalvik.InvokeInterface):
        # invokeinterface carries an explicit argument-slot count operand
        count = len(called_id.getSpacedParamTypes(False))
        block.u8u16u8u8(op, block.pool.imethod(called_id.triple()), count, 0)
    else:
        block.u8u16(op, block.pool.method(called_id.triple()))

    # check if we need to pop result instead of leaving on stack
    if not isinstance(instr_d.get(instr.pos2), dalvik.MoveResult):
        if called_id.return_type != b'V':
            st = scalars.fromDesc(called_id.return_type)
            block.u8(POP2 if scalars.iswide(st) else POP)

def visitUnaryOp(method, dex, instr_d, type_data, block, instr):
    op, srct, destt = mathops.UNARY[instr.opcode]
    block.load(instr.args[1], srct)
    # *not requires special handling since there's no direct Java equivalent.
    # Instead we have to do x ^ -1
    if op == IXOR:
        block.u8(ICONST_M1)
    elif op == LXOR:
        block.u8(ICONST_M1)
        block.u8(I2L)
    block.u8(op)
    block.store(instr.args[0], destt)

def visitBinaryOp(method, dex, instr_d, type_data, block, instr):
    op, st, st2 = mathops.BINARY[instr.opcode]
    # index arguments as negative so it works for regular and 2addr forms
    block.load(instr.args[-2], st)
    block.load(instr.args[-1], st2)
    block.u8(op)
    block.store(instr.args[0], st)

def visitBinaryOpConst(method, dex, instr_d, type_data, block, instr):
    op = mathops.BINARY_LIT[instr.opcode]
    if op == ISUB: # rsub
        block.const(instr.args[2] % (1<<32), scalars.INT)
        block.load(instr.args[1], scalars.INT)
    else:
        block.load(instr.args[1], scalars.INT)
        block.const(instr.args[2] % (1<<32), scalars.INT)
    block.u8(op)
    block.store(instr.args[0], scalars.INT)

################################################################################
# Dispatch table: Dalvik instruction class -> visit function
VISIT_FUNCS = {
    dalvik.Nop: visitNop,
    dalvik.Move: visitMove,
    dalvik.MoveWide: visitMoveWide,
    dalvik.MoveResult: visitMoveResult,
    dalvik.Return: visitReturn,
    dalvik.Const32: visitConst32,
    dalvik.Const64: visitConst64,
    dalvik.ConstString: visitConstString,
    dalvik.ConstClass: visitConstClass,
    dalvik.MonitorEnter: visitMonitorEnter,
    dalvik.MonitorExit: visitMonitorExit,
    dalvik.CheckCast: visitCheckCast,
    dalvik.InstanceOf: visitInstanceOf,
    dalvik.ArrayLen: visitArrayLen,
    dalvik.NewInstance: visitNewInstance,
    dalvik.NewArray: visitNewArray,
    dalvik.FilledNewArray: visitFilledNewArray,
    dalvik.FillArrayData: visitFillArrayData,
    dalvik.Throw: visitThrow,
    dalvik.Goto: visitGoto,
    dalvik.Switch: visitSwitch,
    dalvik.Cmp: visitCmp,
    dalvik.If: visitIf,
    dalvik.IfZ: visitIfZ,

    dalvik.ArrayGet: visitArrayGet,
    dalvik.ArrayPut: visitArrayPut,

    dalvik.InstanceGet: visitInstanceGet,
    dalvik.InstancePut: visitInstancePut,
    dalvik.StaticGet: visitStaticGet,
    dalvik.StaticPut: visitStaticPut,

    dalvik.InvokeVirtual: visitInvoke,
    dalvik.InvokeSuper: visitInvoke,
    dalvik.InvokeDirect: visitInvoke,
    dalvik.InvokeStatic: visitInvoke,
    dalvik.InvokeInterface: visitInvoke,

    dalvik.UnaryOp: visitUnaryOp,
    dalvik.BinaryOp: visitBinaryOp,
    dalvik.BinaryOpConst: visitBinaryOpConst,
}

def writeBytecode(pool, method, opts):
    """Translate one Dalvik method body into an IRWriter.

    Runs type inference, visits every reachable instruction to build IR
    blocks, wires up exception handlers (inserting POP redirects for handlers
    that ignore the caught exception), flattens the blocks, and records jump
    targets and predecessor counts for later optimization.
    """
    dex = method.dex
    code = method.code
    instr_d = {instr.pos: instr for instr in code.bytecode}

    types, all_handlers = typeinference.doInference(dex, method, code, code.bytecode, instr_d)

    scalar_ptypes = scalars.paramTypes(method.id, static=(method.access & flags.ACC_STATIC))
    writer = IRWriter(pool, method, types, opts)
    writer.calcInitialArgs(code.nregs, scalar_ptypes)

    for instr in code.bytecode:
        if instr.pos not in types: # skip unreachable instructions
            continue

        type_data = types[instr.pos]
        block = writer.createBlock(instr)
        VISIT_FUNCS[type(instr)](method, dex, instr_d, type_data, block, instr)

    for instr in sorted(all_handlers, key=lambda instr: instr.pos):
        assert(all_handlers[instr])
        if instr.pos not in types: # skip unreachable instructions
            continue

        start, end = writer.iblocks[instr.pos].addExceptLabels()
        writer.except_starts.add(start)
        writer.except_ends.add(end)

        for ctype, handler_pos in all_handlers[instr]:
            # If handler doesn't use the caught exception, we need to redirect to a pop instead
            if not isinstance(instr_d.get(handler_pos), dalvik.MoveResult):
                target = writer.addExceptionRedirect(handler_pos)
            else:
                target = writer.labels[handler_pos]

            writer.jump_targets.add(target)
            writer.target_pred_counts[target] += 1

            # When catching Throwable, we can use the special index 0 instead,
            # potentially saving a constant pool entry or two
            jctype = 0 if ctype == b'java/lang/Throwable' else pool.class_(ctype)
            writer.excepts.append((start, end, target, jctype))

    writer.flatten()

    # find jump targets (in addition to exception handler targets)
    for instr in writer.flat_instructions:
        for target in instr.targets():
            label = writer.labels[target]
            writer.jump_targets.add(label)
            writer.target_pred_counts[label] += 1

    return writer
ytoyama/yans_chainer_hackathon
refs/heads/master
tests/cupy_tests/math_tests/test_misc.py
11
import unittest

import numpy

from cupy import testing


@testing.gpu
class TestMisc(unittest.TestCase):
    """Tests for miscellaneous math routines (clip, sqrt, sign, min/max...).

    The check_* helpers are wrapped in cupy.testing decorators, which inject
    the ``xp`` (numpy or cupy module) and ``dtype`` arguments and compare the
    values returned for both backends - presumably across all listed dtypes;
    callers therefore invoke them with only the function ``name``.
    """

    _multiprocess_can_split_ = True

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(atol=1e-5)
    def check_unary(self, name, xp, dtype, no_bool=False):
        # returning a constant makes the numpy/cupy comparison trivially pass
        # for dtypes the tested function does not support
        if no_bool and numpy.dtype(dtype).char == '?':
            return 0
        a = testing.shaped_arange((2, 3), xp, dtype)
        return getattr(xp, name)(a)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(atol=1e-5)
    def check_binary(self, name, xp, dtype, no_bool=False):
        if no_bool and numpy.dtype(dtype).char == '?':
            return 0
        a = testing.shaped_arange((2, 3), xp, dtype)
        b = testing.shaped_reverse_arange((2, 3), xp, dtype)
        return getattr(xp, name)(a, b)

    # signed dtypes only (bool, signed ints, floats) so negatives are valid
    @testing.for_dtypes(['?', 'b', 'h', 'i', 'q', 'e', 'f', 'd'])
    @testing.numpy_cupy_allclose(atol=1e-5)
    def check_unary_negative(self, name, xp, dtype, no_bool=False):
        if no_bool and numpy.dtype(dtype).char == '?':
            return 0
        a = xp.array([-3, -2, -1, 1, 2, 3], dtype=dtype)
        return getattr(xp, name)(a)

    @testing.for_float_dtypes()
    @testing.numpy_cupy_array_equal()
    def check_binary_nan(self, name, xp, dtype):
        # NaNs placed in one or both operands to exercise NaN propagation rules
        a = xp.array([-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, 2],
                     dtype=dtype)
        b = xp.array([numpy.NAN, numpy.NAN, 1, 0, numpy.NAN, -1, -2],
                     dtype=dtype)
        return getattr(xp, name)(a, b)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_clip1(self, xp, dtype):
        # scalar lower/upper bounds
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        return a.clip(3, 13)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_clip2(self, xp, dtype):
        # array-valued, broadcastable lower/upper bounds
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        a_min = xp.array([3, 4, 5, 6], dtype=dtype)
        a_max = xp.array([[10], [9], [8]], dtype=dtype)
        return a.clip(a_min, a_max)

    def test_sqrt(self):
        self.check_unary('sqrt')

    def test_square(self):
        self.check_unary('square')

    def test_absolute(self):
        self.check_unary('absolute')

    def test_absolute_negative(self):
        self.check_unary_negative('absolute')

    def test_sign(self):
        self.check_unary('sign', no_bool=True)

    def test_sign_negative(self):
        self.check_unary_negative('sign', no_bool=True)

    def test_maximum(self):
        self.check_binary('maximum')

    def test_maximum_nan(self):
        self.check_binary_nan('maximum')

    def test_minimum(self):
        self.check_binary('minimum')

    def test_minimum_nan(self):
        self.check_binary_nan('minimum')

    def test_fmax(self):
        self.check_binary('fmax')

    def test_fmax_nan(self):
        self.check_binary_nan('fmax')

    def test_fmin(self):
        self.check_binary('fmin')

    def test_fmin_nan(self):
        self.check_binary_nan('fmin')
Piasy/proxy-searcher
refs/heads/master
site-packages/django/db/backends/oracle/creation.py
29
import sys import time from django.db.backends.creation import BaseDatabaseCreation TEST_DATABASE_PREFIX = 'test_' PASSWORD = 'Im_a_lumberjack' class DatabaseCreation(BaseDatabaseCreation): # This dictionary maps Field objects to their associated Oracle column # types, as strings. Column-type strings can contain format strings; they'll # be interpolated against the values of Field.__dict__ before being output. # If a column type is set to None, it won't be included in the output. # # Any format strings starting with "qn_" are quoted before being used in the # output (the "qn_" prefix is stripped before the lookup is performed. data_types = { 'AutoField': 'NUMBER(11)', 'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))', 'CharField': 'NVARCHAR2(%(max_length)s)', 'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)', 'DateField': 'DATE', 'DateTimeField': 'TIMESTAMP', 'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)', 'FileField': 'NVARCHAR2(%(max_length)s)', 'FilePathField': 'NVARCHAR2(%(max_length)s)', 'FloatField': 'DOUBLE PRECISION', 'IntegerField': 'NUMBER(11)', 'BigIntegerField': 'NUMBER(19)', 'IPAddressField': 'VARCHAR2(15)', 'GenericIPAddressField': 'VARCHAR2(39)', 'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))', 'OneToOneField': 'NUMBER(11)', 'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)', 'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)', 'SlugField': 'NVARCHAR2(%(max_length)s)', 'SmallIntegerField': 'NUMBER(11)', 'TextField': 'NCLOB', 'TimeField': 'TIMESTAMP', 'URLField': 'VARCHAR2(%(max_length)s)', } def __init__(self, connection): super(DatabaseCreation, self).__init__(connection) def _create_test_db(self, verbosity=1, autoclobber=False): TEST_NAME = self._test_database_name() TEST_USER = self._test_database_user() TEST_PASSWD = self._test_database_passwd() TEST_TBLSPACE = self._test_database_tblspace() TEST_TBLSPACE_TMP = 
self._test_database_tblspace_tmp() parameters = { 'dbname': TEST_NAME, 'user': TEST_USER, 'password': TEST_PASSWD, 'tblspace': TEST_TBLSPACE, 'tblspace_temp': TEST_TBLSPACE_TMP, } cursor = self.connection.cursor() if self._test_database_create(): try: self._execute_test_db_creation(cursor, parameters, verbosity) except Exception, e: sys.stderr.write("Got an error creating the test database: %s\n" % e) if not autoclobber: confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME) if autoclobber or confirm == 'yes': try: if verbosity >= 1: print "Destroying old test database '%s'..." % self.connection.alias self._execute_test_db_destruction(cursor, parameters, verbosity) self._execute_test_db_creation(cursor, parameters, verbosity) except Exception, e: sys.stderr.write("Got an error recreating the test database: %s\n" % e) sys.exit(2) else: print "Tests cancelled." sys.exit(1) if self._test_user_create(): if verbosity >= 1: print "Creating test user..." try: self._create_test_user(cursor, parameters, verbosity) except Exception, e: sys.stderr.write("Got an error creating the test user: %s\n" % e) if not autoclobber: confirm = raw_input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER) if autoclobber or confirm == 'yes': try: if verbosity >= 1: print "Destroying old test user..." self._destroy_test_user(cursor, parameters, verbosity) if verbosity >= 1: print "Creating test user..." self._create_test_user(cursor, parameters, verbosity) except Exception, e: sys.stderr.write("Got an error recreating the test user: %s\n" % e) sys.exit(2) else: print "Tests cancelled." 
sys.exit(1) self.connection.settings_dict['SAVED_USER'] = self.connection.settings_dict['USER'] self.connection.settings_dict['SAVED_PASSWORD'] = self.connection.settings_dict['PASSWORD'] self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict['USER'] = TEST_USER self.connection.settings_dict['PASSWORD'] = TEST_PASSWD return self.connection.settings_dict['NAME'] def _destroy_test_db(self, test_database_name, verbosity=1): """ Destroy a test database, prompting the user for confirmation if the database already exists. Returns the name of the test database created. """ TEST_NAME = self._test_database_name() TEST_USER = self._test_database_user() TEST_PASSWD = self._test_database_passwd() TEST_TBLSPACE = self._test_database_tblspace() TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp() self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER'] self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] parameters = { 'dbname': TEST_NAME, 'user': TEST_USER, 'password': TEST_PASSWD, 'tblspace': TEST_TBLSPACE, 'tblspace_temp': TEST_TBLSPACE_TMP, } cursor = self.connection.cursor() time.sleep(1) # To avoid "database is being accessed by other users" errors. if self._test_user_create(): if verbosity >= 1: print 'Destroying test user...' self._destroy_test_user(cursor, parameters, verbosity) if self._test_database_create(): if verbosity >= 1: print 'Destroying test database tables...' 
self._execute_test_db_destruction(cursor, parameters, verbosity) self.connection.close() def _execute_test_db_creation(self, cursor, parameters, verbosity): if verbosity >= 2: print "_create_test_db(): dbname = %s" % parameters['dbname'] statements = [ """CREATE TABLESPACE %(tblspace)s DATAFILE '%(tblspace)s.dbf' SIZE 20M REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M """, """CREATE TEMPORARY TABLESPACE %(tblspace_temp)s TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M """, ] self._execute_statements(cursor, statements, parameters, verbosity) def _create_test_user(self, cursor, parameters, verbosity): if verbosity >= 2: print "_create_test_user(): username = %s" % parameters['user'] statements = [ """CREATE USER %(user)s IDENTIFIED BY %(password)s DEFAULT TABLESPACE %(tblspace)s TEMPORARY TABLESPACE %(tblspace_temp)s QUOTA UNLIMITED ON %(tblspace)s """, """GRANT CONNECT, RESOURCE TO %(user)s""", ] self._execute_statements(cursor, statements, parameters, verbosity) def _execute_test_db_destruction(self, cursor, parameters, verbosity): if verbosity >= 2: print "_execute_test_db_destruction(): dbname=%s" % parameters['dbname'] statements = [ 'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS', 'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS', ] self._execute_statements(cursor, statements, parameters, verbosity) def _destroy_test_user(self, cursor, parameters, verbosity): if verbosity >= 2: print "_destroy_test_user(): user=%s" % parameters['user'] print "Be patient. This can take some time..." 
statements = [ 'DROP USER %(user)s CASCADE', ] self._execute_statements(cursor, statements, parameters, verbosity) def _execute_statements(self, cursor, statements, parameters, verbosity): for template in statements: stmt = template % parameters if verbosity >= 2: print stmt try: cursor.execute(stmt) except Exception, err: sys.stderr.write("Failed (%s)\n" % (err)) raise def _test_database_name(self): name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] try: if self.connection.settings_dict['TEST_NAME']: name = self.connection.settings_dict['TEST_NAME'] except AttributeError: pass return name def _test_database_create(self): return self.connection.settings_dict.get('TEST_CREATE', True) def _test_user_create(self): return self.connection.settings_dict.get('TEST_USER_CREATE', True) def _test_database_user(self): name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER'] try: if self.connection.settings_dict['TEST_USER']: name = self.connection.settings_dict['TEST_USER'] except KeyError: pass return name def _test_database_passwd(self): name = PASSWORD try: if self.connection.settings_dict['TEST_PASSWD']: name = self.connection.settings_dict['TEST_PASSWD'] except KeyError: pass return name def _test_database_tblspace(self): name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] try: if self.connection.settings_dict['TEST_TBLSPACE']: name = self.connection.settings_dict['TEST_TBLSPACE'] except KeyError: pass return name def _test_database_tblspace_tmp(self): name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp' try: if self.connection.settings_dict['TEST_TBLSPACE_TMP']: name = self.connection.settings_dict['TEST_TBLSPACE_TMP'] except KeyError: pass return name def _get_test_db_name(self): """ We need to return the 'production' DB name to get the test DB creation machinery to work. This isn't a great deal in this case because DB names as handled by Django haven't real counterparts in Oracle. 
""" return self.connection.settings_dict['NAME'] def test_db_signature(self): settings_dict = self.connection.settings_dict return ( settings_dict['HOST'], settings_dict['PORT'], settings_dict['ENGINE'], settings_dict['NAME'], self._test_database_user(), ) def set_autocommit(self): self.connection.connection.autocommit = True
CCS-Lab/hBayesDM
refs/heads/master
Python/hbayesdm/models/_bandit4arm_lapse.py
1
from typing import Sequence, Union, Any
from collections import OrderedDict

from numpy import Inf, exp
import pandas as pd

from hbayesdm.base import TaskModel
from hbayesdm.preprocess_funcs import bandit4arm_preprocess_func

__all__ = ['bandit4arm_lapse']


class Bandit4ArmLapse(TaskModel):
    """4-Armed Bandit Task, 'lapse' model.

    Declarative TaskModel subclass: all behavior lives in ``TaskModel``;
    this class only supplies the task/model metadata below (required data
    columns, parameter names with (lower, plausible, upper) bounds, and
    human-readable parameter descriptions).
    """

    def __init__(self, **kwargs):
        super().__init__(
            task_name='bandit4arm',
            model_name='lapse',
            model_type='',
            data_columns=(
                'subjID',
                'choice',
                'gain',
                'loss',
            ),
            # Parameter bounds are (lower, plausible, upper) triples.
            parameters=OrderedDict([
                ('Arew', (0, 0.1, 1)),
                ('Apun', (0, 0.1, 1)),
                ('R', (0, 1, 30)),
                ('P', (0, 1, 30)),
                ('xi', (0, 0.1, 1)),
            ]),
            # No model-based regressors for this model.
            regressors=OrderedDict([
            ]),
            postpreds=['y_pred'],
            parameters_desc=OrderedDict([
                ('Arew', 'reward learning rate'),
                ('Apun', 'punishment learning rate'),
                ('R', 'reward sensitivity'),
                ('P', 'punishment sensitivity'),
                ('xi', 'noise'),
            ]),
            additional_args_desc=OrderedDict([
            ]),
            **kwargs,
        )

    _preprocess_func = bandit4arm_preprocess_func


def bandit4arm_lapse(
        data: Union[pd.DataFrame, str, None] = None,
        niter: int = 4000,
        nwarmup: int = 1000,
        nchain: int = 4,
        ncore: int = 1,
        nthin: int = 1,
        inits: Union[str, Sequence[float]] = 'vb',
        ind_pars: str = 'mean',
        model_regressor: bool = False,
        vb: bool = False,
        inc_postpred: bool = False,
        adapt_delta: float = 0.95,
        stepsize: float = 1,
        max_treedepth: int = 10,
        **additional_args: Any) -> TaskModel:
    """4-Armed Bandit Task - 5 Parameter Model, without C (choice perseveration) but with xi (noise)

    Hierarchical Bayesian Modeling of the 4-Armed Bandit Task
    using 5 Parameter Model, without C (choice perseveration) but with xi (noise) [Seymour2012]_ with the following parameters:
    "Arew" (reward learning rate), "Apun" (punishment learning rate), "R" (reward sensitivity), "P" (punishment sensitivity), "xi" (noise).

    .. [Seymour2012] Seymour, Daw, Roiser, Dayan, & Dolan (2012). Serotonin Selectively Modulates Reward Value in Human Decision-Making. J Neuro, 32(17), 5833-5842.

    User data should contain the behavioral data-set of all subjects of interest for
    the current analysis. When loading from a file, the datafile should be a
    **tab-delimited** text file, whose rows represent trial-by-trial observations
    and columns represent variables.

    For the 4-Armed Bandit Task, there should be 4 columns of data
    with the labels "subjID", "choice", "gain", "loss". It is not necessary for the columns to be
    in this particular order; however, it is necessary that they be labeled
    correctly and contain the information below:

    - "subjID": A unique identifier for each subject in the data-set.
    - "choice": Integer value representing the option chosen on the given trial: 1, 2, 3, or 4.
    - "gain": Floating point value representing the amount of currency won on the given trial (e.g. 50, 100).
    - "loss": Floating point value representing the amount of currency lost on the given trial (e.g. 0, -50).

    .. note::
        User data may contain other columns of data (e.g. ``ReactionTime``,
        ``trial_number``, etc.), but only the data within the column names listed
        above will be used during the modeling. As long as the necessary columns
        mentioned above are present and labeled correctly, there is no need to
        remove other miscellaneous data columns.

    .. note::

        ``adapt_delta``, ``stepsize``, and ``max_treedepth`` are advanced options that
        give the user more control over Stan's MCMC sampler. It is recommended that
        only advanced users change the default values, as alterations can profoundly
        change the sampler's behavior. See [Hoffman2014]_ for more information on the
        sampler control parameters. One can also refer to 'Section 34.2. HMC Algorithm
        Parameters' of the `Stan User's Guide and Reference Manual`__.

    .. [Hoffman2014]
        Hoffman, M. D., & Gelman, A. (2014).
        The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo.
        Journal of Machine Learning Research, 15(1), 1593-1623.

    __ https://mc-stan.org/users/documentation/

    Parameters
    ----------
    data
        Data to be modeled. It should be given as a Pandas DataFrame object,
        a filepath for a data file, or ``"example"`` for example data.
        Data columns should be labeled as: "subjID", "choice", "gain", "loss".
    niter
        Number of iterations, including warm-up. Defaults to 4000.
    nwarmup
        Number of iterations used for warm-up only. Defaults to 1000.

        ``nwarmup`` is a numerical value that specifies how many MCMC samples
        should not be stored upon the beginning of each chain. For those
        familiar with Bayesian methods, this is equivalent to burn-in samples.
        Due to the nature of the MCMC algorithm, initial values (i.e., where the
        sampling chains begin) can have a heavy influence on the generated
        posterior distributions. The ``nwarmup`` argument can be set to a
        higher number in order to curb the effects that initial values have on
        the resulting posteriors.
    nchain
        Number of Markov chains to run. Defaults to 4.

        ``nchain`` is a numerical value that specifies how many chains (i.e.,
        independent sampling sequences) should be used to draw samples from
        the posterior distribution. Since the posteriors are generated from a
        sampling process, it is good practice to run multiple chains to ensure
        that a reasonably representative posterior is attained. When the
        sampling is complete, it is possible to check the multiple chains for
        convergence by running the following line of code:

        .. code:: python

            output.plot(type='trace')
    ncore
        Number of CPUs to be used for running. Defaults to 1.
    nthin
        Every ``nthin``-th sample will be used to generate the posterior
        distribution. Defaults to 1. A higher number can be used when
        auto-correlation within the MCMC sampling is high.

        ``nthin`` is a numerical value that specifies the "skipping" behavior
        of the MCMC sampler. That is, only every ``nthin``-th sample is used to
        generate posterior distributions. By default, ``nthin`` is equal to 1,
        meaning that every sample is used to generate the posterior.
    inits
        String or list specifying how the initial values should be generated.
        Options are ``'fixed'`` or ``'random'``, or your own initial values.
    ind_pars
        String specifying how to summarize the individual parameters.
        Current options are: ``'mean'``, ``'median'``, or ``'mode'``.
    model_regressor
        Whether to export model-based regressors. Currently not available for this model.
    vb
        Whether to use variational inference to approximately draw from a
        posterior distribution. Defaults to ``False``.
    inc_postpred
        Include trial-level posterior predictive simulations in
        model output (may greatly increase file size). Defaults to ``False``.
    adapt_delta
        Floating point value representing the target acceptance probability of a new
        sample in the MCMC chain. Must be between 0 and 1. See note below.
    stepsize
        Integer value specifying the size of each leapfrog step that the MCMC sampler
        can take on each new iteration. See note below.
    max_treedepth
        Integer value specifying how many leapfrog steps the MCMC sampler can take
        on each new iteration. See note below.
    **additional_args
        Not used for this model.

    Returns
    -------
    model_data
        An ``hbayesdm.TaskModel`` instance with the following components:

        - ``model``: String value that is the name of the model ('bandit4arm_lapse').
        - ``all_ind_pars``: Pandas DataFrame containing the summarized parameter values
          (as specified by ``ind_pars``) for each subject.
        - ``par_vals``: OrderedDict holding the posterior samples over different parameters.
        - ``fit``: A PyStan StanFit object that contains the fitted Stan model.
        - ``raw_data``: Pandas DataFrame containing the raw data used to fit the model,
          as specified by the user.

    Examples
    --------
    .. code:: python

        from hbayesdm import rhat, print_fit
        from hbayesdm.models import bandit4arm_lapse

        # Run the model and store results in "output"
        output = bandit4arm_lapse(data='example', niter=2000, nwarmup=1000, nchain=4, ncore=4)

        # Visually check convergence of the sampling chains (should look like "hairy caterpillars")
        output.plot(type='trace')

        # Plot posterior distributions of the hyper-parameters (distributions should be unimodal)
        output.plot()

        # Check Rhat values (all Rhat values should be less than or equal to 1.1)
        rhat(output, less=1.1)

        # Show the LOOIC and WAIC model fit estimates
        print_fit(output)
    """
    # Thin wrapper: all arguments are forwarded verbatim to the class, which
    # performs validation, preprocessing, and sampling in TaskModel.__init__.
    return Bandit4ArmLapse(
        data=data,
        niter=niter,
        nwarmup=nwarmup,
        nchain=nchain,
        ncore=ncore,
        nthin=nthin,
        inits=inits,
        ind_pars=ind_pars,
        model_regressor=model_regressor,
        vb=vb,
        inc_postpred=inc_postpred,
        adapt_delta=adapt_delta,
        stepsize=stepsize,
        max_treedepth=max_treedepth,
        **additional_args)
dsajkl/123
refs/heads/master
common/djangoapps/cache_toolbox/relation.py
239
"""
Caching instances via ``related_name``
--------------------------------------

``cache_relation`` adds utility methods to a model to obtain ``related_name``
instances via the cache.

Usage
~~~~~

::

    from django.db import models
    from django.contrib.auth.models import User

    class Foo(models.Model):
        user = models.OneToOneField(
            User,
            primary_key=True,
            related_name='foo',
        )

        name = models.CharField(max_length=20)

    cache_relation(User.foo)

::

    >>> user = User.objects.get(pk=1)
    >>> user.foo_cache # Cache miss - hits the database
    <Foo: >
    >>> user = User.objects.get(pk=1)
    >>> user.foo_cache # Cache hit - no database access
    <Foo: >
    >>> user = User.objects.get(pk=2)
    >>> user.foo # Regular lookup - hits the database
    <Foo: >
    >>> user.foo_cache # Special-case: Will not hit cache or database.
    <Foo: >

Accessing ``user_instance.foo_cache`` (note the "_cache" suffix) will now
obtain the related ``Foo`` instance via the cache. Accessing the original
``user_instance.foo`` attribute will perform the lookup as normal.

Invalidation
~~~~~~~~~~~~

Upon saving (or deleting) the instance, the cache is cleared. For example::

    >>> user = User.objects.get(pk=1)
    >>> foo = user.foo_cache # (Assume cache hit from previous session)
    >>> foo.name = "New name"
    >>> foo.save() # Cache is cleared on save
    >>> user = User.objects.get(pk=1)
    >>> user.foo_cache # Cache miss.
    <Foo: >

Manual invalidation may also be performed using the following methods::

    >>> user_instance.foo_cache_clear()
    >>> User.foo_cache_clear_fk(user_instance_pk)

Manual invalidation is required if you use ``.update()`` methods which the
``post_save`` and ``post_delete`` hooks cannot intercept.

Support
~~~~~~~

``cache_relation`` currently only works with ``OneToOneField`` fields. Support
for regular ``ForeignKey`` fields is planned.
"""

from django.db.models.signals import post_save, post_delete

from .core import get_instance, delete_instance


def cache_relation(descriptor, timeout=None):
    """Attach cached accessors for a ``OneToOneField`` reverse relation.

    Given a related descriptor (e.g. ``User.foo``), installs on the parent
    model:

    - ``<related_name>_cache`` property: cached lookup of the related
      instance (falls back to the descriptor's own cached value first);
    - ``<related_name>_cache_clear(self)`` and
      ``<related_name>_cache_clear_pk(*instances_or_pk)`` invalidation
      helpers;
    - ``post_save``/``post_delete`` signal handlers on the related model
      that invalidate the cache entry automatically.

    ``timeout`` is forwarded to ``get_instance`` as the cache timeout.
    """
    rel = descriptor.related
    related_name = '%s_cache' % rel.field.related_query_name()

    @property
    def get(self):
        # Always use the cached "real" instance if available
        try:
            return getattr(self, descriptor.cache_name)
        except AttributeError:
            pass

        # Lookup instance previously memoised on this object
        try:
            return getattr(self, '_%s_cache' % related_name)
        except AttributeError:
            pass

        # Cache/DB lookup, then memoise on the instance for this request
        instance = get_instance(rel.model, self.pk, timeout)

        setattr(self, '_%s_cache' % related_name, instance)

        return instance
    setattr(rel.parent_model, related_name, get)

    # Cache invalidation helpers

    def clear(self):
        # Instance-level manual invalidation: user.foo_cache_clear()
        delete_instance(rel.model, self)

    @classmethod
    def clear_pk(cls, *instances_or_pk):
        # Class-level invalidation by instance(s) or primary key(s)
        delete_instance(rel.model, *instances_or_pk)

    def clear_cache(sender, instance, *args, **kwargs):
        # Signal handler: invalidate on save/delete of the related model
        delete_instance(rel.model, instance)

    setattr(rel.parent_model, '%s_clear' % related_name, clear)
    setattr(rel.parent_model, '%s_clear_pk' % related_name, clear_pk)
    # weak=False: keep the locally-defined handler alive after this
    # function returns (a weak ref would be garbage-collected immediately)
    post_save.connect(clear_cache, sender=rel.model, weak=False)
    post_delete.connect(clear_cache, sender=rel.model, weak=False)
angstwad/ansible
refs/heads/devel
test/units/plugins/test_plugins.py
104
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os

from ansible.compat.tests import unittest
from ansible.compat.tests import BUILTINS
from ansible.compat.tests.mock import mock_open, patch, MagicMock

from ansible.plugins import MODULE_CACHE, PATH_CACHE, PLUGIN_PATH_CACHE, PluginLoader


class TestErrors(unittest.TestCase):
    """Unit tests for PluginLoader path resolution and config handling."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @patch.object(PluginLoader, '_get_paths')
    def test_print_paths(self, mock_method):
        # print_paths() should join the loader's paths with os.pathsep.
        mock_method.return_value = ['/path/one', '/path/two', '/path/three']
        pl = PluginLoader('foo', 'foo', '', 'test_plugins')
        paths = pl.print_paths()
        expected_paths = os.pathsep.join(['/path/one', '/path/two', '/path/three'])
        self.assertEqual(paths, expected_paths)

    def test_plugins__get_package_paths_no_package(self):
        # Empty package name -> no package paths.
        pl = PluginLoader('test', '', 'test', 'test_plugin')
        self.assertEqual(pl._get_package_paths(), [])

    def test_plugins__get_package_paths_with_package(self):
        # the _get_package_paths() call uses __import__ to load a
        # python library, and then uses the __file__ attribute of
        # the result for that to get the library path, so we mock
        # that here and patch the builtin to use our mocked result
        foo = MagicMock()
        bar = MagicMock()
        bam = MagicMock()
        bam.__file__ = '/path/to/my/foo/bar/bam/__init__.py'
        bar.bam = bam
        foo.return_value.bar = bar
        pl = PluginLoader('test', 'foo.bar.bam', 'test', 'test_plugin')
        with patch('{0}.__import__'.format(BUILTINS), foo):
            self.assertEqual(pl._get_package_paths(), ['/path/to/my/foo/bar/bam'])

    def test_plugins__get_paths(self):
        # When _paths is already populated, _get_paths() returns it as-is.
        pl = PluginLoader('test', '', 'test', 'test_plugin')
        pl._paths = ['/path/one', '/path/two']
        self.assertEqual(pl._get_paths(), ['/path/one', '/path/two'])

        # NOT YET WORKING
        #def fake_glob(path):
        #    if path == 'test/*':
        #        return ['test/foo', 'test/bar', 'test/bam']
        #    elif path == 'test/*/*'
        #m._paths = None
        #mock_glob = MagicMock()
        #mock_glob.return_value = []
        #with patch('glob.glob', mock_glob):
        #    pass

    def assertPluginLoaderConfigBecomes(self, arg, expected):
        # Shared helper: construct a loader with `arg` as config and assert
        # the normalized .config value.
        pl = PluginLoader('test', '', arg, 'test_plugin')
        self.assertEqual(pl.config, expected)

    def test_plugin__init_config_list(self):
        # A list config is kept unchanged.
        config = ['/one', '/two']
        self.assertPluginLoaderConfigBecomes(config, config)

    def test_plugin__init_config_str(self):
        # A string config is wrapped in a single-element list.
        self.assertPluginLoaderConfigBecomes('test', ['test'])

    def test_plugin__init_config_none(self):
        # None becomes an empty list.
        self.assertPluginLoaderConfigBecomes(None, [])
chagn/chagn.github.com
refs/heads/master
node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/styles/trac.py
364
# -*- coding: utf-8 -*- """ pygments.styles.trac ~~~~~~~~~~~~~~~~~~~~ Port of the default trac highlighter design. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace class TracStyle(Style): """ Port of the default trac highlighter design. """ default_style = '' styles = { Whitespace: '#bbbbbb', Comment: 'italic #999988', Comment.Preproc: 'bold noitalic #999999', Comment.Special: 'bold #999999', Operator: 'bold', String: '#bb8844', String.Regex: '#808000', Number: '#009999', Keyword: 'bold', Keyword.Type: '#445588', Name.Builtin: '#999999', Name.Function: 'bold #990000', Name.Class: 'bold #445588', Name.Exception: 'bold #990000', Name.Namespace: '#555555', Name.Variable: '#008080', Name.Constant: '#008080', Name.Tag: '#000080', Name.Attribute: '#008080', Name.Entity: '#800080', Generic.Heading: '#999999', Generic.Subheading: '#aaaaaa', Generic.Deleted: 'bg:#ffdddd #000000', Generic.Inserted: 'bg:#ddffdd #000000', Generic.Error: '#aa0000', Generic.Emph: 'italic', Generic.Strong: 'bold', Generic.Prompt: '#555555', Generic.Output: '#888888', Generic.Traceback: '#aa0000', Error: 'bg:#e3d2d2 #a61717' }
donovanmuller/FrameworkBenchmarks
refs/heads/master
frameworks/Python/AsyncIO/yocto_http/hello/servers/__init__.py
79
from . import yocto_http
h4ck3rm1k3/pywikibot-core
refs/heads/master
scripts/archive/featured.py
4
#!/usr/bin/python # -*- coding: utf-8 -*- """ Manage featured/good article/list status template. This script understands various command-line arguments: Task commands: -featured use this script for featured articles. Default task if no task command is specified -good use this script for good articles. -lists use this script for featured lists. -former use this script for removing {{Link FA|xx}} from former fearured articles NOTE: you may have all of these commands in one run Option commands: -interactive: ask before changing each page -nocache doesn't include cache files file to remember if the article already was verified. -nocache:xx,yy you may ignore language codes xx,yy,... from cache file -fromlang:xx,yy xx,yy,zz,.. are the languages to be verified. -fromlang:ar--fi Another possible with range the languages -fromall to verify all languages. -tolang:xx,yy xx,yy,zz,.. are the languages to be updated -after:zzzz process pages after and including page zzzz (sorry, not implemented yet) -side use -side if you want to move all {{Link FA|lang}} next to the corresponding interwiki links. Default is placing {{Link FA|lang}} on top of the interwiki links. (This option is deprecated with wikidata) -count Only counts how many featured/good articles exist on all wikis (given with the "-fromlang" argument) or on several language(s) (when using the "-fromall" argument). Example: python pwb.py featured -fromlang:en,he -count counts how many featured articles exist in the en and he wikipedias. -quiet no corresponding pages are displayed. """ # # (C) Maxim Razin, 2005 # (C) Leonardo Gregianin, 2005-2008 # (C) xqt, 2009-2014 # (C) Pywikibot team, 2005-2015 # # Distributed under the terms of the MIT license. 
# from __future__ import absolute_import, unicode_literals __version__ = '$Id$' # import pickle import re import sys import pywikibot from pywikibot import i18n, textlib, config from pywikibot.pagegenerators import PreloadingGenerator from pywikibot.tools.formatter import color_format from pywikibot.tools import issue_deprecation_warning if sys.version_info[0] > 2: unichr = chr def CAT(site, name, hide): name = site.namespace(14) + ':' + name cat = pywikibot.Category(site, name) for article in cat.articles(endsort=hide): yield article if hide: for article in cat.articles(startFrom=unichr(ord(hide) + 1)): yield article def BACK(site, name, hide): # pylint: disable=unused-argument p = pywikibot.Page(site, name, ns=10) return [page for page in p.getReferences(follow_redirects=False, onlyTemplateInclusion=True)] def DATA(site, name, hide): dp = pywikibot.ItemPage(site.data_repository(), name) try: title = dp.getSitelink(site) except pywikibot.NoPage: return cat = pywikibot.Category(site, title) if isinstance(hide, dict): hide = hide.get(site.code) for article in cat.articles(endsort=hide): yield article if hide: for article in cat.articles(startsort=unichr(ord(hide) + 1)): yield article # not implemented yet def TMPL(site, name, hide): # pylint: disable=unused-argument return # ALL wikis use 'Link FA', and sometimes other localized templates. 
# We use _default AND the localized ones
template = {
    '_default': ['Link FA'],
    'als': ['LinkFA'],
    'an': ['Destacato', 'Destacau'],
    'ar': [u'وصلة مقالة مختارة'],
    'ast': ['Enllaz AD'],
    'az': ['Link FM'],
    'br': ['Liamm PuB', 'Lien AdQ'],
    'ca': [u'Enllaç AD', 'Destacat'],
    'cy': ['Cyswllt erthygl ddethol', 'Dolen ED'],
    'eo': ['LigoElstara'],
    'en': ['Link FA', 'FA link'],
    'es': ['Destacado'],
    'eu': ['NA lotura'],
    'fr': ['Lien AdQ'],
    'fur': ['Leam VdC'],
    'ga': ['Nasc AR'],
    'gl': [u'Ligazón AD', 'Destacado'],
    'hi': ['Link FA', 'Lien AdQ'],
    'is': [u'Tengill ÚG'],
    'it': ['Link V', 'Link AdQ'],
    'no': ['Link UA'],
    'oc': ['Ligam AdQ', 'Lien AdQ'],
    'ro': [u'Legătură AC', u'Legătură AF'],
    'sv': ['UA', 'Link UA'],
    'tr': ['Link SM'],
    'vi': [u'Liên kết chọn lọc'],
    'vo': [u'Yüm YG'],
    'yi': [u'רא'],
}

# Per-language templates marking "good article" links.
template_good = {
    '_default': ['Link GA'],
    'ar': [u'وصلة مقالة جيدة'],
    'ca': [u'Enllaç AB', 'Lien BA', 'Abo'],
    'da': ['Link GA', 'Link AA'],
    'eo': ['LigoLeginda'],
    'es': ['Bueno'],
    'fr': ['Lien BA'],
    'gl': [u'Ligazón AB'],
    'is': ['Tengill GG'],
    'it': ['Link VdQ'],
    'nn': ['Link AA'],
    'no': ['Link AA'],
    'pt': ['Bom interwiki'],
    # 'tr': ['Link GA', 'Link KM'],
    'vi': [u'Liên kết bài chất lượng tốt'],
    'wo': ['Lien BA'],
}

# Per-language templates marking "featured list" links.
template_lists = {
    '_default': ['Link FL'],
    'no': ['Link GL'],
}

# Article sources per task: mapping code -> (source function, argument).
featured_name = {
    'wikidata': (DATA, u'Q4387444'),
}

good_name = {
    'wikidata': (DATA, 'Q7045856'),
}

lists_name = {
    'wikidata': (TMPL, 'Q5857568'),
    'ar': (BACK, u'قائمة مختارة'),
    'da': (BACK, u'FremragendeListe'),
    'de': (BACK, u'Informativ'),
    'en': (BACK, u'Featured list'),
    'fa': (BACK, u"فهرست برگزیده"),
    'id': (BACK, u'Featured list'),
    'ja': (BACK, u'Featured List'),
    'ksh': (CAT, 'Joode Leß'),
    'no': (BACK, u'God liste'),
    'pl': (BACK, u'Medalista'),
    'pt': (BACK, u'Anexo destacado'),
    'ro': (BACK, u'Listă de calitate'),
    'ru': (BACK, u'Избранный список или портал'),
    'tr': (BACK, u'Seçkin liste'),
    'uk': (BACK, u'Вибраний список'),
    'vi': (BACK, u'Sao danh sách chọn lọc'),
    'zh': (BACK, u'特色列表'),
}

# Third parameter is the sort key indicating articles to hide from the given
# list
former_name = {
    'wikidata': (DATA, 'Q7045853', {'en': '#'})
}


class FeaturedBot(pywikibot.Bot):

    """Featured article bot."""

    # Bot configuration.
    # Only the keys of the dict can be passed as init options
    # The values are the default values

    def __init__(self, **kwargs):
        """Only accepts options defined in availableOptions."""
        self.availableOptions.update({
            'async': False,  # True for asynchronously putting a page
            'afterpage': u"!",
            'count': False,  # featuredcount
            'featured': False,
            'former': False,
            'fromall': False,
            'fromlang': None,
            'good': False,
            'lists': False,
            'nocache': list(),
            'side': False,  # not template_on_top
            'quiet': False,
            'interactive': False,
        })

        super(FeaturedBot, self).__init__(**kwargs)
        self.cache = dict()
        self.filename = None
        self.site = pywikibot.Site()
        self.repo = self.site.data_repository()

        # if no source site is given, give up
        if self.getOption('fromlang') is True:
            self.options['fromlang'] = False

        # setup tasks running
        self.tasks = []
        for task in ('featured', 'good', 'lists', 'former'):
            if self.getOption(task):
                self.tasks.append(task)

        if not self.tasks:
            # Default task when none was requested on the command line.
            self.tasks = ['featured']

    def itersites(self, task):
        """Generator for site codes to be processed."""
        def _generator():
            # NOTE(review): no branch sets `item_no` for task == 'lists';
            # calling this with the 'lists' task would raise NameError —
            # confirm 'lists' never reaches this path.
            if task == 'good':
                item_no = good_name['wikidata'][1]
            elif task == 'featured':
                item_no = featured_name['wikidata'][1]
            elif task == 'former':
                item_no = former_name['wikidata'][1]
            dp = pywikibot.ItemPage(self.repo, item_no)
            dp.get()
            for key in sorted(dp.sitelinks.keys()):
                try:
                    site = self.site.fromDBName(key)
                except pywikibot.SiteDefinitionError:
                    pywikibot.output('"%s" is not a valid site. Skipping...'
                                     % key)
                else:
                    if site.family == self.site.family:
                        yield site

        generator = _generator()
        if self.getOption('fromall'):
            return generator
        elif self.getOption('fromlang'):
            fromlang = self.getOption('fromlang')
            if len(fromlang) == 1 and fromlang[0].find("--") >= 0:
                # "-fromlang:xx--yy" selects the inclusive code range.
                start, end = fromlang[0].split("--", 1)
                if not start:
                    start = ""
                if not end:
                    end = "zzzzzzz"
                return (site for site in generator
                        if site.code >= start and site.code <= end)
            else:
                return (site for site in generator
                        if site.code in fromlang)
        else:
            pywikibot.warning(u'No sites given to verify %s articles.\n'
                              u'Please use -fromlang: or fromall option\n'
                              % task)
            return ()

    def hastemplate(self, task):
        # True iff at least one of the "add" templates exists on the home
        # site; None (falsy) if the default template is missing.
        add_tl, remove_tl = self.getTemplateList(self.site.code, task)
        for i, tl in enumerate(add_tl):
            tp = pywikibot.Page(self.site, tl, ns=10)
            if tp.exists():
                return True
            else:
                pywikibot.output(tl + ' does not exist')
                # The first item is the default template to be added.
                # It must exist. Otherwise the script must not run.
                if i == 0:
                    return
        else:
            return

    def readcache(self, task):
        # Load the per-task pickle cache unless counting or caching is off.
        if self.getOption('count') or self.getOption('nocache') is True:
            return

        self.filename = pywikibot.config.datafilepath("cache", task)
        try:
            f = open(self.filename, "rb")
            self.cache = pickle.load(f)
            f.close()
            pywikibot.output(u'Cache file %s found with %d items.'
                             % (self.filename, len(self.cache)))
        except IOError:
            pywikibot.output(u'Cache file %s not found.' % self.filename)

    def writecache(self):
        # Persist (then reset) the cache; skipped entirely in count mode.
        if self.getOption('count'):
            return
        if not self.getOption('nocache') is True:
            pywikibot.output(u'Writing %d items to cache file %s.'
                             % (len(self.cache), self.filename))
            with open(self.filename, "wb") as f:
                pickle.dump(self.cache, f, protocol=config.pickle_protocol)
        self.cache = dict()

    def run(self):
        # Execute each configured task in turn, then report page count.
        for task in self.tasks:
            self.run_task(task)
        pywikibot.output(u'%d pages written.' % self._save_counter)

    def run_task(self, task):
        # Run a single task over all selected source sites; Ctrl-C aborts
        # the current task but still flushes the cache.
        if not self.hastemplate(task):
            pywikibot.output(u'\nNOTE: %s articles are not implemented at %s.'
                             % (task, self.site))
            return

        self.readcache(task)
        for site in self.itersites(task):
            try:
                self.treat(site, task)
            except KeyboardInterrupt:
                pywikibot.output('\nQuitting %s treat...' % task)
                break
        self.writecache()

    def treat(self, fromsite, task):
        # Never process the home site against itself.
        if fromsite != self.site:
            self.featuredWithInterwiki(fromsite, task)

    def featuredArticles(self, site, task, cache):
        """Yield not-yet-cached featured/good/list articles of `site`."""
        articles = []
        info = globals()[task + '_name']
        if task == 'lists':
            code = site.code
        else:
            code = 'wikidata'
        try:
            method = info[code][0]
        except KeyError:
            pywikibot.error(
                u'language %s doesn\'t has %s category source.'
                % (code, task))
            return
        name = info[code][1]
        # hide #-sorted items on en-wiki
        try:
            hide = info[code][2]
        except IndexError:
            hide = None
        for p in method(site, name, hide):
            if p.namespace() == 0:  # Article
                articles.append(p)
            # Article talk (like in English)
            elif p.namespace() == 1 and site.code != 'el':
                articles.append(pywikibot.Page(p.site,
                                p.title(withNamespace=False)))
        pywikibot.output(color_format(
            '{lightred}** {0} has {1} {2} articles{default}',
            site, len(articles), task))
        while articles:
            p = articles.pop(0)
            if p.title() < self.getOption('afterpage'):
                continue

            if u"/" in p.title() and p.namespace() != 0:
                pywikibot.output(u"%s is a subpage" % p.title())
                continue

            if p.title() in cache:
                pywikibot.output(u"(cached) %s -> %s"
                                 % (p.title(), cache[p.title()]))
                continue
            yield p

    def findTranslated(self, page, oursite=None):
        """Yield the local page corresponding to `page`, if consistent.

        A page is yielded only when the interwiki link round-trips:
        source -> local page -> back-link -> source (redirects followed).
        """
        quiet = self.getOption('quiet')
        if not oursite:
            oursite = self.site
        if page.isRedirectPage():
            page = page.getRedirectTarget()

        ourpage = None
        for link in page.iterlanglinks():
            if link.site == oursite:
                ourpage = pywikibot.Page(link)
                break

        if not ourpage:
            if not quiet:
                pywikibot.output(u"%s -> no corresponding page in %s"
                                 % (page.title(), oursite))
        elif ourpage.section():
            pywikibot.output(u"%s -> our page is a section link: %s"
                             % (page.title(), ourpage.title()))
        elif not ourpage.exists():
            pywikibot.output(u"%s -> our page doesn't exist: %s"
                             % (page.title(), ourpage.title()))
        else:
            if ourpage.isRedirectPage():
                ourpage = ourpage.getRedirectTarget()

            pywikibot.output(u"%s -> corresponding page is %s"
                             % (page.title(), ourpage.title()))
            if ourpage.namespace() != 0:
                pywikibot.output(u"%s -> not in the main namespace, skipping"
                                 % page.title())
            elif ourpage.isRedirectPage():
                pywikibot.output(u"%s -> double redirect, skipping"
                                 % page.title())
            elif not ourpage.exists():
                pywikibot.output(u"%s -> page doesn't exist, skipping"
                                 % ourpage.title())
            else:
                backpage = None
                for link in ourpage.iterlanglinks():
                    if link.site == page.site:
                        backpage = pywikibot.Page(link)
                        break

                if not backpage:
                    pywikibot.output(u"%s -> no back interwiki ref"
                                     % page.title())
                elif backpage == page:
                    # everything is ok
                    yield ourpage
                elif backpage.isRedirectPage():
                    backpage = backpage.getRedirectTarget()
                    if backpage == page:
                        # everything is ok
                        yield ourpage
                    else:
                        pywikibot.output(
                            u"%s -> back interwiki ref target is redirect to %s"
                            % (page.title(), backpage.title()))
                else:
                    pywikibot.output(u"%s -> back interwiki ref target is %s"
                                     % (page.title(), backpage.title()))

    def getTemplateList(self, code, task):
        """Return (add_templates, remove_templates) for `code` and `task`."""
        # NOTE(review): `add_templates = template[code]` binds the shared
        # module-level list, and the subsequent `+=` mutates it in place,
        # so repeated calls keep appending the defaults — verify and
        # consider copying (e.g. `list(template[code])`) before extending.
        add_templates = []
        remove_templates = []
        if task == 'featured':
            try:
                add_templates = template[code]
                add_templates += template['_default']
            except KeyError:
                add_templates = template['_default']
            try:
                remove_templates = template_good[code]
                remove_templates += template_good['_default']
            except KeyError:
                remove_templates = template_good['_default']
        elif task == 'good':
            try:
                add_templates = template_good[code]
                add_templates += template_good['_default']
            except KeyError:
                add_templates = template_good['_default']
            try:
                remove_templates = template[code]
                remove_templates += template['_default']
            except KeyError:
                remove_templates = template['_default']
        elif task == 'lists':
            try:
                add_templates = template_lists[code]
                add_templates += template_lists['_default']
            except KeyError:
                add_templates = template_lists['_default']
        else:  # task == 'former'
            try:
                remove_templates = template[code]
                remove_templates += template['_default']
            except KeyError:
                remove_templates = template['_default']
        return add_templates, remove_templates

    def featuredWithInterwiki(self, fromsite, task):
        """Read featured articles and find the corresponding pages.

        Find corresponding pages on other sites, place the template and
        remember the page in the cache dict.
        """
        tosite = self.site
        if fromsite.code not in self.cache:
            self.cache[fromsite.code] = {}
        if tosite.code not in self.cache[fromsite.code]:
            self.cache[fromsite.code][tosite.code] = {}
        cc = self.cache[fromsite.code][tosite.code]
        if self.getOption('nocache') is True or \
           fromsite.code in self.getOption('nocache'):
            # Detach from the shared cache so nothing is remembered.
            cc = {}
        gen = self.featuredArticles(fromsite, task, cc)
        if self.getOption('count'):
            next(gen, None)
            return  # count only, we are ready here
        gen = PreloadingGenerator(gen)
        for source in gen:
            if source.isRedirectPage():
                source = source.getRedirectTarget()

            if not source.exists():
                pywikibot.output(u"source page doesn't exist: %s"
                                 % source)
                continue

            for dest in self.findTranslated(source, tosite):
                self.add_template(source, dest, task, fromsite)
                cc[source.title()] = dest.title()

    def add_template(self, source, dest, task, fromsite):
        """Place or remove the Link_GA/FA template on/from a page."""
        def compile_link(site, templates):
            """Compile one link template list."""
            findtemplate = '(%s)' % '|'.join(templates)
            return re.compile(r"\{\{%s\|%s\}\}"
                              % (findtemplate.replace(u' ', u'[ _]'),
                                 site.code), re.IGNORECASE)

        tosite = dest.site
        add_tl, remove_tl = self.getTemplateList(tosite.code, task)
        re_Link_add = compile_link(fromsite, add_tl)
        re_Link_remove = compile_link(fromsite, remove_tl)
        text = dest.text
        m1 = add_tl and re_Link_add.search(text)
        m2 = remove_tl and re_Link_remove.search(text)
        changed = False
        interactive = self.getOption('interactive')
        if add_tl:
            if m1:
                pywikibot.output(u"(already added)")
            else:
                # insert just before interwiki
                if (not interactive or
                        pywikibot.input_yn(
                            u'Connecting %s -> %s. Proceed?'
                            % (source.title(), dest.title()),
                            default=False, automatic_quit=False)):
                    if self.getOption('side'):
                        # Placing {{Link FA|xx}} right next to
                        # corresponding interwiki
                        text = (text[:m1.end()] +
                                u" {{%s|%s}}" % (add_tl[0], fromsite.code) +
                                text[m1.end():])
                    else:
                        # Moving {{Link FA|xx}} to top of interwikis
                        iw = textlib.getLanguageLinks(text, tosite)
                        text = textlib.removeLanguageLinks(text, tosite)
                        text += u"%s{{%s|%s}}%s" % (config.LS, add_tl[0],
                                                    fromsite.code, config.LS)
                        text = textlib.replaceLanguageLinks(text, iw, tosite)
                    changed = True
        if remove_tl:
            if m2:
                if (changed or  # Don't force the user to say "Y" twice
                        not interactive or
                        pywikibot.input_yn(
                            u'Connecting %s -> %s. Proceed?'
                            % (source.title(), dest.title()),
                            default=False, automatic_quit=False)):
                    text = re.sub(re_Link_remove, '', text)
                    changed = True
            elif task == 'former':
                pywikibot.output(u"(already removed)")
        if changed:
            comment = i18n.twtranslate(tosite, 'featured-' + task,
                                       {'page': source})
            try:
                dest.put(text, comment)
                self._save_counter += 1
            except pywikibot.LockedPage:
                pywikibot.output(u'Page %s is locked!' % dest.title())
            except pywikibot.PageNotSaved:
                pywikibot.output(u"Page not saved")


def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    options = {}
    local_args = pywikibot.handle_args(args)

    issue_deprecation_warning(
        'featured.py script', 'Wikibase Client extension', 0, UserWarning)

    for arg in local_args:
        # Slices map "-fromlang:..." -> options['fromlang'] and
        # "-nocache:..." -> options['nocache'] (key taken from the flag).
        if arg.startswith('-fromlang:'):
            options[arg[1:9]] = arg[10:].split(",")
        elif arg.startswith('-after:'):
            options['afterpage'] = arg[7:]
        elif arg.startswith('-nocache:'):
            options[arg[1:8]] = arg[9:].split(",")
        else:
            options[arg[1:].lower()] = True

    bot = FeaturedBot(**options)
    bot.run()


if __name__ == "__main__":
    main()
isandlaTech/cohorte-demos
refs/heads/dev
led/dump/led-demo-yun/cohorte/dist/cohorte-1.0.0-20141216.234517-57-python-distribution/repo/sleekxmpp/stanza/rootstanza.py
11
""" SleekXMPP: The Sleek XMPP Library Copyright (C) 2010 Nathanael C. Fritz This file is part of SleekXMPP. See the file LICENSE for copying permission. """ import logging from sleekxmpp.exceptions import XMPPError, IqError, IqTimeout from sleekxmpp.stanza import Error from sleekxmpp.xmlstream import ET, StanzaBase, register_stanza_plugin log = logging.getLogger(__name__) class RootStanza(StanzaBase): """ A top-level XMPP stanza in an XMLStream. The RootStanza class provides a more XMPP specific exception handler than provided by the generic StanzaBase class. Methods: exception -- Overrides StanzaBase.exception """ def exception(self, e): """ Create and send an error reply. Typically called when an event handler raises an exception. The error's type and text content are based on the exception object's type and content. Overrides StanzaBase.exception. Arguments: e -- Exception object """ if isinstance(e, IqError): # We received an Iq error reply, but it wasn't caught # locally. Using the condition/text from that error # response could leak too much information, so we'll # only use a generic error here. 
self.reply() self['error']['condition'] = 'undefined-condition' self['error']['text'] = 'External error' self['error']['type'] = 'cancel' log.warning('You should catch IqError exceptions') self.send() elif isinstance(e, IqTimeout): self.reply() self['error']['condition'] = 'remote-server-timeout' self['error']['type'] = 'wait' log.warning('You should catch IqTimeout exceptions') self.send() elif isinstance(e, XMPPError): # We raised this deliberately self.reply(clear=e.clear) self['error']['condition'] = e.condition self['error']['text'] = e.text self['error']['type'] = e.etype if e.extension is not None: # Extended error tag extxml = ET.Element("{%s}%s" % (e.extension_ns, e.extension), e.extension_args) self['error'].append(extxml) self.send() else: # We probably didn't raise this on purpose, so send an error stanza self.reply() self['error']['condition'] = 'undefined-condition' self['error']['text'] = "SleekXMPP got into trouble." self['error']['type'] = 'cancel' self.send() # log the error log.exception('Error handling {%s}%s stanza', self.namespace, self.name) # Finally raise the exception to a global exception handler self.stream.exception(e) register_stanza_plugin(RootStanza, Error)
pavel-odintsov/shogun
refs/heads/develop
applications/easysvm/scripts/easysvm.py
31
#!/usr/bin/env python ############################################################################################# # # # This program is free software; you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation; either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program; if not, see http://www.gnu.org/licenses # # or write to the Free Software Foundation, Inc., 51 Franklin Street, # # Fifth Floor, Boston, MA 02110-1301 USA # # # ############################################################################################# import sys import random from esvm.experiment import svm_cv, svm_pred, svm_poim, svm_eval, svm_modelsel if __name__ == '__main__': if len(sys.argv)<2: sys.stderr.write("usage: %s [cv|pred|modelsel|eval|poim] parameters\n" % sys.argv[0]) sys.exit(-1) random.seed() topmode = sys.argv[1] if topmode == 'cv': svm_cv(sys.argv) elif topmode == 'pred': svm_pred(sys.argv) elif topmode == 'poim': svm_poim(sys.argv) elif topmode == 'eval': svm_eval(sys.argv) elif topmode == 'modelsel': svm_modelsel(sys.argv) else: sys.stderr.write( "unknown mode %s (use: cv, pred, poim, eval)\n" % topmode) sys.exit(-1) sys.exit(0)
tunneln/CarnotKE
refs/heads/master
jyhton/lib-python/2.7/test/test_unittest.py
130
import unittest.test from test import test_support def test_main(): test_support.run_unittest(unittest.test.suite()) test_support.reap_children() if __name__ == "__main__": test_main()
CLVsol/odoo_addons
refs/heads/8.0
clv_address/seq/clv_address_seq.py
1
# -*- encoding: utf-8 -*- ################################################################################ # # # Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU Affero General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU Affero General Public License for more details. # # # # You should have received a copy of the GNU Affero General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # ################################################################################ from openerp import models, fields, api def format_code(code_seq): code = map(int, str(code_seq)) code_len = len(code) while len(code) < 14: code.insert(0, 0) while len(code) < 16: n = sum([(len(code) + 1 - i) * v for i, v in enumerate(code)]) % 11 if n > 1: f = 11 - n else: f = 0 code.append(f) code_str = "%s.%s.%s.%s.%s-%s" % (str(code[0]) + str(code[1]), str(code[2]) + str(code[3]) + str(code[4]), str(code[5]) + str(code[6]) + str(code[7]), str(code[8]) + str(code[9]) + str(code[10]), str(code[11]) + str(code[12]) + str(code[13]), str(code[14]) + str(code[15])) if code_len <= 3: code_form = code_str[18 - code_len:21] elif code_len > 3 and code_len <= 6: code_form = code_str[17 - code_len:21] elif code_len > 6 and code_len <= 9: code_form = code_str[16 - code_len:21] elif code_len > 9 and code_len <= 12: code_form = code_str[15 - code_len:21] elif code_len > 12 and code_len <= 14: code_form = code_str[14 - code_len:21] return code_form class clv_address(models.Model): _inherit = 'clv_address' code = fields.Char('Address Code', size=64, select=1, 
required=False, readonly=False, default='/', help='Use "/" to get an automatic new Address Code.') @api.model def create(self, vals): if not 'code' in vals or ('code' in vals and vals['code'] == '/'): code_seq = self.pool.get('ir.sequence').get(self._cr, self._uid, 'clv_address.code') vals['code'] = format_code(code_seq) return super(clv_address, self).create(vals) @api.multi def write(self, vals): if 'code' in vals and vals['code'] == '/': code_seq = self.pool.get('ir.sequence').get(self._cr, self._uid, 'clv_address.code') vals['code'] = format_code(code_seq) return super(clv_address, self).write(vals) @api.one def copy(self, default=None): default = dict(default or {}) default.update({'code': '/',}) return super(clv_address, self).copy(default)
Yong-Lee/decode-Django
refs/heads/master
Django-1.5.1/tests/regressiontests/queries/tests.py
18
from __future__ import absolute_import,unicode_literals import datetime from operator import attrgetter import pickle import sys from django.conf import settings from django.core.exceptions import FieldError from django.db import DatabaseError, connection, connections, DEFAULT_DB_ALIAS from django.db.models import Count from django.db.models.query import Q, ITER_CHUNK_SIZE, EmptyQuerySet from django.db.models.sql.where import WhereNode, EverythingNode, NothingNode from django.db.models.sql.datastructures import EmptyResultSet from django.test import TestCase, skipUnlessDBFeature from django.test.utils import str_prefix from django.utils import unittest from django.utils.datastructures import SortedDict from .models import (Annotation, Article, Author, Celebrity, Child, Cover, Detail, DumbCategory, ExtraInfo, Fan, Item, LeafA, LoopX, LoopZ, ManagedModel, Member, NamedCategory, Note, Number, Plaything, PointerA, Ranking, Related, Report, ReservedName, Tag, TvChef, Valid, X, Food, Eaten, Node, ObjectA, ObjectB, ObjectC, CategoryItem, SimpleCategory, SpecialCategory, OneToOneCategory, NullableName, ProxyCategory, SingleObject, RelatedObject, ModelA, ModelD) class BaseQuerysetTest(TestCase): def assertValueQuerysetEqual(self, qs, values): return self.assertQuerysetEqual(qs, values, transform=lambda x: x) class Queries1Tests(BaseQuerysetTest): def setUp(self): generic = NamedCategory.objects.create(name="Generic") self.t1 = Tag.objects.create(name='t1', category=generic) self.t2 = Tag.objects.create(name='t2', parent=self.t1, category=generic) self.t3 = Tag.objects.create(name='t3', parent=self.t1) t4 = Tag.objects.create(name='t4', parent=self.t3) self.t5 = Tag.objects.create(name='t5', parent=self.t3) self.n1 = Note.objects.create(note='n1', misc='foo', id=1) n2 = Note.objects.create(note='n2', misc='bar', id=2) self.n3 = Note.objects.create(note='n3', misc='foo', id=3) ann1 = Annotation.objects.create(name='a1', tag=self.t1) ann1.notes.add(self.n1) ann2 = 
Annotation.objects.create(name='a2', tag=t4) ann2.notes.add(n2, self.n3) # Create these out of order so that sorting by 'id' will be different to sorting # by 'info'. Helps detect some problems later. self.e2 = ExtraInfo.objects.create(info='e2', note=n2, value=41) e1 = ExtraInfo.objects.create(info='e1', note=self.n1, value=42) self.a1 = Author.objects.create(name='a1', num=1001, extra=e1) self.a2 = Author.objects.create(name='a2', num=2002, extra=e1) a3 = Author.objects.create(name='a3', num=3003, extra=self.e2) self.a4 = Author.objects.create(name='a4', num=4004, extra=self.e2) self.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0) self.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0) time3 = datetime.datetime(2007, 12, 20, 22, 25, 0) time4 = datetime.datetime(2007, 12, 20, 21, 0, 0) self.i1 = Item.objects.create(name='one', created=self.time1, modified=self.time1, creator=self.a1, note=self.n3) self.i1.tags = [self.t1, self.t2] self.i2 = Item.objects.create(name='two', created=self.time2, creator=self.a2, note=n2) self.i2.tags = [self.t1, self.t3] self.i3 = Item.objects.create(name='three', created=time3, creator=self.a2, note=self.n3) i4 = Item.objects.create(name='four', created=time4, creator=self.a4, note=self.n3) i4.tags = [t4] self.r1 = Report.objects.create(name='r1', creator=self.a1) Report.objects.create(name='r2', creator=a3) Report.objects.create(name='r3') # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering # will be rank3, rank2, rank1. 
self.rank1 = Ranking.objects.create(rank=2, author=self.a2) Cover.objects.create(title="first", item=i4) Cover.objects.create(title="second", item=self.i2) def test_ticket1050(self): self.assertQuerysetEqual( Item.objects.filter(tags__isnull=True), ['<Item: three>'] ) self.assertQuerysetEqual( Item.objects.filter(tags__id__isnull=True), ['<Item: three>'] ) def test_ticket1801(self): self.assertQuerysetEqual( Author.objects.filter(item=self.i2), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(item=self.i3), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3), ['<Author: a2>'] ) def test_ticket2306(self): # Checking that no join types are "left outer" joins. query = Item.objects.filter(tags=self.t2).query self.assertTrue(query.LOUTER not in [x[2] for x in query.alias_map.values()]) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).order_by('name'), ['<Item: one>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)), ['<Item: one>'] ) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred')|Q(tags=self.t2)), ['<Item: one>'] ) # Each filter call is processed "at once" against a single table, so this is # different from the previous example as it tries to find tags that are two # things at once (rather than two tags). 
self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)), [] ) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred')|Q(tags=self.t2)), [] ) qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id) self.assertQuerysetEqual(list(qs), ['<Author: a2>']) self.assertEqual(2, qs.query.count_active_tables(), 2) qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id) self.assertEqual(qs.query.count_active_tables(), 3) def test_ticket4464(self): self.assertQuerysetEqual( Item.objects.filter(tags=self.t1).filter(tags=self.t2), ['<Item: one>'] ) self.assertQuerysetEqual( Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'), ['<Item: one>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3), ['<Item: two>'] ) # Make sure .distinct() works with slicing (this was broken in Oracle). self.assertQuerysetEqual( Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3], ['<Item: one>', '<Item: one>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3], ['<Item: one>', '<Item: two>'] ) def test_tickets_2080_3592(self): self.assertQuerysetEqual( Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'), ['<Author: a1>', '<Author: a3>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(item__name='one') | Q(name='a3')), ['<Author: a1>', '<Author: a3>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(name='a3') | Q(item__name='one')), ['<Author: a1>', '<Author: a3>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(item__name='three') | Q(report__name='r3')), ['<Author: a2>'] ) def test_ticket6074(self): # Merging two empty result sets shouldn't leave a queryset with no constraints # (which would match everything). 
self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), []) self.assertQuerysetEqual( Author.objects.filter(Q(id__in=[])|Q(id__in=[])), [] ) def test_tickets_1878_2939(self): self.assertEqual(Item.objects.values('creator').distinct().count(), 3) # Create something with a duplicate 'name' so that we can test multi-column # cases (which require some tricky SQL transformations under the covers). xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1) xx.save() self.assertEqual( Item.objects.exclude(name='two').values('creator', 'name').distinct().count(), 4 ) self.assertEqual( Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name', 'foo').distinct().count(), 4 ) self.assertEqual( Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name').distinct().count(), 4 ) xx.delete() def test_ticket7323(self): self.assertEqual(Item.objects.values('creator', 'name').count(), 4) def test_ticket2253(self): q1 = Item.objects.order_by('name') q2 = Item.objects.filter(id=self.i1.id) self.assertQuerysetEqual( q1, ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual(q2, ['<Item: one>']) self.assertQuerysetEqual( (q1 | q2).order_by('name'), ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>']) q1 = Item.objects.filter(tags=self.t1) q2 = Item.objects.filter(note=self.n3, tags=self.t2) q3 = Item.objects.filter(creator=self.a4) self.assertQuerysetEqual( ((q1 & q2) | q3).order_by('name'), ['<Item: four>', '<Item: one>'] ) def test_order_by_tables(self): q1 = Item.objects.order_by('name') q2 = Item.objects.filter(id=self.i1.id) list(q2) self.assertEqual(len((q1 & q2).order_by('name').query.tables), 1) def test_order_by_join_unref(self): """ This test is related to the above one, testing that there aren't old JOINs in the query. 
""" qs = Celebrity.objects.order_by('greatest_fan__fan_of') self.assertIn('OUTER JOIN', str(qs.query)) qs = qs.order_by('id') self.assertNotIn('OUTER JOIN', str(qs.query)) def test_tickets_4088_4306(self): self.assertQuerysetEqual( Report.objects.filter(creator=1001), ['<Report: r1>'] ) self.assertQuerysetEqual( Report.objects.filter(creator__num=1001), ['<Report: r1>'] ) self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), []) self.assertQuerysetEqual( Report.objects.filter(creator__id=self.a1.id), ['<Report: r1>'] ) self.assertQuerysetEqual( Report.objects.filter(creator__name='a1'), ['<Report: r1>'] ) def test_ticket4510(self): self.assertQuerysetEqual( Author.objects.filter(report__name='r1'), ['<Author: a1>'] ) def test_ticket7378(self): self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>']) def test_tickets_5324_6704(self): self.assertQuerysetEqual( Item.objects.filter(tags__name='t4'), ['<Item: four>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4').order_by('name').distinct(), ['<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(), ['<Item: two>', '<Item: three>', '<Item: one>'] ) self.assertQuerysetEqual( Author.objects.exclude(item__name='one').distinct().order_by('name'), ['<Author: a2>', '<Author: a3>', '<Author: a4>'] ) # Excluding across a m2m relation when there is more than one related # object associated was problematic. self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1').order_by('name'), ['<Item: four>', '<Item: three>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'), ['<Item: three>'] ) # Excluding from a relation that cannot be NULL should not use outer joins. 
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query self.assertTrue(query.LOUTER not in [x[2] for x in query.alias_map.values()]) # Similarly, when one of the joins cannot possibly, ever, involve NULL # values (Author -> ExtraInfo, in the following), it should never be # promoted to a left outer join. So the following query should only # involve one "left outer" join (Author -> Item is 0-to-many). qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1)|Q(item__note=self.n3)) self.assertEqual( len([x[2] for x in qs.query.alias_map.values() if x[2] == query.LOUTER and qs.query.alias_refcount[x[1]]]), 1 ) # The previous changes shouldn't affect nullable foreign key joins. self.assertQuerysetEqual( Tag.objects.filter(parent__isnull=True).order_by('name'), ['<Tag: t1>'] ) self.assertQuerysetEqual( Tag.objects.exclude(parent__isnull=True).order_by('name'), ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) def test_ticket2091(self): t = Tag.objects.get(name='t4') self.assertQuerysetEqual( Item.objects.filter(tags__in=[t]), ['<Item: four>'] ) def test_heterogeneous_qs_combination(self): # Combining querysets built on different models should behave in a well-defined # fashion. We raise an error. 
self.assertRaisesMessage( AssertionError, 'Cannot combine queries on two different base models.', lambda: Author.objects.all() & Tag.objects.all() ) self.assertRaisesMessage( AssertionError, 'Cannot combine queries on two different base models.', lambda: Author.objects.all() | Tag.objects.all() ) def test_ticket3141(self): self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4) self.assertEqual( Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(), 4 ) def test_ticket2400(self): self.assertQuerysetEqual( Author.objects.filter(item__isnull=True), ['<Author: a3>'] ) self.assertQuerysetEqual( Tag.objects.filter(item__isnull=True), ['<Tag: t5>'] ) def test_ticket2496(self): self.assertQuerysetEqual( Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1], ['<Item: four>'] ) def test_tickets_2076_7256(self): # Ordering on related tables should be possible, even if the table is # not otherwise involved. self.assertQuerysetEqual( Item.objects.order_by('note__note', 'name'), ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>'] ) # Ordering on a related field should use the remote model's default # ordering as a final step. self.assertQuerysetEqual( Author.objects.order_by('extra', '-name'), ['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>'] ) # Using remote model default ordering can span multiple models (in this # case, Cover is ordered by Item's default, which uses Note's default). self.assertQuerysetEqual( Cover.objects.all(), ['<Cover: first>', '<Cover: second>'] ) # If the remote model does not have a default ordering, we order by its 'id' # field. self.assertQuerysetEqual( Item.objects.order_by('creator', 'name'), ['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>'] ) # Ordering by a many-valued attribute (e.g. a many-to-many or reverse # ForeignKey) is legal, but the results might not make sense. That # isn't Django's problem. Garbage in, garbage out. 
self.assertQuerysetEqual( Item.objects.filter(tags__isnull=False).order_by('tags', 'id'), ['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>'] ) # If we replace the default ordering, Django adjusts the required # tables automatically. Item normally requires a join with Note to do # the default ordering, but that isn't needed here. qs = Item.objects.order_by('name') self.assertQuerysetEqual( qs, ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertEqual(len(qs.query.tables), 1) def test_tickets_2874_3002(self): qs = Item.objects.select_related().order_by('note__note', 'name') self.assertQuerysetEqual( qs, ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>'] ) # This is also a good select_related() test because there are multiple # Note entries in the SQL. The two Note items should be different. self.assertTrue(repr(qs[0].note), '<Note: n2>') self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>') def test_ticket3037(self): self.assertQuerysetEqual( Item.objects.filter(Q(creator__name='a3', name='two')|Q(creator__name='a4', name='four')), ['<Item: four>'] ) def test_tickets_5321_7070(self): # Ordering columns must be included in the output columns. Note that # this means results that might otherwise be distinct are not (if there # are multiple values in the ordering cols), as in this example. This # isn't a bug; it's a warning to be careful with the selection of # ordering columns. self.assertValueQuerysetEqual( Note.objects.values('misc').distinct().order_by('note', '-misc'), [{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}] ) def test_ticket4358(self): # If you don't pass any fields to values(), relation fields are # returned as "foo_id" keys, not "foo". For consistency, you should be # able to pass "foo_id" in the fields list and have it work, too. We # actually allow both "foo" and "foo_id". # The *_id version is returned by default. 
self.assertTrue('note_id' in ExtraInfo.objects.values()[0]) # You can also pass it in explicitly. self.assertValueQuerysetEqual( ExtraInfo.objects.values('note_id'), [{'note_id': 1}, {'note_id': 2}] ) # ...or use the field name. self.assertValueQuerysetEqual( ExtraInfo.objects.values('note'), [{'note': 1}, {'note': 2}] ) def test_ticket2902(self): # Parameters can be given to extra_select, *if* you use a SortedDict. # (First we need to know which order the keys fall in "naturally" on # your system, so we can put things in the wrong way around from # normal. A normal dict would thus fail.) s = [('a', '%s'), ('b', '%s')] params = ['one', 'two'] if {'a': 1, 'b': 2}.keys() == ['a', 'b']: s.reverse() params.reverse() # This slightly odd comparison works around the fact that PostgreSQL will # return 'one' and 'two' as strings, not Unicode objects. It's a side-effect of # using constants here and not a real concern. d = Item.objects.extra(select=SortedDict(s), select_params=params).values('a', 'b')[0] self.assertEqual(d, {'a': 'one', 'b': 'two'}) # Order by the number of tags attached to an item. l = Item.objects.extra(select={'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'}).order_by('-count') self.assertEqual([o.count for o in l], [2, 2, 1, 0]) def test_ticket6154(self): # Multiple filter statements are joined using "AND" all the time. 
        self.assertQuerysetEqual(
            Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1)|Q(item__note=self.n3)),
            ['<Author: a1>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(extra__note=self.n1)|Q(item__note=self.n3)).filter(id=self.a1.id),
            ['<Author: a1>']
        )

    def test_ticket6981(self):
        # Regression for #6981: select_related() on a self-referential FK
        # ('parent' on Tag) must not drop any rows.
        self.assertQuerysetEqual(
            Tag.objects.select_related('parent').order_by('name'),
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
        )

    def test_ticket9926(self):
        # select_related() with multiple (and chained) relation names.
        self.assertQuerysetEqual(
            Tag.objects.select_related("parent", "category").order_by('name'),
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
        )
        self.assertQuerysetEqual(
            Tag.objects.select_related('parent', "parent__category").order_by('name'),
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
        )

    def test_tickets_6180_6203(self):
        # Dates with limits and/or counts
        self.assertEqual(Item.objects.count(), 4)
        self.assertEqual(Item.objects.dates('created', 'month').count(), 1)
        self.assertEqual(Item.objects.dates('created', 'day').count(), 2)
        self.assertEqual(len(Item.objects.dates('created', 'day')), 2)
        self.assertEqual(Item.objects.dates('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0))

    def test_tickets_7087_12242(self):
        # Dates with extra select columns
        self.assertQuerysetEqual(
            Item.objects.dates('created', 'day').extra(select={'a': 1}),
            ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
        )
        self.assertQuerysetEqual(
            Item.objects.extra(select={'a': 1}).dates('created', 'day'),
            ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
        )

        name="one"
        self.assertQuerysetEqual(
            Item.objects.dates('created', 'day').extra(where=['name=%s'], params=[name]),
            ['datetime.datetime(2007, 12, 19, 0, 0)']
        )
        self.assertQuerysetEqual(
            Item.objects.extra(where=['name=%s'], params=[name]).dates('created', 'day'),
            ['datetime.datetime(2007, 12, 19, 0, 0)']
        )

    def test_ticket7155(self):
        # Nullable dates
        self.assertQuerysetEqual(
            Item.objects.dates('modified', 'day'),
            ['datetime.datetime(2007, 12, 19, 0, 0)']
        )

    def test_ticket7098(self):
        # Make sure semi-deprecated ordering by related models syntax still
        # works.
        self.assertValueQuerysetEqual(
            Item.objects.values('note__note').order_by('queries_note.note', 'id'),
            [{'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, {'note__note': 'n3'}]
        )

    def test_ticket7096(self):
        # Make sure exclude() with multiple conditions continues to work.
        self.assertQuerysetEqual(
            Tag.objects.filter(parent=self.t1, name='t3').order_by('name'),
            ['<Tag: t3>']
        )
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'),
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>']
        )
        self.assertQuerysetEqual(
            Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(),
            ['<Item: four>', '<Item: three>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'),
            ['<Item: four>', '<Item: three>']
        )

        # More twisted cases, involving nested negations.
        self.assertQuerysetEqual(
            Item.objects.exclude(~Q(tags__name='t1', name='one')),
            ['<Item: one>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'),
            ['<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'),
            ['<Item: four>', '<Item: one>', '<Item: three>']
        )

    def test_tickets_7204_7506(self):
        # Make sure querysets with related fields can be pickled. If this
        # doesn't crash, it's a Good Thing.
        pickle.dumps(Item.objects.all())

    def test_ticket7813(self):
        # We should also be able to pickle things that use select_related().
        # The only tricky thing here is to ensure that we do the related
        # selections properly after unpickling.
        qs = Item.objects.select_related()
        query = qs.query.get_compiler(qs.db).as_sql()[0]
        query2 = pickle.loads(pickle.dumps(qs.query))
        self.assertEqual(
            query2.get_compiler(qs.db).as_sql()[0],
            query
        )

    def test_deferred_load_qs_pickling(self):
        # Check pickling of deferred-loading querysets
        qs = Item.objects.defer('name', 'creator')
        q2 = pickle.loads(pickle.dumps(qs))
        self.assertEqual(list(qs), list(q2))
        q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
        self.assertEqual(list(qs), list(q3))

    def test_ticket7277(self):
        # Q objects spanning multi-level relations inside a related-manager
        # filter (tag / tag__children / tag__children__children).
        self.assertQuerysetEqual(
            self.n1.annotation_set.filter(Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5)),
            ['<Annotation: a1>']
        )

    def test_tickets_7448_7707(self):
        # Complex objects should be converted to strings before being used in
        # lookups.
        self.assertQuerysetEqual(
            Item.objects.filter(created__in=[self.time1, self.time2]),
            ['<Item: one>', '<Item: two>']
        )

    def test_ticket7235(self):
        # An EmptyQuerySet should not raise exceptions if it is filtered.
        q = EmptyQuerySet()
        self.assertQuerysetEqual(q.all(), [])
        self.assertQuerysetEqual(q.filter(x=10), [])
        self.assertQuerysetEqual(q.exclude(y=3), [])
        self.assertQuerysetEqual(q.complex_filter({'pk': 1}), [])
        self.assertQuerysetEqual(q.select_related('spam', 'eggs'), [])
        self.assertQuerysetEqual(q.annotate(Count('eggs')), [])
        self.assertQuerysetEqual(q.order_by('-pub_date', 'headline'), [])
        self.assertQuerysetEqual(q.distinct(), [])
        self.assertQuerysetEqual(
            q.extra(select={'is_recent': "pub_date > '2006-01-01'"}),
            []
        )
        q.query.low_mark = 1
        self.assertRaisesMessage(
            AssertionError,
            'Cannot change a query once a slice has been taken',
            q.extra, select={'is_recent': "pub_date > '2006-01-01'"}
        )
        self.assertQuerysetEqual(q.reverse(), [])
        self.assertQuerysetEqual(q.defer('spam', 'eggs'), [])
        self.assertQuerysetEqual(q.only('spam', 'eggs'), [])

    def test_ticket7791(self):
        # There were "issues" when ordering and distinct-ing on fields related
        # via ForeignKeys.
        self.assertEqual(
            len(Note.objects.order_by('extrainfo__info').distinct()), 3
        )

        # Pickling of DateQuerySets used to fail
        qs = Item.objects.dates('created', 'month')
        _ = pickle.loads(pickle.dumps(qs))

    def test_ticket9997(self):
        # If a ValuesList or Values queryset is passed as an inner query, we
        # make sure it's only requesting a single value and use that as the
        # thing to select.
        self.assertQuerysetEqual(
            Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')),
            ['<Tag: t2>', '<Tag: t3>']
        )

        # Multi-valued values() and values_list() querysets should raise errors.
        self.assertRaisesMessage(
            TypeError,
            'Cannot use a multi-field ValuesQuerySet as a filter value.',
            lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id'))
        )
        self.assertRaisesMessage(
            TypeError,
            'Cannot use a multi-field ValuesListQuerySet as a filter value.',
            lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id'))
        )

    def test_ticket9985(self):
        # qs.values_list(...).values(...) combinations should work.
        self.assertValueQuerysetEqual(
            Note.objects.values_list("note", flat=True).values("id").order_by("id"),
            [{'id': 1}, {'id': 2}, {'id': 3}]
        )
        self.assertQuerysetEqual(
            Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')),
            ['<Annotation: a1>']
        )

    def test_ticket10205(self):
        # When bailing out early because of an empty "__in" filter, we need
        # to set things up correctly internally so that subqueries can continue properly.
        self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)

    def test_ticket10432(self):
        # Testing an empty "__in" filter with a generator as the value.
        def f():
            return iter([])
        n_obj = Note.objects.all()[0]
        def g():
            for i in [n_obj.pk]:
                yield i
        self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
        self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])

    def test_ticket10742(self):
        # Queries used in an __in clause don't execute subqueries
        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.filter(pk__in=subq)
        self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>'])

        # The subquery result cache should not be populated
        self.assertTrue(subq._result_cache is None)

        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.exclude(pk__in=subq)
        self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>'])

        # The subquery result cache should not be populated
        self.assertTrue(subq._result_cache is None)

        subq = Author.objects.filter(num__lt=3000)
        self.assertQuerysetEqual(
            Author.objects.filter(Q(pk__in=subq) & Q(name='a1')),
            ['<Author: a1>']
        )

        # The subquery result cache should not be populated
        self.assertTrue(subq._result_cache is None)

    def test_ticket7076(self):
        # Excluding shouldn't eliminate NULL entries.
        self.assertQuerysetEqual(
            Item.objects.exclude(modified=self.time1).order_by('name'),
            ['<Item: four>', '<Item: three>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent__name=self.t1.name),
            ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
        )

    def test_ticket7181(self):
        # Ordering by related tables should accommodate nullable fields (this
        # test is a little tricky, since NULL ordering is database dependent;
        # instead, we just count the number of results).
        self.assertEqual(len(Tag.objects.order_by('parent__name')), 5)

        # Empty querysets can be merged with others.
        self.assertQuerysetEqual(
            Note.objects.none() | Note.objects.all(),
            ['<Note: n1>', '<Note: n2>', '<Note: n3>']
        )
        self.assertQuerysetEqual(
            Note.objects.all() | Note.objects.none(),
            ['<Note: n1>', '<Note: n2>', '<Note: n3>']
        )
        self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), [])
        self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), [])

    def test_ticket9411(self):
        # Make sure bump_prefix() (an internal Query method) doesn't (re-)break. It's
        # sufficient that this query runs without error.
        qs = Tag.objects.values_list('id', flat=True).order_by('id')
        qs.query.bump_prefix()
        first = qs[0]
        self.assertEqual(list(qs), list(range(first, first+5)))

    def test_ticket8439(self):
        # Complex combinations of conjunctions, disjunctions and nullable
        # relations.
        self.assertQuerysetEqual(
            Author.objects.filter(Q(item__note__extrainfo=self.e2)|Q(report=self.r1, name='xyz')),
            ['<Author: a2>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(report=self.r1, name='xyz')|Q(item__note__extrainfo=self.e2)),
            ['<Author: a2>']
        )
        self.assertQuerysetEqual(
            Annotation.objects.filter(Q(tag__parent=self.t1)|Q(notes__note='n1', name='a1')),
            ['<Annotation: a1>']
        )
        xx = ExtraInfo.objects.create(info='xx', note=self.n3)
        self.assertQuerysetEqual(
            Note.objects.filter(Q(extrainfo__author=self.a1)|Q(extrainfo=xx)),
            ['<Note: n1>', '<Note: n3>']
        )
        xx.delete()
        q = Note.objects.filter(Q(extrainfo__author=self.a1)|Q(extrainfo=xx)).query
        # Exactly one referenced LEFT OUTER join should remain in the query.
        self.assertEqual(
            len([x[2] for x in q.alias_map.values() if x[2] == q.LOUTER and q.alias_refcount[x[1]]]),
            1
        )

    def test_ticket17429(self):
        """
        Ensure that Meta.ordering=None works the same as Meta.ordering=[]
        """
        original_ordering = Tag._meta.ordering
        Tag._meta.ordering = None
        self.assertQuerysetEqual(
            Tag.objects.all(),
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
        )
        Tag._meta.ordering = original_ordering

    def test_exclude(self):
        # exclude(cond) must match filter(~Q(cond)) for the same condition.
        self.assertQuerysetEqual(
            Item.objects.exclude(tags__name='t4'),
            [repr(i) for i in
             Item.objects.filter(~Q(tags__name='t4'))])
        self.assertQuerysetEqual(
            Item.objects.exclude(Q(tags__name='t4')|Q(tags__name='t3')),
            [repr(i) for i in Item.objects.filter(~(Q(tags__name='t4')|Q(tags__name='t3')))])
        self.assertQuerysetEqual(
            Item.objects.exclude(Q(tags__name='t4')|~Q(tags__name='t3')),
            [repr(i) for i in Item.objects.filter(~(Q(tags__name='t4')|~Q(tags__name='t3')))])

    def test_nested_exclude(self):
        # exclude(~Q(cond)) is a double negation, equivalent to filter(~~Q(cond)).
        self.assertQuerysetEqual(
            Item.objects.exclude(~Q(tags__name='t4')),
            [repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])

    def test_double_exclude(self):
        # Double negation of a Q object should be a no-op.
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags__name='t4')),
            [repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags__name='t4')),
            [repr(i) for i in Item.objects.filter(~Q(~Q(tags__name='t4')))])

    @unittest.expectedFailure
    def test_exclude_in(self):
        # Same negation equivalences as above but with an __in lookup;
        # currently known to fail (hence expectedFailure).
        self.assertQuerysetEqual(
            Item.objects.exclude(Q(tags__name__in=['t4', 't3'])),
            [repr(i) for i in Item.objects.filter(~Q(tags__name__in=['t4', 't3']))])
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags__name__in=['t4', 't3'])),
            [repr(i) for i in Item.objects.filter(~~Q(tags__name__in=['t4', 't3']))])

    def test_ticket19672(self):
        self.assertQuerysetEqual(
            Report.objects.filter(Q(creator__isnull=False) &
                                  ~Q(creator__extra__value=41)),
            ['<Report: r1>']
        )


class Queries2Tests(TestCase):
    def setUp(self):
        Number.objects.create(num=4)
        Number.objects.create(num=8)
        Number.objects.create(num=12)

    def test_ticket4289(self):
        # A slight variation on the restricting the filtering choices by the
        # lookup constraints.
        self.assertQuerysetEqual(Number.objects.filter(num__lt=4), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__gt=8, num__lt=13),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)),
            []
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)),
            []
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)),
            []
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
            ['<Number: 8>']
        )

    def test_ticket12239(self):
        # Float was being rounded to integer on gte queries on integer field. Tests
        # show that gt, lt, gte, and lte work as desired. Note that the fix changes
        # get_prep_lookup for gte and lt queries only.
        self.assertQuerysetEqual(
            Number.objects.filter(num__gt=11.9),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12),
            ['<Number: 4>', '<Number: 8>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12.0),
            ['<Number: 4>', '<Number: 8>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12.1),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=11.9),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=12),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=12.0),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=11.9),
            ['<Number: 4>', '<Number: 8>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12),
            ['<Number: 4>',
             '<Number: 8>', '<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.0),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.1),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.9),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>']
        )

    def test_ticket7411(self):
        # Saving to db must work even with partially read result set in another
        # cursor.
        for num in range(2 * ITER_CHUNK_SIZE + 1):
            _ = Number.objects.create(num=num)

        for i, obj in enumerate(Number.objects.all()):
            obj.save()
            if i > 10:
                break

    def test_ticket7759(self):
        # Count should work with a partially read result set.
        count = Number.objects.count()
        qs = Number.objects.all()
        def run():
            for obj in qs:
                # count() mid-iteration must agree with the pre-iteration count.
                return qs.count() == count
        self.assertTrue(run())


class Queries3Tests(BaseQuerysetTest):
    def test_ticket7107(self):
        # This shouldn't create an infinite loop.
        self.assertQuerysetEqual(Valid.objects.all(), [])

    def test_ticket8683(self):
        # Raise proper error when a DateQuerySet gets passed a wrong type of
        # field
        self.assertRaisesMessage(
            AssertionError,
            "'name' isn't a DateField.",
            Item.objects.dates, 'name', 'month'
        )


class Queries4Tests(BaseQuerysetTest):
    def setUp(self):
        generic = NamedCategory.objects.create(name="Generic")
        self.t1 = Tag.objects.create(name='t1', category=generic)

        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        n2 = Note.objects.create(note='n2', misc='bar', id=2)

        e1 = ExtraInfo.objects.create(info='e1', note=n1)
        e2 = ExtraInfo.objects.create(info='e2', note=n2)

        self.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        self.a3 = Author.objects.create(name='a3', num=3003, extra=e2)

        self.r1 = Report.objects.create(name='r1', creator=self.a1)
        self.r2 = Report.objects.create(name='r2', creator=self.a3)
        self.r3 = Report.objects.create(name='r3')

        Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=self.a1)
        Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=self.a3)

    def test_ticket14876(self):
        # OR-ing Q objects inside one filter should generate the same SQL as
        # OR-ing the two equivalent querysets.
        q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1'))
        q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1'))
        self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
        self.assertEqual(str(q1.query), str(q2.query))

        q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True))
        q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True))
        self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
        self.assertEqual(str(q1.query), str(q2.query))

        q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by()
        q2 = Item.objects.filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1')).order_by()
        self.assertQuerysetEqual(q1, ["<Item: i1>"])
        self.assertEqual(str(q1.query), str(q2.query))

        q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by()
        q2 = Item.objects.filter(Q(creator__report__name='e1')).order_by() | Item.objects.filter(Q(creator=self.a1)).order_by()
        self.assertQuerysetEqual(q1, ["<Item: i1>"])
        self.assertEqual(str(q1.query), str(q2.query))

    def test_ticket7095(self):
        # Updates that are filtered on the model being updated are somewhat
        # tricky in MySQL. This exercises that case.
        ManagedModel.objects.create(data='mm1', tag=self.t1, public=True)
        self.assertEqual(ManagedModel.objects.update(data='mm'), 1)

        # A values() or values_list() query across joined models must use outer
        # joins appropriately.
        # Note: In Oracle, we expect a null CharField to return '' instead of
        # None.
        if connection.features.interprets_empty_strings_as_nulls:
            expected_null_charfield_repr = ''
        else:
            expected_null_charfield_repr = None
        self.assertValueQuerysetEqual(
            Report.objects.values_list("creator__extra__info", flat=True).order_by("name"),
            ['e1', 'e2', expected_null_charfield_repr],
        )

        # Similarly for select_related(), joins beyond an initial nullable join
        # must use outer joins so that all results are included.
        self.assertQuerysetEqual(
            Report.objects.select_related("creator", "creator__extra").order_by("name"),
            ['<Report: r1>', '<Report: r2>', '<Report: r3>']
        )

        # When there are multiple paths to a table from another table, we have
        # to be careful not to accidentally reuse an inappropriate join when
        # using select_related(). We used to return the parent's Detail record
        # here by mistake.

        d1 = Detail.objects.create(data="d1")
        d2 = Detail.objects.create(data="d2")
        m1 = Member.objects.create(name="m1", details=d1)
        m2 = Member.objects.create(name="m2", details=d2)
        Child.objects.create(person=m2, parent=m1)
        obj = m1.children.select_related("person__details")[0]
        self.assertEqual(obj.person.details.data, 'd2')

    def test_order_by_resetting(self):
        # Calling order_by() with no parameters removes any existing ordering on the
        # model. But it should still be possible to add new ordering after that.
        qs = Author.objects.order_by().order_by('name')
        self.assertTrue('ORDER BY' in qs.query.get_compiler(qs.db).as_sql()[0])

    def test_ticket10181(self):
        # Avoid raising an EmptyResultSet if an inner query is probably
        # empty (and hence, not executed).
self.assertQuerysetEqual( Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])), [] ) def test_ticket15316_filter_false(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") ci1 = CategoryItem.objects.create(category=c1) ci2 = CategoryItem.objects.create(category=c2) ci3 = CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.filter(category__specialcategory__isnull=False) self.assertEqual(qs.count(), 2) self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False) def test_ticket15316_exclude_false(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") ci1 = CategoryItem.objects.create(category=c1) ci2 = CategoryItem.objects.create(category=c2) ci3 = CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False) self.assertEqual(qs.count(), 1) self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk) def test_ticket15316_filter_true(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = SpecialCategory.objects.create(name="named category2", special_name="special2") ci1 = CategoryItem.objects.create(category=c1) ci2 = CategoryItem.objects.create(category=c2) ci3 = CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.filter(category__specialcategory__isnull=True) self.assertEqual(qs.count(), 1) self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk) def test_ticket15316_exclude_true(self): c1 = SimpleCategory.objects.create(name="category1") c2 = SpecialCategory.objects.create(name="named category1", special_name="special1") c3 = 
SpecialCategory.objects.create(name="named category2", special_name="special2") ci1 = CategoryItem.objects.create(category=c1) ci2 = CategoryItem.objects.create(category=c2) ci3 = CategoryItem.objects.create(category=c3) qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True) self.assertEqual(qs.count(), 2) self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False) def test_ticket15316_one2one_filter_false(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") c2 = OneToOneCategory.objects.create(category = c1, new_name="new1") c3 = OneToOneCategory.objects.create(category = c0, new_name="new2") ci1 = CategoryItem.objects.create(category=c) ci2 = CategoryItem.objects.create(category=c0) ci3 = CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False) self.assertEqual(qs.count(), 2) self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False) def test_ticket15316_one2one_exclude_false(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") c2 = OneToOneCategory.objects.create(category = c1, new_name="new1") c3 = OneToOneCategory.objects.create(category = c0, new_name="new2") ci1 = CategoryItem.objects.create(category=c) ci2 = CategoryItem.objects.create(category=c0) ci3 = CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False) self.assertEqual(qs.count(), 1) self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk) def test_ticket15316_one2one_filter_true(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") c2 = OneToOneCategory.objects.create(category = c1, new_name="new1") c3 = OneToOneCategory.objects.create(category = 
c0, new_name="new2") ci1 = CategoryItem.objects.create(category=c) ci2 = CategoryItem.objects.create(category=c0) ci3 = CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True) self.assertEqual(qs.count(), 1) self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk) def test_ticket15316_one2one_exclude_true(self): c = SimpleCategory.objects.create(name="cat") c0 = SimpleCategory.objects.create(name="cat0") c1 = SimpleCategory.objects.create(name="category1") c2 = OneToOneCategory.objects.create(category = c1, new_name="new1") c3 = OneToOneCategory.objects.create(category = c0, new_name="new2") ci1 = CategoryItem.objects.create(category=c) ci2 = CategoryItem.objects.create(category=c0) ci3 = CategoryItem.objects.create(category=c1) qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True) self.assertEqual(qs.count(), 2) self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False) class Queries5Tests(TestCase): def setUp(self): # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the # Meta.ordering will be rank3, rank2, rank1. n1 = Note.objects.create(note='n1', misc='foo', id=1) n2 = Note.objects.create(note='n2', misc='bar', id=2) e1 = ExtraInfo.objects.create(info='e1', note=n1) e2 = ExtraInfo.objects.create(info='e2', note=n2) a1 = Author.objects.create(name='a1', num=1001, extra=e1) a2 = Author.objects.create(name='a2', num=2002, extra=e1) a3 = Author.objects.create(name='a3', num=3003, extra=e2) self.rank1 = Ranking.objects.create(rank=2, author=a2) Ranking.objects.create(rank=1, author=a3) Ranking.objects.create(rank=3, author=a1) def test_ordering(self): # Cross model ordering is possible in Meta, too. 
self.assertQuerysetEqual( Ranking.objects.all(), ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>'] ) self.assertQuerysetEqual( Ranking.objects.all().order_by('rank'), ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>'] ) # Ordering of extra() pieces is possible, too and you can mix extra # fields and model fields in the ordering. self.assertQuerysetEqual( Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']), ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>'] ) qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'}) self.assertEqual( [o.good for o in qs.extra(order_by=('-good',))], [True, False, False] ) self.assertQuerysetEqual( qs.extra(order_by=('-good', 'id')), ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>'] ) # Despite having some extra aliases in the query, we can still omit # them in a values() query. dicts = qs.values('id', 'rank').order_by('id') self.assertEqual( [d['rank'] for d in dicts], [2, 1, 3] ) def test_ticket7256(self): # An empty values() call includes all aliases, including those from an # extra() qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'}) dicts = qs.values().order_by('id') for d in dicts: del d['id']; del d['author_id'] self.assertEqual( [sorted(d.items()) for d in dicts], [[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]] ) def test_ticket7045(self): # Extra tables used to crash SQL construction on the second use. qs = Ranking.objects.extra(tables=['django_site']) qs.query.get_compiler(qs.db).as_sql() # test passes if this doesn't raise an exception. qs.query.get_compiler(qs.db).as_sql() def test_ticket9848(self): # Make sure that updates which only filter on sub-tables don't # inadvertently update the wrong records (bug #9848). # Make sure that the IDs from different tables don't happen to match. 
self.assertQuerysetEqual( Ranking.objects.filter(author__name='a1'), ['<Ranking: 3: a1>'] ) self.assertEqual( Ranking.objects.filter(author__name='a1').update(rank='4'), 1 ) r = Ranking.objects.filter(author__name='a1')[0] self.assertNotEqual(r.id, r.author.id) self.assertEqual(r.rank, 4) r.rank = 3 r.save() self.assertQuerysetEqual( Ranking.objects.all(), ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>'] ) def test_ticket5261(self): # Test different empty excludes. self.assertQuerysetEqual( Note.objects.exclude(Q()), ['<Note: n1>', '<Note: n2>'] ) self.assertQuerysetEqual( Note.objects.filter(~Q()), ['<Note: n1>', '<Note: n2>'] ) self.assertQuerysetEqual( Note.objects.filter(~Q()|~Q()), ['<Note: n1>', '<Note: n2>'] ) self.assertQuerysetEqual( Note.objects.exclude(~Q()&~Q()), ['<Note: n1>', '<Note: n2>'] ) class SelectRelatedTests(TestCase): def test_tickets_3045_3288(self): # Once upon a time, select_related() with circular relations would loop # infinitely if you forgot to specify "depth". Now we set an arbitrary # default upper bound. self.assertQuerysetEqual(X.objects.all(), []) self.assertQuerysetEqual(X.objects.select_related(), []) class SubclassFKTests(TestCase): def test_ticket7778(self): # Model subclasses could not be deleted if a nullable foreign key # relates to a model that relates back. num_celebs = Celebrity.objects.count() tvc = TvChef.objects.create(name="Huey") self.assertEqual(Celebrity.objects.count(), num_celebs + 1) Fan.objects.create(fan_of=tvc) Fan.objects.create(fan_of=tvc) tvc.delete() # The parent object should have been deleted as well. self.assertEqual(Celebrity.objects.count(), num_celebs) class CustomPkTests(TestCase): def test_ticket7371(self): self.assertQuerysetEqual(Related.objects.order_by('custom'), []) class NullableRelOrderingTests(TestCase): def test_ticket10028(self): # Ordering by model related to nullable relations(!) should use outer # joins, so that all results are included. 
        Plaything.objects.create(name="p1")
        self.assertQuerysetEqual(
            Plaything.objects.all(),
            ['<Plaything: p1>']
        )

    def test_join_already_in_query(self):
        # Ordering by model related to nullable relations should not change
        # the join type of already existing joins.
        Plaything.objects.create(name="p1")
        s = SingleObject.objects.create(name='s')
        r = RelatedObject.objects.create(single=s)
        Plaything.objects.create(name="p2", others=r)
        qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk')
        self.assertTrue('INNER' in str(qs.query))
        qs = qs.order_by('others__single__name')
        # The ordering by others__single__pk will add one new join (to single)
        # and that join must be LEFT join. The already existing join to related
        # objects must be kept INNER. So, we have both a INNER and a LEFT join
        # in the query.
        self.assertTrue('LEFT' in str(qs.query))
        self.assertTrue('INNER' in str(qs.query))
        self.assertQuerysetEqual(
            qs,
            ['<Plaything: p2>']
        )


class DisjunctiveFilterTests(TestCase):
    def setUp(self):
        self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ExtraInfo.objects.create(info='e1', note=self.n1)

    def test_ticket7872(self):
        # Another variation on the disjunctive filtering theme.

        # For the purposes of this regression test, it's important that there is no
        # Join object related to the LeafA we create.
        LeafA.objects.create(data='first')
        self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>'])
        self.assertQuerysetEqual(
            LeafA.objects.filter(Q(data='first')|Q(join__b__data='second')),
            ['<LeafA: first>']
        )

    def test_ticket8283(self):
        # Checking that applying filters after a disjunction works correctly.
        self.assertQuerysetEqual(
            (ExtraInfo.objects.filter(note=self.n1)|ExtraInfo.objects.filter(info='e2')).filter(note=self.n1),
            ['<ExtraInfo: e1>']
        )
        self.assertQuerysetEqual(
            (ExtraInfo.objects.filter(info='e2')|ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1),
            ['<ExtraInfo: e1>']
        )


class Queries6Tests(TestCase):
    def setUp(self):
        generic = NamedCategory.objects.create(name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        t2 = Tag.objects.create(name='t2', parent=t1, category=generic)
        t3 = Tag.objects.create(name='t3', parent=t1)
        t4 = Tag.objects.create(name='t4', parent=t3)
        t5 = Tag.objects.create(name='t5', parent=t3)
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ann1 = Annotation.objects.create(name='a1', tag=t1)
        ann1.notes.add(n1)
        ann2 = Annotation.objects.create(name='a2', tag=t4)

    # This next test used to cause really weird PostgreSQL behavior, but it was
    # only apparent much later when the full test suite ran.
    #@unittest.expectedFailure
    def test_slicing_and_cache_interaction(self):
        # We can do slicing beyond what is currently in the result cache,
        # too.

        # We need to mess with the implementation internals a bit here to decrease the
        # cache fill size so that we don't read all the results at once.
        from django.db.models import query
        query.ITER_CHUNK_SIZE = 2
        qs = Tag.objects.all()

        # Fill the cache with the first chunk.
        self.assertTrue(bool(qs))
        self.assertEqual(len(qs._result_cache), 2)

        # Query beyond the end of the cache and check that it is filled out as required.
        self.assertEqual(repr(qs[4]), '<Tag: t5>')
        self.assertEqual(len(qs._result_cache), 5)

        # But querying beyond the end of the result set will fail.
        self.assertRaises(IndexError, lambda: qs[100])

    def test_parallel_iterators(self):
        # Test that parallel iterators work.
        qs = Tag.objects.all()
        i1, i2 = iter(qs), iter(qs)
        self.assertEqual(repr(next(i1)), '<Tag: t1>')
        self.assertEqual(repr(next(i1)), '<Tag: t2>')
        self.assertEqual(repr(next(i2)), '<Tag: t1>')
        self.assertEqual(repr(next(i2)), '<Tag: t2>')
        self.assertEqual(repr(next(i2)), '<Tag: t3>')
        self.assertEqual(repr(next(i1)), '<Tag: t3>')

        # Repeated truth-testing of an empty queryset must be stable.
        qs = X.objects.all()
        self.assertEqual(bool(qs), False)
        self.assertEqual(bool(qs), False)

    def test_nested_queries_sql(self):
        # Nested queries should not evaluate the inner query as part of constructing the
        # SQL (so we should see a nested query here, indicated by two "SELECT" calls).
        qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
        self.assertEqual(
            qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'),
            2
        )

    def test_tickets_8921_9188(self):
        # Incorrect SQL was being generated for certain types of exclude()
        # queries that crossed multi-valued relations (#8921, #9188 and some
        # pre-emptively discovered cases).
        self.assertQuerysetEqual(
            PointerA.objects.filter(connection__pointerb__id=1),
            []
        )
        self.assertQuerysetEqual(
            PointerA.objects.exclude(connection__pointerb__id=1),
            []
        )

        self.assertQuerysetEqual(
            Tag.objects.exclude(children=None),
            ['<Tag: t1>', '<Tag: t3>']
        )

        # This example is tricky because the parent could be NULL, so only checking
        # parents with annotations omits some results (tag t1, in this case).
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent__annotation__name="a1"),
            ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
        )

        # The annotation->tag link is single values and tag->children links is
        # multi-valued. So we have to split the exclude filter in the middle
        # and then optimize the inner query without losing results.
        self.assertQuerysetEqual(
            Annotation.objects.exclude(tag__children__name="t2"),
            ['<Annotation: a2>']
        )

        # Nested queries are possible (although should be used with care, since
        # they have performance problems on backends like MySQL).
        self.assertQuerysetEqual(
            Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
            ['<Annotation: a1>']
        )

    def test_ticket3739(self):
        # The all() method on querysets returns a copy of the queryset.
        q1 = Tag.objects.order_by('name')
        self.assertIsNot(q1, q1.all())


class RawQueriesTests(TestCase):
    def setUp(self):
        n1 = Note.objects.create(note='n1', misc='foo', id=1)

    def test_ticket14729(self):
        # Test representation of raw query with one or few parameters passed as list
        query = "SELECT * FROM queries_note WHERE note = %s"
        params = ['n1']
        qs = Note.objects.raw(query, params=params)
        self.assertEqual(repr(qs), str_prefix("<RawQuerySet: %(_)s'SELECT * FROM queries_note WHERE note = n1'>"))

        query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
        params = ['n1', 'foo']
        qs = Note.objects.raw(query, params=params)
        self.assertEqual(repr(qs), str_prefix("<RawQuerySet: %(_)s'SELECT * FROM queries_note WHERE note = n1 and misc = foo'>"))


class GeneratorExpressionTests(TestCase):
    def test_ticket10432(self):
        # Using an empty generator expression as the rvalue for an "__in"
        # lookup is legal.
self.assertQuerysetEqual( Note.objects.filter(pk__in=(x for x in ())), [] ) class ComparisonTests(TestCase): def setUp(self): self.n1 = Note.objects.create(note='n1', misc='foo', id=1) e1 = ExtraInfo.objects.create(info='e1', note=self.n1) self.a2 = Author.objects.create(name='a2', num=2002, extra=e1) def test_ticket8597(self): # Regression tests for case-insensitive comparisons _ = Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1) _ = Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1) self.assertQuerysetEqual( Item.objects.filter(name__iexact="A_b"), ['<Item: a_b>'] ) self.assertQuerysetEqual( Item.objects.filter(name__iexact="x%Y"), ['<Item: x%y>'] ) self.assertQuerysetEqual( Item.objects.filter(name__istartswith="A_b"), ['<Item: a_b>'] ) self.assertQuerysetEqual( Item.objects.filter(name__iendswith="A_b"), ['<Item: a_b>'] ) class ExistsSql(TestCase): def setUp(self): settings.DEBUG = True def test_exists(self): self.assertFalse(Tag.objects.exists()) # Ok - so the exist query worked - but did it include too many columns? self.assertTrue("id" not in connection.queries[-1]['sql'] and "name" not in connection.queries[-1]['sql']) def tearDown(self): settings.DEBUG = False class QuerysetOrderedTests(unittest.TestCase): """ Tests for the Queryset.ordered attribute. 
""" def test_no_default_or_explicit_ordering(self): self.assertEqual(Annotation.objects.all().ordered, False) def test_cleared_default_ordering(self): self.assertEqual(Tag.objects.all().ordered, True) self.assertEqual(Tag.objects.all().order_by().ordered, False) def test_explicit_ordering(self): self.assertEqual(Annotation.objects.all().order_by('id').ordered, True) def test_order_by_extra(self): self.assertEqual(Annotation.objects.all().extra(order_by=['id']).ordered, True) def test_annotated_ordering(self): qs = Annotation.objects.annotate(num_notes=Count('notes')) self.assertEqual(qs.ordered, False) self.assertEqual(qs.order_by('num_notes').ordered, True) class SubqueryTests(TestCase): def setUp(self): DumbCategory.objects.create(id=1) DumbCategory.objects.create(id=2) DumbCategory.objects.create(id=3) def test_ordered_subselect(self): "Subselects honor any manual ordering" try: query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2]) self.assertEqual(set(query.values_list('id', flat=True)), set([2,3])) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2]) self.assertEqual(set(query.values_list('id', flat=True)), set([2,3])) query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:]) self.assertEqual(set(query.values_list('id', flat=True)), set([1])) except DatabaseError: # Oracle and MySQL both have problems with sliced subselects. # This prevents us from even evaluating this test case at all. # Refs #10099 self.assertFalse(connections[DEFAULT_DB_ALIAS].features.allow_sliced_subqueries) def test_sliced_delete(self): "Delete queries can safely contain sliced subqueries" try: DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete() self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), set([1,2])) except DatabaseError: # Oracle and MySQL both have problems with sliced subselects. 
# This prevents us from even evaluating this test case at all. # Refs #10099 self.assertFalse(connections[DEFAULT_DB_ALIAS].features.allow_sliced_subqueries) class CloneTests(TestCase): def test_evaluated_queryset_as_argument(self): "#13227 -- If a queryset is already evaluated, it can still be used as a query arg" n = Note(note='Test1', misc='misc') n.save() e = ExtraInfo(info='good', note=n) e.save() n_list = Note.objects.all() # Evaluate the Note queryset, populating the query cache list(n_list) # Use the note queryset in a query, and evalute # that query in a way that involves cloning. self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good') class EmptyQuerySetTests(TestCase): def test_emptyqueryset_values(self): # #14366 -- Calling .values() on an EmptyQuerySet and then cloning that # should not cause an error" self.assertQuerysetEqual( Number.objects.none().values('num').order_by('num'), [] ) def test_values_subquery(self): self.assertQuerysetEqual( Number.objects.filter(pk__in=Number.objects.none().values("pk")), [] ) self.assertQuerysetEqual( Number.objects.filter(pk__in=Number.objects.none().values_list("pk")), [] ) def test_ticket_19151(self): # #19151 -- Calling .values() or .values_list() on an EmptyQuerySet # should return EmptyQuerySet and not cause an error. 
q = EmptyQuerySet() self.assertQuerysetEqual(q.values(), []) self.assertQuerysetEqual(q.values_list(), []) class ValuesQuerysetTests(BaseQuerysetTest): def test_flat_values_lits(self): Number.objects.create(num=72) qs = Number.objects.values_list("num") qs = qs.values_list("num", flat=True) self.assertValueQuerysetEqual( qs, [72] ) class WeirdQuerysetSlicingTests(BaseQuerysetTest): def setUp(self): Number.objects.create(num=1) Number.objects.create(num=2) Article.objects.create(name='one', created=datetime.datetime.now()) Article.objects.create(name='two', created=datetime.datetime.now()) Article.objects.create(name='three', created=datetime.datetime.now()) Article.objects.create(name='four', created=datetime.datetime.now()) def test_tickets_7698_10202(self): # People like to slice with '0' as the high-water mark. self.assertQuerysetEqual(Article.objects.all()[0:0], []) self.assertQuerysetEqual(Article.objects.all()[0:0][:10], []) self.assertEqual(Article.objects.all()[:0].count(), 0) self.assertRaisesMessage( AssertionError, 'Cannot change a query once a slice has been taken.', Article.objects.all()[:0].latest, 'created' ) def test_empty_resultset_sql(self): # ticket #12192 self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1])) class EscapingTests(TestCase): def test_ticket_7302(self): # Reserved names are appropriately escaped _ = ReservedName.objects.create(name='a', order=42) ReservedName.objects.create(name='b', order=37) self.assertQuerysetEqual( ReservedName.objects.all().order_by('order'), ['<ReservedName: b>', '<ReservedName: a>'] ) self.assertQuerysetEqual( ReservedName.objects.extra(select={'stuff':'name'}, order_by=('order','stuff')), ['<ReservedName: b>', '<ReservedName: a>'] ) class ToFieldTests(TestCase): def test_in_query(self): apple = Food.objects.create(name="apple") pear = Food.objects.create(name="pear") lunch = Eaten.objects.create(food=apple, meal="lunch") dinner = Eaten.objects.create(food=pear, meal="dinner") self.assertEqual( 
set(Eaten.objects.filter(food__in=[apple, pear])), set([lunch, dinner]), ) def test_reverse_in(self): apple = Food.objects.create(name="apple") pear = Food.objects.create(name="pear") lunch_apple = Eaten.objects.create(food=apple, meal="lunch") lunch_pear = Eaten.objects.create(food=pear, meal="dinner") self.assertEqual( set(Food.objects.filter(eaten__in=[lunch_apple, lunch_pear])), set([apple, pear]) ) def test_single_object(self): apple = Food.objects.create(name="apple") lunch = Eaten.objects.create(food=apple, meal="lunch") dinner = Eaten.objects.create(food=apple, meal="dinner") self.assertEqual( set(Eaten.objects.filter(food=apple)), set([lunch, dinner]) ) def test_single_object_reverse(self): apple = Food.objects.create(name="apple") lunch = Eaten.objects.create(food=apple, meal="lunch") self.assertEqual( set(Food.objects.filter(eaten=lunch)), set([apple]) ) def test_recursive_fk(self): node1 = Node.objects.create(num=42) node2 = Node.objects.create(num=1, parent=node1) self.assertEqual( list(Node.objects.filter(parent=node1)), [node2] ) def test_recursive_fk_reverse(self): node1 = Node.objects.create(num=42) node2 = Node.objects.create(num=1, parent=node1) self.assertEqual( list(Node.objects.filter(node=node2)), [node1] ) class ConditionalTests(BaseQuerysetTest): """Tests whose execution depend on different environment conditions like Python version or DB backend features""" def setUp(self): generic = NamedCategory.objects.create(name="Generic") t1 = Tag.objects.create(name='t1', category=generic) t2 = Tag.objects.create(name='t2', parent=t1, category=generic) t3 = Tag.objects.create(name='t3', parent=t1) t4 = Tag.objects.create(name='t4', parent=t3) t5 = Tag.objects.create(name='t5', parent=t3) # In Python 2.6 beta releases, exceptions raised in __len__ are swallowed # (Python issue 1242657), so these cases return an empty list, rather than # raising an exception. 
Not a lot we can do about that, unfortunately, due to # the way Python handles list() calls internally. Thus, we skip the tests for # Python 2.6. @unittest.skipIf(sys.version_info[:2] == (2, 6), "Python version is 2.6") def test_infinite_loop(self): # If you're not careful, it's possible to introduce infinite loops via # default ordering on foreign keys in a cycle. We detect that. self.assertRaisesMessage( FieldError, 'Infinite loop caused by ordering.', lambda: list(LoopX.objects.all()) # Force queryset evaluation with list() ) self.assertRaisesMessage( FieldError, 'Infinite loop caused by ordering.', lambda: list(LoopZ.objects.all()) # Force queryset evaluation with list() ) # Note that this doesn't cause an infinite loop, since the default # ordering on the Tag model is empty (and thus defaults to using "id" # for the related field). self.assertEqual(len(Tag.objects.order_by('parent')), 5) # ... but you can still order in a non-recursive fashion amongst linked # fields (the previous test failed because the default ordering was # recursive). self.assertQuerysetEqual( LoopX.objects.all().order_by('y__x__y__x__id'), [] ) # When grouping without specifying ordering, we add an explicit "ORDER BY NULL" # portion in MySQL to prevent unnecessary sorting. @skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping') def test_null_ordering_added(self): query = Tag.objects.values_list('parent_id', flat=True).order_by().query query.group_by = ['parent_id'] sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0] fragment = "ORDER BY " pos = sql.find(fragment) self.assertEqual(sql.find(fragment, pos + 1), -1) self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment)) # Sqlite 3 does not support passing in more than 1000 parameters except by # changing a parameter at compilation time. @skipUnlessDBFeature('supports_1000_query_parameters') def test_ticket14244(self): # Test that the "in" lookup works with lists of 1000 items or more. 
# The numbers amount is picked to force three different IN batches # for Oracle, yet to be less than 2100 parameter limit for MSSQL. numbers = range(2050) Number.objects.all().delete() Number.objects.bulk_create(Number(num=num) for num in numbers) self.assertEqual( Number.objects.filter(num__in=numbers[:1000]).count(), 1000 ) self.assertEqual( Number.objects.filter(num__in=numbers[:1001]).count(), 1001 ) self.assertEqual( Number.objects.filter(num__in=numbers[:2000]).count(), 2000 ) self.assertEqual( Number.objects.filter(num__in=numbers).count(), len(numbers) ) class UnionTests(unittest.TestCase): """ Tests for the union of two querysets. Bug #12252. """ def setUp(self): objectas = [] objectbs = [] objectcs = [] a_info = ['one', 'two', 'three'] for name in a_info: o = ObjectA(name=name) o.save() objectas.append(o) b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])] for name, number, objecta in b_info: o = ObjectB(name=name, num=number, objecta=objecta) o.save() objectbs.append(o) c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])] for name, objecta, objectb in c_info: o = ObjectC(name=name, objecta=objecta, objectb=objectb) o.save() objectcs.append(o) def check_union(self, model, Q1, Q2): filter = model.objects.filter self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2))) self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2))) def test_A_AB(self): Q1 = Q(name='two') Q2 = Q(objectb__name='deux') self.check_union(ObjectA, Q1, Q2) def test_A_AB2(self): Q1 = Q(name='two') Q2 = Q(objectb__name='deux', objectb__num=2) self.check_union(ObjectA, Q1, Q2) def test_AB_ACB(self): Q1 = Q(objectb__name='deux') Q2 = Q(objectc__objectb__name='deux') self.check_union(ObjectA, Q1, Q2) def test_BAB_BAC(self): Q1 = Q(objecta__objectb__name='deux') Q2 = Q(objecta__objectc__name='ein') self.check_union(ObjectB, Q1, Q2) def test_BAB_BACB(self): Q1 = Q(objecta__objectb__name='deux') Q2 = 
Q(objecta__objectc__objectb__name='trois') self.check_union(ObjectB, Q1, Q2) def test_BA_BCA__BAB_BAC_BCA(self): Q1 = Q(objecta__name='one', objectc__objecta__name='two') Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois') self.check_union(ObjectB, Q1, Q2) class DefaultValuesInsertTest(TestCase): def test_no_extra_params(self): # Ticket #17056 -- affects Oracle try: DumbCategory.objects.create() except TypeError: self.fail("Creation of an instance of a model with only the PK field shouldn't error out after bulk insert refactoring (#17056)") class NullInExcludeTest(TestCase): def setUp(self): NullableName.objects.create(name='i1') NullableName.objects.create() def test_null_in_exclude_qs(self): none_val = '' if connection.features.interprets_empty_strings_as_nulls else None self.assertQuerysetEqual( NullableName.objects.exclude(name__in=[]), ['i1', none_val], attrgetter('name')) self.assertQuerysetEqual( NullableName.objects.exclude(name__in=['i1']), [none_val], attrgetter('name')) self.assertQuerysetEqual( NullableName.objects.exclude(name__in=['i3']), ['i1', none_val], attrgetter('name')) inner_qs = NullableName.objects.filter(name='i1').values_list('name') self.assertQuerysetEqual( NullableName.objects.exclude(name__in=inner_qs), [none_val], attrgetter('name')) # Check that the inner queryset wasn't executed - it should be turned # into subquery above self.assertIs(inner_qs._result_cache, None) @unittest.expectedFailure def test_col_not_in_list_containing_null(self): """ The following case is not handled properly because SQL's COL NOT IN (list containing null) handling is too weird to abstract away. """ self.assertQuerysetEqual( NullableName.objects.exclude(name__in=[None]), ['i1'], attrgetter('name')) class EmptyStringsAsNullTest(TestCase): """ Test that filtering on non-null character fields works as expected. 
The reason for these tests is that Oracle treats '' as NULL, and this can cause problems in query construction. Refs #17957. """ def setUp(self): self.nc = NamedCategory.objects.create(name='') def test_direct_exclude(self): self.assertQuerysetEqual( NamedCategory.objects.exclude(name__in=['nonexisting']), [self.nc.pk], attrgetter('pk') ) def test_joined_exclude(self): self.assertQuerysetEqual( DumbCategory.objects.exclude(namedcategory__name__in=['nonexisting']), [self.nc.pk], attrgetter('pk') ) class ProxyQueryCleanupTest(TestCase): def test_evaluated_proxy_count(self): """ Test that generating the query string doesn't alter the query's state in irreversible ways. Refs #18248. """ ProxyCategory.objects.create() qs = ProxyCategory.objects.all() self.assertEqual(qs.count(), 1) str(qs.query) self.assertEqual(qs.count(), 1) class WhereNodeTest(TestCase): class DummyNode(object): def as_sql(self, qn, connection): return 'dummy', [] def test_empty_full_handling_conjunction(self): qn = connection.ops.quote_name w = WhereNode(children=[EverythingNode()]) self.assertEqual(w.as_sql(qn, connection), ('', [])) w.negate() self.assertRaises(EmptyResultSet, w.as_sql, qn, connection) w = WhereNode(children=[NothingNode()]) self.assertRaises(EmptyResultSet, w.as_sql, qn, connection) w.negate() self.assertEqual(w.as_sql(qn, connection), ('', [])) w = WhereNode(children=[EverythingNode(), EverythingNode()]) self.assertEqual(w.as_sql(qn, connection), ('', [])) w.negate() self.assertRaises(EmptyResultSet, w.as_sql, qn, connection) w = WhereNode(children=[EverythingNode(), self.DummyNode()]) self.assertEqual(w.as_sql(qn, connection), ('dummy', [])) w = WhereNode(children=[self.DummyNode(), self.DummyNode()]) self.assertEqual(w.as_sql(qn, connection), ('(dummy AND dummy)', [])) w.negate() self.assertEqual(w.as_sql(qn, connection), ('NOT (dummy AND dummy)', [])) w = WhereNode(children=[NothingNode(), self.DummyNode()]) self.assertRaises(EmptyResultSet, w.as_sql, qn, connection) 
w.negate() self.assertEqual(w.as_sql(qn, connection), ('', [])) def test_empty_full_handling_disjunction(self): qn = connection.ops.quote_name w = WhereNode(children=[EverythingNode()], connector='OR') self.assertEqual(w.as_sql(qn, connection), ('', [])) w.negate() self.assertRaises(EmptyResultSet, w.as_sql, qn, connection) w = WhereNode(children=[NothingNode()], connector='OR') self.assertRaises(EmptyResultSet, w.as_sql, qn, connection) w.negate() self.assertEqual(w.as_sql(qn, connection), ('', [])) w = WhereNode(children=[EverythingNode(), EverythingNode()], connector='OR') self.assertEqual(w.as_sql(qn, connection), ('', [])) w.negate() self.assertRaises(EmptyResultSet, w.as_sql, qn, connection) w = WhereNode(children=[EverythingNode(), self.DummyNode()], connector='OR') self.assertEqual(w.as_sql(qn, connection), ('', [])) w.negate() self.assertRaises(EmptyResultSet, w.as_sql, qn, connection) w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR') self.assertEqual(w.as_sql(qn, connection), ('(dummy OR dummy)', [])) w.negate() self.assertEqual(w.as_sql(qn, connection), ('NOT (dummy OR dummy)', [])) w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR') self.assertEqual(w.as_sql(qn, connection), ('dummy', [])) w.negate() self.assertEqual(w.as_sql(qn, connection), ('NOT (dummy)', [])) def test_empty_nodes(self): qn = connection.ops.quote_name empty_w = WhereNode() w = WhereNode(children=[empty_w, empty_w]) self.assertEqual(w.as_sql(qn, connection), (None, [])) w.negate() self.assertEqual(w.as_sql(qn, connection), (None, [])) w.connector = 'OR' self.assertEqual(w.as_sql(qn, connection), (None, [])) w.negate() self.assertEqual(w.as_sql(qn, connection), (None, [])) w = WhereNode(children=[empty_w, NothingNode()], connector='OR') self.assertRaises(EmptyResultSet, w.as_sql, qn, connection) class NullJoinPromotionOrTest(TestCase): def setUp(self): d = ModelD.objects.create(name='foo') ModelA.objects.create(name='bar', d=d) def 
test_ticket_17886(self): # The first Q-object is generating the match, the rest of the filters # should not remove the match even if they do not match anything. The # problem here was that b__name generates a LOUTER JOIN, then # b__c__name generates join to c, which the ORM tried to promote but # failed as that join isn't nullable. q_obj = ( Q(d__name='foo')| Q(b__name='foo')| Q(b__c__name='foo') ) qset = ModelA.objects.filter(q_obj) self.assertEqual(len(qset), 1) # We generate one INNER JOIN to D. The join is direct and not nullable # so we can use INNER JOIN for it. However, we can NOT use INNER JOIN # for the b->c join, as a->b is nullable. self.assertEqual(str(qset.query).count('INNER JOIN'), 1) class EmptyStringPromotionTests(TestCase): def test_empty_string_promotion(self): qs = RelatedObject.objects.filter(single__name='') if connection.features.interprets_empty_strings_as_nulls: self.assertIn('LEFT OUTER JOIN', str(qs.query)) else: self.assertNotIn('LEFT OUTER JOIN', str(qs.query))
fegran/astrotools
refs/heads/master
select_var.py
1
from __future__ import division import matplotlib.pyplot as plt from astropy.io import ascii import numpy as np import warnings import sys import os if len(sys.argv) < 2: os.system('clear') print '\n' print 'How to run this script:\t python %s info_file.dat' %sys.argv[0] print 'Output of this script:\t info_file.sel' print '\n' sys.exit(True) ################################# # USER INPUT info_file = sys.argv[1] ################################# if '.sel' in info_file: os.system('clear') print 'Archivo info no valido: ".sel"' print 'Cambiar nombre o archivo' sys.exit(True) out_file = info_file[:info_file.find('.')]+'.sel' warnings.filterwarnings('ignore') if os.path.exists(out_file): os.system('rm %s' %out_file) def phaser(mjd, P): return (mjd / P) % 1 def plotter(i, ids, P1, P2, P3): mjd, mag, err = np.genfromtxt('%d.dat' %ids, usecols=(0,1,2), unpack=True) mean = np.average(mag, weights=1/err**2) amp = np.abs(np.min(mag)-np.max(mag)) plt.figure(figsize=(8,6)) for j, P in enumerate([P1, P2, P3, 1]): phase = phaser(mjd,P) plt.subplot(2,2,j+1) plt.suptitle(r'ID: $%d$' %ids, size=16) plt.gca().invert_yaxis() if P != 1.: plt.title(r'$P_{%d} = %3.3f$' %(j+1,P) ) plt.xlabel('Phase') plt.errorbar(phase,mag,err,fmt='ko',alpha=0.5) plt.errorbar(phase+1,mag,err,fmt='ko',alpha=0.5) plt.xlim(-0.1,2.1) else: mjd_0 = np.round(mjd[0] / 50) * 50 plt.errorbar(mjd-mjd_0,mag,err,fmt='ko',alpha=0.5) plt.xlabel('MJD - %d' %mjd_0) plt.xlim(np.min(mjd)-mjd_0-100, np.max(mjd)-mjd_0+100) plt.title(r'$\langle K_{\rm s} \rangle = %1.2f \quad \Delta K_{\rm s} = %1.2f$' %(mean, amp)) if j in (0,2): plt.ylabel(r'$K_{\rm s}$', size=16) plt.tight_layout() plt.draw() plt.show(block=False) return ids, P1, P2, P3 = np.genfromtxt(info_file, usecols=(0,12,13,14), unpack=True) final_election = np.zeros_like(ids).astype(int) i = 0 while True: if i >= len(ids): election = 'VVV' while election not in ['', 'p']: election = raw_input('Proceso completado!! 
Eleccion: (p) previous, (ENTER) quit: \n') if election == '': plt.close('all') break if election == 'p': i = i - 1 continue plotter(i, ids[i], P1[i], P2[i], P3[i]) election = 'VVV' while election not in ['s', '', 'p', 'q']: election = raw_input('Eleccion para esta curva %d/%d: (s) save, (ENTER) next, (p) previous, (q) quit: \n' %(i+1,len(ids))) if election == 's': final_election[i] = 1 plt.savefig('%d.pdf' %ids[i], format='pdf') i = i + 1 plt.close('all') if election == '': final_election[i] = 0 i = i + 1 plt.close('all') if election == 'p': plt.close('all') if i == 0: print 'No puedes retroceder de la posicion 1!!' continue i = i - 1 continue if election == 'q': print '-'*40 print '*'*40 print 'Ultimo ID con eleccion valida : %d' %ids[i-1] print '*'*40 print '-'*40 break if np.sum(final_election) != 0: data = ascii.read(info_file) data = data[final_election.astype(bool)] ascii.write(data, out_file, delimiter=' ', format='fixed_width', formats={'%7.0f','%8.8f','%8.8f','%5.2f','%6.4f','%5.3f','%8.2f','%8.2f','%7.4f','%7.4f','%7.4f','%7.4f','%11.6f','%11.6f','%11.6f','%11.6f','%5.0f','%5.0f'}) os.system('sed -i "" "s/ID/%sID/g" %s' %('#',out_file))
souravbadami/zulip
refs/heads/master
zerver/migrations/0035_realm_message_retention_period_days.py
40
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('zerver', '0034_userprofile_enable_online_push_notifications'), ] operations = [ migrations.AddField( model_name='realm', name='message_retention_days', field=models.IntegerField(null=True), ), ]
smarr/SOMns
refs/heads/release
docs/conf.py
2
from recommonmark.parser import CommonMarkParser source_parsers = { '.md': CommonMarkParser, } source_suffix = ['.md'] html_theme = 'sphinx_rtd_theme'
melon-li/openstack-dashboard
refs/heads/master
openstack_dashboard/dashboards/project/networks/subnets/workflows.py
19
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Workflows for creating and updating subnets of an existing network."""

import logging

from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon import forms
from horizon import workflows

from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks import workflows \
    as network_workflows


LOG = logging.getLogger(__name__)


class CreateSubnetInfoAction(network_workflows.CreateSubnetInfoAction):
    """Basic subnet information step.

    Unlike the parent action (used during network creation, where a subnet
    is optional), a subnet is always created here, so ``with_subnet`` is a
    hidden, always-true field.
    """
    with_subnet = forms.BooleanField(initial=True, required=False,
                                     widget=forms.HiddenInput())
    msg = _('Specify "Network Address"')

    # NOTE: the previous no-op __init__ override (a pure pass-through to
    # super) was removed; the inherited constructor is used directly.

    class Meta(object):
        name = _("Subnet")
        help_text = _('Create a subnet associated with the network. '
                      'Advanced configuration is available by clicking on the '
                      '"Subnet Details" tab.')

    def clean(self):
        """Validate the form data.

        Deliberately calls ``workflows.Action.clean`` (skipping the parent
        class's clean) and runs the subnet consistency checks directly.
        """
        cleaned_data = workflows.Action.clean(self)
        self._check_subnet_data(cleaned_data)
        return cleaned_data


class CreateSubnetInfo(network_workflows.CreateSubnetInfo):
    action_class = CreateSubnetInfoAction
    depends_on = ("network_id",)


class CreateSubnet(network_workflows.CreateNetwork):
    """Workflow that adds a subnet to an existing network."""
    slug = "create_subnet"
    name = _("Create Subnet")
    finalize_button_name = _("Create")
    success_message = _('Created subnet "%s".')
    failure_message = _('Unable to create subnet "%s".')
    default_steps = (CreateSubnetInfo,
                     network_workflows.CreateSubnetDetail)

    def format_status_message(self, message):
        # Prefer the human-readable subnet name; fall back to the ID.
        name = self.context.get('subnet_name') or self.context.get('subnet_id')
        return message % name

    def get_success_url(self):
        return reverse("horizon:project:networks:detail",
                       args=(self.context.get('network_id'),))

    def get_failure_url(self):
        return reverse("horizon:project:networks:detail",
                       args=(self.context.get('network_id'),))

    def handle(self, request, data):
        # _create_subnet() returns the subnet on success or a falsy value
        # on failure; the workflow engine expects a plain bool.
        subnet = self._create_subnet(request, data)
        return bool(subnet)


class UpdateSubnetInfoAction(CreateSubnetInfoAction):
    """Basic information step for editing a subnet.

    Address-related attributes cannot be changed after creation, so those
    fields are rendered hidden or read-only.
    """
    address_source = forms.ChoiceField(widget=forms.HiddenInput(),
                                       required=False)
    subnetpool = forms.ChoiceField(widget=forms.HiddenInput(),
                                   required=False)
    prefixlen = forms.ChoiceField(widget=forms.HiddenInput(),
                                  required=False)
    cidr = forms.IPField(label=_("Network Address"),
                         required=False,
                         initial="",
                         widget=forms.TextInput(
                             attrs={'readonly': 'readonly'}),
                         help_text=_("Network address in CIDR format "
                                     "(e.g. 192.168.0.0/24)"),
                         version=forms.IPv4 | forms.IPv6,
                         mask=True)
    # NOTE(amotoki): When 'disabled' attribute is set for the ChoiceField
    # and ValidationError is raised for POST request, the initial value of
    # the ip_version ChoiceField is not set in the re-displayed form
    # As a result, 'IPv4' is displayed even when IPv6 is used if
    # ValidationError is detected. In addition 'required=True' check complains
    # when re-POST since the value of the ChoiceField is not set.
    # Thus now I use HiddenInput for the ip_version ChoiceField as a work
    # around.
    ip_version = forms.ChoiceField(choices=[(4, 'IPv4'), (6, 'IPv6')],
                                   widget=forms.HiddenInput(),
                                   label=_("IP Version"))

    class Meta(object):
        name = _("Subnet")
        help_text = _('Update a subnet associated with the network. '
                      'Advanced configuration are available at '
                      '"Subnet Details" tab.')

    def clean(self):
        """Validate the form data for an update (``is_create=False``)."""
        cleaned_data = workflows.Action.clean(self)
        self._check_subnet_data(cleaned_data, is_create=False)
        return cleaned_data


class UpdateSubnetInfo(CreateSubnetInfo):
    action_class = UpdateSubnetInfoAction
    depends_on = ("network_id", "subnet_id")


class UpdateSubnetDetailAction(network_workflows.CreateSubnetDetailAction):
    def __init__(self, request, context, *args, **kwargs):
        super(UpdateSubnetDetailAction, self).__init__(request, context,
                                                       *args, **kwargs)
        # TODO(amotoki): Due to Neutron bug 1362966, we cannot pass "None"
        # to Neutron. It means we cannot set IPv6 two modes to
        # "No option selected".
        # Until bug 1362966 is fixed, we disable this field.
        # if context['ip_version'] != 6:
        #     self.fields['ipv6_modes'].widget = forms.HiddenInput()
        #     self.fields['ipv6_modes'].required = False
        self.fields['ipv6_modes'].widget = forms.HiddenInput()
        self.fields['ipv6_modes'].required = False

    class Meta(object):
        name = _("Subnet Details")
        help_text = _('Specify additional attributes for the subnet.')


class UpdateSubnetDetail(network_workflows.CreateSubnetDetail):
    action_class = UpdateSubnetDetailAction


class UpdateSubnet(network_workflows.CreateNetwork):
    """Workflow that edits an existing subnet."""
    slug = "update_subnet"
    name = _("Edit Subnet")
    finalize_button_name = _("Save")
    success_message = _('Updated subnet "%s".')
    failure_message = _('Unable to update subnet "%s".')
    success_url = "horizon:project:networks:detail"
    failure_url = "horizon:project:networks:detail"
    default_steps = (UpdateSubnetInfo, UpdateSubnetDetail)

    def format_status_message(self, message):
        # Prefer the human-readable subnet name; fall back to the ID.
        name = self.context.get('subnet_name') or self.context.get('subnet_id')
        return message % name

    def get_success_url(self):
        return reverse(self.success_url,
                       args=(self.context.get('network_id'),))

    def _update_subnet(self, request, data):
        """Send the subnet update to Neutron.

        Returns the updated subnet on success; on failure, reports the
        error, redirects the user to the failure URL and returns False.
        """
        network_id = self.context.get('network_id')
        try:
            subnet_id = self.context.get('subnet_id')
            params = {}
            params['name'] = data['subnet_name']
            if data['no_gateway']:
                params['gateway_ip'] = None
            elif data['gateway_ip']:
                params['gateway_ip'] = data['gateway_ip']

            # We should send gateway_ip only when it is changed, because
            # updating gateway_ip is prohibited when the ip is used.
            # See bug 1227268.
            # NOTE(review): this assumes one of the branches above always
            # set params['gateway_ip'] (i.e. the form guarantees either
            # no_gateway or a gateway_ip); otherwise this raises KeyError.
            # Verify against the form definition.
            subnet = api.neutron.subnet_get(request, subnet_id)
            if params['gateway_ip'] == subnet.gateway_ip:
                del params['gateway_ip']

            self._setup_subnet_parameters(params, data, is_create=False)

            subnet = api.neutron.subnet_update(request, subnet_id, **params)
            msg = _('Subnet "%s" was successfully updated.') % data['cidr']
            LOG.debug(msg)
            return subnet
        except Exception as e:
            # Broad catch is intentional here: any Neutron/API failure is
            # funneled into Horizon's standard error handling + redirect.
            msg = (_('Failed to update subnet "%(sub)s": '
                     ' %(reason)s') % {"sub": data['cidr'], "reason": e})
            redirect = reverse(self.failure_url, args=(network_id,))
            exceptions.handle(request, msg, redirect=redirect)
            return False

    def handle(self, request, data):
        subnet = self._update_subnet(request, data)
        return bool(subnet)
ettm2012/MissionPlanner
refs/heads/master
Lib/site-packages/numpy/core/tests/test_shape_base.py
54
# NOTE(review): legacy Python 2 test module (uses the ``3L`` long literal and
# relies on ``map`` returning a list); it cannot run unmodified on Python 3.
from numpy.testing import *
from numpy.core import array, atleast_1d, atleast_2d, atleast_3d, vstack, \
    hstack, newaxis


class TestAtleast1d(TestCase):
    # atleast_1d: scalars/0-d arrays become 1-d; higher-rank input is returned unchanged.
    def test_0D_array(self):
        a = array(1); b = array(2);
        res=map(atleast_1d,[a,b])
        desired = [array([1]),array([2])]
        assert_array_equal(res,desired)

    def test_1D_array(self):
        a = array([1,2]); b = array([2,3]);
        res=map(atleast_1d,[a,b])
        desired = [array([1,2]),array([2,3])]
        assert_array_equal(res,desired)

    def test_2D_array(self):
        a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
        res=map(atleast_1d,[a,b])
        desired = [a,b]
        assert_array_equal(res,desired)

    def test_3D_array(self):
        a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
        a = array([a,a]);b = array([b,b]);
        res=map(atleast_1d,[a,b])
        desired = [a,b]
        assert_array_equal(res,desired)

    def test_r1array(self):
        """ Test to make sure equivalent Travis O's r1array function
        """
        # Scalars of any numeric type are promoted to shape (1,).
        assert(atleast_1d(3).shape == (1,))
        assert(atleast_1d(3j).shape == (1,))
        assert(atleast_1d(3L).shape == (1,))
        assert(atleast_1d(3.0).shape == (1,))
        assert(atleast_1d([[2,3],[4,5]]).shape == (2,2))


class TestAtleast2d(TestCase):
    # atleast_2d: inputs with rank < 2 are promoted to 2-d; rank >= 2 passes through.
    def test_0D_array(self):
        a = array(1); b = array(2);
        res=map(atleast_2d,[a,b])
        desired = [array([[1]]),array([[2]])]
        assert_array_equal(res,desired)

    def test_1D_array(self):
        a = array([1,2]); b = array([2,3]);
        res=map(atleast_2d,[a,b])
        desired = [array([[1,2]]),array([[2,3]])]
        assert_array_equal(res,desired)

    def test_2D_array(self):
        a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
        res=map(atleast_2d,[a,b])
        desired = [a,b]
        assert_array_equal(res,desired)

    def test_3D_array(self):
        a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
        a = array([a,a]);b = array([b,b]);
        res=map(atleast_2d,[a,b])
        desired = [a,b]
        assert_array_equal(res,desired)

    def test_r2array(self):
        """ Test to make sure equivalent Travis O's r2array function
        """
        assert(atleast_2d(3).shape == (1,1))
        assert(atleast_2d([3j,1]).shape == (1,2))
        assert(atleast_2d([[[3,1],[4,5]],[[3,5],[1,2]]]).shape == (2,2,2))


class TestAtleast3d(TestCase):
    # atleast_3d: rank < 3 is promoted to 3-d; note 2-d input gains a trailing axis.
    def test_0D_array(self):
        a = array(1); b = array(2);
        res=map(atleast_3d,[a,b])
        desired = [array([[[1]]]),array([[[2]]])]
        assert_array_equal(res,desired)

    def test_1D_array(self):
        a = array([1,2]); b = array([2,3]);
        res=map(atleast_3d,[a,b])
        desired = [array([[[1],[2]]]),array([[[2],[3]]])]
        assert_array_equal(res,desired)

    def test_2D_array(self):
        a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
        res=map(atleast_3d,[a,b])
        # 2-d input is expanded with a new trailing axis, not a leading one.
        desired = [a[:,:,newaxis],b[:,:,newaxis]]
        assert_array_equal(res,desired)

    def test_3D_array(self):
        a = array([[1,2],[1,2]]); b = array([[2,3],[2,3]]);
        a = array([a,a]);b = array([b,b]);
        res=map(atleast_3d,[a,b])
        desired = [a,b]
        assert_array_equal(res,desired)


class TestHstack(TestCase):
    # hstack: concatenation along the second axis (first axis for 1-d/0-d input).
    def test_0D_array(self):
        a = array(1); b = array(2);
        res=hstack([a,b])
        desired = array([1,2])
        assert_array_equal(res,desired)

    def test_1D_array(self):
        a = array([1]); b = array([2]);
        res=hstack([a,b])
        desired = array([1,2])
        assert_array_equal(res,desired)

    def test_2D_array(self):
        a = array([[1],[2]]); b = array([[1],[2]]);
        res=hstack([a,b])
        desired = array([[1,1],[2,2]])
        assert_array_equal(res,desired)


class TestVstack(TestCase):
    # vstack: concatenation along the first axis; 1-d inputs become rows.
    def test_0D_array(self):
        a = array(1); b = array(2);
        res=vstack([a,b])
        desired = array([[1],[2]])
        assert_array_equal(res,desired)

    def test_1D_array(self):
        a = array([1]); b = array([2]);
        res=vstack([a,b])
        desired = array([[1],[2]])
        assert_array_equal(res,desired)

    def test_2D_array(self):
        a = array([[1],[2]]); b = array([[1],[2]]);
        res=vstack([a,b])
        desired = array([[1],[2],[1],[2]])
        assert_array_equal(res,desired)

    def test_2D_array2(self):
        a = array([1,2]); b = array([1,2]);
        res=vstack([a,b])
        desired = array([[1,2],[1,2]])
        assert_array_equal(res,desired)


if __name__ == "__main__":
    run_module_suite()
wdzhou/mantid
refs/heads/master
Framework/PythonInterface/test/python/plugins/algorithms/CalculateSampleTransmissionTest.py
3
from __future__ import (absolute_import, division, print_function)

import unittest
import numpy as np
from mantid.simpleapi import CalculateSampleTransmission


class CalculateSampleTransmissionTest(unittest.TestCase):
    """Tests for the CalculateSampleTransmission algorithm.

    The algorithm returns a workspace with two histograms: index 0 is the
    transmission fraction per wavelength bin, index 1 is the (wavelength
    independent) scattering fraction.
    """

    def test_sample_transmission_calculation(self):
        """
        Test a basic transmission calculation using number density.
        """
        # Using water sample
        formula = "H2-O"
        density = 0.100272
        thickness = 0.1

        ws = CalculateSampleTransmission(WavelengthRange='5.0,0.2,7.0',
                                         ChemicalFormula=formula,
                                         Density=density,
                                         Thickness=thickness,
                                         DensityType='Number Density')
        # One spectrum for transmission, one for scattering.
        self.assertEqual(ws.getNumberHistograms(), 2)

        # Reference values; only the first ten bins are checked.
        expected_trans = [0.56619985, 0.56605978, 0.56591975, 0.56577975,
                          0.56563978, 0.56549985, 0.56535996, 0.56522009,
                          0.56508027, 0.56494047]
        expected_scatt = [0.43017862, 0.43017862, 0.43017862, 0.43017862,
                          0.43017862, 0.43017862, 0.43017862, 0.43017862,
                          0.43017862, 0.43017862]
        trans = ws.readY(0)
        scatt = ws.readY(1)

        np.testing.assert_array_almost_equal(trans, expected_trans, decimal=4)
        np.testing.assert_array_almost_equal(scatt, expected_scatt, decimal=4)

    def test_mass_density(self):
        """
        Tests a transmission calculation using mass density
        """
        formula = "H2-O"
        density = 1
        thickness = 0.1

        ws = CalculateSampleTransmission(WavelengthRange='5.0,0.2,7.0',
                                         ChemicalFormula=formula,
                                         Density=density,
                                         Thickness=thickness,
                                         DensityType='Mass Density')
        self.assertEqual(ws.getNumberHistograms(), 2)

        # Transmission matches the number-density case; scattering differs slightly.
        expected_trans = [0.56619985, 0.56605978, 0.56591975, 0.56577975,
                          0.56563978, 0.56549985, 0.56535996, 0.56522009,
                          0.56508027, 0.56494047]
        expected_scatt = [0.43021665, 0.43021665, 0.43021665, 0.43021665,
                          0.43021665, 0.43021665, 0.43021665, 0.43021665,
                          0.43021665, 0.43021665]
        trans = ws.readY(0)
        scatt = ws.readY(1)

        np.testing.assert_array_almost_equal(trans, expected_trans, decimal=4)
        np.testing.assert_array_almost_equal(scatt, expected_scatt, decimal=4)

    def test_validate_density(self):
        """
        Tests validation on Density property.
        """
        # Using water sample
        formula = "H2-O"
        density = -0.1
        thickness = 0.1

        # NOTE(review): this passes NumberDensity=..., unlike the tests above
        # which use Density=/DensityType= — presumably an older property name;
        # confirm the RuntimeError comes from range validation and not from an
        # unknown-property error.
        self.assertRaises(RuntimeError, CalculateSampleTransmission,
                          WavelengthRange='5.0,0.2,7.0',
                          ChemicalFormula=formula,
                          NumberDensity=density,
                          Thickness=thickness)

    def test_validate_thickness(self):
        """
        Tests validation on Thickness property.
        """
        # Using water sample
        formula = "H2-O"
        density = 0.1
        thickness = -0.1

        # NOTE(review): same NumberDensity naming caveat as test_validate_density.
        self.assertRaises(RuntimeError, CalculateSampleTransmission,
                          WavelengthRange='5.0,0.2,7.0',
                          ChemicalFormula=formula,
                          NumberDensity=density,
                          Thickness=thickness)


if __name__ == "__main__":
    unittest.main()
maartenq/ansible
refs/heads/devel
test/runner/lib/sanity/__init__.py
18
"""Execute Ansible sanity tests."""
from __future__ import absolute_import, print_function

import abc
import glob
import json
import os
import re
import sys

from lib.util import (
    ApplicationError,
    SubprocessError,
    display,
    run_command,
    import_plugins,
    load_plugins,
    parse_to_dict,
    ABC,
    is_binary_file,
)

from lib.ansible_util import (
    ansible_environment,
)

from lib.target import (
    walk_external_targets,
    walk_internal_targets,
    walk_sanity_targets,
)

from lib.executor import (
    get_changes_filter,
    AllTargetsSkipped,
    Delegate,
    install_command_requirements,
    SUPPORTED_PYTHON_VERSIONS,
)

from lib.config import (
    SanityConfig,
)

from lib.test import (
    TestSuccess,
    TestFailure,
    TestSkipped,
    TestMessage,
)

# Command name used when reporting results.
COMMAND = 'sanity'


def command_sanity(args):
    """Run the selected sanity tests against the selected targets.

    :type args: SanityConfig
    """
    changes = get_changes_filter(args)
    require = (args.require or []) + changes
    targets = SanityTargets(args.include, args.exclude, require)

    if not targets.include:
        raise AllTargetsSkipped()

    if args.delegate:
        raise Delegate(require=changes)

    install_command_requirements(args)

    tests = sanity_get_tests()

    if args.test:
        # Explicit test selection overrides the enabled/disabled defaults.
        tests = [t for t in tests if t.name in args.test]
    else:
        disabled = [t.name for t in tests if not t.enabled and not args.allow_disabled]
        tests = [t for t in tests if t.enabled or args.allow_disabled]

        if disabled:
            display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled)))

    if args.skip_test:
        tests = [t for t in tests if t.name not in args.skip_test]

    total = 0
    failed = []

    for test in tests:
        if args.list_tests:
            display.info(test.name)
            continue

        # Multi-version tests run once per supported python; others run once.
        if isinstance(test, SanityMultipleVersion):
            versions = SUPPORTED_PYTHON_VERSIONS
        else:
            versions = (None,)

        for version in versions:
            if args.python and version and version != args.python_version:
                continue

            display.info('Sanity check using %s%s' % (test.name, ' with Python %s' % version if version else ''))

            options = ''

            # Order matters: SanityCodeSmellTest must be checked before the
            # SanityFunc subclasses since it has a different test() signature.
            if isinstance(test, SanityCodeSmellTest):
                result = test.test(args, targets)
            elif isinstance(test, SanityMultipleVersion):
                result = test.test(args, targets, python_version=version)
                options = ' --python %s' % version
            elif isinstance(test, SanitySingleVersion):
                result = test.test(args, targets)
            else:
                raise Exception('Unsupported test type: %s' % type(test))

            result.write(args)

            total += 1

            if isinstance(result, SanityFailure):
                failed.append(result.test + options)

    if failed:
        message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
            len(failed), total, '\n'.join(failed))

        if args.failure_ok:
            display.error(message)
        else:
            raise ApplicationError(message)


def collect_code_smell_tests():
    """Discover executable code-smell scripts, honoring the skip list.

    :rtype: tuple[SanityCodeSmellTest]
    """
    with open('test/sanity/code-smell/skip.txt', 'r') as skip_fd:
        skip_tests = skip_fd.read().splitlines()

    paths = glob.glob('test/sanity/code-smell/*')
    paths = sorted(p for p in paths if os.access(p, os.X_OK) and os.path.isfile(p) and os.path.basename(p) not in skip_tests)

    tests = tuple(SanityCodeSmellTest(p) for p in paths)
    return tests


def sanity_get_tests():
    """Return the registered sanity tests (populated by sanity_init).

    :rtype: tuple[SanityFunc]
    """
    return SANITY_TESTS


class SanitySuccess(TestSuccess):
    """Sanity test success."""
    def __init__(self, test, python_version=None):
        """
        :type test: str
        :type python_version: str
        """
        super(SanitySuccess, self).__init__(COMMAND, test, python_version)


class SanitySkipped(TestSkipped):
    """Sanity test skipped."""
    def __init__(self, test, python_version=None):
        """
        :type test: str
        :type python_version: str
        """
        super(SanitySkipped, self).__init__(COMMAND, test, python_version)


class SanityFailure(TestFailure):
    """Sanity test failure."""
    def __init__(self, test, python_version=None, messages=None, summary=None):
        """
        :type test: str
        :type python_version: str
        :type messages: list[SanityMessage]
        :type summary: unicode
        """
        super(SanityFailure, self).__init__(COMMAND, test, python_version, messages, summary)


class SanityMessage(TestMessage):
    """Single sanity test message for one file."""
    pass


class SanityTargets(object):
    """Sanity test target information."""
    def __init__(self, include, exclude, require):
        """
        :type include: list[str]
        :type exclude: list[str]
        :type require: list[str]
        """
        # "all" means no explicit include filter was given.
        self.all = not include
        self.targets = tuple(sorted(walk_sanity_targets()))
        self.include = walk_internal_targets(self.targets, include, exclude, require)
        self.include_external, self.exclude_external = walk_external_targets(self.targets, include, exclude, require)


class SanityTest(ABC):
    """Sanity test base class."""
    __metaclass__ = abc.ABCMeta

    def __init__(self, name):
        self.name = name
        self.enabled = True


class SanityCodeSmellTest(SanityTest):
    """Sanity test script."""
    def __init__(self, path):
        name = os.path.splitext(os.path.basename(path))[0]
        config_path = os.path.splitext(path)[0] + '.json'

        super(SanityCodeSmellTest, self).__init__(name)

        self.path = path
        self.config_path = config_path if os.path.exists(config_path) else None
        self.config = None

        if self.config_path:
            with open(self.config_path, 'r') as config_fd:
                self.config = json.load(config_fd)

        if self.config:
            self.enabled = not self.config.get('disabled')

    def test(self, args, targets):
        """Run the script, feed it the selected paths on stdin, parse its output.

        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        if self.path.endswith('.py'):
            cmd = [args.python_executable, self.path]
        else:
            cmd = [self.path]

        env = ansible_environment(args, color=False)

        pattern = None
        data = None

        if self.config:
            output = self.config.get('output')
            extensions = self.config.get('extensions')
            prefixes = self.config.get('prefixes')
            files = self.config.get('files')
            always = self.config.get('always')
            text = self.config.get('text')

            if output == 'path-line-column-message':
                pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
            elif output == 'path-message':
                pattern = '^(?P<path>[^:]*): (?P<message>.*)$'
            else:
                # NOTE(review): this assigns an exception instance to `pattern`
                # instead of raising it; an unsupported output type only fails
                # later when `pattern` is passed to parse_to_dict — confirm
                # whether `raise` was intended here.
                pattern = ApplicationError('Unsupported output type: %s' % output)

            paths = sorted(i.path for i in targets.include)

            if always:
                paths = []

            # short-term work-around for paths being str instead of unicode on python 2.x
            if sys.version_info[0] == 2:
                paths = [p.decode('utf-8') for p in paths]

            if text is not None:
                if text:
                    paths = [p for p in paths if not is_binary_file(p)]
                else:
                    paths = [p for p in paths if is_binary_file(p)]

            if extensions:
                # bin/ scripts are treated as python files regardless of extension.
                paths = [p for p in paths if os.path.splitext(p)[1] in extensions or (p.startswith('bin/') and '.py' in extensions)]

            if prefixes:
                paths = [p for p in paths if any(p.startswith(pre) for pre in prefixes)]

            if files:
                paths = [p for p in paths if os.path.basename(p) in files]

            if not paths and not always:
                return SanitySkipped(self.name)

            data = '\n'.join(paths)

            if data:
                display.info(data, verbosity=4)

        try:
            stdout, stderr = run_command(args, cmd, data=data, env=env, capture=True)
            status = 0
        except SubprocessError as ex:
            stdout = ex.stdout
            stderr = ex.stderr
            status = ex.status

        # stdout with no stderr is the structured-findings case.
        if stdout and not stderr:
            if pattern:
                matches = [parse_to_dict(pattern, line) for line in stdout.splitlines()]

                messages = [SanityMessage(
                    message=m['message'],
                    path=m['path'],
                    line=int(m.get('line', 0)),
                    column=int(m.get('column', 0)),
                ) for m in matches]

                return SanityFailure(self.name, messages=messages)

        if stderr or status:
            summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
            return SanityFailure(self.name, summary=summary)

        return SanitySuccess(self.name)


class SanityFunc(SanityTest):
    """Base class for sanity test plugins."""
    def __init__(self):
        # Derive the test name from the class name: FooBarTest -> foo-bar.
        name = self.__class__.__name__
        name = re.sub(r'Test$', '', name)  # drop Test suffix
        name = re.sub(r'(.)([A-Z][a-z]+)', r'\1-\2', name).lower()  # use dashes instead of capitalization

        super(SanityFunc, self).__init__(name)


class SanitySingleVersion(SanityFunc):
    """Base class for sanity test plugins which should run on a single python version."""
    @abc.abstractmethod
    def test(self, args, targets):
        """
        :type args: SanityConfig
        :type targets: SanityTargets
        :rtype: TestResult
        """
        pass


class SanityMultipleVersion(SanityFunc):
    """Base class for sanity test plugins which should run on multiple python versions."""
    @abc.abstractmethod
    def test(self, args, targets, python_version):
        """
        :type args: SanityConfig
        :type targets: SanityTargets
        :type python_version: str
        :rtype: TestResult
        """
        pass


# Populated by sanity_init(); empty until then.
SANITY_TESTS = (
)


def sanity_init():
    """Initialize full sanity test list (includes code-smell scripts determined at runtime)."""
    import_plugins('sanity')
    sanity_plugins = {}  # type: dict[str, type]
    load_plugins(SanityFunc, sanity_plugins)
    sanity_tests = tuple([plugin() for plugin in sanity_plugins.values()])
    global SANITY_TESTS  # pylint: disable=locally-disabled, global-statement
    SANITY_TESTS = tuple(sorted(sanity_tests + collect_code_smell_tests(), key=lambda k: k.name))
egafford/sahara
refs/heads/master
sahara/plugins/spark/edp_engine.py
3
# Copyright (c) 2014 Mirantis Inc.
# Copyright (c) 2015 ISPRAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import six

from sahara import exceptions as ex
from sahara.i18n import _
from sahara.plugins import utils as plugin_utils
from sahara.service.edp.spark import engine as edp_engine


class EdpEngine(edp_engine.SparkJobEngine):
    """EDP job engine for the vanilla Spark plugin.

    Wires cluster-specific spark-submit parameters into the generic
    SparkJobEngine and enforces the minimum Spark version for EDP.
    """

    # Minimum Spark version supporting EDP jobs.
    edp_base_version = "1.3.1"

    def __init__(self, cluster):
        """:param cluster: cluster object the jobs will run on."""
        super(EdpEngine, self).__init__(cluster)
        self.master = plugin_utils.get_instance(cluster, "master")
        # Jobs are submitted as the current user (no dedicated spark user).
        self.plugin_params["spark-user"] = ""
        self.plugin_params["spark-submit"] = os.path.join(
            plugin_utils.
            get_config_value_or_default("Spark", "Spark home", self.cluster),
            "bin/spark-submit")
        self.plugin_params["deploy-mode"] = "client"
        port_str = six.text_type(
            plugin_utils.get_config_value_or_default(
                "Spark", "Master port", self.cluster))
        # %(host)s is substituted later with the master instance's address.
        self.plugin_params["master"] = ('spark://%(host)s:' + port_str)
        driver_cp = plugin_utils.get_config_value_or_default(
            "Spark", "Executor extra classpath", self.cluster)
        self.plugin_params["driver-class-path"] = driver_cp

    @staticmethod
    def _version_tuple(version):
        """Parse a dotted version string into a tuple of ints for ordering.

        Non-numeric segments (e.g. "rc1") are ignored so comparison stays
        best-effort for unusual version strings.
        """
        return tuple(int(part) for part in version.split('.')
                     if part.isdigit())

    @staticmethod
    def edp_supported(version):
        # Bug fix: the previous implementation compared version strings
        # lexicographically, so e.g. "1.10.0" was wrongly considered older
        # than "1.3.1". Compare numeric component tuples instead.
        return (EdpEngine._version_tuple(version) >=
                EdpEngine._version_tuple(EdpEngine.edp_base_version))

    @staticmethod
    def job_type_supported(job_type):
        return job_type in edp_engine.SparkJobEngine.get_supported_job_types()

    def validate_job_execution(self, cluster, job, data):
        """Reject EDP jobs on clusters running a Spark older than edp_base_version."""
        if not self.edp_supported(cluster.hadoop_version):
            raise ex.InvalidDataException(
                _('Spark {base} or higher required to run {type} jobs').format(
                    base=EdpEngine.edp_base_version, type=job.type))

        super(EdpEngine, self).validate_job_execution(cluster, job, data)
syhost/android_kernel_pantech_ef51l
refs/heads/jb-org
scripts/gcc-wrapper.py
501
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of Code Aurora nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Invoke gcc, looking for warnings, and causing a failure if there are # non-whitelisted warnings. import errno import re import os import sys import subprocess # Note that gcc uses unicode, which may depend on the locale. TODO: # force LANG to be set to en_US.UTF-8 to get consistent warnings. 
allowed_warnings = set([ "alignment.c:327", "mmu.c:602", "return_address.c:62", ]) # Capture the name of the object file, can find it. ofile = None warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''') def interpret_warning(line): """Decode the message from gcc. The messages we care about have a filename, and a warning""" line = line.rstrip('\n') m = warning_re.match(line) if m and m.group(2) not in allowed_warnings: print "error, forbidden warning:", m.group(2) # If there is a warning, remove any object if it exists. if ofile: try: os.remove(ofile) except OSError: pass sys.exit(1) def run_gcc(): args = sys.argv[1:] # Look for -o try: i = args.index('-o') global ofile ofile = args[i+1] except (ValueError, IndexError): pass compiler = sys.argv[0] try: proc = subprocess.Popen(args, stderr=subprocess.PIPE) for line in proc.stderr: print line, interpret_warning(line) result = proc.wait() except OSError as e: result = e.errno if result == errno.ENOENT: print args[0] + ':',e.strerror print 'Is your PATH set correctly?' else: print ' '.join(args), str(e) return result if __name__ == '__main__': status = run_gcc() sys.exit(status)
CMTaylor/robotframework-autoitlibrary
refs/heads/master
src/AutoItLibrary/Counter.py
3
""" Package: AutoItLibrary Module: Counter Purpose: Defines a Counter class from which other classes can inherit the ability to initialize a counter at class instantiation and get the next number in the sequence with a _next method. Copyright (c) 2009 Texas Instruments Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ __author__ = "Martin Taylor <cmtaylor@ti.com>" __version__ = "1.0" class Counter: def __init__(self): self._counter = 0 def _next(self): self._counter += 1 return self._counter # # -------------------------------- End of file --------------------------------
patilsangram/erpnext
refs/heads/develop
erpnext/hooks.py
1
from __future__ import unicode_literals from frappe import _ app_name = "erpnext" app_title = "ERPNext" app_publisher = "Frappe Technologies Pvt. Ltd." app_description = """ERP made simple""" app_icon = "fa fa-th" app_color = "#e74c3c" app_email = "info@erpnext.com" app_license = "GNU General Public License (v3)" source_link = "https://github.com/frappe/erpnext" develop_version = '12.x.x-develop' staging_version = '11.0.3-beta.15' error_report_email = "support@erpnext.com" docs_app = "foundation" app_include_js = "assets/js/erpnext.min.js" app_include_css = "assets/css/erpnext.css" web_include_js = "assets/js/erpnext-web.min.js" web_include_css = "assets/css/erpnext-web.css" doctype_js = { "Communication": "public/js/communication.js", "Event": "public/js/event.js" } welcome_email = "erpnext.setup.utils.welcome_email" # setup wizard setup_wizard_requires = "assets/erpnext/js/setup_wizard.js" setup_wizard_stages = "erpnext.setup.setup_wizard.setup_wizard.get_setup_stages" setup_wizard_test = "erpnext.setup.setup_wizard.test_setup_wizard.run_setup_wizard_test" before_install = "erpnext.setup.install.check_setup_wizard_not_completed" after_install = "erpnext.setup.install.after_install" boot_session = "erpnext.startup.boot.boot_session" notification_config = "erpnext.startup.notifications.get_notification_config" get_help_messages = "erpnext.utilities.activation.get_help_messages" get_user_progress_slides = "erpnext.utilities.user_progress.get_user_progress_slides" update_and_get_user_progress = "erpnext.utilities.user_progress_utils.update_default_domain_actions_and_get_state" on_session_creation = "erpnext.shopping_cart.utils.set_cart_count" on_logout = "erpnext.shopping_cart.utils.clear_cart_count" treeviews = ['Account', 'Cost Center', 'Warehouse', 'Item Group', 'Customer Group', 'Sales Person', 'Territory', 'Assessment Group'] # website update_website_context = "erpnext.shopping_cart.utils.update_website_context" my_account_context = 
"erpnext.shopping_cart.utils.update_my_account_context" email_append_to = ["Job Applicant", "Lead", "Opportunity", "Issue"] calendars = ["Task", "Work Order", "Leave Application", "Sales Order", "Holiday List", "Course Schedule"] domains = { 'Agriculture': 'erpnext.domains.agriculture', 'Distribution': 'erpnext.domains.distribution', 'Education': 'erpnext.domains.education', 'Healthcare': 'erpnext.domains.healthcare', 'Hospitality': 'erpnext.domains.hospitality', 'Manufacturing': 'erpnext.domains.manufacturing', 'Non Profit': 'erpnext.domains.non_profit', 'Retail': 'erpnext.domains.retail', 'Services': 'erpnext.domains.services', } website_generators = ["Item Group", "Item", "BOM", "Sales Partner", "Job Opening", "Student Admission"] website_context = { "favicon": "/assets/erpnext/images/favicon.png", "splash_image": "/assets/erpnext/images/erp-icon.svg" } website_route_rules = [ {"from_route": "/orders", "to_route": "Sales Order"}, {"from_route": "/orders/<path:name>", "to_route": "order", "defaults": { "doctype": "Sales Order", "parents": [{"label": _("Orders"), "route": "orders"}] } }, {"from_route": "/invoices", "to_route": "Sales Invoice"}, {"from_route": "/invoices/<path:name>", "to_route": "order", "defaults": { "doctype": "Sales Invoice", "parents": [{"label": _("Invoices"), "route": "invoices"}] } }, {"from_route": "/supplier-quotations", "to_route": "Supplier Quotation"}, {"from_route": "/supplier-quotations/<path:name>", "to_route": "order", "defaults": { "doctype": "Supplier Quotation", "parents": [{"label": _("Supplier Quotation"), "route": "supplier-quotations"}] } }, {"from_route": "/quotations", "to_route": "Quotation"}, {"from_route": "/quotations/<path:name>", "to_route": "order", "defaults": { "doctype": "Quotation", "parents": [{"label": _("Quotations"), "route": "quotations"}] } }, {"from_route": "/shipments", "to_route": "Delivery Note"}, {"from_route": "/shipments/<path:name>", "to_route": "order", "defaults": { "doctype": "Delivery Note", 
"parents": [{"label": _("Shipments"), "route": "shipments"}] } }, {"from_route": "/rfq", "to_route": "Request for Quotation"}, {"from_route": "/rfq/<path:name>", "to_route": "rfq", "defaults": { "doctype": "Request for Quotation", "parents": [{"label": _("Request for Quotation"), "route": "rfq"}] } }, {"from_route": "/addresses", "to_route": "Address"}, {"from_route": "/addresses/<path:name>", "to_route": "addresses", "defaults": { "doctype": "Address", "parents": [{"label": _("Addresses"), "route": "addresses"}] } }, {"from_route": "/jobs", "to_route": "Job Opening"}, {"from_route": "/admissions", "to_route": "Student Admission"}, {"from_route": "/boms", "to_route": "BOM"}, {"from_route": "/timesheets", "to_route": "Timesheet"}, ] standard_portal_menu_items = [ {"title": _("Personal Details"), "route": "/personal-details", "reference_doctype": "Patient", "role": "Patient"}, {"title": _("Projects"), "route": "/project", "reference_doctype": "Project"}, {"title": _("Request for Quotations"), "route": "/rfq", "reference_doctype": "Request for Quotation", "role": "Supplier"}, {"title": _("Supplier Quotation"), "route": "/supplier-quotations", "reference_doctype": "Supplier Quotation", "role": "Supplier"}, {"title": _("Quotations"), "route": "/quotations", "reference_doctype": "Quotation", "role":"Customer"}, {"title": _("Orders"), "route": "/orders", "reference_doctype": "Sales Order", "role":"Customer"}, {"title": _("Invoices"), "route": "/invoices", "reference_doctype": "Sales Invoice", "role":"Customer"}, {"title": _("Shipments"), "route": "/shipments", "reference_doctype": "Delivery Note", "role":"Customer"}, {"title": _("Issues"), "route": "/issues", "reference_doctype": "Issue", "role":"Customer"}, {"title": _("Addresses"), "route": "/addresses", "reference_doctype": "Address"}, {"title": _("Timesheets"), "route": "/timesheets", "reference_doctype": "Timesheet", "role":"Customer"}, {"title": _("Timesheets"), "route": "/timesheets", "reference_doctype": 
"Timesheet", "role":"Customer"}, {"title": _("Lab Test"), "route": "/lab-test", "reference_doctype": "Lab Test", "role":"Patient"}, {"title": _("Prescription"), "route": "/prescription", "reference_doctype": "Patient Encounter", "role":"Patient"}, {"title": _("Patient Appointment"), "route": "/patient-appointments", "reference_doctype": "Patient Appointment", "role":"Patient"}, {"title": _("Fees"), "route": "/fees", "reference_doctype": "Fees", "role":"Student"}, {"title": _("Newsletter"), "route": "/newsletters", "reference_doctype": "Newsletter"}, {"title": _("Admission"), "route": "/admissions", "reference_doctype": "Student Admission"}, {"title": _("Certification"), "route": "/certification", "reference_doctype": "Certification Application"}, ] default_roles = [ {'role': 'Customer', 'doctype':'Contact', 'email_field': 'email_id'}, {'role': 'Supplier', 'doctype':'Contact', 'email_field': 'email_id'}, {'role': 'Student', 'doctype':'Student', 'email_field': 'student_email_id'}, ] has_website_permission = { "Sales Order": "erpnext.controllers.website_list_for_contact.has_website_permission", "Quotation": "erpnext.controllers.website_list_for_contact.has_website_permission", "Sales Invoice": "erpnext.controllers.website_list_for_contact.has_website_permission", "Supplier Quotation": "erpnext.controllers.website_list_for_contact.has_website_permission", "Delivery Note": "erpnext.controllers.website_list_for_contact.has_website_permission", "Issue": "erpnext.support.doctype.issue.issue.has_website_permission", "Timesheet": "erpnext.controllers.website_list_for_contact.has_website_permission", "Lab Test": "erpnext.healthcare.web_form.lab_test.lab_test.has_website_permission", "Patient Encounter": "erpnext.healthcare.web_form.prescription.prescription.has_website_permission", "Patient Appointment": "erpnext.healthcare.web_form.patient_appointments.patient_appointments.has_website_permission", "Patient": 
"erpnext.healthcare.web_form.personal_details.personal_details.has_website_permission" } dump_report_map = "erpnext.startup.report_data_map.data_map" before_tests = "erpnext.setup.utils.before_tests" standard_queries = { "Customer": "erpnext.selling.doctype.customer.customer.get_customer_list" } doc_events = { "Stock Entry": { "on_submit": "erpnext.stock.doctype.material_request.material_request.update_completed_and_requested_qty", "on_cancel": "erpnext.stock.doctype.material_request.material_request.update_completed_and_requested_qty" }, "User": { "after_insert": "frappe.contacts.doctype.contact.contact.update_contact", "validate": "erpnext.hr.doctype.employee.employee.validate_employee_role", "on_update": ["erpnext.hr.doctype.employee.employee.update_user_permissions", "erpnext.portal.utils.set_default_role"] }, ("Sales Taxes and Charges Template", 'Price List'): { "on_update": "erpnext.shopping_cart.doctype.shopping_cart_settings.shopping_cart_settings.validate_cart_settings" }, "Website Settings": { "validate": "erpnext.portal.doctype.products_settings.products_settings.home_page_is_products" }, "Sales Invoice": { "on_submit": "erpnext.regional.france.utils.create_transaction_log", "on_trash": "erpnext.regional.check_deletion_permission" }, "Payment Entry": { "on_submit": ["erpnext.regional.france.utils.create_transaction_log", "erpnext.accounts.doctype.payment_request.payment_request.make_status_as_paid"], "on_trash": "erpnext.regional.check_deletion_permission" }, 'Address': { 'validate': 'erpnext.regional.india.utils.validate_gstin_for_india' }, ('Sales Invoice', 'Purchase Invoice', 'Delivery Note'): { 'validate': 'erpnext.regional.india.utils.set_place_of_supply' }, "Contact":{ "on_trash": "erpnext.support.doctype.issue.issue.update_issue" } } scheduler_events = { "hourly": [ 'erpnext.hr.doctype.daily_work_summary_group.daily_work_summary_group.trigger_emails', "erpnext.accounts.doctype.subscription.subscription.process_all", 
"erpnext.erpnext_integrations.doctype.amazon_mws_settings.amazon_mws_settings.schedule_get_order_details" ], "daily": [ "erpnext.stock.reorder_item.reorder_item", "erpnext.setup.doctype.email_digest.email_digest.send", "erpnext.support.doctype.issue.issue.auto_close_tickets", "erpnext.crm.doctype.opportunity.opportunity.auto_close_opportunity", "erpnext.controllers.accounts_controller.update_invoice_status", "erpnext.accounts.doctype.fiscal_year.fiscal_year.auto_create_fiscal_year", "erpnext.hr.doctype.employee.employee.send_birthday_reminders", "erpnext.projects.doctype.task.task.set_tasks_as_overdue", "erpnext.assets.doctype.asset.depreciation.post_depreciation_entries", "erpnext.hr.doctype.daily_work_summary_group.daily_work_summary_group.send_summary", "erpnext.stock.doctype.serial_no.serial_no.update_maintenance_status", "erpnext.buying.doctype.supplier_scorecard.supplier_scorecard.refresh_scorecards", "erpnext.setup.doctype.company.company.cache_companies_monthly_sales_history", "erpnext.assets.doctype.asset.asset.update_maintenance_status", "erpnext.assets.doctype.asset.asset.make_post_gl_entry", "erpnext.crm.doctype.contract.contract.update_status_for_contracts", "erpnext.projects.doctype.project.project.update_project_sales_billing" ], "daily_long": [ "erpnext.manufacturing.doctype.bom_update_tool.bom_update_tool.update_latest_price_in_all_boms" ], "monthly": [ "erpnext.accounts.deferred_revenue.convert_deferred_revenue_to_income", "erpnext.accounts.deferred_revenue.convert_deferred_expense_to_expense", "erpnext.hr.utils.allocate_earned_leaves" ] } email_brand_image = "assets/erpnext/images/erpnext-logo.jpg" default_mail_footer = """ <span> Sent via <a class="text-muted" href="https://erpnext.com?source=via_email_footer" target="_blank"> ERPNext </a> </span> """ get_translated_dict = { ("doctype", "Global Defaults"): "frappe.geo.country_info.get_translated_dict" } bot_parsers = [ 'erpnext.utilities.bot.FindItemBot', ] get_site_info = 
'erpnext.utilities.get_site_info' payment_gateway_enabled = "erpnext.accounts.utils.create_payment_gateway_account" regional_overrides = { 'France': { 'erpnext.tests.test_regional.test_method': 'erpnext.regional.france.utils.test_method' }, 'India': { 'erpnext.tests.test_regional.test_method': 'erpnext.regional.india.utils.test_method', 'erpnext.controllers.taxes_and_totals.get_itemised_tax_breakup_header': 'erpnext.regional.india.utils.get_itemised_tax_breakup_header', 'erpnext.controllers.taxes_and_totals.get_itemised_tax_breakup_data': 'erpnext.regional.india.utils.get_itemised_tax_breakup_data', 'erpnext.accounts.party.get_regional_address_details': 'erpnext.regional.india.utils.get_regional_address_details', 'erpnext.hr.utils.calculate_annual_eligible_hra_exemption': 'erpnext.regional.india.utils.calculate_annual_eligible_hra_exemption', 'erpnext.hr.utils.calculate_hra_exemption_for_period': 'erpnext.regional.india.utils.calculate_hra_exemption_for_period' }, 'United Arab Emirates': { 'erpnext.controllers.taxes_and_totals.update_itemised_tax_data': 'erpnext.regional.united_arab_emirates.utils.update_itemised_tax_data' }, 'Saudi Arabia': { 'erpnext.controllers.taxes_and_totals.update_itemised_tax_data': 'erpnext.regional.united_arab_emirates.utils.update_itemised_tax_data' } }
CSC301H-Fall2013/JuakStore
refs/heads/master
site-packages/tests/regressiontests/file_uploads/uploadhandler.py
151
""" Upload handlers to test the upload API. """ from django.core.files.uploadhandler import FileUploadHandler, StopUpload class QuotaUploadHandler(FileUploadHandler): """ This test upload handler terminates the connection if more than a quota (5MB) is uploaded. """ QUOTA = 5 * 2**20 # 5 MB def __init__(self, request=None): super(QuotaUploadHandler, self).__init__(request) self.total_upload = 0 def receive_data_chunk(self, raw_data, start): self.total_upload += len(raw_data) if self.total_upload >= self.QUOTA: raise StopUpload(connection_reset=True) return raw_data def file_complete(self, file_size): return None class CustomUploadError(Exception): pass class ErroringUploadHandler(FileUploadHandler): """A handler that raises an exception.""" def receive_data_chunk(self, raw_data, start): raise CustomUploadError("Oops!")
muffins-on-dope/bakery
refs/heads/master
bakery/auth/views.py
1
from django.core.urlresolvers import reverse
from django.views.generic import TemplateView, RedirectView
from django.contrib import auth

from bakery.auth.models import BakeryUser


class LoginErrorView(TemplateView):
    """Renders the login-error template."""

    template_name = 'error.html'

login_error = LoginErrorView.as_view()


class LogoutView(RedirectView):
    """Logs the current user out, then redirects."""

    permanent = False

    def get_redirect_url(self, **kwargs):
        # End the session before computing the redirect target.
        auth.logout(self.request)
        return reverse('home')

logout = LogoutView.as_view()


class ProfileView(TemplateView):
    """Profile page for the user named in the URL kwargs."""

    template_name = 'profiles/profile.html'

    def get_context_data(self, **kwargs):
        ctx = super(ProfileView, self).get_context_data(**kwargs)
        # NOTE(review): an unknown username raises BakeryUser.DoesNotExist
        # (a 500); get_object_or_404 would turn it into a 404 -- confirm
        # which is intended before changing.
        ctx['bakery_user'] = BakeryUser.objects.get(username=kwargs['username'])
        return ctx

profile = ProfileView.as_view()
Jgarcia-IAS/SAT
refs/heads/master
openerp/addons-extra/stock_foreign_trade/stock_foreign_trade.py
6
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2014 Interconsulting S.A e Innovatecsa SAS.
#    (<http://www.interconsulting.com.co>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv

# Extends purchase.order with an "additional costs" flag.  When set, the
# average cost is not recomputed when the corresponding stock move is
# generated (the same flag is mirrored on stock.move and stock.picking
# below so it can travel with the documents).
class purchase_order(osv.osv):
    _name = "purchase.order"
    _inherit = "purchase.order"

    _columns = {
        # NOTE(review): the typos in the help text ("recalculete",
        # "when generate") are kept as-is because help= is a user-visible
        # runtime string; fixing them would be a behavior change.
        'additional_cost': fields.boolean('Costos adicionales', help="If the active field is set to True, it will not recalculete the average cost when generate the stock move."),
    }
    _defaults = {
        'additional_cost': False,
    }

    # Dead code kept for reference: an override that copied the
    # additional_cost flag onto the picking created for the order.
    # NOTE(review): if this is ever revived, fix the super() call at the
    # end -- it passes `self` twice (explicit self on a bound super call)
    # and hard-codes context=None instead of forwarding context.
#    def action_picking_create(self, cr, uid, ids, context=None):
#        for order in self.browse(cr, uid, ids):
#            picking_vals = {
#                'picking_type_id': order.picking_type_id.id,
#                'partner_id': order.dest_address_id.id or order.partner_id.id,
#                'date': max([l.date_planned for l in order.order_line]),
#                'origin': order.name,
#                'additional_cost': order.additional_cost,
#                'move_id': order.id,
#            }
#            picking_id = self.pool.get('stock.picking').create(cr, uid, picking_vals, context=context)
#            self._create_stock_moves(cr, uid, order, order.order_line, picking_id, context=context)
#
#        return super(purchase_order, self).action_picking_create(self, cr, uid, ids,context=None)

# Mirror of the additional_cost flag on individual stock moves.
class stock_move(osv.osv):
    _name = "stock.move"
    _inherit = "stock.move"

    _columns = {
        'additional_cost': fields.boolean('Costos adicionales', help="If the active field is set to True, it will not recalculete the average cost when generate the stock move."),
    }
    _defaults = {
        'additional_cost': False,
    }

# Re-declares stock.landed.cost without adding any fields or behavior;
# placeholder kept so this module can extend landed costs later.
class stock_landed_cost(osv.osv):
    _name = 'stock.landed.cost'
    _description = 'Stock Landed Cost'
    _inherit = 'stock.landed.cost'

# Mirror of the additional_cost flag on pickings, plus a back-reference.
class stock_picking(osv.osv):
    _inherit = 'stock.picking'

    _columns = {
        # NOTE(review): despite the name and the 'stock.move' relation,
        # the dead code above fills move_id with the purchase order id --
        # confirm the intended target model before relying on it.
        'move_id': fields.many2one('stock.move','move_id', help='move_id'),
        'additional_cost': fields.boolean('Costos adicionales', help="If the active field is set to True, it will not recalculete the average cost when generate the stock move."),
    }
    _defaults = {
        'additional_cost': False,
    }
Novasoft-India/OperERP-AM-Motors
refs/heads/master
openerp/addons/account/res_config.py
7
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time import datetime from dateutil.relativedelta import relativedelta from operator import itemgetter from os.path import join as opj from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF from openerp.tools.translate import _ from openerp.osv import fields, osv from openerp import tools class account_config_settings(osv.osv_memory): _name = 'account.config.settings' _inherit = 'res.config.settings' _columns = { 'company_id': fields.many2one('res.company', 'Company', required=True), 'has_default_company': fields.boolean('Has default company', readonly=True), 'expects_chart_of_accounts': fields.related('company_id', 'expects_chart_of_accounts', type='boolean', string='This company has its own chart of accounts', help="""Check this box if this company is a legal entity."""), 'currency_id': fields.related('company_id', 'currency_id', type='many2one', relation='res.currency', required=True, string='Default company currency', help="Main currency of the company."), 'paypal_account': fields.related('company_id', 'paypal_account', 
type='char', size=128, string='Paypal account', help="Paypal account (email) for receiving online payments (credit card, etc.) If you set a paypal account, the customer will be able to pay your invoices or quotations with a button \"Pay with Paypal\" in automated emails or through the OpenERP portal."), 'company_footer': fields.related('company_id', 'rml_footer', type='text', readonly=True, string='Bank accounts footer preview', help="Bank accounts as printed in the footer of each printed document"), 'has_chart_of_accounts': fields.boolean('Company has a chart of accounts'), 'chart_template_id': fields.many2one('account.chart.template', 'Template', domain="[('visible','=', True)]"), 'code_digits': fields.integer('# of Digits', help="No. of digits to use for account code"), 'tax_calculation_rounding_method': fields.related('company_id', 'tax_calculation_rounding_method', type='selection', selection=[ ('round_per_line', 'Round per line'), ('round_globally', 'Round globally'), ], string='Tax calculation rounding method', help="If you select 'Round per line' : for each tax, the tax amount will first be computed and rounded for each PO/SO/invoice line and then these rounded amounts will be summed, leading to the total amount for that tax. If you select 'Round globally': for each tax, the tax amount will be computed for each PO/SO/invoice line, then these amounts will be summed and eventually this total tax amount will be rounded. 
If you sell with tax included, you should choose 'Round per line' because you certainly want the sum of your tax-included line subtotals to be equal to the total amount with taxes."), 'sale_tax': fields.many2one("account.tax.template", "Default sale tax"), 'purchase_tax': fields.many2one("account.tax.template", "Default purchase tax"), 'sale_tax_rate': fields.float('Sales tax (%)'), 'purchase_tax_rate': fields.float('Purchase tax (%)'), 'complete_tax_set': fields.boolean('Complete set of taxes', help='This boolean helps you to choose if you want to propose to the user to encode the sales and purchase rates or use the usual m2o fields. This last choice assumes that the set of tax defined for the chosen template is complete'), 'has_fiscal_year': fields.boolean('Company has a fiscal year'), 'date_start': fields.date('Start date', required=True), 'date_stop': fields.date('End date', required=True), 'period': fields.selection([('month', 'Monthly'), ('3months','3 Monthly')], 'Periods', required=True), 'sale_journal_id': fields.many2one('account.journal', 'Sale journal'), 'sale_sequence_prefix': fields.related('sale_journal_id', 'sequence_id', 'prefix', type='char', string='Invoice sequence'), 'sale_sequence_next': fields.related('sale_journal_id', 'sequence_id', 'number_next', type='integer', string='Next invoice number'), 'sale_refund_journal_id': fields.many2one('account.journal', 'Sale refund journal'), 'sale_refund_sequence_prefix': fields.related('sale_refund_journal_id', 'sequence_id', 'prefix', type='char', string='Credit note sequence'), 'sale_refund_sequence_next': fields.related('sale_refund_journal_id', 'sequence_id', 'number_next', type='integer', string='Next credit note number'), 'purchase_journal_id': fields.many2one('account.journal', 'Purchase journal'), 'purchase_sequence_prefix': fields.related('purchase_journal_id', 'sequence_id', 'prefix', type='char', string='Supplier invoice sequence'), 'purchase_sequence_next': 
fields.related('purchase_journal_id', 'sequence_id', 'number_next', type='integer', string='Next supplier invoice number'), 'purchase_refund_journal_id': fields.many2one('account.journal', 'Purchase refund journal'), 'purchase_refund_sequence_prefix': fields.related('purchase_refund_journal_id', 'sequence_id', 'prefix', type='char', string='Supplier credit note sequence'), 'purchase_refund_sequence_next': fields.related('purchase_refund_journal_id', 'sequence_id', 'number_next', type='integer', string='Next supplier credit note number'), 'module_account_check_writing': fields.boolean('Pay your suppliers by check', help="""This allows you to check writing and printing. This installs the module account_check_writing."""), 'module_account_accountant': fields.boolean('Full accounting features: journals, legal statements, chart of accounts, etc.', help="""If you do not check this box, you will be able to do invoicing & payments, but not accounting (Journal Items, Chart of Accounts, ...)"""), 'module_account_asset': fields.boolean('Assets management', help="""This allows you to manage the assets owned by a company or a person. It keeps track of the depreciation occurred on those assets, and creates account move for those depreciation lines. This installs the module account_asset. If you do not check this box, you will be able to do invoicing & payments, but not accounting (Journal Items, Chart of Accounts, ...)"""), 'module_account_budget': fields.boolean('Budget management', help="""This allows accountants to manage analytic and crossovered budgets. Once the master budgets and the budgets are defined, the project managers can set the planned amount on each analytic account. 
This installs the module account_budget."""), 'module_account_payment': fields.boolean('Manage payment orders', help="""This allows you to create and manage your payment orders, with purposes to * serve as base for an easy plug-in of various automated payment mechanisms, and * provide a more efficient way to manage invoice payments. This installs the module account_payment."""), 'module_account_voucher': fields.boolean('Manage customer payments', help="""This includes all the basic requirements of voucher entries for bank, cash, sales, purchase, expense, contra, etc. This installs the module account_voucher."""), 'module_account_followup': fields.boolean('Manage customer payment follow-ups', help="""This allows to automate letters for unpaid invoices, with multi-level recalls. This installs the module account_followup."""), 'group_proforma_invoices': fields.boolean('Allow pro-forma invoices', implied_group='account.group_proforma_invoices', help="Allows you to put invoices in pro-forma state."), 'default_sale_tax': fields.many2one('account.tax', 'Default sale tax', help="This sale tax will be assigned by default on new products."), 'default_purchase_tax': fields.many2one('account.tax', 'Default purchase tax', help="This purchase tax will be assigned by default on new products."), 'decimal_precision': fields.integer('Decimal precision on journal entries', help="""As an example, a decimal precision of 2 will allow journal entries like: 9.99 EUR, whereas a decimal precision of 4 will allow journal entries like: 0.0231 EUR."""), 'group_multi_currency': fields.boolean('Allow multi currencies', implied_group='base.group_multi_currency', help="Allows you multi currency environment"), 'group_analytic_accounting': fields.boolean('Analytic accounting', implied_group='analytic.group_analytic_accounting', help="Allows you to use the analytic accounting."), 'group_check_supplier_invoice_total': fields.boolean('Check the total of supplier invoices', 
implied_group="account.group_supplier_inv_check_total"), } def _default_company(self, cr, uid, context=None): user = self.pool.get('res.users').browse(cr, uid, uid, context=context) return user.company_id.id def _default_has_default_company(self, cr, uid, context=None): count = self.pool.get('res.company').search_count(cr, uid, [], context=context) return bool(count == 1) def _get_default_fiscalyear_data(self, cr, uid, company_id, context=None): """Compute default period, starting and ending date for fiscalyear - if in a fiscal year, use its period, starting and ending date - if past fiscal year, use its period, and new dates [ending date of the latest +1 day ; ending date of the latest +1 year] - if no fiscal year, use monthly, 1st jan, 31th dec of this year :return: (date_start, date_stop, period) at format DEFAULT_SERVER_DATETIME_FORMAT """ fiscalyear_ids = self.pool.get('account.fiscalyear').search(cr, uid, [('date_start', '<=', time.strftime(DF)), ('date_stop', '>=', time.strftime(DF)), ('company_id', '=', company_id)]) if fiscalyear_ids: # is in a current fiscal year, use this one fiscalyear = self.pool.get('account.fiscalyear').browse(cr, uid, fiscalyear_ids[0], context=context) if len(fiscalyear.period_ids) == 5: # 4 periods of 3 months + opening period period = '3months' else: period = 'month' return (fiscalyear.date_start, fiscalyear.date_stop, period) else: past_fiscalyear_ids = self.pool.get('account.fiscalyear').search(cr, uid, [('date_stop', '<=', time.strftime(DF)), ('company_id', '=', company_id)]) if past_fiscalyear_ids: # use the latest fiscal, sorted by (start_date, id) latest_year = self.pool.get('account.fiscalyear').browse(cr, uid, past_fiscalyear_ids[-1], context=context) latest_stop = datetime.datetime.strptime(latest_year.date_stop, DF) if len(latest_year.period_ids) == 5: period = '3months' else: period = 'month' return ((latest_stop+datetime.timedelta(days=1)).strftime(DF), latest_stop.replace(year=latest_stop.year+1).strftime(DF), 
period) else: return (time.strftime('%Y-01-01'), time.strftime('%Y-12-31'), 'month') _defaults = { 'company_id': _default_company, 'has_default_company': _default_has_default_company, } def create(self, cr, uid, values, context=None): id = super(account_config_settings, self).create(cr, uid, values, context) # Hack: to avoid some nasty bug, related fields are not written upon record creation. # Hence we write on those fields here. vals = {} for fname, field in self._columns.iteritems(): if isinstance(field, fields.related) and fname in values: vals[fname] = values[fname] self.write(cr, uid, [id], vals, context) return id def onchange_company_id(self, cr, uid, ids, company_id, context=None): # update related fields values = {} values['currency_id'] = False if company_id: company = self.pool.get('res.company').browse(cr, uid, company_id, context=context) has_chart_of_accounts = company_id not in self.pool.get('account.installer').get_unconfigured_cmp(cr, uid) fiscalyear_count = self.pool.get('account.fiscalyear').search_count(cr, uid, [('date_start', '<=', time.strftime('%Y-%m-%d')), ('date_stop', '>=', time.strftime('%Y-%m-%d')), ('company_id', '=', company_id)]) date_start, date_stop, period = self._get_default_fiscalyear_data(cr, uid, company_id, context=context) values = { 'expects_chart_of_accounts': company.expects_chart_of_accounts, 'currency_id': company.currency_id.id, 'paypal_account': company.paypal_account, 'company_footer': company.rml_footer, 'has_chart_of_accounts': has_chart_of_accounts, 'has_fiscal_year': bool(fiscalyear_count), 'chart_template_id': False, 'tax_calculation_rounding_method': company.tax_calculation_rounding_method, 'date_start': date_start, 'date_stop': date_stop, 'period': period, } # update journals and sequences for journal_type in ('sale', 'sale_refund', 'purchase', 'purchase_refund'): for suffix in ('_journal_id', '_sequence_prefix', '_sequence_next'): values[journal_type + suffix] = False journal_obj = 
self.pool.get('account.journal') journal_ids = journal_obj.search(cr, uid, [('company_id', '=', company_id)]) for journal in journal_obj.browse(cr, uid, journal_ids): if journal.type in ('sale', 'sale_refund', 'purchase', 'purchase_refund'): values.update({ journal.type + '_journal_id': journal.id, journal.type + '_sequence_prefix': journal.sequence_id.prefix, journal.type + '_sequence_next': journal.sequence_id.number_next, }) # update taxes ir_values = self.pool.get('ir.values') taxes_id = ir_values.get_default(cr, uid, 'product.product', 'taxes_id', company_id=company_id) supplier_taxes_id = ir_values.get_default(cr, uid, 'product.product', 'supplier_taxes_id', company_id=company_id) values.update({ 'default_sale_tax': isinstance(taxes_id, list) and taxes_id[0] or taxes_id, 'default_purchase_tax': isinstance(supplier_taxes_id, list) and supplier_taxes_id[0] or supplier_taxes_id, }) return {'value': values} def onchange_chart_template_id(self, cr, uid, ids, chart_template_id, context=None): tax_templ_obj = self.pool.get('account.tax.template') res = {'value': { 'complete_tax_set': False, 'sale_tax': False, 'purchase_tax': False, 'sale_tax_rate': 15, 'purchase_tax_rate': 15, }} if chart_template_id: # update complete_tax_set, sale_tax and purchase_tax chart_template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context) res['value'].update({'complete_tax_set': chart_template.complete_tax_set}) if chart_template.complete_tax_set: # default tax is given by the lowest sequence. 
For same sequence we will take the latest created as it will be the case for tax created while isntalling the generic chart of account sale_tax_ids = tax_templ_obj.search(cr, uid, [("chart_template_id", "=", chart_template_id), ('type_tax_use', 'in', ('sale','all'))], order="sequence, id desc") purchase_tax_ids = tax_templ_obj.search(cr, uid, [("chart_template_id", "=", chart_template_id), ('type_tax_use', 'in', ('purchase','all'))], order="sequence, id desc") res['value']['sale_tax'] = sale_tax_ids and sale_tax_ids[0] or False res['value']['purchase_tax'] = purchase_tax_ids and purchase_tax_ids[0] or False if chart_template.code_digits: res['value']['code_digits'] = chart_template.code_digits return res def onchange_tax_rate(self, cr, uid, ids, rate, context=None): return {'value': {'purchase_tax_rate': rate or False}} def onchange_start_date(self, cr, uid, id, start_date): if start_date: start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d") end_date = (start_date + relativedelta(months=12)) - relativedelta(days=1) return {'value': {'date_stop': end_date.strftime('%Y-%m-%d')}} return {} def open_company_form(self, cr, uid, ids, context=None): config = self.browse(cr, uid, ids[0], context) return { 'type': 'ir.actions.act_window', 'name': 'Configure your Company', 'res_model': 'res.company', 'res_id': config.company_id.id, 'view_mode': 'form', } def set_default_taxes(self, cr, uid, ids, context=None): """ set default sale and purchase taxes for products """ ir_values = self.pool.get('ir.values') config = self.browse(cr, uid, ids[0], context) ir_values.set_default(cr, uid, 'product.product', 'taxes_id', config.default_sale_tax and [config.default_sale_tax.id] or False, company_id=config.company_id.id) ir_values.set_default(cr, uid, 'product.product', 'supplier_taxes_id', config.default_purchase_tax and [config.default_purchase_tax.id] or False, company_id=config.company_id.id) def set_chart_of_accounts(self, cr, uid, ids, context=None): """ install a chart 
of accounts for the given company (if required) """ config = self.browse(cr, uid, ids[0], context) if config.chart_template_id: assert config.expects_chart_of_accounts and not config.has_chart_of_accounts wizard = self.pool.get('wizard.multi.charts.accounts') wizard_id = wizard.create(cr, uid, { 'company_id': config.company_id.id, 'chart_template_id': config.chart_template_id.id, 'code_digits': config.code_digits or 6, 'sale_tax': config.sale_tax.id, 'purchase_tax': config.purchase_tax.id, 'sale_tax_rate': config.sale_tax_rate, 'purchase_tax_rate': config.purchase_tax_rate, 'complete_tax_set': config.complete_tax_set, 'currency_id': config.currency_id.id, }, context) wizard.execute(cr, uid, [wizard_id], context) def set_fiscalyear(self, cr, uid, ids, context=None): """ create a fiscal year for the given company (if necessary) """ config = self.browse(cr, uid, ids[0], context) if config.has_chart_of_accounts or config.chart_template_id: fiscalyear = self.pool.get('account.fiscalyear') fiscalyear_count = fiscalyear.search_count(cr, uid, [('date_start', '<=', config.date_start), ('date_stop', '>=', config.date_stop), ('company_id', '=', config.company_id.id)], context=context) if not fiscalyear_count: name = code = config.date_start[:4] if int(name) != int(config.date_stop[:4]): name = config.date_start[:4] +'-'+ config.date_stop[:4] code = config.date_start[2:4] +'-'+ config.date_stop[2:4] vals = { 'name': name, 'code': code, 'date_start': config.date_start, 'date_stop': config.date_stop, 'company_id': config.company_id.id, } fiscalyear_id = fiscalyear.create(cr, uid, vals, context=context) if config.period == 'month': fiscalyear.create_period(cr, uid, [fiscalyear_id]) elif config.period == '3months': fiscalyear.create_period3(cr, uid, [fiscalyear_id]) def get_default_dp(self, cr, uid, fields, context=None): dp = self.pool.get('ir.model.data').get_object(cr, uid, 'product','decimal_account') return {'decimal_precision': dp.digits} def set_default_dp(self, cr, uid, 
ids, context=None): config = self.browse(cr, uid, ids[0], context) dp = self.pool.get('ir.model.data').get_object(cr, uid, 'product','decimal_account') dp.write({'digits': config.decimal_precision}) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
bvermeer/WTH_Hackathon_Singapore_2017
refs/heads/master
Examples/PyGame_Examples/PyGame_Ball.py
1
#!/usr/bin/python2.7 ''' Description: This is a simple GUI app which creates a bouncing ball animation. Created: September 15, 2017 Author: Blake Vermeer ''' import sys, pygame, os os.putenv('DISPLAY', ':0') pygame.init() size = width, height = 320, 240 speed = [1, 1] black = 0, 0, 0 screen = pygame.display.set_mode(size, pygame.FULLSCREEN) # Be careful with this line! The touchscreen doesn't work correctly when hiding the mouse! pygame.mouse.set_visible(False) ball = pygame.image.load("./Images/ball.png") ballrect = ball.get_rect() while 1: for event in pygame.event.get(): if event.type == pygame.QUIT: sys.exit() ballrect = ballrect.move(speed) if ballrect.left < 0 or ballrect.right > width: speed[0] = -speed[0] if ballrect.top < 0 or ballrect.bottom > height: speed[1] = -speed[1] screen.fill(black) screen.blit(ball, ballrect) pygame.display.flip()
mrnamingo/enigma2-test
refs/heads/master
lib/python/Screens/LanguageSelection.py
1
from Screens.Screen import Screen from Screens.MessageBox import MessageBox from Components.ActionMap import ActionMap from Components.Language import language from Components.config import config from Components.Sources.List import List from Components.Label import Label from Components.Sources.StaticText import StaticText from Components.Pixmap import Pixmap from Components.Language_cache import LANG_TEXT from enigma import eTimer from Screens.Rc import Rc from Tools.Directories import resolveFilename, SCOPE_ACTIVE_SKIN from Tools.LoadPixmap import LoadPixmap import gettext inWizzard = False def LanguageEntryComponent(file, name, index): png = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "countries/" + index + ".png")) if png is None: png = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "countries/" + file + ".png")) if png is None: png = LoadPixmap(resolveFilename(SCOPE_ACTIVE_SKIN, "countries/missing.png")) res = (index, name, png) return res def _cached(x): return LANG_TEXT.get(config.osd.language.value, {}).get(x, "") class LanguageSelection(Screen): def __init__(self, session): Screen.__init__(self, session) self.setTitle(_("Setup Language")) language.InitLang() self.oldActiveLanguage = language.getActiveLanguage() self.catalog = language.getActiveCatalog() self.list = [] # self["flag"] = Pixmap() self["summarylangname"] = StaticText() self["summarylangsel"] = StaticText() self["languages"] = List(self.list) self["languages"].onSelectionChanged.append(self.changed) self.updateList() self.onLayoutFinish.append(self.selectActiveLanguage) self["key_red"] = Label(_("Cancel")) self["key_green"] = Label(_("Save")) self["key_yellow"] = Label(_("Update Cache")) self["key_blue"] = Label(_("Delete Language")) self["actions"] = ActionMap(["SetupActions", "ColorActions"], { "ok": self.save, "cancel": self.cancel, "red": self.cancel, "green": self.save, "yellow": self.updateCache, "blue": self.delLang, }, -1) def updateCache(self): print"updateCache" 
self["languages"].setList([('update cache','Updating cache, please wait...',None)]) self.updateTimer = eTimer() self.updateTimer.callback.append(self.startupdateCache) self.updateTimer.start(100) def startupdateCache(self): self.updateTimer.stop() language.updateLanguageCache() self["languages"].setList(self.list) self.selectActiveLanguage() def selectActiveLanguage(self): activeLanguage = language.getActiveLanguage() pos = 0 for x in self.list: if x[0] == activeLanguage: self["languages"].index = pos break pos += 0 def save(self): self.run() global inWizzard if inWizzard: inWizzard = False self.session.openWithCallback(self.deletelanguagesCB, MessageBox, _("Do you want to delete all other languages?"), default = False) else: self.close(self.oldActiveLanguage != config.osd.language.value) def deletelanguagesCB(self, anwser): if anwser: language.delLanguage() self.close() def cancel(self): language.activateLanguage(self.oldActiveLanguage) config.osd.language.setValue(self.oldActiveLanguage) config.osd.language.save() self.close() def delLang(self): curlang = config.osd.language.value lang = curlang languageList = language.getLanguageListSelection() for t in languageList: if curlang == t[0]: lang = t[1] break self.session.openWithCallback(self.delLangCB, MessageBox, _("Do you want to delete all other languages?") + _(" Except %s") %(lang), default = False) def delLangCB(self, anwser): if anwser: language.delLanguage() language.activateLanguage(self.oldActiveLanguage) self.updateList() self.selectActiveLanguage() def run(self, justlocal = False): print "updating language..." 
lang = self["languages"].getCurrent()[0] if lang == 'update cache': self.setTitle("Updating cache") self["summarylangname"].setText("Updating cache") return if lang != config.osd.language.value: config.osd.language.setValue(lang) config.osd.language.save() self.setTitle(_cached("T2")) self["summarylangname"].setText(_cached("T2")) self["summarylangsel"].setText(self["languages"].getCurrent()[1]) self["key_red"].setText(_cached("T3")) self["key_green"].setText(_cached("T4")) # index = self["languages"].getCurrent()[2] # print 'INDEX:',index # self["flag"].instance.setPixmap(self["languages"].getCurrent()[2]) if justlocal: return language.activateLanguage(lang) config.misc.languageselected.value = 0 config.misc.languageselected.save() print "ok" def updateList(self): languageList = language.getLanguageList() if not languageList: # no language available => display only english list = [ LanguageEntryComponent("en", "English (US)", "en_US") ] else: list = [ LanguageEntryComponent(file = x[1][2].lower(), name = x[1][0], index = x[0]) for x in languageList] self.list = list self["languages"].list = list def changed(self): self.run(justlocal = True) class LanguageWizard(LanguageSelection, Rc): def __init__(self, session): LanguageSelection.__init__(self, session) Rc.__init__(self) global inWizzard inWizzard = True self.onLayoutFinish.append(self.selectKeys) self["wizard"] = Pixmap() self["summarytext"] = StaticText() self["text"] = Label() self.setText() def selectKeys(self): self.clearSelectedKeys() self.selectKey("UP") self.selectKey("DOWN") def changed(self): self.run(justlocal = True) self.setText() def setText(self): self["text"].setText(_cached("T1")) self["summarytext"].setText(_cached("T1")) def createSummary(self): return LanguageWizardSummary class LanguageWizardSummary(Screen): def __init__(self, session, parent): Screen.__init__(self, session, parent)
ofgulban/scikit-image
refs/heads/ncut-rag-options
skimage/morphology/_skeletonize.py
28
""" Algorithms for computing the skeleton of a binary image """ import numpy as np from scipy import ndimage as ndi from ._skeletonize_cy import _fast_skeletonize, _skeletonize_loop, _table_lookup_index # --------- Skeletonization by morphological thinning --------- def skeletonize(image): """Return the skeleton of a binary image. Thinning is used to reduce each connected component in a binary image to a single-pixel wide skeleton. Parameters ---------- image : numpy.ndarray A binary image containing the objects to be skeletonized. '1' represents foreground, and '0' represents background. It also accepts arrays of boolean values where True is foreground. Returns ------- skeleton : ndarray A matrix containing the thinned image. See also -------- medial_axis Notes ----- The algorithm [1]_ works by making successive passes of the image, removing pixels on object borders. This continues until no more pixels can be removed. The image is correlated with a mask that assigns each pixel a number in the range [0...255] corresponding to each possible pattern of its 8 neighbouring pixels. A look up table is then used to assign the pixels a value of 0, 1, 2 or 3, which are selectively removed during the iterations. Note that this algorithm will give different results than a medial axis transform, which is also often referred to as "skeletonization". References ---------- .. [1] A fast parallel algorithm for thinning digital patterns, T. Y. Zhang and C. Y. Suen, Communications of the ACM, March 1984, Volume 27, Number 3. 
Examples -------- >>> X, Y = np.ogrid[0:9, 0:9] >>> ellipse = (1./3 * (X - 4)**2 + (Y - 4)**2 < 3**2).astype(np.uint8) >>> ellipse array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0]], dtype=uint8) >>> skel = skeletonize(ellipse) >>> skel.astype(np.uint8) array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8) """ # convert to unsigned int (this should work for boolean values) image = image.astype(np.uint8) # check some properties of the input image: # - 2D # - binary image with only 0's and 1's if image.ndim != 2: raise ValueError('Skeletonize requires a 2D array') if not np.all(np.in1d(image.flat, (0, 1))): raise ValueError('Image contains values other than 0 and 1') return _fast_skeletonize(image) # --------- Skeletonization by medial axis transform -------- _eight_connect = ndi.generate_binary_structure(2, 2) def medial_axis(image, mask=None, return_distance=False): """ Compute the medial axis transform of a binary image Parameters ---------- image : binary ndarray, shape (M, N) The image of the shape to be skeletonized. mask : binary ndarray, shape (M, N), optional If a mask is given, only those elements in `image` with a true value in `mask` are used for computing the medial axis. return_distance : bool, optional If true, the distance transform is returned as well as the skeleton. 
Returns ------- out : ndarray of bools Medial axis transform of the image dist : ndarray of ints, optional Distance transform of the image (only returned if `return_distance` is True) See also -------- skeletonize Notes ----- This algorithm computes the medial axis transform of an image as the ridges of its distance transform. The different steps of the algorithm are as follows * A lookup table is used, that assigns 0 or 1 to each configuration of the 3x3 binary square, whether the central pixel should be removed or kept. We want a point to be removed if it has more than one neighbor and if removing it does not change the number of connected components. * The distance transform to the background is computed, as well as the cornerness of the pixel. * The foreground (value of 1) points are ordered by the distance transform, then the cornerness. * A cython function is called to reduce the image to its skeleton. It processes pixels in the order determined at the previous step, and removes or maintains a pixel according to the lookup table. Because of the ordering, it is possible to process all pixels in only one pass. Examples -------- >>> square = np.zeros((7, 7), dtype=np.uint8) >>> square[1:-1, 2:-2] = 1 >>> square array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0]], dtype=uint8) >>> medial_axis(square).astype(np.uint8) array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0]], dtype=uint8) """ global _eight_connect if mask is None: masked_image = image.astype(np.bool) else: masked_image = image.astype(bool).copy() masked_image[~mask] = False # # Build lookup table - three conditions # 1. Keep only positive pixels (center_is_foreground array). # AND # 2. 
Keep if removing the pixel results in a different connectivity # (if the number of connected components is different with and # without the central pixel) # OR # 3. Keep if # pixels in neighbourhood is 2 or less # Note that table is independent of image center_is_foreground = (np.arange(512) & 2**4).astype(bool) table = (center_is_foreground # condition 1. & (np.array([ndi.label(_pattern_of(index), _eight_connect)[1] != ndi.label(_pattern_of(index & ~ 2**4), _eight_connect)[1] for index in range(512)]) # condition 2 | np.array([np.sum(_pattern_of(index)) < 3 for index in range(512)])) # condition 3 ) # Build distance transform distance = ndi.distance_transform_edt(masked_image) if return_distance: store_distance = distance.copy() # Corners # The processing order along the edge is critical to the shape of the # resulting skeleton: if you process a corner first, that corner will # be eroded and the skeleton will miss the arm from that corner. Pixels # with fewer neighbors are more "cornery" and should be processed last. # We use a cornerness_table lookup table where the score of a # configuration is the number of background (0-value) pixels in the # 3x3 neighbourhood cornerness_table = np.array([9 - np.sum(_pattern_of(index)) for index in range(512)]) corner_score = _table_lookup(masked_image, cornerness_table) # Define arrays for inner loop i, j = np.mgrid[0:image.shape[0], 0:image.shape[1]] result = masked_image.copy() distance = distance[result] i = np.ascontiguousarray(i[result], dtype=np.intp) j = np.ascontiguousarray(j[result], dtype=np.intp) result = np.ascontiguousarray(result, np.uint8) # Determine the order in which pixels are processed. # We use a random # for tiebreaking. 
Assign each pixel in the image a # predictable, random # so that masking doesn't affect arbitrary choices # of skeletons # generator = np.random.RandomState(0) tiebreaker = generator.permutation(np.arange(masked_image.sum())) order = np.lexsort((tiebreaker, corner_score[masked_image], distance)) order = np.ascontiguousarray(order, dtype=np.int32) table = np.ascontiguousarray(table, dtype=np.uint8) # Remove pixels not belonging to the medial axis _skeletonize_loop(result, i, j, order, table) result = result.astype(bool) if not mask is None: result[~mask] = image[~mask] if return_distance: return result, store_distance else: return result def _pattern_of(index): """ Return the pattern represented by an index value Byte decomposition of index """ return np.array([[index & 2**0, index & 2**1, index & 2**2], [index & 2**3, index & 2**4, index & 2**5], [index & 2**6, index & 2**7, index & 2**8]], bool) def _table_lookup(image, table): """ Perform a morphological transform on an image, directed by its neighbors Parameters ---------- image : ndarray A binary image table : ndarray A 512-element table giving the transform of each pixel given the values of that pixel and its 8-connected neighbors. border_value : bool The value of pixels beyond the border of the image. Returns ------- result : ndarray of same shape as `image` Transformed image Notes ----- The pixels are numbered like this:: 0 1 2 3 4 5 6 7 8 The index at a pixel is the sum of 2**<pixel-number> for pixels that evaluate to true. 
""" # # We accumulate into the indexer to get the index into the table # at each point in the image # if image.shape[0] < 3 or image.shape[1] < 3: image = image.astype(bool) indexer = np.zeros(image.shape, int) indexer[1:, 1:] += image[:-1, :-1] * 2**0 indexer[1:, :] += image[:-1, :] * 2**1 indexer[1:, :-1] += image[:-1, 1:] * 2**2 indexer[:, 1:] += image[:, :-1] * 2**3 indexer[:, :] += image[:, :] * 2**4 indexer[:, :-1] += image[:, 1:] * 2**5 indexer[:-1, 1:] += image[1:, :-1] * 2**6 indexer[:-1, :] += image[1:, :] * 2**7 indexer[:-1, :-1] += image[1:, 1:] * 2**8 else: indexer = _table_lookup_index(np.ascontiguousarray(image, np.uint8)) image = table[indexer] return image
openstack/nova
refs/heads/master
nova/tests/functional/db/test_security_group.py
9
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import context from nova.db.sqlalchemy import api as db_api from nova import objects from nova import test class SecurityGroupObjectTestCase(test.TestCase): def setUp(self): super(SecurityGroupObjectTestCase, self).setUp() self.context = context.RequestContext('fake-user', 'fake-project') def _create_group(self, **values): defaults = {'project_id': self.context.project_id, 'user_id': self.context.user_id, 'name': 'foogroup', 'description': 'foodescription'} defaults.update(values) db_api.security_group_create(self.context, defaults) def test_get_counts(self): # _create_group() creates a group with project_id and user_id from # self.context by default self._create_group(name='a') self._create_group(name='b', project_id='foo') self._create_group(name='c', user_id='bar') # Count only across a project counts = objects.SecurityGroupList.get_counts(self.context, 'foo') self.assertEqual(1, counts['project']['security_groups']) self.assertNotIn('user', counts) # Count across a project and a user counts = objects.SecurityGroupList.get_counts( self.context, self.context.project_id, user_id=self.context.user_id) self.assertEqual(2, counts['project']['security_groups']) self.assertEqual(1, counts['user']['security_groups'])
amitjamadagni/sympy
refs/heads/master
doc/ext/numpydoc.py
66
""" ======== numpydoc ======== Sphinx extension that handles docstrings in the Numpy standard format. [1] It will: - Convert Parameters etc. sections to field lists. - Convert See Also section to a See also entry. - Renumber references. - Extract the signature from the docstring, if it can't be determined otherwise. .. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt """ import sphinx if sphinx.__version__ < '1.0.1': raise RuntimeError("Sphinx 1.0.1 or newer is required") import os import re import pydoc from docscrape_sphinx import get_doc_object, SphinxDocString from sphinx.util.compat import Directive import inspect def mangle_docstrings(app, what, name, obj, options, lines, reference_offset=[0]): cfg = dict(use_plots=app.config.numpydoc_use_plots, show_class_members=app.config.numpydoc_show_class_members) if what == 'module': # Strip top title title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', re.I | re.S) lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n") else: doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg) lines[:] = unicode(doc).split(u"\n") if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ obj.__name__: if hasattr(obj, '__module__'): v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) lines += [u'', u'.. htmlonly::', ''] lines += [u' %s' % x for x in (app.config.numpydoc_edit_link % v).split("\n")] # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() m = re.match(ur'^.. 
\[([a-z0-9_.-])\]', line, re.I) if m: references.append(m.group(1)) # start renaming from the longest string, to avoid overwriting parts references.sort(key=lambda x: -len(x)) if references: for i, line in enumerate(lines): for r in references: if re.match(ur'^\d+$', r): new_r = u"R%d" % (reference_offset[0] + int(r)) else: new_r = u"%s%d" % (r, reference_offset[0]) lines[i] = lines[i].replace(u'[%s]_' % r, u'[%s]_' % new_r) lines[i] = lines[i].replace(u'.. [%s]' % r, u'.. [%s]' % new_r) reference_offset[0] += len(references) def mangle_signature(app, what, name, obj, options, sig, retann): # Do not try to inspect classes that don't define `__init__` if (inspect.isclass(obj) and (not hasattr(obj, '__init__') or 'initializes x; see ' in pydoc.getdoc(obj.__init__))): return '', '' if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return if not hasattr(obj, '__doc__'): return doc = SphinxDocString(pydoc.getdoc(obj)) if doc['Signature']: sig = re.sub(u"^[^(]*", u"", doc['Signature']) return sig, u'' def setup(app, get_doc_object_=get_doc_object): global get_doc_object get_doc_object = get_doc_object_ app.connect('autodoc-process-docstring', mangle_docstrings) app.connect('autodoc-process-signature', mangle_signature) app.add_config_value('numpydoc_edit_link', None, False) app.add_config_value('numpydoc_use_plots', None, False) app.add_config_value('numpydoc_show_class_members', True, True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) #------------------------------------------------------------------------------ # Docstring-mangling domains #------------------------------------------------------------------------------ from docutils.statemachine import ViewList from sphinx.domains.c import CDomain from sphinx.domains.python import PythonDomain class ManglingDomainBase(object): directive_mangling_map = {} def __init__(self, *a, **kw): super(ManglingDomainBase, self).__init__(*a, **kw) 
self.wrap_mangling_directives() def wrap_mangling_directives(self): for name, objtype in self.directive_mangling_map.items(): self.directives[name] = wrap_mangling_directive( self.directives[name], objtype) class NumpyPythonDomain(ManglingDomainBase, PythonDomain): name = 'np' directive_mangling_map = { 'function': 'function', 'class': 'class', 'exception': 'class', 'method': 'function', 'classmethod': 'function', 'staticmethod': 'function', 'attribute': 'attribute', } class NumpyCDomain(ManglingDomainBase, CDomain): name = 'np-c' directive_mangling_map = { 'function': 'function', 'member': 'attribute', 'macro': 'function', 'type': 'class', 'var': 'object', } def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): env = self.state.document.settings.env name = None if self.arguments: m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) name = m.group(2).strip() if not name: name = self.arguments[0] lines = list(self.content) mangle_docstrings(env.app, objtype, name, None, None, lines) self.content = ViewList(lines, self.content.parent) return base_directive.run(self) return directive
earshel/PokeyPySnipe
refs/heads/master
POGOProtos/Networking/Requests/Messages/DownloadSettingsMessage_pb2.py
16
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: POGOProtos/Networking/Requests/Messages/DownloadSettingsMessage.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='POGOProtos/Networking/Requests/Messages/DownloadSettingsMessage.proto', package='POGOProtos.Networking.Requests.Messages', syntax='proto3', serialized_pb=_b('\nEPOGOProtos/Networking/Requests/Messages/DownloadSettingsMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\"\'\n\x17\x44ownloadSettingsMessage\x12\x0c\n\x04hash\x18\x01 \x01(\tb\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _DOWNLOADSETTINGSMESSAGE = _descriptor.Descriptor( name='DownloadSettingsMessage', full_name='POGOProtos.Networking.Requests.Messages.DownloadSettingsMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='hash', full_name='POGOProtos.Networking.Requests.Messages.DownloadSettingsMessage.hash', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=114, serialized_end=153, ) DESCRIPTOR.message_types_by_name['DownloadSettingsMessage'] = _DOWNLOADSETTINGSMESSAGE DownloadSettingsMessage = _reflection.GeneratedProtocolMessageType('DownloadSettingsMessage', (_message.Message,), dict( DESCRIPTOR = 
_DOWNLOADSETTINGSMESSAGE, __module__ = 'POGOProtos.Networking.Requests.Messages.DownloadSettingsMessage_pb2' # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.DownloadSettingsMessage) )) _sym_db.RegisterMessage(DownloadSettingsMessage) # @@protoc_insertion_point(module_scope)
AdamIsrael/PerfKitBenchmarker
refs/heads/master
perfkitbenchmarker/linux_packages/zlib.py
6
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module containing zlib installation and cleanup functions.""" def YumInstall(vm): """Installs the zlib package on the VM.""" vm.InstallPackages('zlib zlib-devel') def AptInstall(vm): """Installs the zlib package on the VM.""" vm.InstallPackages('zlib1g zlib1g-dev')
etuna-SBF-kog/Stadsparken
refs/heads/master
env/lib/python2.7/site-packages/django/db/models/fields/files.py
88
import datetime
import os

from django import forms
from django.db.models.fields import Field
from django.core.files.base import File
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile
from django.db.models import signals
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import ugettext_lazy as _


class FieldFile(File):
    """The File subclass returned when accessing a FileField attribute.

    Wraps a stored file name together with the model instance, the field,
    and the field's storage backend, so callers can read/save/delete the
    underlying file through the storage API.
    """

    def __init__(self, instance, field, name):
        super(FieldFile, self).__init__(None, name)
        self.instance = instance
        self.field = field
        self.storage = field.storage
        # True once the name refers to a file actually saved in storage.
        self._committed = True

    def __eq__(self, other):
        # Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can remain backwards compatible.
        if hasattr(other, 'name'):
            return self.name == other.name
        return self.name == other

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Required because we defined a custom __eq__.
        return hash(self.name)

    # The standard File contains most of the necessary properties, but
    # FieldFiles can be instantiated without a name, so that needs to
    # be checked for here.

    def _require_file(self):
        # Raise if this FieldFile has no name (empty FieldFile is falsy).
        if not self:
            raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)

    def _get_file(self):
        # Lazily open the file from storage on first access.
        self._require_file()
        if not hasattr(self, '_file') or self._file is None:
            self._file = self.storage.open(self.name, 'rb')
        return self._file

    def _set_file(self, file):
        self._file = file

    def _del_file(self):
        del self._file

    file = property(_get_file, _set_file, _del_file)

    def _get_path(self):
        self._require_file()
        return self.storage.path(self.name)
    path = property(_get_path)

    def _get_url(self):
        self._require_file()
        return self.storage.url(self.name)
    url = property(_get_url)

    def _get_size(self):
        self._require_file()
        # Uncommitted files aren't in storage yet, so ask the file itself.
        if not self._committed:
            return self.file.size
        return self.storage.size(self.name)
    size = property(_get_size)

    def open(self, mode='rb'):
        self._require_file()
        self.file.open(mode)
    # open() doesn't alter the file's contents, but it does reset the pointer
    open.alters_data = True

    # In addition to the standard File API, FieldFiles have extra methods
    # to further manipulate the underlying file, as well as update the
    # associated model instance.

    def save(self, name, content, save=True):
        """Save `content` under `name` via the storage backend and update
        the model attribute; optionally save the model instance too."""
        name = self.field.generate_filename(self.instance, name)
        self.name = self.storage.save(name, content)
        setattr(self.instance, self.field.name, self.name)

        # Update the filesize cache
        self._size = content.size
        self._committed = True

        # Save the object because it has changed, unless save is False
        if save:
            self.instance.save()
    save.alters_data = True

    def delete(self, save=True):
        """Delete the file from storage and clear the model attribute;
        optionally save the model instance."""
        # Only close the file if it's already open, which we know by the
        # presence of self._file
        if hasattr(self, '_file'):
            self.close()
            del self.file

        self.storage.delete(self.name)

        self.name = None
        setattr(self.instance, self.field.name, self.name)

        # Delete the filesize cache
        if hasattr(self, '_size'):
            del self._size
        self._committed = False

        if save:
            self.instance.save()
    delete.alters_data = True

    def _get_closed(self):
        file = getattr(self, '_file', None)
        return file is None or file.closed
    closed = property(_get_closed)

    def close(self):
        file = getattr(self, '_file', None)
        if file is not None:
            file.close()

    def __getstate__(self):
        # FieldFile needs access to its associated model field and an instance
        # it's attached to in order to work properly, but the only necessary
        # data to be pickled is the file's name itself. Everything else will
        # be restored later, by FileDescriptor below.
        return {'name': self.name, 'closed': False, '_committed': True, '_file': None}


class FileDescriptor(object):
    """
    The descriptor for the file attribute on the model instance. Returns a
    FieldFile when accessed so you can do stuff like::

        >>> instance.file.size

    Assigns a file object on assignment so you can do::

        >>> instance.file = File(...)

    """
    def __init__(self, field):
        self.field = field

    def __get__(self, instance=None, owner=None):
        if instance is None:
            raise AttributeError(
                "The '%s' attribute can only be accessed from %s instances."
                % (self.field.name, owner.__name__))

        # This is slightly complicated, so worth an explanation.
        # `instance.file` needs to ultimately return some instance of `File`,
        # probably a subclass. Additionally, this returned object needs to have
        # the FieldFile API so that users can easily do things like
        # instance.file.path and have that delegated to the file storage engine.
        # Easy enough if we're strict about assignment in __set__, but if you
        # peek below you can see that we're not. So depending on the current
        # value of the field we have to dynamically construct some sort of
        # "thing" to return.

        # The instance dict contains whatever was originally assigned
        # in __set__.
        file = instance.__dict__[self.field.name]

        # If this value is a string (instance.file = "path/to/file") or None
        # then we simply wrap it with the appropriate attribute class according
        # to the file field. [This is FieldFile for FileFields and
        # ImageFieldFile for ImageFields; it's also conceivable that user
        # subclasses might also want to subclass the attribute class]. This
        # object understands how to convert a path to a file, and also how to
        # handle None.
        if isinstance(file, basestring) or file is None:
            attr = self.field.attr_class(instance, self.field, file)
            instance.__dict__[self.field.name] = attr

        # Other types of files may be assigned as well, but they need to have
        # the FieldFile interface added to them. Thus, we wrap any other type of
        # File inside a FieldFile (well, the field's attr_class, which is
        # usually FieldFile).
        elif isinstance(file, File) and not isinstance(file, FieldFile):
            file_copy = self.field.attr_class(instance, self.field, file.name)
            file_copy.file = file
            file_copy._committed = False
            instance.__dict__[self.field.name] = file_copy

        # Finally, because of the (some would say boneheaded) way pickle works,
        # the underlying FieldFile might not actually itself have an associated
        # file. So we need to reset the details of the FieldFile in those cases.
        elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
            file.instance = instance
            file.field = self.field
            file.storage = self.field.storage

        # That was fun, wasn't it?
        return instance.__dict__[self.field.name]

    def __set__(self, instance, value):
        instance.__dict__[self.field.name] = value


class FileField(Field):
    """A model field that stores a file path and delegates file operations
    to a storage backend."""

    # The class to wrap instance attributes in. Accessing the file object off
    # the instance will always return an instance of attr_class.
    attr_class = FieldFile

    # The descriptor to use for accessing the attribute off of the class.
    descriptor_class = FileDescriptor

    description = _("File")

    def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
        # File fields can't be primary keys or unique — reject early.
        for arg in ('primary_key', 'unique'):
            if arg in kwargs:
                raise TypeError("'%s' is not a valid argument for %s." % (arg, self.__class__))

        self.storage = storage or default_storage
        self.upload_to = upload_to
        # A callable upload_to replaces generate_filename wholesale.
        if callable(upload_to):
            self.generate_filename = upload_to

        kwargs['max_length'] = kwargs.get('max_length', 100)
        super(FileField, self).__init__(verbose_name, name, **kwargs)

    def get_internal_type(self):
        return "FileField"

    def get_prep_lookup(self, lookup_type, value):
        # Lookups compare against the stored name, not the file object.
        if hasattr(value, 'name'):
            value = value.name
        return super(FileField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        "Returns field's value prepared for saving into a database."
        # Need to convert File objects provided via a form to unicode for database insertion
        if value is None:
            return None
        return unicode(value)

    def pre_save(self, model_instance, add):
        "Returns field's value just before saving."
        file = super(FileField, self).pre_save(model_instance, add)
        if file and not file._committed:
            # Commit the file to storage prior to saving the model
            file.save(file.name, file, save=False)
        return file

    def contribute_to_class(self, cls, name):
        super(FileField, self).contribute_to_class(cls, name)
        # Install the descriptor so attribute access returns attr_class.
        setattr(cls, self.name, self.descriptor_class(self))

    def get_directory_name(self):
        # upload_to may contain strftime() directives, expanded at call time.
        return os.path.normpath(force_unicode(datetime.datetime.now().strftime(smart_str(self.upload_to))))

    def get_filename(self, filename):
        return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))

    def generate_filename(self, instance, filename):
        return os.path.join(self.get_directory_name(), self.get_filename(filename))

    def save_form_data(self, instance, data):
        # Important: None means "no change", other false value means "clear"
        # This subtle distinction (rather than a more explicit marker) is
        # needed because we need to consume values that are also sane for a
        # regular (non Model-) Form to find in its cleaned_data dictionary.
        if data is not None:
            # This value will be converted to unicode and stored in the
            # database, so leaving False as-is is not acceptable.
            if not data:
                data = ''
            setattr(instance, self.name, data)

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
        # If a file has been provided previously, then the form doesn't require
        # that a new file is provided this time.
        # The code to mark the form field as not required is used by
        # form_for_instance, but can probably be removed once form_for_instance
        # is gone. ModelForm uses a different method to check for an existing file.
        if 'initial' in kwargs:
            defaults['required'] = False
        defaults.update(kwargs)
        return super(FileField, self).formfield(**defaults)


class ImageFileDescriptor(FileDescriptor):
    """
    Just like the FileDescriptor, but for ImageFields. The only difference is
    assigning the width/height to the width_field/height_field, if appropriate.
    """
    def __set__(self, instance, value):
        previous_file = instance.__dict__.get(self.field.name)
        super(ImageFileDescriptor, self).__set__(instance, value)

        # To prevent recalculating image dimensions when we are instantiating
        # an object from the database (bug #11084), only update dimensions if
        # the field had a value before this assignment.  Since the default
        # value for FileField subclasses is an instance of field.attr_class,
        # previous_file will only be None when we are called from
        # Model.__init__().  The ImageField.update_dimension_fields method
        # hooked up to the post_init signal handles the Model.__init__() cases.
        # Assignment happening outside of Model.__init__() will trigger the
        # update right here.
        if previous_file is not None:
            self.field.update_dimension_fields(instance, force=True)


class ImageFieldFile(ImageFile, FieldFile):
    """FieldFile for ImageFields; adds image-dimension awareness."""

    def delete(self, save=True):
        # Clear the image dimensions cache
        if hasattr(self, '_dimensions_cache'):
            del self._dimensions_cache
        super(ImageFieldFile, self).delete(save)


class ImageField(FileField):
    """FileField specialization that can mirror image width/height into
    sibling model fields."""

    attr_class = ImageFieldFile
    descriptor_class = ImageFileDescriptor
    description = _("Image")

    def __init__(self, verbose_name=None, name=None, width_field=None,
            height_field=None, **kwargs):
        self.width_field, self.height_field = width_field, height_field
        super(ImageField, self).__init__(verbose_name, name, **kwargs)

    def contribute_to_class(self, cls, name):
        super(ImageField, self).contribute_to_class(cls, name)
        # Attach update_dimension_fields so that dimension fields declared
        # after their corresponding image field don't stay cleared by
        # Model.__init__, see bug #11196.
        signals.post_init.connect(self.update_dimension_fields, sender=cls)

    def update_dimension_fields(self, instance, force=False, *args, **kwargs):
        """
        Updates field's width and height fields, if defined.

        This method is hooked up to model's post_init signal to update
        dimensions after instantiating a model instance.  However, dimensions
        won't be updated if the dimensions fields are already populated.  This
        avoids unnecessary recalculation when loading an object from the
        database.

        Dimensions can be forced to update with force=True, which is how
        ImageFileDescriptor.__set__ calls this method.
        """
        # Nothing to update if the field doesn't have dimension fields.
        has_dimension_fields = self.width_field or self.height_field
        if not has_dimension_fields:
            return

        # getattr will call the ImageFileDescriptor's __get__ method, which
        # coerces the assigned value into an instance of self.attr_class
        # (ImageFieldFile in this case).
        file = getattr(instance, self.attname)

        # Nothing to update if we have no file and not being forced to update.
        if not file and not force:
            return

        dimension_fields_filled = not(
            (self.width_field and not getattr(instance, self.width_field))
            or (self.height_field and not getattr(instance, self.height_field))
        )
        # When both dimension fields have values, we are most likely loading
        # data from the database or updating an image field that already had
        # an image stored.  In the first case, we don't want to update the
        # dimension fields because we are already getting their values from the
        # database.  In the second case, we do want to update the dimensions
        # fields and will skip this return because force will be True since we
        # were called from ImageFileDescriptor.__set__.
        if dimension_fields_filled and not force:
            return

        # file should be an instance of ImageFieldFile or should be None.
        if file:
            width = file.width
            height = file.height
        else:
            # No file, so clear dimensions fields.
            width = None
            height = None

        # Update the width and height fields.
        if self.width_field:
            setattr(instance, self.width_field, width)
        if self.height_field:
            setattr(instance, self.height_field, height)

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.ImageField}
        defaults.update(kwargs)
        return super(ImageField, self).formfield(**defaults)
rec/DMXIS
refs/heads/master
Macros/Colours/Purple/Purple.py
1
#=============================================================== # DMXIS Macro (c) 2010 db audioware limited #=============================================================== RgbColour(128,0,128)
almeidapaulopt/erpnext
refs/heads/develop
erpnext/hr/doctype/leave_allocation/leave_allocation.py
33
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe
from frappe.utils import flt, date_diff, formatdate
from frappe import _
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
from erpnext.hr.doctype.leave_application.leave_application import get_approved_leaves_for_period

# Validation errors raised by this doctype; all inherit
# frappe.ValidationError so callers can catch them uniformly.
class OverlapError(frappe.ValidationError): pass
class BackDatedAllocationError(frappe.ValidationError): pass
class OverAllocationError(frappe.ValidationError): pass
class LessAllocationError(frappe.ValidationError): pass
class ValueMultiplierError(frappe.ValidationError): pass

class LeaveAllocation(Document):
	"""Controller for the Leave Allocation doctype: allocates a number of
	leaves of one type to an employee for a date period, optionally
	carrying forward the unused balance of the previous allocation."""

	def validate(self):
		"""Run all validations and compute totals before save."""
		self.validate_period()
		self.validate_new_leaves_allocated_value()
		self.validate_allocation_overlap()
		self.validate_back_dated_allocation()
		self.set_total_leaves_allocated()
		self.validate_total_leaves_allocated()
		self.validate_lwp()
		set_employee_name(self)

	def on_update_after_submit(self):
		"""Recompute and persist totals when a submitted doc is amended."""
		self.validate_new_leaves_allocated_value()
		self.set_total_leaves_allocated()
		# frappe.db.set writes directly, bypassing the normal save cycle
		# (allowed because the document is already submitted).
		frappe.db.set(self,'carry_forwarded_leaves', flt(self.carry_forwarded_leaves))
		frappe.db.set(self,'total_leaves_allocated',flt(self.total_leaves_allocated))
		self.validate_against_leave_applications()

	def validate_period(self):
		# NOTE(review): `<= 0` also rejects from_date == to_date, i.e. a
		# single-day period; the message only mentions "before".
		if date_diff(self.to_date, self.from_date) <= 0:
			frappe.throw(_("To date cannot be before from date"))

	def validate_lwp(self):
		# Leave Without Pay types cannot be allocated at all.
		if frappe.db.get_value("Leave Type", self.leave_type, "is_lwp"):
			frappe.throw(_("Leave Type {0} cannot be allocated since it is leave without pay").format(self.leave_type))

	def validate_new_leaves_allocated_value(self):
		"""validate that leave allocation is in multiples of 0.5"""
		if flt(self.new_leaves_allocated) % 0.5:
			frappe.throw(_("Leaves must be allocated in multiples of 0.5"),
				ValueMultiplierError)

	def validate_allocation_overlap(self):
		"""Reject if another submitted allocation of the same leave type
		overlaps this period for the same employee."""
		leave_allocation = frappe.db.sql("""
			select name from `tabLeave Allocation`
			where employee=%s and leave_type=%s and docstatus=1
			and to_date >= %s and from_date <= %s""",
			(self.employee, self.leave_type, self.from_date, self.to_date))

		if leave_allocation:
			frappe.msgprint(_("{0} already allocated for Employee {1} for period {2} to {3}")
				.format(self.leave_type, self.employee, formatdate(self.from_date), formatdate(self.to_date)))

			frappe.throw(_('Reference') + ': <a href="#Form/Leave Allocation/{0}">{0}</a>'
				.format(leave_allocation[0][0]), OverlapError)

	def validate_back_dated_allocation(self):
		"""Reject if a later carry-forward allocation already consumed this
		period's balance (allocating here would double-count it)."""
		future_allocation = frappe.db.sql("""select name, from_date from `tabLeave Allocation`
			where employee=%s and leave_type=%s and docstatus=1
			and from_date > %s and carry_forward=1""",
			(self.employee, self.leave_type, self.to_date), as_dict=1)

		if future_allocation:
			frappe.throw(_("Leave cannot be allocated before {0}, as leave balance has already been carry-forwarded in the future leave allocation record {1}")
				.format(formatdate(future_allocation[0].from_date), future_allocation[0].name),
				BackDatedAllocationError)

	def set_total_leaves_allocated(self):
		"""total = carry-forwarded balance (if enabled) + newly allocated."""
		self.carry_forwarded_leaves = get_carry_forwarded_leaves(self.employee,
			self.leave_type, self.from_date, self.carry_forward)

		self.total_leaves_allocated = flt(self.carry_forwarded_leaves) + flt(self.new_leaves_allocated)

		if not self.total_leaves_allocated:
			frappe.throw(_("Total leaves allocated is mandatory"))

	def validate_total_leaves_allocated(self):
		# Adding a day to include To Date in the difference
		date_difference = date_diff(self.to_date, self.from_date) + 1
		if date_difference < self.total_leaves_allocated:
			frappe.throw(_("Total allocated leaves are more than days in the period"), OverAllocationError)

	def validate_against_leave_applications(self):
		"""Ensure the (possibly reduced) total still covers leaves already
		approved in this period; warn instead of failing when the leave
		type allows a negative balance."""
		leaves_taken = get_approved_leaves_for_period(self.employee, self.leave_type,
			self.from_date, self.to_date)

		if flt(leaves_taken) > flt(self.total_leaves_allocated):
			if frappe.db.get_value("Leave Type", self.leave_type, "allow_negative"):
				frappe.msgprint(_("Note: Total allocated leaves {0} shouldn't be less than already approved leaves {1} for the period").format(self.total_leaves_allocated, leaves_taken))
			else:
				frappe.throw(_("Total allocated leaves {0} cannot be less than already approved leaves {1} for the period").format(self.total_leaves_allocated, leaves_taken), LessAllocationError)

@frappe.whitelist()
def get_carry_forwarded_leaves(employee, leave_type, date, carry_forward=None):
	"""Return the unused balance of the employee's most recent submitted
	allocation of `leave_type` that ended before `date`; 0 when
	carry-forward is disabled or no previous allocation exists."""
	carry_forwarded_leaves = 0
	if carry_forward:
		validate_carry_forward(leave_type)

		previous_allocation = frappe.db.sql("""
			select name, from_date, to_date, total_leaves_allocated
			from `tabLeave Allocation`
			where employee=%s and leave_type=%s and docstatus=1 and to_date < %s
			order by to_date desc limit 1
		""", (employee, leave_type, date), as_dict=1)
		if previous_allocation:
			leaves_taken = get_approved_leaves_for_period(employee, leave_type,
				previous_allocation[0].from_date, previous_allocation[0].to_date)

			# balance = what was allocated minus what was approved/taken
			carry_forwarded_leaves = flt(previous_allocation[0].total_leaves_allocated) - flt(leaves_taken)

	return carry_forwarded_leaves

def validate_carry_forward(leave_type):
	# Carry-forward must be enabled on the Leave Type master.
	if not frappe.db.get_value("Leave Type", leave_type, "is_carry_forward"):
		frappe.throw(_("Leave Type {0} cannot be carry-forwarded").format(leave_type))
jeffreyliu3230/osf.io
refs/heads/develop
scripts/tests/test_migrate_was_invited.py
62
# -*- coding: utf-8 -*- from nose.tools import * # noqa from tests.base import fake from tests.base import OsfTestCase from tests.factories import UserFactory from tests.factories import NodeFactory from tests.factories import UnconfirmedUserFactory from framework.auth.core import Auth from scripts.migrate_was_invited import main from scripts.migrate_was_invited import is_invited class TestWasInvited(OsfTestCase): def test_was_invited(self): referrer = UserFactory() node = NodeFactory(creator=referrer) name = fake.name() email = fake.email() user = node.add_unregistered_contributor( fullname=name, email=email, auth=Auth(user=referrer), ) user.register(email, 'secret') assert_true(is_invited(user)) user.is_invited = None user.save() main(dry_run=False) user.reload() assert_true(user.is_invited) def test_was_not_invited(self): referrer = UserFactory() node = NodeFactory(creator=referrer) user = UserFactory() node.add_contributor(user, auth=Auth(referrer)) assert_false(is_invited(user)) user.is_invited = None user.save() main(dry_run=False) user.reload() assert_false(user.is_invited) def test_was_not_invited_unconfirmed(self): user = UnconfirmedUserFactory() assert_false(is_invited(user)) user.is_invited = None user.save() main(dry_run=False) user.reload() assert_false(user.is_invited)
cstan11/Sick-Beard
refs/heads/torrent_1080_subtitles
sickbeard/webapi.py
2
# Author: Dennis Lutter <lad1337@gmail.com> # Author: Jonathon Saine <thezoggy@gmail.com> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import os import time import urllib import datetime import threading import re import traceback import cherrypy import sickbeard import webserve from sickbeard import db, logger, exceptions, history, ui, helpers from sickbeard.exceptions import ex from sickbeard import encodingKludge as ek from sickbeard import search_queue from sickbeard.common import SNATCHED, SNATCHED_PROPER, DOWNLOADED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED, UNKNOWN from common import Quality, qualityPresetStrings, statusStrings from sickbeard import image_cache from lib.tvdb_api import tvdb_api, tvdb_exceptions try: import json except ImportError: from lib import simplejson as json import xml.etree.cElementTree as etree dateFormat = "%Y-%m-%d" dateTimeFormat = "%Y-%m-%d %H:%M" RESULT_SUCCESS = 10 # only use inside the run methods RESULT_FAILURE = 20 # only use inside the run methods RESULT_TIMEOUT = 30 # not used yet :( RESULT_ERROR = 40 # only use outside of the run methods ! RESULT_FATAL = 50 # only use in Api.default() ! this is the "we encountered an internal error" error RESULT_DENIED = 60 # only use in Api.default() ! 
this is the acces denied error result_type_map = {RESULT_SUCCESS: "success", RESULT_FAILURE: "failure", RESULT_TIMEOUT: "timeout", RESULT_ERROR: "error", RESULT_FATAL: "fatal", RESULT_DENIED: "denied", } # basically everything except RESULT_SUCCESS / success is bad class Api: """ api class that returns json results """ version = 4 # use an int since float-point is unpredictible intent = 4 @cherrypy.expose def default(self, *args, **kwargs): self.apiKey = sickbeard.API_KEY access, accessMsg, args, kwargs = self._grand_access(self.apiKey, args, kwargs) # set the output callback # default json outputCallbackDict = {'default': self._out_as_json, 'image': lambda x: x['image'], } # do we have acces ? if access: logger.log(accessMsg, logger.DEBUG) else: logger.log(accessMsg, logger.WARNING) return outputCallbackDict['default'](_responds(RESULT_DENIED, msg=accessMsg)) # set the original call_dispatcher as the local _call_dispatcher _call_dispatcher = call_dispatcher # if profile was set wrap "_call_dispatcher" in the profile function if 'profile' in kwargs: from lib.profilehooks import profile _call_dispatcher = profile(_call_dispatcher, immediate=True) del kwargs["profile"] # if debug was set call the "_call_dispatcher" if 'debug' in kwargs: outDict = _call_dispatcher(args, kwargs) # this way we can debug the cherry.py traceback in the browser del kwargs["debug"] else:# if debug was not set we wrap the "call_dispatcher" in a try block to assure a json output try: outDict = _call_dispatcher(args, kwargs) except cherrypy.HTTPRedirect: # seams like cherrypy uses exceptions for redirecting apparently this can happen when requesting images but it is ok so lets re raise it raise except Exception, e: # real internal error oohhh nooo :( logger.log(u"API :: " + ex(e), logger.ERROR) errorData = {"error_msg": ex(e), "args": args, "kwargs": kwargs} outDict = _responds(RESULT_FATAL, errorData, "SickBeard encountered an internal error! 
Please report to the Devs") if 'outputType' in outDict: outputCallback = outputCallbackDict[outDict['outputType']] else: outputCallback = outputCallbackDict['default'] return outputCallback(outDict) @cherrypy.expose def builder(self): """ expose the api-builder template """ t = webserve.PageTemplate(file="apiBuilder.tmpl") def titler(x): if not x: return x if x.lower().startswith('a '): x = x[2:] elif x.lower().startswith('the '): x = x[4:] return x t.sortedShowList = sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name))) myDB = db.DBConnection(row_type="dict") seasonSQLResults = {} episodeSQLResults = {} for curShow in t.sortedShowList: seasonSQLResults[curShow.tvdbid] = myDB.select("SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season DESC", [curShow.tvdbid]) for curShow in t.sortedShowList: episodeSQLResults[curShow.tvdbid] = myDB.select("SELECT DISTINCT season,episode FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC", [curShow.tvdbid]) t.seasonSQLResults = seasonSQLResults t.episodeSQLResults = episodeSQLResults myDB.connection.close() if len(sickbeard.API_KEY) == 32: t.apikey = sickbeard.API_KEY else: t.apikey = "api key not generated" return webserve._munge(t) def _out_as_json(self, dict): """ set cherrypy response to json """ response = cherrypy.response request = cherrypy.request response.headers['Content-Type'] = 'application/json;charset=UTF-8' try: out = json.dumps(dict, indent=self.intent, sort_keys=True) callback = request.params.get('callback') or request.params.get('jsonp') if callback != None: out = callback + '(' + out + ');' # wrap with JSONP call if requested except Exception, e: # if we fail to generate the output fake an error logger.log(u"API :: " + traceback.format_exc(), logger.DEBUG) out = '{"result":"' + result_type_map[RESULT_ERROR] + '", "message": "error while composing output: "' + ex(e) + '"}' return out def _grand_access(self, realKey, args, kwargs): """ validate api key 
and log result """ remoteIp = cherrypy.request.remote.ip apiKey = kwargs.get("apikey", None) if not apiKey: if args: # if we have keyless vars we assume first one is the api key, always ! apiKey = args[0] args = args[1:] # remove the apikey from the args tuple else: del kwargs["apikey"] if sickbeard.USE_API != True: msg = u"API :: " + remoteIp + " - SB API Disabled. ACCESS DENIED" return False, msg, args, kwargs elif apiKey == realKey: msg = u"API :: " + remoteIp + " - gave correct API KEY. ACCESS GRANTED" return True, msg, args, kwargs elif not apiKey: msg = u"API :: " + remoteIp + " - gave NO API KEY. ACCESS DENIED" return False, msg, args, kwargs else: msg = u"API :: " + remoteIp + " - gave WRONG API KEY " + apiKey + ". ACCESS DENIED" return False, msg, args, kwargs def call_dispatcher(args, kwargs): """ calls the appropriate CMD class looks for a cmd in args and kwargs or calls the TVDBShorthandWrapper when the first args element is a number or returns an error that there is no such cmd """ logger.log(u"API :: all args: '" + str(args) + "'", logger.DEBUG) logger.log(u"API :: all kwargs: '" + str(kwargs) + "'", logger.DEBUG) #logger.log(u"API :: dateFormat: '" + str(dateFormat) + "'", logger.DEBUG) cmds = None if args: cmds = args[0] args = args[1:] if "cmd" in kwargs: cmds = kwargs["cmd"] del kwargs["cmd"] outDict = {} if cmds != None: cmds = cmds.split("|") multiCmds = bool(len(cmds) > 1) for cmd in cmds: curArgs, curKwargs = filter_params(cmd, args, kwargs) cmdIndex = None if len(cmd.split("_")) > 1: # was a index used for this cmd ? 
cmd, cmdIndex = cmd.split("_") # this gives us the clear cmd and the index logger.log(u"API :: " + cmd + ": curKwargs " + str(curKwargs), logger.DEBUG) if not (multiCmds and cmd in ('show.getposter', 'show.getbanner')): # skip these cmd while chaining try: if cmd in _functionMaper: curOutDict = _functionMaper.get(cmd)(curArgs, curKwargs).run() # get the cmd class, init it and run() elif _is_int(cmd): curOutDict = TVDBShorthandWrapper(curArgs, curKwargs, cmd).run() else: curOutDict = _responds(RESULT_ERROR, "No such cmd: '" + cmd + "'") except ApiError, e: # Api errors that we raised, they are harmless curOutDict = _responds(RESULT_ERROR, msg=ex(e)) else: # if someone chained one of the forbiden cmds they will get an error for this one cmd curOutDict = _responds(RESULT_ERROR, msg="The cmd '" + cmd + "' is not supported while chaining") if multiCmds: # note: if multiple same cmds are issued but one has not an index defined it will override all others # or the other way around, this depends on the order of the cmds # this is not a bug if cmdIndex is None: # do we need a index dict for this cmd ? outDict[cmd] = curOutDict else: if not cmd in outDict: outDict[cmd] = {} outDict[cmd][cmdIndex] = curOutDict else: outDict = curOutDict if multiCmds: # if we had multiple cmds we have to wrap it in a response dict outDict = _responds(RESULT_SUCCESS, outDict) else: # index / no cmd given outDict = CMD_SickBeard(args, kwargs).run() return outDict def filter_params(cmd, args, kwargs): """ return only params kwargs that are for cmd and rename them to a clean version (remove "<cmd>_") args are shared across all cmds all args and kwarks are lowerd cmd are separated by "|" e.g. &cmd=shows|future kwargs are namespaced with "." e.g. show.tvdbid=101501 if a karg has no namespace asing it anyways (global) full e.g. 
/api?apikey=1234&cmd=show.seasonlist_asd|show.seasonlist_2&show.seasonlist_asd.tvdbid=101501&show.seasonlist_2.tvdbid=79488&sort=asc two calls of show.seasonlist one has the index "asd" the other one "2" the "tvdbid" kwargs / params have the indexed cmd as a namspace and the kwarg / param "sort" is a used as a global """ curArgs = [] for arg in args: curArgs.append(arg.lower()) curArgs = tuple(curArgs) curKwargs = {} for kwarg in kwargs: if kwarg.find(cmd + ".") == 0: cleanKey = kwarg.rpartition(".")[2] curKwargs[cleanKey] = kwargs[kwarg].lower() elif not "." in kwarg: # the kwarg was not namespaced therefore a "global" curKwargs[kwarg] = kwargs[kwarg] return curArgs, curKwargs class ApiCall(object): _help = {"desc": "No help message available. Please tell the devs that a help msg is missing for this cmd"} def __init__(self, args, kwargs): # missing try: if self._missing: self.run = self.return_missing except AttributeError: pass # help if 'help' in kwargs: self.run = self.return_help def run(self): # override with real output function in subclass return {} def return_help(self): try: if self._requiredParams: pass except AttributeError: self._requiredParams = [] try: if self._optionalParams: pass except AttributeError: self._optionalParams = [] for paramDict, type in [(self._requiredParams, "requiredParameters"), (self._optionalParams, "optionalParameters")]: if type in self._help: for paramName in paramDict: if not paramName in self._help[type]: self._help[type][paramName] = {} if paramDict[paramName]["allowedValues"]: self._help[type][paramName]["allowedValues"] = paramDict[paramName]["allowedValues"] else: self._help[type][paramName]["allowedValues"] = "see desc" self._help[type][paramName]["defaultValue"] = paramDict[paramName]["defaultValue"] elif paramDict: for paramName in paramDict: self._help[type] = {} self._help[type][paramName] = paramDict[paramName] else: self._help[type] = {} msg = "No description available" if "desc" in self._help: msg = 
self._help["desc"] del self._help["desc"] return _responds(RESULT_SUCCESS, self._help, msg) def return_missing(self): if len(self._missing) == 1: msg = "The required parameter: '" + self._missing[0] + "' was not set" else: msg = "The required parameters: '" + "','".join(self._missing) + "' where not set" return _responds(RESULT_ERROR, msg=msg) def check_params(self, args, kwargs, key, default, required, type, allowedValues): # TODO: explain this """ function to check passed params for the shorthand wrapper and to detect missing/required param """ missing = True orgDefault = default if type == "bool": allowedValues = [0, 1] if args: default = args[0] missing = False args = args[1:] if kwargs.get(key): default = kwargs.get(key) missing = False if required: try: self._missing self._requiredParams.append(key) except AttributeError: self._missing = [] self._requiredParams = {} self._requiredParams[key] = {"allowedValues": allowedValues, "defaultValue": orgDefault} if missing and key not in self._missing: self._missing.append(key) else: try: self._optionalParams[key] = {"allowedValues": allowedValues, "defaultValue": orgDefault} except AttributeError: self._optionalParams = {} self._optionalParams[key] = {"allowedValues": allowedValues, "defaultValue": orgDefault} if default: default = self._check_param_type(default, key, type) if type == "bool": type = [] self._check_param_value(default, key, allowedValues) return default, args def _check_param_type(self, value, name, type): """ checks if value can be converted / parsed to type will raise an error on failure or will convert it to type and return new converted value can check for: - int: will be converted into int - bool: will be converted to False / True - list: will always return a list - string: will do nothing for now - ignore: will ignore it, just like "string" """ error = False if type == "int": if _is_int(value): value = int(value) else: error = True elif type == "bool": if value in ("0", "1"): value = 
bool(int(value)) elif value in ("true", "True", "TRUE"): value = True elif value in ("false", "False", "FALSE"): value = False else: error = True elif type == "list": value = value.split("|") elif type == "string": pass elif type == "ignore": pass else: logger.log(u"API :: Invalid param type set " + str(type) + " can not check or convert ignoring it", logger.ERROR) if error: # this is a real ApiError !! raise ApiError(u"param: '" + str(name) + "' with given value: '" + str(value) + "' could not be parsed into '" + str(type) + "'") return value def _check_param_value(self, value, name, allowedValues): """ will check if value (or all values in it ) are in allowed values will raise an exception if value is "out of range" if bool(allowedValue) == False a check is not performed and all values are excepted """ if allowedValues: error = False if isinstance(value, list): for item in value: if not item in allowedValues: error = True else: if not value in allowedValues: error = True if error: # this is kinda a ApiError but raising an error is the only way of quitting here raise ApiError(u"param: '" + str(name) + "' with given value: '" + str(value) + "' is out of allowed range '" + str(allowedValues) + "'") class TVDBShorthandWrapper(ApiCall): _help = {"desc": "this is an internal function wrapper. 
call the help command directly for more information"} def __init__(self, args, kwargs, sid): self.origArgs = args self.kwargs = kwargs self.sid = sid self.s, args = self.check_params(args, kwargs, "s", None, False, "ignore", []) self.e, args = self.check_params(args, kwargs, "e", None, False, "ignore", []) self.args = args ApiCall.__init__(self, args, kwargs) def run(self): """ internal function wrapper """ args = (self.sid,) + self.origArgs if self.e: return CMD_Episode(args, self.kwargs).run() elif self.s: return CMD_ShowSeasons(args, self.kwargs).run() else: return CMD_Show(args, self.kwargs).run() ################################ # helper functions # ################################ def _sizeof_fmt(num): for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.00: return "%3.2f %s" % (num, x) num /= 1024.00 def _is_int(data): try: int(data) except (TypeError, ValueError, OverflowError): return False else: return True def _rename_element(dict, oldKey, newKey): try: dict[newKey] = dict[oldKey] del dict[oldKey] except (ValueError, TypeError, NameError): pass return dict def _responds(result_type, data=None, msg=""): """ result is a string of given "type" (success/failure/timeout/error) message is a human readable string, can be empty data is either a dict or a array, can be a empty dict or empty array """ if data is None: data = {} return {"result": result_type_map[result_type], "message": msg, "data": data} def _get_quality_string(q): qualityString = "Custom" if q in qualityPresetStrings: qualityString = qualityPresetStrings[q] elif q in Quality.qualityStrings: qualityString = Quality.qualityStrings[q] return qualityString def _get_status_Strings(s): return statusStrings[s] def _ordinal_to_dateTimeForm(ordinal): # workaround for episodes with no airdate if int(ordinal) != 1: date = datetime.date.fromordinal(ordinal) else: return "" return date.strftime(dateTimeFormat) def _ordinal_to_dateForm(ordinal): if int(ordinal) != 1: date = 
datetime.date.fromordinal(ordinal) else: return "" return date.strftime(dateFormat) def _historyDate_to_dateTimeForm(timeString): date = datetime.datetime.strptime(timeString, history.dateFormat) return date.strftime(dateTimeFormat) def _replace_statusStrings_with_statusCodes(statusStrings): statusCodes = [] if "snatched" in statusStrings: statusCodes += Quality.SNATCHED if "downloaded" in statusStrings: statusCodes += Quality.DOWNLOADED if "skipped" in statusStrings: statusCodes.append(SKIPPED) if "wanted" in statusStrings: statusCodes.append(WANTED) if "archived" in statusStrings: statusCodes.append(ARCHIVED) if "ignored" in statusStrings: statusCodes.append(IGNORED) if "unaired" in statusStrings: statusCodes.append(UNAIRED) return statusCodes def _mapQuality(showObj): quality_map = _getQualityMap() anyQualities = [] bestQualities = [] iqualityID, aqualityID = Quality.splitQuality(int(showObj)) if iqualityID: for quality in iqualityID: anyQualities.append(quality_map[quality]) if aqualityID: for quality in aqualityID: bestQualities.append(quality_map[quality]) return anyQualities, bestQualities def _getQualityMap(): return {Quality.SDTV: 'sdtv', Quality.SDDVD: 'sddvd', Quality.HDTV: 'hdtv', Quality.RAWHDTV: 'rawhdtv', Quality.FULLHDTV: 'fullhdtv', Quality.HDWEBDL: 'hdwebdl', Quality.FULLHDWEBDL: 'fullhdwebdl', Quality.HDBLURAY: 'hdbluray', Quality.FULLHDBLURAY: 'fullhdbluray', Quality.UNKNOWN: 'unknown'} def _getRootDirs(): if sickbeard.ROOT_DIRS == "": return {} rootDir = {} root_dirs = sickbeard.ROOT_DIRS.split('|') default_index = int(sickbeard.ROOT_DIRS.split('|')[0]) rootDir["default_index"] = int(sickbeard.ROOT_DIRS.split('|')[0]) # remove default_index value from list (this fixes the offset) root_dirs.pop(0) if len(root_dirs) < default_index: return {} # clean up the list - replace %xx escapes by their single-character equivalent root_dirs = [urllib.unquote_plus(x) for x in root_dirs] default_dir = root_dirs[default_index] dir_list = [] for root_dir in 
root_dirs: valid = 1 try: ek.ek(os.listdir, root_dir) except: valid = 0 default = 0 if root_dir is default_dir: default = 1 curDir = {} curDir['valid'] = valid curDir['location'] = root_dir curDir['default'] = default dir_list.append(curDir) return dir_list class ApiError(Exception): "Generic API error" class IntParseError(Exception): "A value could not be parsed into a int. But should be parsable to a int " #-------------------------------------------------------------------------------------# class CMD_Help(ApiCall): _help = {"desc": "display help information for a given subject/command", "optionalParameters": {"subject": {"desc": "command - the top level command"}, } } def __init__(self, args, kwargs): # required # optional self.subject, args = self.check_params(args, kwargs, "subject", "help", False, "string", _functionMaper.keys()) ApiCall.__init__(self, args, kwargs) def run(self): """ display help information for a given subject/command """ if self.subject in _functionMaper: out = _responds(RESULT_SUCCESS, _functionMaper.get(self.subject)((), {"help": 1}).run()) else: out = _responds(RESULT_FAILURE, msg="No such cmd") return out class CMD_ComingEpisodes(ApiCall): _help = {"desc": "display the coming episodes", "optionalParameters": {"sort": {"desc": "change the sort order"}, "type": {"desc": "one or more of allowedValues separated by |"}, "paused": {"desc": "0 to exclude paused shows, 1 to include them, or omitted to use the SB default"}, } } def __init__(self, args, kwargs): # required # optional self.sort, args = self.check_params(args, kwargs, "sort", "date", False, "string", ["date", "show", "network"]) self.type, args = self.check_params(args, kwargs, "type", "today|missed|soon|later", False, "list", ["missed", "later", "today", "soon"]) self.paused, args = self.check_params(args, kwargs, "paused", sickbeard.COMING_EPS_DISPLAY_PAUSED, False, "int", [0, 1]) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display the coming 
class CMD_ComingEpisodes(ApiCall):
    _help = {"desc": "display the coming episodes",
             "optionalParameters": {"sort": {"desc": "change the sort order"},
                                    "type": {"desc": "one or more of allowedValues separated by |"},
                                    "paused": {"desc": "0 to exclude paused shows, 1 to include them, or omitted to use the SB default"},
                                    }
             }

    def __init__(self, args, kwargs):
        # required
        # optional
        self.sort, args = self.check_params(args, kwargs, "sort", "date", False, "string", ["date", "show", "network"])
        self.type, args = self.check_params(args, kwargs, "type", "today|missed|soon|later", False, "list", ["missed", "later", "today", "soon"])
        self.paused, args = self.check_params(args, kwargs, "paused", sickbeard.COMING_EPS_DISPLAY_PAUSED, False, "int", [0, 1])
        # super, missing, help
        ApiCall.__init__(self, args, kwargs)

    def run(self):
        """ display the coming episodes """
        today = datetime.date.today().toordinal()
        next_week = (datetime.date.today() + datetime.timedelta(days=7)).toordinal()
        recently = (datetime.date.today() - datetime.timedelta(days=3)).toordinal()

        done_show_list = []
        qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED]

        myDB = db.DBConnection(row_type="dict")
        # query 1: episodes airing between today and next week that are not
        # already downloaded/snatched/archived/ignored
        sql_results = myDB.select("SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid AS 'tvdbid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate < ? AND tv_shows.tvdb_id = tv_episodes.showid AND tv_episodes.status NOT IN (" + ','.join(['?'] * len(qualList)) + ")", [today, next_week] + qualList)
        for cur_result in sql_results:
            done_show_list.append(int(cur_result["tvdbid"]))

        # query 2: for shows not covered above, their next upcoming episode
        # (airdate >= next week) via a correlated subselect
        more_sql_results = myDB.select("SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid AS 'tvdbid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes outer_eps, tv_shows WHERE season != 0 AND showid NOT IN (" + ','.join(['?'] * len(done_show_list)) + ") AND tv_shows.tvdb_id = outer_eps.showid AND airdate = (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.season != 0 AND inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? ORDER BY inner_eps.airdate ASC LIMIT 1) AND outer_eps.status NOT IN (" + ','.join(['?'] * len(Quality.DOWNLOADED + Quality.SNATCHED)) + ")", done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED)
        sql_results += more_sql_results

        # query 3: recently aired (last 3 days) episodes still marked WANTED
        more_sql_results = myDB.select("SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid AS 'tvdbid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.tvdb_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN (" + ','.join(['?'] * len(qualList)) + ")", [today, recently, WANTED] + qualList)
        sql_results += more_sql_results

        # sort by air date (py2 cmp-style comparators)
        sorts = {
            'date': (lambda x, y: cmp(int(x["airdate"]), int(y["airdate"]))),
            'show': (lambda a, b: cmp(a["show_name"], b["show_name"])),
            'network': (lambda a, b: cmp(a["network"], b["network"])),
        }
        sql_results.sort(sorts[self.sort])

        finalEpResults = {}
        # add all requested types or all
        for curType in self.type:
            finalEpResults[curType] = []

        for ep in sql_results:
            """
                Missed:   yesterday... (less than 1week)
                Today:    today
                Soon:     tomorrow till next week
                Later:    later than next week
            """
            # honor the paused filter unless the caller asked to include paused shows
            if ep["paused"] and not self.paused:
                continue

            status = "soon"
            if ep["airdate"] < today:
                status = "missed"
            elif ep["airdate"] >= next_week:
                status = "later"
            elif ep["airdate"] >= today and ep["airdate"] < next_week:
                if ep["airdate"] == today:
                    status = "today"
                else:
                    status = "soon"

            # skip unwanted
            if self.type != None and not status in self.type:
                continue

            ordinalAirdate = int(ep["airdate"])
            if not ep["network"]:
                ep["network"] = ""
            ep["airdate"] = _ordinal_to_dateForm(ordinalAirdate)
            ep["quality"] = _get_quality_string(ep["quality"])
            # clean up tvdb horrible airs field
            ep["airs"] = str(ep["airs"]).replace('am', ' AM').replace('pm', ' PM').replace('  ', ' ')
            # start day of the week on 1 (monday)
            ep["weekday"] = 1 + datetime.date.fromordinal(ordinalAirdate).weekday()

            # TODO: check if this obsolete
            if not status in finalEpResults:
                finalEpResults[status] = []

            finalEpResults[status].append(ep)
        myDB.connection.close()
        return _responds(RESULT_SUCCESS, finalEpResults)
class CMD_Episode(ApiCall):
    _help = {"desc": "display detailed info about an episode",
             "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"},
                                    "season": {"desc": "the season number"},
                                    "episode": {"desc": "the episode number"}
                                    },
             "optionalParameters": {"full_path": {"desc": "show the full absolute path (if valid) instead of a relative path for the episode location"}
                                    }
             }

    def __init__(self, args, kwargs):
        # required
        self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", [])
        self.s, args = self.check_params(args, kwargs, "season", None, True, "int", [])
        self.e, args = self.check_params(args, kwargs, "episode", None, True, "int", [])
        # optional
        self.fullPath, args = self.check_params(args, kwargs, "full_path", 0, False, "bool", [])
        # super, missing, help
        ApiCall.__init__(self, args, kwargs)

    def run(self):
        """ display detailed info about an episode """
        showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid))
        if not showObj:
            return _responds(RESULT_FAILURE, msg="Show not found")

        myDB = db.DBConnection(row_type="dict")
        sqlResults = myDB.select("SELECT name, description, airdate, status, location, file_size, release_name FROM tv_episodes WHERE showid = ? AND episode = ? AND season = ?", [self.tvdbid, self.e, self.s])
        if not len(sqlResults) == 1:
            raise ApiError("Episode not found")
        episode = sqlResults[0]
        # handle path options
        # absolute vs relative vs broken
        showPath = None
        try:
            showPath = showObj.location
        except sickbeard.exceptions.ShowDirNotFoundException:
            pass

        if bool(self.fullPath) == True and showPath:
            pass
        elif bool(self.fullPath) == False and showPath:
            # using the length because lstrip removes to much
            showPathLength = len(showPath) + 1  # the / or \ yeah not that nice i know
            episode["location"] = episode["location"][showPathLength:]
        elif not showPath:  # show dir is broken ... episode path will be empty
            episode["location"] = ""
        # convert stuff to human form
        episode["airdate"] = _ordinal_to_dateForm(episode["airdate"])
        status, quality = Quality.splitCompositeStatus(int(episode["status"]))
        episode["status"] = _get_status_Strings(status)
        episode["quality"] = _get_quality_string(quality)
        episode["file_size_human"] = _sizeof_fmt(episode["file_size"])

        myDB.connection.close()
        return _responds(RESULT_SUCCESS, episode)


class CMD_EpisodeSearch(ApiCall):
    _help = {"desc": "search for an episode. the response might take some time",
             "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"},
                                    "season": {"desc": "the season number"},
                                    "episode": {"desc": "the episode number"}
                                    }
             }

    def __init__(self, args, kwargs):
        # required
        self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", [])
        self.s, args = self.check_params(args, kwargs, "season", None, True, "int", [])
        self.e, args = self.check_params(args, kwargs, "episode", None, True, "int", [])
        # optional
        # super, missing, help
        ApiCall.__init__(self, args, kwargs)

    def run(self):
        """ search for an episode """
        showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid))
        if not showObj:
            return _responds(RESULT_FAILURE, msg="Show not found")

        # retrieve the episode object and fail if we can't get one
        epObj = showObj.getEpisode(int(self.s), int(self.e))
        if isinstance(epObj, str):
            return _responds(RESULT_FAILURE, msg="Episode not found")

        # make a queue item for it and put it on the queue
        ep_queue_item = search_queue.ManualSearchQueueItem(epObj)
        sickbeard.searchQueueScheduler.action.add_item(ep_queue_item)  #@UndefinedVariable

        # wait until the queue item tells us whether it worked or not
        # NOTE(review): this blocks the request thread by polling - confirm
        # the web server runs each API call on its own thread
        while ep_queue_item.success == None:  #@UndefinedVariable
            time.sleep(1)

        # return the correct json value
        if ep_queue_item.success:
            status, quality = Quality.splitCompositeStatus(epObj.status)  #@UnusedVariable
            # TODO: split quality and status?
            return _responds(RESULT_SUCCESS, {"quality": _get_quality_string(quality)}, "Snatched (" + _get_quality_string(quality) + ")")

        return _responds(RESULT_FAILURE, msg='Unable to find episode')
class CMD_EpisodeSetStatus(ApiCall):
    _help = {"desc": "set status of an episode or season (when no ep is provided)",
             "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"},
                                    "season": {"desc": "the season number"},
                                    "status": {"desc": "the status values: wanted, skipped, archived, ignored"}
                                    },
             "optionalParameters": {"episode": {"desc": "the episode number"},
                                    "force": {"desc": "should we replace existing (downloaded) episodes or not"}
                                    }
             }

    def __init__(self, args, kwargs):
        # required
        self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", [])
        self.s, args = self.check_params(args, kwargs, "season", None, True, "int", [])
        self.status, args = self.check_params(args, kwargs, "status", None, True, "string", ["wanted", "skipped", "archived", "ignored"])
        # optional
        self.e, args = self.check_params(args, kwargs, "episode", None, False, "int", [])
        self.force, args = self.check_params(args, kwargs, "force", 0, False, "bool", [])
        # super, missing, help
        ApiCall.__init__(self, args, kwargs)

    def run(self):
        """ set status of an episode or a season (when no ep is provided) """
        showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid))
        if not showObj:
            return _responds(RESULT_FAILURE, msg="Show not found")

        # convert the string status to a int
        for status in statusStrings.statusStrings:
            if str(statusStrings[status]).lower() == str(self.status).lower():
                self.status = status
                break
        else:  # if we dont break out of the for loop we got here.
            # the allowed values has at least one item that could not be matched against the internal status strings
            raise ApiError("The status string could not be matched to a status. Report to Devs!")

        ep_list = []
        if self.e:
            epObj = showObj.getEpisode(self.s, self.e)
            if epObj == None:
                return _responds(RESULT_FAILURE, msg="Episode not found")
            ep_list = [epObj]
        else:
            # get all episode numbers frome self,season
            ep_list = showObj.getAllEpisodes(season=self.s)

        def _epResult(result_code, ep, msg=""):
            # per-episode result entry for the aggregated response
            return {'season': ep.season, 'episode': ep.episode, 'status': _get_status_Strings(ep.status), 'result': result_type_map[result_code], 'message': msg}

        ep_results = []
        failure = False
        start_backlog = False
        ep_segment = None

        for epObj in ep_list:
            if ep_segment == None and self.status == WANTED:
                # figure out what segment the episode is in and remember it so we can backlog it
                if showObj.air_by_date:
                    ep_segment = str(epObj.airdate)[:7]
                else:
                    ep_segment = epObj.season

            with epObj.lock:
                # don't let them mess up UNAIRED episodes
                if epObj.status == UNAIRED:
                    if self.e != None:  # setting the status of a unaired is only considert a failure if we directly wanted this episode, but is ignored on a season request
                        ep_results.append(_epResult(RESULT_FAILURE, epObj, "Refusing to change status because it is UNAIRED"))
                        failure = True
                    continue

                # allow the user to force setting the status for an already downloaded episode
                if epObj.status in Quality.DOWNLOADED and not self.force:
                    ep_results.append(_epResult(RESULT_FAILURE, epObj, "Refusing to change status because it is already marked as DOWNLOADED"))
                    failure = True
                    continue

                epObj.status = self.status
                epObj.saveToDB()

                if self.status == WANTED:
                    start_backlog = True
                ep_results.append(_epResult(RESULT_SUCCESS, epObj))

        extra_msg = ""
        if start_backlog:
            # at least one episode became WANTED - kick off a backlog search
            cur_backlog_queue_item = search_queue.BacklogQueueItem(showObj, ep_segment)
            sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item)  #@UndefinedVariable
            logger.log(u"API :: Starting backlog for " + showObj.name + " season " + str(ep_segment) + " because some episodes were set to WANTED")
            extra_msg = " Backlog started"

        if failure:
            return _responds(RESULT_FAILURE, ep_results, 'Failed to set all or some status. Check data.' + extra_msg)
        else:
            return _responds(RESULT_SUCCESS, msg='All status set successfully.' + extra_msg)


class CMD_Exceptions(ApiCall):
    _help = {"desc": "display scene exceptions for all or a given show",
             "optionalParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"},
                                    }
             }

    def __init__(self, args, kwargs):
        # required
        # optional
        self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, False, "int", [])
        # super, missing, help
        ApiCall.__init__(self, args, kwargs)

    def run(self):
        """ display scene exceptions for all or a given show """
        myDB = db.DBConnection("cache.db", row_type="dict")

        if self.tvdbid == None:
            # no show given: map every show's tvdbid to its exception names
            sqlResults = myDB.select("SELECT show_name, tvdb_id AS 'tvdbid' FROM scene_exceptions")
            scene_exceptions = {}
            for row in sqlResults:
                tvdbid = row["tvdbid"]
                if not tvdbid in scene_exceptions:
                    scene_exceptions[tvdbid] = []
                scene_exceptions[tvdbid].append(row["show_name"])
        else:
            showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid))
            if not showObj:
                return _responds(RESULT_FAILURE, msg="Show not found")

            sqlResults = myDB.select("SELECT show_name, tvdb_id AS 'tvdbid' FROM scene_exceptions WHERE tvdb_id = ?", [self.tvdbid])
            scene_exceptions = []
            for row in sqlResults:
                scene_exceptions.append(row["show_name"])

        myDB.connection.close()
        return _responds(RESULT_SUCCESS, scene_exceptions)
class CMD_History(ApiCall):
    _help = {"desc": "display sickbeard downloaded/snatched history",
             "optionalParameters": {"limit": {"desc": "limit returned results"},
                                    "type": {"desc": "only show a specific type of results"},
                                    }
             }

    def __init__(self, args, kwargs):
        # required
        # optional
        self.limit, args = self.check_params(args, kwargs, "limit", 100, False, "int", [])
        self.type, args = self.check_params(args, kwargs, "type", None, False, "string", ["downloaded", "snatched"])
        # super, missing, help
        ApiCall.__init__(self, args, kwargs)

    def run(self):
        """ display sickbeard downloaded/snatched history """
        typeCodes = []
        if self.type == "downloaded":
            self.type = "Downloaded"
            typeCodes = Quality.DOWNLOADED
        elif self.type == "snatched":
            self.type = "Snatched"
            typeCodes = Quality.SNATCHED
        else:
            typeCodes = Quality.SNATCHED + Quality.DOWNLOADED

        myDB = db.DBConnection(row_type="dict")

        # the limit is hard-capped at 100 rows; 0 means "no limit"
        ulimit = min(int(self.limit), 100)
        if ulimit == 0:
            sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id AND action in (" + ','.join(['?'] * len(typeCodes)) + ") ORDER BY date DESC", typeCodes)
        else:
            sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id AND action in (" + ','.join(['?'] * len(typeCodes)) + ") ORDER BY date DESC LIMIT ?", typeCodes + [ulimit])

        results = []
        for row in sqlResults:
            status, quality = Quality.splitCompositeStatus(int(row["action"]))
            status = _get_status_Strings(status)
            if self.type and not status == self.type:
                continue
            row["status"] = status
            row["quality"] = _get_quality_string(quality)
            row["date"] = _historyDate_to_dateTimeForm(str(row["date"]))
            del row["action"]
            _rename_element(row, "showid", "tvdbid")
            row["resource_path"] = os.path.dirname(row["resource"])
            row["resource"] = os.path.basename(row["resource"])
            results.append(row)
        myDB.connection.close()
        return _responds(RESULT_SUCCESS, results)


class CMD_HistoryClear(ApiCall):
    _help = {"desc": "clear sickbeard's history",
             }

    def __init__(self, args, kwargs):
        # required
        # optional
        # super, missing, help
        ApiCall.__init__(self, args, kwargs)

    def run(self):
        """ clear sickbeard's history """
        myDB = db.DBConnection()
        myDB.action("DELETE FROM history WHERE 1=1")
        myDB.connection.close()
        return _responds(RESULT_SUCCESS, msg="History cleared")


class CMD_HistoryTrim(ApiCall):
    _help = {"desc": "trim sickbeard's history by removing entries greater than 30 days old"
             }

    def __init__(self, args, kwargs):
        # required
        # optional
        # super, missing, help
        ApiCall.__init__(self, args, kwargs)

    def run(self):
        """ trim sickbeard's history """
        myDB = db.DBConnection()
        # NOTE: the comparison value is generated locally (not user input),
        # so the string-built SQL is safe here
        myDB.action("DELETE FROM history WHERE date < " + str((datetime.datetime.today() - datetime.timedelta(days=30)).strftime(history.dateFormat)))
        myDB.connection.close()
        return _responds(RESULT_SUCCESS, msg="Removed history entries greater than 30 days old")


class CMD_Logs(ApiCall):
    _help = {"desc": "view sickbeard's log",
             "optionalParameters": {"min_level ": {"desc": "the minimum level classification of log entries to show, with each level inherting its above level"}
                                    }
             }

    def __init__(self, args, kwargs):
        # required
        # optional
        self.min_level, args = self.check_params(args, kwargs, "min_level", "error", False, "string", ["error", "warning", "info", "debug"])
        # super, missing, help
        ApiCall.__init__(self, args, kwargs)

    def run(self):
        """ view sickbeard's log """
        # 10 = Debug / 20 = Info / 30 = Warning / 40 = Error
        minLevel = logger.reverseNames[str(self.min_level).upper()]

        data = []
        if os.path.isfile(logger.sb_log_instance.log_file_path):
            with ek.ek(open, logger.sb_log_instance.log_file_path) as f:
                data = f.readlines()

        # groups: date(1-3) time(4-6) level(7) source(8) message(9)
        regex = "^(\d\d\d\d)\-(\d\d)\-(\d\d)\s*(\d\d)\:(\d\d):(\d\d)\s*([A-Z]+)\s*(.+?)\s*\:\:\s*(.*)$"

        finalData = []

        numLines = 0
        lastLine = False
        numToShow = min(50, len(data))

        # walk the log backwards so the newest entries come first
        for x in reversed(data):

            x = x.decode('utf-8')
            match = re.match(regex, x)

            if match:
                level = match.group(7)
                if level not in logger.reverseNames:
                    lastLine = False
                    continue

                if logger.reverseNames[level] >= minLevel:
                    lastLine = True
                    finalData.append(x.rstrip("\n"))
                else:
                    lastLine = False
                    continue

            elif lastLine:
                # continuation line (e.g. traceback) of an included entry
                finalData.append("AA" + x)

            numLines += 1

            if numLines >= numToShow:
                break

        return _responds(RESULT_SUCCESS, finalData)
"api_version": Api.version, "api_commands": sorted(_functionMaper.keys())} return _responds(RESULT_SUCCESS, data) class CMD_SickBeardAddRootDir(ApiCall): _help = {"desc": "add a sickbeard user's parent directory", "requiredParameters": {"location": {"desc": "the full path to root (parent) directory"} }, "optionalParameters": {"default": {"desc": "make the location passed the default root (parent) directory"} } } def __init__(self, args, kwargs): # required self.location, args = self.check_params(args, kwargs, "location", None, True, "string", []) # optional self.default, args = self.check_params(args, kwargs, "default", 0, False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ add a parent directory to sickbeard's config """ self.location = urllib.unquote_plus(self.location) location_matched = 0 # dissallow adding/setting an invalid dir if not ek.ek(os.path.isdir, self.location): return _responds(RESULT_FAILURE, msg="Location is invalid") root_dirs = [] if sickbeard.ROOT_DIRS == "": self.default = 1 else: root_dirs = sickbeard.ROOT_DIRS.split('|') index = int(sickbeard.ROOT_DIRS.split('|')[0]) root_dirs.pop(0) # clean up the list - replace %xx escapes by their single-character equivalent root_dirs = [urllib.unquote_plus(x) for x in root_dirs] for x in root_dirs: if(x == self.location): location_matched = 1 if (self.default == 1): index = root_dirs.index(self.location) break if(location_matched == 0): if (self.default == 1): index = 0 root_dirs.insert(0, self.location) else: root_dirs.append(self.location) root_dirs_new = [urllib.unquote_plus(x) for x in root_dirs] root_dirs_new.insert(0, index) root_dirs_new = '|'.join(unicode(x) for x in root_dirs_new) sickbeard.ROOT_DIRS = root_dirs_new return _responds(RESULT_SUCCESS, _getRootDirs(), msg="Root directories updated") class CMD_SickBeardCheckScheduler(ApiCall): _help = {"desc": "query the scheduler"} def __init__(self, args, kwargs): # required # optional # super, missing, 
help ApiCall.__init__(self, args, kwargs) def run(self): """ query the scheduler """ myDB = db.DBConnection() sqlResults = myDB.select("SELECT last_backlog FROM info") backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused() #@UndefinedVariable backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() #@UndefinedVariable searchStatus = sickbeard.currentSearchScheduler.action.amActive #@UndefinedVariable nextSearch = str(sickbeard.currentSearchScheduler.timeLeft()).split('.')[0] nextBacklog = sickbeard.backlogSearchScheduler.nextRun().strftime(dateFormat).decode(sickbeard.SYS_ENCODING) myDB.connection.close() data = {"backlog_is_paused": int(backlogPaused), "backlog_is_running": int(backlogRunning), "last_backlog": _ordinal_to_dateForm(sqlResults[0]["last_backlog"]), "search_is_running": int(searchStatus), "next_search": nextSearch, "next_backlog": nextBacklog} return _responds(RESULT_SUCCESS, data) class CMD_SickBeardDeleteRootDir(ApiCall): _help = {"desc": "delete a sickbeard user's parent directory", "requiredParameters": {"location": {"desc": "the full path to root (parent) directory"} } } def __init__(self, args, kwargs): # required self.location, args = self.check_params(args, kwargs, "location", None, True, "string", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ delete a parent directory from sickbeard's config """ if sickbeard.ROOT_DIRS == "": return _responds(RESULT_FAILURE, _getRootDirs(), msg="No root directories detected") root_dirs_new = [] root_dirs = sickbeard.ROOT_DIRS.split('|') index = int(root_dirs[0]) root_dirs.pop(0) # clean up the list - replace %xx escapes by their single-character equivalent root_dirs = [urllib.unquote_plus(x) for x in root_dirs] old_root_dir = root_dirs[index] for curRootDir in root_dirs: if not curRootDir == self.location: root_dirs_new.append(curRootDir) else: newIndex = 0 for curIndex, curNewRootDir in enumerate(root_dirs_new): if 
curNewRootDir is old_root_dir: newIndex = curIndex break root_dirs_new = [urllib.unquote_plus(x) for x in root_dirs_new] if len(root_dirs_new) > 0: root_dirs_new.insert(0, newIndex) root_dirs_new = "|".join(unicode(x) for x in root_dirs_new) sickbeard.ROOT_DIRS = root_dirs_new # what if the root dir was not found? return _responds(RESULT_SUCCESS, _getRootDirs(), msg="Root directory deleted") class CMD_SickBeardForceSearch(ApiCall): _help = {"desc": "force the episode search early" } def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ force the episode search early """ # Changing all old missing episodes to status WANTED # Beginning search for new episodes on RSS # Searching all providers for any needed episodes result = sickbeard.currentSearchScheduler.forceRun() if result: return _responds(RESULT_SUCCESS, msg="Episode search forced") return _responds(RESULT_FAILURE, msg="Can not search for episode") class CMD_SickBeardGetDefaults(ApiCall): _help = {"desc": "get sickbeard user defaults"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ get sickbeard user defaults """ anyQualities, bestQualities = _mapQuality(sickbeard.QUALITY_DEFAULT) data = {"status": statusStrings[sickbeard.STATUS_DEFAULT].lower(), "flatten_folders": int(sickbeard.FLATTEN_FOLDERS_DEFAULT), "initial": anyQualities, "archive": bestQualities, "future_show_paused": int(sickbeard.COMING_EPS_DISPLAY_PAUSED) } return _responds(RESULT_SUCCESS, data) class CMD_SickBeardGetMessages(ApiCall): _help = {"desc": "get all messages"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): messages = [] for cur_notification in ui.notifications.get_notifications(): messages.append({"title": cur_notification.title, "message": cur_notification.message, "type": 
cur_notification.type}) return _responds(RESULT_SUCCESS, messages) class CMD_SickBeardGetRootDirs(ApiCall): _help = {"desc": "get sickbeard user parent directories"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ get the parent directories defined in sickbeard's config """ return _responds(RESULT_SUCCESS, _getRootDirs()) class CMD_SickBeardPauseBacklog(ApiCall): _help = {"desc": "pause the backlog search", "optionalParameters": {"pause ": {"desc": "pause or unpause the global backlog"} } } def __init__(self, args, kwargs): # required # optional self.pause, args = self.check_params(args, kwargs, "pause", 0, False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ pause the backlog search """ if self.pause == True: sickbeard.searchQueueScheduler.action.pause_backlog() #@UndefinedVariable return _responds(RESULT_SUCCESS, msg="Backlog paused") else: sickbeard.searchQueueScheduler.action.unpause_backlog() #@UndefinedVariable return _responds(RESULT_SUCCESS, msg="Backlog unpaused") class CMD_SickBeardPing(ApiCall): _help = {"desc": "check to see if sickbeard is running"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ check to see if sickbeard is running """ cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store" if sickbeard.started: return _responds(RESULT_SUCCESS, {"pid": sickbeard.PID}, "Pong") else: return _responds(RESULT_SUCCESS, msg="Pong") class CMD_SickBeardRestart(ApiCall): _help = {"desc": "restart sickbeard"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ restart sickbeard """ threading.Timer(2, sickbeard.invoke_restart, [False]).start() return _responds(RESULT_SUCCESS, msg="SickBeard is restarting...") class 
CMD_SickBeardSearchTVDB(ApiCall): _help = {"desc": "search for show at tvdb with a given string and language", "optionalParameters": {"name": {"desc": "name of the show you want to search for"}, "tvdbid": {"desc": "thetvdb.com unique id of a show"}, "lang": {"desc": "the 2 letter abbreviation lang id"} } } valid_languages = { 'el': 20, 'en': 7, 'zh': 27, 'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9, 'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11, 'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30} def __init__(self, args, kwargs): # required # optional self.name, args = self.check_params(args, kwargs, "name", None, False, "string", []) self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, False, "int", []) self.lang, args = self.check_params(args, kwargs, "lang", "en", False, "string", self.valid_languages.keys()) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ search for show at tvdb with a given string and language """ if self.name and not self.tvdbid: # only name was given baseURL = "http://thetvdb.com/api/GetSeries.php?" 
params = {"seriesname": str(self.name).encode('utf-8'), 'language': self.lang} finalURL = baseURL + urllib.urlencode(params) urlData = sickbeard.helpers.getURL(finalURL) if urlData is None: return _responds(RESULT_FAILURE, msg="Did not get result from tvdb") else: try: seriesXML = etree.ElementTree(etree.XML(urlData)) except Exception, e: logger.log(u"API :: Unable to parse XML for some reason: " + ex(e) + " from XML: " + urlData, logger.ERROR) return _responds(RESULT_FAILURE, msg="Unable to read result from tvdb") series = seriesXML.getiterator('Series') results = [] for curSeries in series: results.append({"tvdbid": int(curSeries.findtext('seriesid')), "name": curSeries.findtext('SeriesName'), "first_aired": curSeries.findtext('FirstAired')}) lang_id = self.valid_languages[self.lang] return _responds(RESULT_SUCCESS, {"results": results, "langid": lang_id}) elif self.tvdbid: # There's gotta be a better way of doing this but we don't wanna # change the language value elsewhere ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy() lang_id = self.valid_languages[self.lang] if self.lang and not self.lang == 'en': ltvdb_api_parms['language'] = self.lang t = tvdb_api.Tvdb(actors=False, **ltvdb_api_parms) try: myShow = t[int(self.tvdbid)] except (tvdb_exceptions.tvdb_shownotfound, tvdb_exceptions.tvdb_error): logger.log(u"API :: Unable to find show with id " + str(self.tvdbid), logger.WARNING) return _responds(RESULT_SUCCESS, {"results": [], "langid": lang_id}) if not myShow.data['seriesname']: logger.log(u"API :: Found show with tvdbid " + str(self.tvdbid) + ", however it contained no show name", logger.DEBUG) return _responds(RESULT_FAILURE, msg="Show contains no name, invalid result") showOut = [{"tvdbid": self.tvdbid, "name": unicode(myShow.data['seriesname']), "first_aired": myShow.data['firstaired']}] return _responds(RESULT_SUCCESS, {"results": showOut, "langid": lang_id}) else: return _responds(RESULT_FAILURE, msg="Either tvdbid or name is required") class 
CMD_SickBeardSetDefaults(ApiCall): _help = {"desc": "set sickbeard user defaults", "optionalParameters": {"initial": {"desc": "initial quality for the show"}, "archive": {"desc": "archive quality for the show"}, "flatten_folders": {"desc": "flatten subfolders within the show directory"}, "status": {"desc": "status of missing episodes"} } } def __init__(self, args, kwargs): # required # optional self.initial, args = self.check_params(args, kwargs, "initial", None, False, "list", ["sdtv", "sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray", "unknown"]) self.archive, args = self.check_params(args, kwargs, "archive", None, False, "list", ["sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray"]) self.future_show_paused, args = self.check_params(args, kwargs, "future_show_paused", None, False, "bool", []) self.flatten_folders, args = self.check_params(args, kwargs, "flatten_folders", None, False, "bool", []) self.status, args = self.check_params(args, kwargs, "status", None, False, "string", ["wanted", "skipped", "archived", "ignored"]) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ set sickbeard user defaults """ quality_map = {'sdtv': Quality.SDTV, 'sddvd': Quality.SDDVD, 'hdtv': Quality.HDTV, 'rawhdtv': Quality.RAWHDTV, 'fullhdtv': Quality.FULLHDTV, 'hdwebdl': Quality.HDWEBDL, 'fullhdwebdl': Quality.FULLHDWEBDL, 'hdbluray': Quality.HDBLURAY, 'fullhdbluray': Quality.FULLHDBLURAY, 'unknown': Quality.UNKNOWN} iqualityID = [] aqualityID = [] if self.initial: for quality in self.initial: iqualityID.append(quality_map[quality]) if self.archive: for quality in self.archive: aqualityID.append(quality_map[quality]) if iqualityID or aqualityID: sickbeard.QUALITY_DEFAULT = Quality.combineQualities(iqualityID, aqualityID) if self.status: # convert the string status to a int for status in statusStrings.statusStrings: if statusStrings[status].lower() == 
str(self.status).lower(): self.status = status break # this should be obsolete bcause of the above if not self.status in statusStrings.statusStrings: raise ApiError("Invalid Status") #only allow the status options we want if int(self.status) not in (3, 5, 6, 7): raise ApiError("Status Prohibited") sickbeard.STATUS_DEFAULT = self.status if self.flatten_folders != None: sickbeard.FLATTEN_FOLDERS_DEFAULT = int(self.flatten_folders) if self.future_show_paused != None: sickbeard.COMING_EPS_DISPLAY_PAUSED = int(self.future_show_paused) return _responds(RESULT_SUCCESS, msg="Saved defaults") class CMD_SickBeardShutdown(ApiCall): _help = {"desc": "shutdown sickbeard"} def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ shutdown sickbeard """ threading.Timer(2, sickbeard.invoke_shutdown).start() return _responds(RESULT_SUCCESS, msg="SickBeard is shutting down...") class CMD_Show(ApiCall): _help = {"desc": "display information for a given show", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display information for a given show """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") showDict = {} showDict["season_list"] = CMD_ShowSeasonList((), {"tvdbid": self.tvdbid}).run()["data"] showDict["cache"] = CMD_ShowCache((), {"tvdbid": self.tvdbid}).run()["data"] genreList = [] if showObj.genre: genreListTmp = showObj.genre.split("|") for genre in genreListTmp: if genre: genreList.append(genre) showDict["genre"] = genreList showDict["quality"] = _get_quality_string(showObj.quality) anyQualities, bestQualities = _mapQuality(showObj.quality) 
showDict["quality_details"] = {"initial": anyQualities, "archive": bestQualities} try: showDict["location"] = showObj.location except sickbeard.exceptions.ShowDirNotFoundException: showDict["location"] = "" showDict["language"] = showObj.lang showDict["show_name"] = showObj.name showDict["paused"] = showObj.paused showDict["air_by_date"] = showObj.air_by_date showDict["flatten_folders"] = showObj.flatten_folders #clean up tvdb horrible airs field showDict["airs"] = str(showObj.airs).replace('am', ' AM').replace('pm', ' PM').replace(' ', ' ') showDict["tvrage_id"] = showObj.tvrid showDict["tvrage_name"] = showObj.tvrname showDict["network"] = showObj.network if not showDict["network"]: showDict["network"] = "" showDict["status"] = showObj.status nextAirdate = '' nextEps = showObj.nextEpisode() if (len(nextEps) != 0): nextAirdate = _ordinal_to_dateForm(nextEps[0].airdate.toordinal()) showDict["next_ep_airdate"] = nextAirdate return _responds(RESULT_SUCCESS, showDict) class CMD_ShowAddExisting(ApiCall): _help = {"desc": "add a show in sickbeard with an existing folder", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, "location": {"desc": "full path to the existing folder for the show"} }, "optionalParameters": {"initial": {"desc": "initial quality for the show"}, "archive": {"desc": "archive quality for the show"}, "flatten_folders": {"desc": "flatten subfolders for the show"} } } def __init__(self, args, kwargs): # required self.location, args = self.check_params(args, kwargs, "location", None, True, "string", []) self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional self.initial, args = self.check_params(args, kwargs, "initial", None, False, "list", ["sdtv", "sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray", "unknown"]) self.archive, args = self.check_params(args, kwargs, "archive", None, False, "list", ["sddvd", "hdtv", "rawhdtv", "fullhdtv", 
"hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray"]) self.flatten_folders, args = self.check_params(args, kwargs, "flatten_folders", str(sickbeard.FLATTEN_FOLDERS_DEFAULT), False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ add a show in sickbeard with an existing folder """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if showObj: return _responds(RESULT_FAILURE, msg="An existing tvdbid already exists in the database") if not ek.ek(os.path.isdir, self.location): return _responds(RESULT_FAILURE, msg='Not a valid location') tvdbName = None tvdbResult = CMD_SickBeardSearchTVDB([], {"tvdbid": self.tvdbid}).run() if tvdbResult['result'] == result_type_map[RESULT_SUCCESS]: if not tvdbResult['data']['results']: return _responds(RESULT_FAILURE, msg="Empty results returned, check tvdbid and try again") if len(tvdbResult['data']['results']) == 1 and 'name' in tvdbResult['data']['results'][0]: tvdbName = tvdbResult['data']['results'][0]['name'] if not tvdbName: return _responds(RESULT_FAILURE, msg="Unable to retrieve information from tvdb") quality_map = {'sdtv': Quality.SDTV, 'sddvd': Quality.SDDVD, 'hdtv': Quality.HDTV, 'rawhdtv': Quality.RAWHDTV, 'fullhdtv': Quality.FULLHDTV, 'hdwebdl': Quality.HDWEBDL, 'fullhdwebdl': Quality.FULLHDWEBDL, 'hdbluray': Quality.HDBLURAY, 'fullhdbluray': Quality.FULLHDBLURAY, 'unknown': Quality.UNKNOWN} #use default quality as a failsafe newQuality = int(sickbeard.QUALITY_DEFAULT) iqualityID = [] aqualityID = [] if self.initial: for quality in self.initial: iqualityID.append(quality_map[quality]) if self.archive: for quality in self.archive: aqualityID.append(quality_map[quality]) if iqualityID or aqualityID: newQuality = Quality.combineQualities(iqualityID, aqualityID) sickbeard.showQueueScheduler.action.addShow(int(self.tvdbid), self.location, SKIPPED, newQuality, int(self.flatten_folders)) #@UndefinedVariable return _responds(RESULT_SUCCESS, {"name": 
tvdbName}, tvdbName + " has been queued to be added") class CMD_ShowAddNew(ApiCall): _help = {"desc": "add a new show to sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"} }, "optionalParameters": {"initial": {"desc": "initial quality for the show"}, "location": {"desc": "base path for where the show folder is to be created"}, "archive": {"desc": "archive quality for the show"}, "flatten_folders": {"desc": "flatten subfolders for the show"}, "status": {"desc": "status of missing episodes"}, "lang": {"desc": "the 2 letter lang abbreviation id"} } } valid_languages = { 'el': 20, 'en': 7, 'zh': 27, 'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9, 'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11, 'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30} def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional self.location, args = self.check_params(args, kwargs, "location", None, False, "string", []) self.initial, args = self.check_params(args, kwargs, "initial", None, False, "list", ["sdtv", "sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray", "unknown"]) self.archive, args = self.check_params(args, kwargs, "archive", None, False, "list", ["sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray"]) self.flatten_folders, args = self.check_params(args, kwargs, "flatten_folders", str(sickbeard.FLATTEN_FOLDERS_DEFAULT), False, "bool", []) self.status, args = self.check_params(args, kwargs, "status", None, False, "string", ["wanted", "skipped", "archived", "ignored"]) self.lang, args = self.check_params(args, kwargs, "lang", "en", False, "string", self.valid_languages.keys()) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ add a show in sickbeard with an existing folder """ showObj = 
sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if showObj: return _responds(RESULT_FAILURE, msg="An existing tvdbid already exists in database") if not self.location: if sickbeard.ROOT_DIRS != "": root_dirs = sickbeard.ROOT_DIRS.split('|') root_dirs.pop(0) default_index = int(sickbeard.ROOT_DIRS.split('|')[0]) self.location = root_dirs[default_index] else: return _responds(RESULT_FAILURE, msg="Root directory is not set, please provide a location") if not ek.ek(os.path.isdir, self.location): return _responds(RESULT_FAILURE, msg="'" + self.location + "' is not a valid location") quality_map = {'sdtv': Quality.SDTV, 'sddvd': Quality.SDDVD, 'hdtv': Quality.HDTV, 'rawhdtv': Quality.RAWHDTV, 'fullhdtv': Quality.FULLHDTV, 'hdwebdl': Quality.HDWEBDL, 'fullhdwebdl': Quality.FULLHDWEBDL, 'hdbluray': Quality.HDBLURAY, 'fullhdbluray': Quality.FULLHDBLURAY, 'unknown': Quality.UNKNOWN} # use default quality as a failsafe newQuality = int(sickbeard.QUALITY_DEFAULT) iqualityID = [] aqualityID = [] if self.initial: for quality in self.initial: iqualityID.append(quality_map[quality]) if self.archive: for quality in self.archive: aqualityID.append(quality_map[quality]) if iqualityID or aqualityID: newQuality = Quality.combineQualities(iqualityID, aqualityID) # use default status as a failsafe newStatus = sickbeard.STATUS_DEFAULT if self.status: # convert the string status to a int for status in statusStrings.statusStrings: if statusStrings[status].lower() == str(self.status).lower(): self.status = status break #TODO: check if obsolete if not self.status in statusStrings.statusStrings: raise ApiError("Invalid Status") # only allow the status options we want if int(self.status) not in (3, 5, 6, 7): return _responds(RESULT_FAILURE, msg="Status prohibited") newStatus = self.status tvdbName = None tvdbResult = CMD_SickBeardSearchTVDB([], {"tvdbid": self.tvdbid}).run() if tvdbResult['result'] == result_type_map[RESULT_SUCCESS]: if not tvdbResult['data']['results']: 
return _responds(RESULT_FAILURE, msg="Empty results returned, check tvdbid and try again") if len(tvdbResult['data']['results']) == 1 and 'name' in tvdbResult['data']['results'][0]: tvdbName = tvdbResult['data']['results'][0]['name'] if not tvdbName: return _responds(RESULT_FAILURE, msg="Unable to retrieve information from tvdb") # moved the logic check to the end in an attempt to eliminate empty directory being created from previous errors showPath = ek.ek(os.path.join, self.location, helpers.sanitizeFileName(tvdbName)) # don't create show dir if config says not to if sickbeard.ADD_SHOWS_WO_DIR: logger.log(u"Skipping initial creation of " + showPath + " due to config.ini setting") else: dir_exists = helpers.makeDir(showPath) if not dir_exists: logger.log(u"API :: Unable to create the folder " + showPath + ", can't add the show", logger.ERROR) return _responds(RESULT_FAILURE, {"path": showPath}, "Unable to create the folder " + showPath + ", can't add the show") else: helpers.chmodAsParent(showPath) sickbeard.showQueueScheduler.action.addShow(int(self.tvdbid), showPath, newStatus, newQuality, int(self.flatten_folders), self.lang) #@UndefinedVariable return _responds(RESULT_SUCCESS, {"name": tvdbName}, tvdbName + " has been queued to be added") class CMD_ShowCache(ApiCall): _help = {"desc": "check sickbeard's cache to see if the banner or poster image for a show is valid", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ check sickbeard's cache to see if the banner or poster image for a show is valid """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") #TODO: catch if cache dir is missing/invalid.. 
so it doesn't break show/show.cache #return {"poster": 0, "banner": 0} cache_obj = image_cache.ImageCache() has_poster = 0 has_banner = 0 if ek.ek(os.path.isfile, cache_obj.poster_path(showObj.tvdbid)): has_poster = 1 if ek.ek(os.path.isfile, cache_obj.banner_path(showObj.tvdbid)): has_banner = 1 return _responds(RESULT_SUCCESS, {"poster": has_poster, "banner": has_banner}) class CMD_ShowDelete(ApiCall): _help = {"desc": "delete a show in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ delete a show in sickbeard """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") if sickbeard.showQueueScheduler.action.isBeingAdded(showObj) or sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable return _responds(RESULT_FAILURE, msg="Show can not be deleted while being added or updated") showObj.deleteShow() return _responds(RESULT_SUCCESS, msg=str(showObj.name) + " has been deleted") class CMD_ShowGetQuality(ApiCall): _help = {"desc": "get quality setting for a show in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ get quality setting for a show in sickbeard """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") anyQualities, bestQualities = _mapQuality(showObj.quality) return _responds(RESULT_SUCCESS, {"initial": anyQualities, 
"archive": bestQualities}) class CMD_ShowGetPoster(ApiCall): _help = {"desc": "get the poster stored for a show in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ get the poster for a show in sickbeard """ return {'outputType': 'image', 'image': webserve.WebInterface().showPoster(self.tvdbid, 'poster')} class CMD_ShowGetBanner(ApiCall): _help = {"desc": "get the banner stored for a show in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ get the banner for a show in sickbeard """ return {'outputType': 'image', 'image': webserve.WebInterface().showPoster(self.tvdbid, 'banner')} class CMD_ShowPause(ApiCall): _help = {"desc": "set a show's paused state in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, }, "optionalParameters": {"pause": {"desc": "set the pause state of the show"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional self.pause, args = self.check_params(args, kwargs, "pause", 0, False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ set a show's paused state in sickbeard """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") if self.pause == True: showObj.paused = 1 return _responds(RESULT_SUCCESS, msg=str(showObj.name) + " has been paused") else: 
showObj.paused = 0 return _responds(RESULT_SUCCESS, msg=str(showObj.name) + " has been unpaused") return _responds(RESULT_FAILURE, msg=str(showObj.name) + " was unable to be paused") class CMD_ShowRefresh(ApiCall): _help = {"desc": "refresh a show in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ refresh a show in sickbeard """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") try: sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable return _responds(RESULT_SUCCESS, msg=str(showObj.name) + " has queued to be refreshed") except exceptions.CantRefreshException: # TODO: log the excption return _responds(RESULT_FAILURE, msg="Unable to refresh " + str(showObj.name)) class CMD_ShowSeasonList(ApiCall): _help = {"desc": "display the season list for a given show", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, }, "optionalParameters": {"sort": {"desc": "change the sort order from descending to ascending"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional self.sort, args = self.check_params(args, kwargs, "sort", "desc", False, "string", ["asc", "desc"]) # "asc" and "desc" default and fallback is "desc" # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display the season list for a given show """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") myDB = db.DBConnection(row_type="dict") if self.sort == "asc": sqlResults = 
myDB.select("SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season ASC", [self.tvdbid]) else: sqlResults = myDB.select("SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season DESC", [self.tvdbid]) seasonList = [] # a list with all season numbers for row in sqlResults: seasonList.append(int(row["season"])) myDB.connection.close() return _responds(RESULT_SUCCESS, seasonList) class CMD_ShowSeasons(ApiCall): _help = {"desc": "display a listing of episodes for all or a given season", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, }, "optionalParameters": {"season": {"desc": "the season number"}, } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional self.season, args = self.check_params(args, kwargs, "season", None, False, "int", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display a listing of episodes for all or a given show """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") myDB = db.DBConnection(row_type="dict") if self.season == None: sqlResults = myDB.select("SELECT name, episode, airdate, status, season FROM tv_episodes WHERE showid = ?", [self.tvdbid]) seasons = {} for row in sqlResults: status, quality = Quality.splitCompositeStatus(int(row["status"])) row["status"] = _get_status_Strings(status) row["quality"] = _get_quality_string(quality) row["airdate"] = _ordinal_to_dateForm(row["airdate"]) curSeason = int(row["season"]) curEpisode = int(row["episode"]) del row["season"] del row["episode"] if not curSeason in seasons: seasons[curSeason] = {} seasons[curSeason][curEpisode] = row else: sqlResults = myDB.select("SELECT name, episode, airdate, status FROM tv_episodes WHERE showid = ? 
AND season = ?", [self.tvdbid, self.season]) if len(sqlResults) is 0: return _responds(RESULT_FAILURE, msg="Season not found") seasons = {} for row in sqlResults: curEpisode = int(row["episode"]) del row["episode"] status, quality = Quality.splitCompositeStatus(int(row["status"])) row["status"] = _get_status_Strings(status) row["quality"] = _get_quality_string(quality) row["airdate"] = _ordinal_to_dateForm(row["airdate"]) if not curEpisode in seasons: seasons[curEpisode] = {} seasons[curEpisode] = row myDB.connection.close() return _responds(RESULT_SUCCESS, seasons) class CMD_ShowSetQuality(ApiCall): _help = {"desc": "set desired quality of a show in sickbeard. if neither initial or archive are provided then the config default quality will be used", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"} }, "optionalParameters": {"initial": {"desc": "initial quality for the show"}, "archive": {"desc": "archive quality for the show"} } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # this for whatever reason removes hdbluray not sdtv... which is just wrong. reverting to previous code.. plus we didnt use the new code everywhere. 
# self.archive, args = self.check_params(args, kwargs, "archive", None, False, "list", _getQualityMap().values()[1:]) self.initial, args = self.check_params(args, kwargs, "initial", None, False, "list", ["sdtv", "sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray", "unknown"]) self.archive, args = self.check_params(args, kwargs, "archive", None, False, "list", ["sddvd", "hdtv", "rawhdtv", "fullhdtv", "hdwebdl", "fullhdwebdl", "hdbluray", "fullhdbluray"]) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ set the quality for a show in sickbeard by taking in a deliminated string of qualities, map to their value and combine for new values """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") quality_map = {'sdtv': Quality.SDTV, 'sddvd': Quality.SDDVD, 'hdtv': Quality.HDTV, 'rawhdtv': Quality.RAWHDTV, 'fullhdtv': Quality.FULLHDTV, 'hdwebdl': Quality.HDWEBDL, 'fullhdwebdl': Quality.FULLHDWEBDL, 'hdbluray': Quality.HDBLURAY, 'fullhdbluray': Quality.FULLHDBLURAY, 'unknown': Quality.UNKNOWN} #use default quality as a failsafe newQuality = int(sickbeard.QUALITY_DEFAULT) iqualityID = [] aqualityID = [] if self.initial: for quality in self.initial: iqualityID.append(quality_map[quality]) if self.archive: for quality in self.archive: aqualityID.append(quality_map[quality]) if iqualityID or aqualityID: newQuality = Quality.combineQualities(iqualityID, aqualityID) showObj.quality = newQuality return _responds(RESULT_SUCCESS, msg=showObj.name + " quality has been changed to " + _get_quality_string(showObj.quality)) class CMD_ShowStats(ApiCall): _help = {"desc": "display episode statistics for a given show", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", 
[]) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display episode statistics for a given show """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") # show stats episode_status_counts_total = {} episode_status_counts_total["total"] = 0 for status in statusStrings.statusStrings.keys(): if status in [UNKNOWN, DOWNLOADED, SNATCHED, SNATCHED_PROPER]: continue episode_status_counts_total[status] = 0 # add all the downloaded qualities episode_qualities_counts_download = {} episode_qualities_counts_download["total"] = 0 for statusCode in Quality.DOWNLOADED: status, quality = Quality.splitCompositeStatus(statusCode) if quality in [Quality.NONE]: continue episode_qualities_counts_download[statusCode] = 0 # add all snatched qualities episode_qualities_counts_snatch = {} episode_qualities_counts_snatch["total"] = 0 for statusCode in Quality.SNATCHED + Quality.SNATCHED_PROPER: status, quality = Quality.splitCompositeStatus(statusCode) if quality in [Quality.NONE]: continue episode_qualities_counts_snatch[statusCode] = 0 myDB = db.DBConnection(row_type="dict") sqlResults = myDB.select("SELECT status, season FROM tv_episodes WHERE season != 0 AND showid = ?", [self.tvdbid]) # the main loop that goes through all episodes for row in sqlResults: status, quality = Quality.splitCompositeStatus(int(row["status"])) episode_status_counts_total["total"] += 1 if status in Quality.DOWNLOADED: episode_qualities_counts_download["total"] += 1 episode_qualities_counts_download[int(row["status"])] += 1 elif status in Quality.SNATCHED + Quality.SNATCHED_PROPER: episode_qualities_counts_snatch["total"] += 1 episode_qualities_counts_snatch[int(row["status"])] += 1 elif status == 0: # we dont count NONE = 0 = N/A pass else: episode_status_counts_total[status] += 1 # the outgoing container episodes_stats = {} episodes_stats["downloaded"] = {} # truning 
codes into strings for statusCode in episode_qualities_counts_download: if statusCode is "total": episodes_stats["downloaded"]["total"] = episode_qualities_counts_download[statusCode] continue status, quality = Quality.splitCompositeStatus(int(statusCode)) statusString = Quality.qualityStrings[quality].lower().replace(" ", "_").replace("(", "").replace(")", "") episodes_stats["downloaded"][statusString] = episode_qualities_counts_download[statusCode] episodes_stats["snatched"] = {} # truning codes into strings # and combining proper and normal for statusCode in episode_qualities_counts_snatch: if statusCode is "total": episodes_stats["snatched"]["total"] = episode_qualities_counts_snatch[statusCode] continue status, quality = Quality.splitCompositeStatus(int(statusCode)) statusString = Quality.qualityStrings[quality].lower().replace(" ", "_").replace("(", "").replace(")", "") if Quality.qualityStrings[quality] in episodes_stats["snatched"]: episodes_stats["snatched"][statusString] += episode_qualities_counts_snatch[statusCode] else: episodes_stats["snatched"][statusString] = episode_qualities_counts_snatch[statusCode] #episodes_stats["total"] = {} for statusCode in episode_status_counts_total: if statusCode is "total": episodes_stats["total"] = episode_status_counts_total[statusCode] continue status, quality = Quality.splitCompositeStatus(int(statusCode)) statusString = statusStrings.statusStrings[statusCode].lower().replace(" ", "_").replace("(", "").replace(")", "") episodes_stats[statusString] = episode_status_counts_total[statusCode] myDB.connection.close() return _responds(RESULT_SUCCESS, episodes_stats) class CMD_ShowUpdate(ApiCall): _help = {"desc": "update a show in sickbeard", "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"}, } } def __init__(self, args, kwargs): # required self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", []) # optional # super, missing, help ApiCall.__init__(self, args, kwargs) 
def run(self): """ update a show in sickbeard """ showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid)) if not showObj: return _responds(RESULT_FAILURE, msg="Show not found") try: sickbeard.showQueueScheduler.action.updateShow(showObj, True) #@UndefinedVariable return _responds(RESULT_SUCCESS, msg=str(showObj.name) + " has queued to be updated") except exceptions.CantUpdateException, e: logger.log(u"API:: Unable to update " + str(showObj.name) + ". " + str(ex(e)), logger.ERROR) return _responds(RESULT_FAILURE, msg="Unable to update " + str(showObj.name)) class CMD_Shows(ApiCall): _help = {"desc": "display all shows in sickbeard", "optionalParameters": {"sort": {"desc": "sort the list of shows by show name instead of tvdbid"}, "paused": {"desc": "only show the shows that are set to paused"}, }, } def __init__(self, args, kwargs): # required # optional self.sort, args = self.check_params(args, kwargs, "sort", "id", False, "string", ["id", "name"]) self.paused, args = self.check_params(args, kwargs, "paused", None, False, "bool", []) # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display_is_int_multi( self.tvdbid )shows in sickbeard """ shows = {} for curShow in sickbeard.showList: nextAirdate = '' nextEps = curShow.nextEpisode() if (len(nextEps) != 0): nextAirdate = _ordinal_to_dateForm(nextEps[0].airdate.toordinal()) if self.paused != None and bool(self.paused) != bool(curShow.paused): continue showDict = {"paused": curShow.paused, "quality": _get_quality_string(curShow.quality), "language": curShow.lang, "air_by_date": curShow.air_by_date, "tvdbid": curShow.tvdbid, "tvrage_id": curShow.tvrid, "tvrage_name": curShow.tvrname, "network": curShow.network, "show_name": curShow.name, "status": curShow.status, "next_ep_airdate": nextAirdate} showDict["cache"] = CMD_ShowCache((), {"tvdbid": curShow.tvdbid}).run()["data"] if not showDict["network"]: showDict["network"] = "" if self.sort == "name": 
shows[curShow.name] = showDict else: shows[curShow.tvdbid] = showDict return _responds(RESULT_SUCCESS, shows) class CMD_ShowsStats(ApiCall): _help = {"desc": "display the global shows and episode stats" } def __init__(self, args, kwargs): # required # optional # super, missing, help ApiCall.__init__(self, args, kwargs) def run(self): """ display the global shows and episode stats """ stats = {} myDB = db.DBConnection() today = str(datetime.date.today().toordinal()) stats["shows_total"] = len(sickbeard.showList) stats["shows_active"] = len([show for show in sickbeard.showList if show.paused == 0 and show.status != "Ended"]) stats["ep_downloaded"] = myDB.select("SELECT COUNT(*) FROM tv_episodes WHERE status IN (" + ",".join([str(show) for show in Quality.DOWNLOADED + [ARCHIVED]]) + ") AND season != 0 and episode != 0 AND airdate <= " + today + "")[0][0] stats["ep_total"] = myDB.select("SELECT COUNT(*) FROM tv_episodes WHERE season != 0 AND episode != 0 AND (airdate != 1 OR status IN (" + ",".join([str(show) for show in (Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER) + [ARCHIVED]]) + ")) AND airdate <= " + today + " AND status != " + str(IGNORED) + "")[0][0] myDB.connection.close() return _responds(RESULT_SUCCESS, stats) # WARNING: never define a cmd call string that contains a "_" (underscore) # this is reserved for cmd indexes used while cmd chaining # WARNING: never define a param name that contains a "." 
(dot) # this is reserved for cmd namspaces used while cmd chaining _functionMaper = {"help": CMD_Help, "future": CMD_ComingEpisodes, "episode": CMD_Episode, "episode.search": CMD_EpisodeSearch, "episode.setstatus": CMD_EpisodeSetStatus, "exceptions": CMD_Exceptions, "history": CMD_History, "history.clear": CMD_HistoryClear, "history.trim": CMD_HistoryTrim, "logs": CMD_Logs, "sb": CMD_SickBeard, "sb.addrootdir": CMD_SickBeardAddRootDir, "sb.checkscheduler": CMD_SickBeardCheckScheduler, "sb.deleterootdir": CMD_SickBeardDeleteRootDir, "sb.forcesearch": CMD_SickBeardForceSearch, "sb.getdefaults": CMD_SickBeardGetDefaults, "sb.getmessages": CMD_SickBeardGetMessages, "sb.getrootdirs": CMD_SickBeardGetRootDirs, "sb.pausebacklog": CMD_SickBeardPauseBacklog, "sb.ping": CMD_SickBeardPing, "sb.restart": CMD_SickBeardRestart, "sb.searchtvdb": CMD_SickBeardSearchTVDB, "sb.setdefaults": CMD_SickBeardSetDefaults, "sb.shutdown": CMD_SickBeardShutdown, "show": CMD_Show, "show.addexisting": CMD_ShowAddExisting, "show.addnew": CMD_ShowAddNew, "show.cache": CMD_ShowCache, "show.delete": CMD_ShowDelete, "show.getquality": CMD_ShowGetQuality, "show.getposter": CMD_ShowGetPoster, "show.getbanner": CMD_ShowGetBanner, "show.pause": CMD_ShowPause, "show.refresh": CMD_ShowRefresh, "show.seasonlist": CMD_ShowSeasonList, "show.seasons": CMD_ShowSeasons, "show.setquality": CMD_ShowSetQuality, "show.stats": CMD_ShowStats, "show.update": CMD_ShowUpdate, "shows": CMD_Shows, "shows.stats": CMD_ShowsStats }
druids/django-chamber
refs/heads/master
chamber/shortcuts.py
1
from datetime import date, datetime, time

from django.http.response import Http404
from django.shortcuts import _get_queryset
from django.utils import timezone
from django.core.exceptions import ValidationError


def get_object_or_none(klass, *args, **kwargs):
    """Return the single object matching the lookup, or ``None`` when it does
    not exist or the lookup value is malformed (ValueError/ValidationError)."""
    queryset = _get_queryset(klass)
    try:
        return queryset.get(*args, **kwargs)
    except (queryset.model.DoesNotExist, ValueError, ValidationError):
        return None


def get_object_or_404(klass, *args, **kwargs):
    """Return the single object matching the lookup, or raise ``Http404``.

    Unlike Django's shortcut, a malformed lookup value (ValueError or
    ValidationError, e.g. a non-numeric pk) also results in a 404 instead of
    a server error.
    """
    queryset = _get_queryset(klass)
    try:
        return queryset.get(*args, **kwargs)
    except (queryset.model.DoesNotExist, ValueError, ValidationError):
        raise Http404


def distinct_field(klass, *args, **kwargs):
    """Return the distinct values of the given field(s) as a values_list
    queryset (default ordering removed so DISTINCT works as expected)."""
    return _get_queryset(klass).order_by().values_list(*args, **kwargs).distinct()


def filter_or_exclude_by_date(negate, klass, **kwargs):
    """Filter (or exclude, when ``negate`` is True) objects whose datetime
    fields fall on the given dates.

    Each keyword maps a field name to a ``datetime.date``; the date is
    expanded to a timezone-aware [00:00:00, 23:59:59.999999] range in the
    current timezone.
    """
    filter_kwargs = {}
    # Hoisted out of the loop: the active timezone cannot change per field.
    current_timezone = timezone.get_current_timezone()
    for key, date_value in kwargs.items():
        assert isinstance(date_value, date)
        date_range = (
            timezone.make_aware(datetime.combine(date_value, time.min), current_timezone),
            timezone.make_aware(datetime.combine(date_value, time.max), current_timezone)
        )
        filter_kwargs['%s__range' % key] = date_range
    if negate:
        return _get_queryset(klass).exclude(**filter_kwargs)
    else:
        return _get_queryset(klass).filter(**filter_kwargs)


def filter_by_date(klass, **kwargs):
    """Keep only objects whose datetime fields fall on the given dates."""
    return filter_or_exclude_by_date(False, klass, **kwargs)


def exclude_by_date(klass, **kwargs):
    """Drop objects whose datetime fields fall on the given dates."""
    return filter_or_exclude_by_date(True, klass, **kwargs)


def change(obj, **changed_fields):
    """
    Changes a given `changed_fields` on object and returns changed object.

    Raises ``ValueError`` for a keyword that is not a model field name,
    attname or 'pk'.
    """
    obj_field_names = {
        field.name for field in obj._meta.fields
    } | {
        field.attname for field in obj._meta.fields
    } | {'pk'}
    for field_name, value in changed_fields.items():
        if field_name not in obj_field_names:
            raise ValueError("'{}' is an invalid field name".format(field_name))
        setattr(obj, field_name, value)
    return obj


def change_and_save(obj, update_only_changed_fields=False, save_kwargs=None, **changed_fields):
    """
    Changes a given `changed_fields` on object, saves it and returns changed object.

    ``update_only_changed_fields`` is only supported for SmartModel instances
    (it is forwarded to ``save()``); other models raise ``TypeError``.
    """
    from chamber.models import SmartModel  # local import avoids a circular dependency

    change(obj, **changed_fields)
    if update_only_changed_fields and not isinstance(obj, SmartModel):
        raise TypeError('update_only_changed_fields can be used only with SmartModel')

    save_kwargs = save_kwargs if save_kwargs is not None else {}
    if update_only_changed_fields:
        save_kwargs['update_only_changed_fields'] = True
    obj.save(**save_kwargs)
    return obj


def bulk_change(iterable, **changed_fields):
    """
    Changes a given `changed_fields` on each object in a given `iterable`, returns
    the changed objects.
    """
    return [change(obj, **changed_fields) for obj in iterable]


def bulk_change_and_save(iterable, update_only_changed_fields=False, save_kwargs=None, **changed_fields):
    """
    Changes a given `changed_fields` on each object in a given `iterable`, saves objects
    and returns the changed objects.
    """
    return [
        change_and_save(obj, update_only_changed_fields=update_only_changed_fields, save_kwargs=save_kwargs,
                        **changed_fields)
        for obj in iterable
    ]


def bulk_save(iterable):
    """
    Saves a objects in a given `iterable`.
    """
    return [obj.save() for obj in iterable]
Denisolt/IEEE-NYIT-MA
refs/heads/master
local/lib/python2.7/site-packages/unidecode/x1d6.py
248
data = ( 's', # 0x00 't', # 0x01 'u', # 0x02 'v', # 0x03 'w', # 0x04 'x', # 0x05 'y', # 0x06 'z', # 0x07 'A', # 0x08 'B', # 0x09 'C', # 0x0a 'D', # 0x0b 'E', # 0x0c 'F', # 0x0d 'G', # 0x0e 'H', # 0x0f 'I', # 0x10 'J', # 0x11 'K', # 0x12 'L', # 0x13 'M', # 0x14 'N', # 0x15 'O', # 0x16 'P', # 0x17 'Q', # 0x18 'R', # 0x19 'S', # 0x1a 'T', # 0x1b 'U', # 0x1c 'V', # 0x1d 'W', # 0x1e 'X', # 0x1f 'Y', # 0x20 'Z', # 0x21 'a', # 0x22 'b', # 0x23 'c', # 0x24 'd', # 0x25 'e', # 0x26 'f', # 0x27 'g', # 0x28 'h', # 0x29 'i', # 0x2a 'j', # 0x2b 'k', # 0x2c 'l', # 0x2d 'm', # 0x2e 'n', # 0x2f 'o', # 0x30 'p', # 0x31 'q', # 0x32 'r', # 0x33 's', # 0x34 't', # 0x35 'u', # 0x36 'v', # 0x37 'w', # 0x38 'x', # 0x39 'y', # 0x3a 'z', # 0x3b 'A', # 0x3c 'B', # 0x3d 'C', # 0x3e 'D', # 0x3f 'E', # 0x40 'F', # 0x41 'G', # 0x42 'H', # 0x43 'I', # 0x44 'J', # 0x45 'K', # 0x46 'L', # 0x47 'M', # 0x48 'N', # 0x49 'O', # 0x4a 'P', # 0x4b 'Q', # 0x4c 'R', # 0x4d 'S', # 0x4e 'T', # 0x4f 'U', # 0x50 'V', # 0x51 'W', # 0x52 'X', # 0x53 'Y', # 0x54 'Z', # 0x55 'a', # 0x56 'b', # 0x57 'c', # 0x58 'd', # 0x59 'e', # 0x5a 'f', # 0x5b 'g', # 0x5c 'h', # 0x5d 'i', # 0x5e 'j', # 0x5f 'k', # 0x60 'l', # 0x61 'm', # 0x62 'n', # 0x63 'o', # 0x64 'p', # 0x65 'q', # 0x66 'r', # 0x67 's', # 0x68 't', # 0x69 'u', # 0x6a 'v', # 0x6b 'w', # 0x6c 'x', # 0x6d 'y', # 0x6e 'z', # 0x6f 'A', # 0x70 'B', # 0x71 'C', # 0x72 'D', # 0x73 'E', # 0x74 'F', # 0x75 'G', # 0x76 'H', # 0x77 'I', # 0x78 'J', # 0x79 'K', # 0x7a 'L', # 0x7b 'M', # 0x7c 'N', # 0x7d 'O', # 0x7e 'P', # 0x7f 'Q', # 0x80 'R', # 0x81 'S', # 0x82 'T', # 0x83 'U', # 0x84 'V', # 0x85 'W', # 0x86 'X', # 0x87 'Y', # 0x88 'Z', # 0x89 'a', # 0x8a 'b', # 0x8b 'c', # 0x8c 'd', # 0x8d 'e', # 0x8e 'f', # 0x8f 'g', # 0x90 'h', # 0x91 'i', # 0x92 'j', # 0x93 'k', # 0x94 'l', # 0x95 'm', # 0x96 'n', # 0x97 'o', # 0x98 'p', # 0x99 'q', # 0x9a 'r', # 0x9b 's', # 0x9c 't', # 0x9d 'u', # 0x9e 'v', # 0x9f 'w', # 0xa0 'x', # 0xa1 'y', # 0xa2 'z', # 0xa3 'i', # 0xa4 'j', # 
0xa5 '', # 0xa6 '', # 0xa7 'Alpha', # 0xa8 'Beta', # 0xa9 'Gamma', # 0xaa 'Delta', # 0xab 'Epsilon', # 0xac 'Zeta', # 0xad 'Eta', # 0xae 'Theta', # 0xaf 'Iota', # 0xb0 'Kappa', # 0xb1 'Lamda', # 0xb2 'Mu', # 0xb3 'Nu', # 0xb4 'Xi', # 0xb5 'Omicron', # 0xb6 'Pi', # 0xb7 'Rho', # 0xb8 'Theta', # 0xb9 'Sigma', # 0xba 'Tau', # 0xbb 'Upsilon', # 0xbc 'Phi', # 0xbd 'Chi', # 0xbe 'Psi', # 0xbf 'Omega', # 0xc0 'nabla', # 0xc1 'alpha', # 0xc2 'beta', # 0xc3 'gamma', # 0xc4 'delta', # 0xc5 'epsilon', # 0xc6 'zeta', # 0xc7 'eta', # 0xc8 'theta', # 0xc9 'iota', # 0xca 'kappa', # 0xcb 'lamda', # 0xcc 'mu', # 0xcd 'nu', # 0xce 'xi', # 0xcf 'omicron', # 0xd0 'pi', # 0xd1 'rho', # 0xd2 'sigma', # 0xd3 'sigma', # 0xd4 'tai', # 0xd5 'upsilon', # 0xd6 'phi', # 0xd7 'chi', # 0xd8 'psi', # 0xd9 'omega', # 0xda '', # 0xdb '', # 0xdc '', # 0xdd '', # 0xde '', # 0xdf '', # 0xe0 '', # 0xe1 '', # 0xe2 '', # 0xe3 '', # 0xe4 '', # 0xe5 '', # 0xe6 '', # 0xe7 '', # 0xe8 '', # 0xe9 '', # 0xea '', # 0xeb '', # 0xec '', # 0xed '', # 0xee '', # 0xef '', # 0xf0 '', # 0xf1 '', # 0xf2 '', # 0xf3 '', # 0xf4 '', # 0xf5 '', # 0xf6 '', # 0xf7 '', # 0xf8 '', # 0xf9 '', # 0xfa '', # 0xfb '', # 0xfc '', # 0xfd '', # 0xfe '', # 0xff )
Benniphx/server-tools
refs/heads/8.0
fetchmail_notify_error_to_sender/fetchmail.py
35
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2015 Lorenzo Battistini <lorenzo.battistini@agilebg.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models, fields class fetchmail_server(models.Model): _inherit = 'fetchmail.server' error_notice_template_id = fields.Many2one( 'email.template', string="Error notice template", help="Set here the template to use to send notice to sender when " "errors occur while fetching email")
350dotorg/Django
refs/heads/master
tests/modeltests/m2m_signals/models.py
130
"""Models for the m2m_signals tests: plain and self-referential
many-to-many relations whose changes should emit m2m_changed signals."""

from django.db import models


class Part(models.Model):
    name = models.CharField(max_length=20)

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        # Python 2 string representation (this codebase predates __str__).
        return self.name


class Car(models.Model):
    name = models.CharField(max_length=20)
    # Two m2m relations to the same model; the second needs an explicit
    # related_name so the reverse accessors on Part do not clash.
    default_parts = models.ManyToManyField(Part)
    optional_parts = models.ManyToManyField(Part, related_name='cars_optional')

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return self.name


class SportsCar(Car):
    # Multi-table-inheritance child of Car; inherits both m2m relations.
    price = models.IntegerField()


class Person(models.Model):
    name = models.CharField(max_length=20)
    # Self-referential m2m: 'fans' is asymmetric (A can be B's fan without
    # the reverse), while 'friends' keeps the default symmetrical behavior.
    fans = models.ManyToManyField('self', related_name='idols',
                                  symmetrical=False)
    friends = models.ManyToManyField('self')

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return self.name
samabhi/pstHealth
refs/heads/master
venv/lib/python2.7/site-packages/PIL/ImageFile.py
15
# # The Python Imaging Library. # $Id$ # # base class for image file handlers # # history: # 1995-09-09 fl Created # 1996-03-11 fl Fixed load mechanism. # 1996-04-15 fl Added pcx/xbm decoders. # 1996-04-30 fl Added encoders. # 1996-12-14 fl Added load helpers # 1997-01-11 fl Use encode_to_file where possible # 1997-08-27 fl Flush output in _save # 1998-03-05 fl Use memory mapping for some modes # 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B" # 1999-05-31 fl Added image parser # 2000-10-12 fl Set readonly flag on memory-mapped images # 2002-03-20 fl Use better messages for common decoder errors # 2003-04-21 fl Fall back on mmap/map_buffer if map is not available # 2003-10-30 fl Added StubImageFile class # 2004-02-25 fl Made incremental parser more robust # # Copyright (c) 1997-2004 by Secret Labs AB # Copyright (c) 1995-2004 by Fredrik Lundh # # See the README file for information on usage and redistribution. # from PIL import Image from PIL._util import isPath import io import os import sys import struct MAXBLOCK = 65536 SAFEBLOCK = 1024*1024 LOAD_TRUNCATED_IMAGES = False ERRORS = { -1: "image buffer overrun error", -2: "decoding error", -3: "unknown error", -8: "bad configuration", -9: "out of memory error" } def raise_ioerror(error): try: message = Image.core.getcodecstatus(error) except AttributeError: message = ERRORS.get(error) if not message: message = "decoder error %d" % error raise IOError(message + " when reading image file") # # -------------------------------------------------------------------- # Helpers def _tilesort(t): # sort on offset return t[2] # # -------------------------------------------------------------------- # ImageFile base class class ImageFile(Image.Image): "Base class for image file format handlers." 
def __init__(self, fp=None, filename=None): Image.Image.__init__(self) self.tile = None self.readonly = 1 # until we know better self.decoderconfig = () self.decodermaxblock = MAXBLOCK if isPath(fp): # filename self.fp = open(fp, "rb") self.filename = fp else: # stream self.fp = fp self.filename = filename try: self._open() except (IndexError, # end of data TypeError, # end of data (ord) KeyError, # unsupported mode EOFError, # got header but not the first frame struct.error) as v: raise SyntaxError(v) if not self.mode or self.size[0] <= 0: raise SyntaxError("not identified by this driver") def draft(self, mode, size): "Set draft mode" pass def verify(self): "Check file integrity" # raise exception if something's wrong. must be called # directly after open, and closes file when finished. self.fp = None def load(self): "Load image data based on tile list" pixel = Image.Image.load(self) if self.tile is None: raise IOError("cannot load this image") if not self.tile: return pixel self.map = None use_mmap = self.filename and len(self.tile) == 1 # As of pypy 2.1.0, memory mapping was failing here. 
use_mmap = use_mmap and not hasattr(sys, 'pypy_version_info') readonly = 0 # look for read/seek overrides try: read = self.load_read # don't use mmap if there are custom read/seek functions use_mmap = False except AttributeError: read = self.fp.read try: seek = self.load_seek use_mmap = False except AttributeError: seek = self.fp.seek if use_mmap: # try memory mapping d, e, o, a = self.tile[0] if d == "raw" and a[0] == self.mode and a[0] in Image._MAPMODES: try: if hasattr(Image.core, "map"): # use built-in mapper self.map = Image.core.map(self.filename) self.map.seek(o) self.im = self.map.readimage( self.mode, self.size, a[1], a[2] ) else: # use mmap, if possible import mmap fp = open(self.filename, "r+") size = os.path.getsize(self.filename) # FIXME: on Unix, use PROT_READ etc self.map = mmap.mmap(fp.fileno(), size) self.im = Image.core.map_buffer( self.map, self.size, d, e, o, a ) readonly = 1 except (AttributeError, EnvironmentError, ImportError): self.map = None self.load_prepare() if not self.map: # sort tiles in file order self.tile.sort(key=_tilesort) try: # FIXME: This is a hack to handle TIFF's JpegTables tag. prefix = self.tile_prefix except AttributeError: prefix = b"" for d, e, o, a in self.tile: d = Image._getdecoder(self.mode, d, a, self.decoderconfig) seek(o) try: d.setimage(self.im, e) except ValueError: continue b = prefix while True: try: s = read(self.decodermaxblock) except (IndexError, struct.error): # truncated png/gif if LOAD_TRUNCATED_IMAGES: break else: raise IOError("image file is truncated") if not s and not d.handles_eof: # truncated jpeg self.tile = [] # JpegDecode needs to clean things up here either way # If we don't destroy the decompressor, # we have a memory leak. 
d.cleanup() if LOAD_TRUNCATED_IMAGES: break else: raise IOError("image file is truncated " "(%d bytes not processed)" % len(b)) b = b + s n, e = d.decode(b) if n < 0: break b = b[n:] # Need to cleanup here to prevent leaks in PyPy d.cleanup() self.tile = [] self.readonly = readonly self.fp = None # might be shared if not self.map and not LOAD_TRUNCATED_IMAGES and e < 0: # still raised if decoder fails to return anything raise_ioerror(e) # post processing if hasattr(self, "tile_post_rotate"): # FIXME: This is a hack to handle rotated PCD's self.im = self.im.rotate(self.tile_post_rotate) self.size = self.im.size self.load_end() return Image.Image.load(self) def load_prepare(self): # create image memory if necessary if not self.im or\ self.im.mode != self.mode or self.im.size != self.size: self.im = Image.core.new(self.mode, self.size) # create palette (optional) if self.mode == "P": Image.Image.load(self) def load_end(self): # may be overridden pass # may be defined for contained formats # def load_seek(self, pos): # pass # may be defined for blocked formats (e.g. PNG) # def load_read(self, bytes): # pass class StubImageFile(ImageFile): """ Base class for stub image loaders. A stub loader is an image loader that can identify files of a certain format, but relies on external code to load the file. """ def _open(self): raise NotImplementedError( "StubImageFile subclass must implement _open" ) def load(self): loader = self._load() if loader is None: raise IOError("cannot find loader for this %s file" % self.format) image = loader.load(self) assert image is not None # become the other object (!) self.__class__ = image.__class__ self.__dict__ = image.__dict__ def _load(self): "(Hook) Find actual image loader." raise NotImplementedError( "StubImageFile subclass must implement _load" ) class Parser(object): """ Incremental image parser. This class implements the standard feed/close consumer interface. In Python 2.x, this is an old-style class. 
""" incremental = None image = None data = None decoder = None offset = 0 finished = 0 def reset(self): """ (Consumer) Reset the parser. Note that you can only call this method immediately after you've created a parser; parser instances cannot be reused. """ assert self.data is None, "cannot reuse parsers" def feed(self, data): """ (Consumer) Feed data to the parser. :param data: A string buffer. :exception IOError: If the parser failed to parse the image file. """ # collect data if self.finished: return if self.data is None: self.data = data else: self.data = self.data + data # parse what we have if self.decoder: if self.offset > 0: # skip header skip = min(len(self.data), self.offset) self.data = self.data[skip:] self.offset = self.offset - skip if self.offset > 0 or not self.data: return n, e = self.decoder.decode(self.data) if n < 0: # end of stream self.data = None self.finished = 1 if e < 0: # decoding error self.image = None raise_ioerror(e) else: # end of image return self.data = self.data[n:] elif self.image: # if we end up here with no decoder, this file cannot # be incrementally parsed. wait until we've gotten all # available data pass else: # attempt to open this file try: try: fp = io.BytesIO(self.data) im = Image.open(fp) finally: fp.close() # explicitly close the virtual file except IOError: # traceback.print_exc() pass # not enough data else: flag = hasattr(im, "load_seek") or hasattr(im, "load_read") if flag or len(im.tile) != 1: # custom load code, or multiple tiles self.decode = None else: # initialize decoder im.load_prepare() d, e, o, a = im.tile[0] im.tile = [] self.decoder = Image._getdecoder( im.mode, d, a, im.decoderconfig ) self.decoder.setimage(im.im, e) # calculate decoder offset self.offset = o if self.offset <= len(self.data): self.data = self.data[self.offset:] self.offset = 0 self.image = im def close(self): """ (Consumer) Close the stream. :returns: An image object. 
:exception IOError: If the parser failed to parse the image file either because it cannot be identified or cannot be decoded. """ # finish decoding if self.decoder: # get rid of what's left in the buffers self.feed(b"") self.data = self.decoder = None if not self.finished: raise IOError("image was incomplete") if not self.image: raise IOError("cannot parse this image") if self.data: # incremental parsing not possible; reopen the file # not that we have all data try: fp = io.BytesIO(self.data) self.image = Image.open(fp) finally: self.image.load() fp.close() # explicitly close the virtual file return self.image # -------------------------------------------------------------------- def _save(im, fp, tile, bufsize=0): """Helper to save image based on tile list :param im: Image object. :param fp: File object. :param tile: Tile list. :param bufsize: Optional buffer size """ im.load() if not hasattr(im, "encoderconfig"): im.encoderconfig = () tile.sort(key=_tilesort) # FIXME: make MAXBLOCK a configuration parameter # It would be great if we could have the encoder specify what it needs # But, it would need at least the image size in most cases. RawEncode is # a tricky case. 
bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c if fp == sys.stdout: fp.flush() return try: fh = fp.fileno() fp.flush() except (AttributeError, io.UnsupportedOperation): # compress to Python file-compatible object for e, b, o, a in tile: e = Image._getencoder(im.mode, e, a, im.encoderconfig) if o > 0: fp.seek(o, 0) e.setimage(im.im, b) while True: l, s, d = e.encode(bufsize) fp.write(d) if s: break if s < 0: raise IOError("encoder error %d when writing image file" % s) e.cleanup() else: # slight speedup: compress to real file object for e, b, o, a in tile: e = Image._getencoder(im.mode, e, a, im.encoderconfig) if o > 0: fp.seek(o, 0) e.setimage(im.im, b) s = e.encode_to_file(fh, bufsize) if s < 0: raise IOError("encoder error %d when writing image file" % s) e.cleanup() if hasattr(fp, "flush"): fp.flush() def _safe_read(fp, size): """ Reads large blocks in a safe way. Unlike fp.read(n), this function doesn't trust the user. If the requested size is larger than SAFEBLOCK, the file is read block by block. :param fp: File handle. Must implement a <b>read</b> method. :param size: Number of bytes to read. :returns: A string containing up to <i>size</i> bytes of data. """ if size <= 0: return b"" if size <= SAFEBLOCK: return fp.read(size) data = [] while size > 0: block = fp.read(min(size, SAFEBLOCK)) if not block: break data.append(block) size -= len(block) return b"".join(data)
wiltonlazary/arangodb
refs/heads/devel
3rdParty/V8/V8-5.0.71.39/build/gyp/test/defines/gyptest-define-override.py
239
#!/usr/bin/env python # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that a default gyp define can be overridden. """ import os import TestGyp test = TestGyp.TestGyp() # CMake loudly warns about passing '#' to the compiler and drops the define. expect_stderr = '' if test.format == 'cmake': expect_stderr = ( """WARNING: Preprocessor definitions containing '#' may not be passed on the""" """ compiler command line because many compilers do not support it.\n""" """CMake is dropping a preprocessor definition: HASH_VALUE="a#1"\n""" """Consider defining the macro in a (configured) header file.\n\n""") # Command-line define test.run_gyp('defines.gyp', '-D', 'OS=fakeos') test.build('defines.gyp', stderr=expect_stderr) test.built_file_must_exist('fakeosprogram', type=test.EXECUTABLE) # Clean up the exe so subsequent tests don't find an old exe. os.remove(test.built_file_path('fakeosprogram', type=test.EXECUTABLE)) # Without "OS" override, fokeosprogram shouldn't be built. test.run_gyp('defines.gyp') test.build('defines.gyp', stderr=expect_stderr) test.built_file_must_not_exist('fakeosprogram', type=test.EXECUTABLE) # Environment define os.environ['GYP_DEFINES'] = 'OS=fakeos' test.run_gyp('defines.gyp') test.build('defines.gyp', stderr=expect_stderr) test.built_file_must_exist('fakeosprogram', type=test.EXECUTABLE) test.pass_test()
lhopps/grit-i18n
refs/heads/master
grit/format/data_pack_unittest.py
49
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

'''Unit tests for grit.format.data_pack'''

import os
import sys
if __name__ == '__main__':
  sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))

import unittest

from grit.format import data_pack


class FormatDataPackUnittest(unittest.TestCase):
  def testWriteDataPack(self):
    '''Serializing a resource dict must produce the exact binary layout.'''
    expected = (
        '\x04\x00\x00\x00'                  # header(version
        '\x04\x00\x00\x00'                  #        no. entries,
        '\x01'                              #        encoding)
        '\x01\x00\x27\x00\x00\x00'          # index entry 1
        '\x04\x00\x27\x00\x00\x00'          # index entry 4
        '\x06\x00\x33\x00\x00\x00'          # index entry 6
        '\x0a\x00\x3f\x00\x00\x00'          # index entry 10
        '\x00\x00\x3f\x00\x00\x00'          # extra entry for the size of last
        'this is id 4this is id 6')         # data
    resources = {1: '', 4: 'this is id 4', 6: 'this is id 6', 10: ''}
    output = data_pack.WriteDataPackToString(resources, data_pack.UTF8)
    # assertEqual instead of the deprecated failUnless(a == b): same check,
    # but a failure reports both values.
    self.assertEqual(output, expected)

  def testRePackUnittest(self):
    '''RePack must merge packs, keeping only whitelisted ids when a whitelist
    is given and everything otherwise.'''
    expected_with_whitelist = {
        1: 'Never gonna', 10: 'give you up', 20: 'Never gonna let',
        30: 'you down', 40: 'Never', 50: 'gonna run around and',
        60: 'desert you'}
    expected_without_whitelist = {
        1: 'Never gonna', 10: 'give you up', 20: 'Never gonna let',
        65: 'Close', 30: 'you down', 40: 'Never',
        50: 'gonna run around and', 4: 'click', 60: 'desert you',
        6: 'chirr', 32: 'oops, try again', 70: 'Awww, snap!'}
    resource_dicts = [
        {1: 'Never gonna', 4: 'click', 6: 'chirr', 10: 'give you up'},
        {20: 'Never gonna let', 30: 'you down', 32: 'oops, try again'},
        {40: 'Never', 50: 'gonna run around and', 60: 'desert you'},
        {65: 'Close', 70: 'Awww, snap!'}]
    whitelist = [1, 10, 20, 30, 40, 50, 60]
    # Renamed from 'input'/'inputs' to avoid shadowing the builtin.
    packs = [data_pack.DataPackContents(d, data_pack.UTF8)
             for d in resource_dicts]

    # RePack using whitelist
    output, _ = data_pack.RePackFromDataPackStrings(packs, whitelist)
    self.assertDictEqual(expected_with_whitelist, output,
                         'Incorrect resource output')

    # RePack a None whitelist
    output, _ = data_pack.RePackFromDataPackStrings(packs, None)
    self.assertDictEqual(expected_without_whitelist, output,
                         'Incorrect resource output')


if __name__ == '__main__':
  unittest.main()
nashve/mythbox
refs/heads/master
resources/lib/twisted/twisted/trial/test/test_doctest.py
61
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Test twisted's doctest support.
"""

from twisted.trial import itrial, runner, unittest, reporter
from twisted.trial.test import mockdoctest


class TestRunners(unittest.TestCase):
    """
    Tests for Twisted's doctest support.
    """

    def test_id(self):
        """
        Check that the id() of the doctests' case object contains the FQPN of
        the actual tests. We need this because id() has weird behaviour w/
        doctest in Python 2.3.
        """
        loader = runner.TestLoader()
        suite = loader.loadDoctests(mockdoctest)
        idPrefix = 'twisted.trial.test.mockdoctest.Counter'
        for test in suite._tests:
            self.assertIn(idPrefix, itrial.ITestCase(test).id())

    def makeDocSuite(self, module):
        """
        Return a L{runner.DocTestSuite} for the doctests in C{module}.
        """
        return self.assertWarns(
            DeprecationWarning, "DocTestSuite is deprecated in Twisted 8.0.",
            __file__,
            # BUGFIX: build the suite from the C{module} argument; it was
            # previously hard-coded to C{mockdoctest}, silently ignoring the
            # parameter.  Behavior is unchanged for the existing caller, which
            # passes C{mockdoctest}.
            lambda: runner.DocTestSuite(module))

    def test_correctCount(self):
        """
        L{countTestCases} returns the number of doctests in the module.
        """
        suite = self.makeDocSuite(mockdoctest)
        self.assertEqual(7, suite.countTestCases())

    def test_basicTrialIntegration(self):
        """
        L{loadDoctests} loads all of the doctests in the given module.
        """
        loader = runner.TestLoader()
        suite = loader.loadDoctests(mockdoctest)
        self.assertEqual(7, suite.countTestCases())

    def _testRun(self, suite):
        """
        Run C{suite} and check the result.
        """
        result = reporter.TestResult()
        suite.run(result)
        self.assertEqual(5, result.successes)
        # doctest reports failures as errors in 2.3
        self.assertEqual(2, len(result.errors) + len(result.failures))

    def test_expectedResults(self, count=1):
        """
        Trial can correctly run doctests with its xUnit test APIs.
        """
        suite = runner.TestLoader().loadDoctests(mockdoctest)
        self._testRun(suite)

    def test_repeatable(self):
        """
        Doctests should be runnable repeatably.
        """
        suite = runner.TestLoader().loadDoctests(mockdoctest)
        self._testRun(suite)
        self._testRun(suite)
mahendra-r/edx-platform
refs/heads/master
cms/djangoapps/contentstore/features/pages.py
66
# pylint: disable=missing-docstring # pylint: disable=redefined-outer-name # pylint: disable=unused-argument from lettuce import world, step from nose.tools import assert_equal, assert_in # pylint: disable=no-name-in-module CSS_FOR_TAB_ELEMENT = "li[data-tab-id='{0}'] input.toggle-checkbox" @step(u'I go to the pages page$') def go_to_static(step): menu_css = 'li.nav-course-courseware' static_css = 'li.nav-course-courseware-pages a' world.css_click(menu_css) world.css_click(static_css) @step(u'I add a new static page$') def add_page(step): button_css = 'a.new-button' world.css_click(button_css) @step(u'I should see a static page named "([^"]*)"$') def see_a_static_page_named_foo(step, name): pages_css = 'div.xmodule_StaticTabModule' page_name_html = world.css_html(pages_css) assert_equal(page_name_html.strip(), name) @step(u'I should not see any static pages$') def not_see_any_static_pages(step): pages_css = 'div.xmodule_StaticTabModule' assert world.is_css_not_present(pages_css, wait_time=30) @step(u'I "(edit|delete)" the static page$') def click_edit_or_delete(step, edit_or_delete): button_css = 'ul.component-actions a.%s-button' % edit_or_delete world.css_click(button_css) @step(u'I change the name to "([^"]*)"$') def change_name(step, new_name): settings_css = '.settings-button' world.css_click(settings_css) input_css = 'input.setting-input' world.css_fill(input_css, new_name) if world.is_firefox(): world.trigger_event(input_css) world.save_component() @step(u'I drag the first static page to the last$') def drag_first_static_page_to_last(step): drag_first_to_last_with_css('.component') @step(u'I have created a static page$') def create_static_page(step): step.given('I have opened the pages page in a new course') step.given('I add a new static page') @step(u'I have opened the pages page in a new course$') def open_pages_page_in_new_course(step): step.given('I have opened a new course in Studio') step.given('I go to the pages page') @step(u'I have created two 
different static pages$') def create_two_pages(step): step.given('I have created a static page') step.given('I "edit" the static page') step.given('I change the name to "First"') step.given('I add a new static page') # Verify order of pages _verify_page_names('First', 'Empty') @step(u'the static pages are switched$') def static_pages_are_switched(step): _verify_page_names('Empty', 'First') def _verify_page_names(first, second): world.wait_for( func=lambda _: len(world.css_find('.xmodule_StaticTabModule')) == 2, timeout=200, timeout_msg="Timed out waiting for two pages to be present" ) pages = world.css_find('.xmodule_StaticTabModule') assert_equal(pages[0].text, first) assert_equal(pages[1].text, second) @step(u'the built-in pages are in the default order$') def built_in_pages_in_default_order(step): expected_pages = ['Courseware', 'Course Info', 'Wiki', 'Progress'] see_pages_in_expected_order(expected_pages) @step(u'the built-in pages are switched$') def built_in_pages_switched(step): expected_pages = ['Courseware', 'Course Info', 'Progress', 'Wiki'] see_pages_in_expected_order(expected_pages) @step(u'the pages are in the default order$') def pages_in_default_order(step): expected_pages = ['Courseware', 'Course Info', 'Wiki', 'Progress', 'First', 'Empty'] see_pages_in_expected_order(expected_pages) @step(u'the pages are switched$$') def pages_are_switched(step): expected_pages = ['Courseware', 'Course Info', 'Progress', 'First', 'Empty', 'Wiki'] see_pages_in_expected_order(expected_pages) @step(u'I drag the first page to the last$') def drag_first_page_to_last(step): drag_first_to_last_with_css('.is-movable') @step(u'I should see the "([^"]*)" page as "(visible|hidden)"$') def page_is_visible_or_hidden(step, page_id, visible_or_hidden): hidden = visible_or_hidden == "hidden" assert_equal(world.css_find(CSS_FOR_TAB_ELEMENT.format(page_id)).checked, hidden) @step(u'I toggle the visibility of the "([^"]*)" page$') def page_toggle_visibility(step, page_id): 
world.css_find(CSS_FOR_TAB_ELEMENT.format(page_id))[0].click() def drag_first_to_last_with_css(css_class): # For some reason, the drag_and_drop method did not work in this case. draggables = world.css_find(css_class + ' .drag-handle') source = draggables.first target = draggables.last source.action_chains.click_and_hold(source._element).perform() # pylint: disable=protected-access source.action_chains.move_to_element_with_offset(target._element, 0, 50).perform() # pylint: disable=protected-access source.action_chains.release().perform() def see_pages_in_expected_order(page_names_in_expected_order): pages = world.css_find("li.course-tab") assert_equal(len(page_names_in_expected_order), len(pages)) for i, page_name in enumerate(page_names_in_expected_order): assert_in(page_name, pages[i].text)
MattsFleaMarket/python-for-android
refs/heads/master
python3-alpha/python3-src/android-scripts/pip_console.py
47
import os,os.path,sys,readline def modcmd(arg): os.system(sys.executable+" "+sys.prefix+"/bin/"+arg) if not(os.path.exists(sys.prefix+"/bin/pip")): print("You need to install pip first.") print("Input pip commands, ie: pip install {module}") while(True): cmd=input("-->") if (cmd==""): break; modcmd(cmd)
markand/duktape
refs/heads/master
tests/perf/test-base64-decode.py
4
import math import random def test(): tmp1 = [] tmp2 = [] print('build') for i in xrange(1024): tmp1.append('%x' % math.floor(random.random() * 16)) tmp1 = ''.join(tmp1) for i in xrange(1024): tmp2.append(tmp1) tmp2 = ''.join(tmp2) tmp2 = tmp2.encode('base64') print(len(tmp2)) print('run') for i in xrange(2000): res = tmp2.decode('base64') test()
ssanderson/numpy
refs/heads/master
numpy/lib/tests/test_shape_base.py
3
from __future__ import division, absolute_import, print_function import numpy as np from numpy.lib.shape_base import ( apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit, vsplit, dstack, column_stack, kron, tile ) from numpy.testing import ( run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, assert_raises, assert_warns ) class TestApplyAlongAxis(TestCase): def test_simple(self): a = np.ones((20, 10), 'd') assert_array_equal( apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) def test_simple101(self, level=11): a = np.ones((10, 101), 'd') assert_array_equal( apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1])) def test_3d(self): a = np.arange(27).reshape((3, 3, 3)) assert_array_equal(apply_along_axis(np.sum, 0, a), [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) def test_preserve_subclass(self): # this test is particularly malicious because matrix # refuses to become 1d def double(row): return row * 2 m = np.matrix([[0, 1], [2, 3]]) expected = np.matrix([[0, 2], [4, 6]]) result = apply_along_axis(double, 0, m) assert_(isinstance(result, np.matrix)) assert_array_equal(result, expected) result = apply_along_axis(double, 1, m) assert_(isinstance(result, np.matrix)) assert_array_equal(result, expected) def test_subclass(self): class MinimalSubclass(np.ndarray): data = 1 def minimal_function(array): return array.data a = np.zeros((6, 3)).view(MinimalSubclass) assert_array_equal( apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1]) ) def test_scalar_array(self, cls=np.ndarray): a = np.ones((6, 3)).view(cls) res = apply_along_axis(np.sum, 0, a) assert_(isinstance(res, cls)) assert_array_equal(res, np.array([6, 6, 6]).view(cls)) def test_0d_array(self, cls=np.ndarray): def sum_to_0d(x): """ Sum x, returning a 0d array of the same class """ assert_equal(x.ndim, 1) return np.squeeze(np.sum(x, keepdims=True)) a = np.ones((6, 3)).view(cls) res = apply_along_axis(sum_to_0d, 0, a) assert_(isinstance(res, cls)) 
assert_array_equal(res, np.array([6, 6, 6]).view(cls)) res = apply_along_axis(sum_to_0d, 1, a) assert_(isinstance(res, cls)) assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls)) def test_axis_insertion(self, cls=np.ndarray): def f1to2(x): """produces an assymmetric non-square matrix from x""" assert_equal(x.ndim, 1) return (x[::-1] * x[1:,None]).view(cls) a2d = np.arange(6*3).reshape((6, 3)) # 2d insertion along first axis actual = apply_along_axis(f1to2, 0, a2d) expected = np.stack([ f1to2(a2d[:,i]) for i in range(a2d.shape[1]) ], axis=-1).view(cls) assert_equal(type(actual), type(expected)) assert_equal(actual, expected) # 2d insertion along last axis actual = apply_along_axis(f1to2, 1, a2d) expected = np.stack([ f1to2(a2d[i,:]) for i in range(a2d.shape[0]) ], axis=0).view(cls) assert_equal(type(actual), type(expected)) assert_equal(actual, expected) # 3d insertion along middle axis a3d = np.arange(6*5*3).reshape((6, 5, 3)) actual = apply_along_axis(f1to2, 1, a3d) expected = np.stack([ np.stack([ f1to2(a3d[i,:,j]) for i in range(a3d.shape[0]) ], axis=0) for j in range(a3d.shape[2]) ], axis=-1).view(cls) assert_equal(type(actual), type(expected)) assert_equal(actual, expected) def test_subclass_preservation(self): class MinimalSubclass(np.ndarray): pass self.test_scalar_array(MinimalSubclass) self.test_0d_array(MinimalSubclass) self.test_axis_insertion(MinimalSubclass) def test_axis_insertion_ma(self): def f1to2(x): """produces an assymmetric non-square matrix from x""" assert_equal(x.ndim, 1) res = x[::-1] * x[1:,None] return np.ma.masked_where(res%5==0, res) a = np.arange(6*3).reshape((6, 3)) res = apply_along_axis(f1to2, 0, a) assert_(isinstance(res, np.ma.masked_array)) assert_equal(res.ndim, 3) assert_array_equal(res[:,:,0].mask, f1to2(a[:,0]).mask) assert_array_equal(res[:,:,1].mask, f1to2(a[:,1]).mask) assert_array_equal(res[:,:,2].mask, f1to2(a[:,2]).mask) def test_tuple_func1d(self): def sample_1d(x): return x[1], x[0] res = 
np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]])) assert_array_equal(res, np.array([[2, 1], [4, 3]])) def test_empty(self): # can't apply_along_axis when there's no chance to call the function def never_call(x): assert_(False) # should never be reached a = np.empty((0, 0)) assert_raises(ValueError, np.apply_along_axis, never_call, 0, a) assert_raises(ValueError, np.apply_along_axis, never_call, 1, a) # but it's sometimes ok with some non-zero dimensions def empty_to_1(x): assert_(len(x) == 0) return 1 a = np.empty((10, 0)) actual = np.apply_along_axis(empty_to_1, 1, a) assert_equal(actual, np.ones(10)) assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a) class TestApplyOverAxes(TestCase): def test_simple(self): a = np.arange(24).reshape(2, 3, 4) aoa_a = apply_over_axes(np.sum, a, [0, 2]) assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) class TestArraySplit(TestCase): def test_integer_0_split(self): a = np.arange(10) assert_raises(ValueError, array_split, a, 0) def test_integer_split(self): a = np.arange(10) res = array_split(a, 1) desired = [np.arange(10)] compare_results(res, desired) res = array_split(a, 2) desired = [np.arange(5), np.arange(5, 10)] compare_results(res, desired) res = array_split(a, 3) desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)] compare_results(res, desired) res = array_split(a, 4) desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), np.arange(8, 10)] compare_results(res, desired) res = array_split(a, 5) desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), np.arange(6, 8), np.arange(8, 10)] compare_results(res, desired) res = array_split(a, 6) desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)] compare_results(res, desired) res = array_split(a, 7) desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] compare_results(res, desired) res = array_split(a, 8) 
desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] compare_results(res, desired) res = array_split(a, 9) desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] compare_results(res, desired) res = array_split(a, 10) desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] compare_results(res, desired) res = array_split(a, 11) desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), np.arange(9, 10), np.array([])] compare_results(res, desired) def test_integer_split_2D_rows(self): a = np.array([np.arange(10), np.arange(10)]) res = array_split(a, 3, axis=0) tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), np.zeros((0, 10))] compare_results(res, tgt) assert_(a.dtype.type is res[-1].dtype.type) # Same thing for manual splits: res = array_split(a, [0, 1, 2], axis=0) tgt = [np.zeros((0, 10)), np.array([np.arange(10)]), np.array([np.arange(10)])] compare_results(res, tgt) assert_(a.dtype.type is res[-1].dtype.type) def test_integer_split_2D_cols(self): a = np.array([np.arange(10), np.arange(10)]) res = array_split(a, 3, axis=-1) desired = [np.array([np.arange(4), np.arange(4)]), np.array([np.arange(4, 7), np.arange(4, 7)]), np.array([np.arange(7, 10), np.arange(7, 10)])] compare_results(res, desired) def test_integer_split_2D_default(self): """ This will fail if we change default axis """ a = np.array([np.arange(10), np.arange(10)]) res = array_split(a, 3) tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]), np.zeros((0, 10))] compare_results(res, tgt) assert_(a.dtype.type is res[-1].dtype.type) # perhaps should check higher 
dimensions def test_index_split_simple(self): a = np.arange(10) indices = [1, 5, 7] res = array_split(a, indices, axis=-1) desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7), np.arange(7, 10)] compare_results(res, desired) def test_index_split_low_bound(self): a = np.arange(10) indices = [0, 5, 7] res = array_split(a, indices, axis=-1) desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), np.arange(7, 10)] compare_results(res, desired) def test_index_split_high_bound(self): a = np.arange(10) indices = [0, 5, 7, 10, 12] res = array_split(a, indices, axis=-1) desired = [np.array([]), np.arange(0, 5), np.arange(5, 7), np.arange(7, 10), np.array([]), np.array([])] compare_results(res, desired) class TestSplit(TestCase): # The split function is essentially the same as array_split, # except that it test if splitting will result in an # equal split. Only test for this case. def test_equal_split(self): a = np.arange(10) res = split(a, 2) desired = [np.arange(5), np.arange(5, 10)] compare_results(res, desired) def test_unequal_split(self): a = np.arange(10) assert_raises(ValueError, split, a, 3) class TestColumnStack(TestCase): def test_non_iterable(self): assert_raises(TypeError, column_stack, 1) class TestDstack(TestCase): def test_non_iterable(self): assert_raises(TypeError, dstack, 1) def test_0D_array(self): a = np.array(1) b = np.array(2) res = dstack([a, b]) desired = np.array([[[1, 2]]]) assert_array_equal(res, desired) def test_1D_array(self): a = np.array([1]) b = np.array([2]) res = dstack([a, b]) desired = np.array([[[1, 2]]]) assert_array_equal(res, desired) def test_2D_array(self): a = np.array([[1], [2]]) b = np.array([[1], [2]]) res = dstack([a, b]) desired = np.array([[[1, 1]], [[2, 2, ]]]) assert_array_equal(res, desired) def test_2D_array2(self): a = np.array([1, 2]) b = np.array([1, 2]) res = dstack([a, b]) desired = np.array([[[1, 1], [2, 2]]]) assert_array_equal(res, desired) # array_split has more comprehensive test of splitting. 
# only do simple test on hsplit, vsplit, and dsplit class TestHsplit(TestCase): """Only testing for integer splits. """ def test_non_iterable(self): assert_raises(ValueError, hsplit, 1, 1) def test_0D_array(self): a = np.array(1) try: hsplit(a, 2) assert_(0) except ValueError: pass def test_1D_array(self): a = np.array([1, 2, 3, 4]) res = hsplit(a, 2) desired = [np.array([1, 2]), np.array([3, 4])] compare_results(res, desired) def test_2D_array(self): a = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) res = hsplit(a, 2) desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])] compare_results(res, desired) class TestVsplit(TestCase): """Only testing for integer splits. """ def test_non_iterable(self): assert_raises(ValueError, vsplit, 1, 1) def test_0D_array(self): a = np.array(1) assert_raises(ValueError, vsplit, a, 2) def test_1D_array(self): a = np.array([1, 2, 3, 4]) try: vsplit(a, 2) assert_(0) except ValueError: pass def test_2D_array(self): a = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) res = vsplit(a, 2) desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] compare_results(res, desired) class TestDsplit(TestCase): # Only testing for integer splits. 
def test_non_iterable(self): assert_raises(ValueError, dsplit, 1, 1) def test_0D_array(self): a = np.array(1) assert_raises(ValueError, dsplit, a, 2) def test_1D_array(self): a = np.array([1, 2, 3, 4]) assert_raises(ValueError, dsplit, a, 2) def test_2D_array(self): a = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) try: dsplit(a, 2) assert_(0) except ValueError: pass def test_3D_array(self): a = np.array([[[1, 2, 3, 4], [1, 2, 3, 4]], [[1, 2, 3, 4], [1, 2, 3, 4]]]) res = dsplit(a, 2) desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] compare_results(res, desired) class TestSqueeze(TestCase): def test_basic(self): from numpy.random import rand a = rand(20, 10, 10, 1, 1) b = rand(20, 1, 10, 1, 20) c = rand(1, 1, 20, 10) assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10))) assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20))) assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10))) # Squeezing to 0-dim should still give an ndarray a = [[[1.5]]] res = np.squeeze(a) assert_equal(res, 1.5) assert_equal(res.ndim, 0) assert_equal(type(res), np.ndarray) class TestKron(TestCase): def test_return_type(self): a = np.ones([2, 2]) m = np.asmatrix(a) assert_equal(type(kron(a, a)), np.ndarray) assert_equal(type(kron(m, m)), np.matrix) assert_equal(type(kron(a, m)), np.matrix) assert_equal(type(kron(m, a)), np.matrix) class myarray(np.ndarray): __array_priority__ = 0.0 ma = myarray(a.shape, a.dtype, a.data) assert_equal(type(kron(a, a)), np.ndarray) assert_equal(type(kron(ma, ma)), myarray) assert_equal(type(kron(a, ma)), np.ndarray) assert_equal(type(kron(ma, a)), myarray) class TestTile(TestCase): def test_basic(self): a = np.array([0, 1, 2]) b = [[1, 2], [3, 4]] assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) assert_equal(tile(b, (2, 
1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], [1, 2, 1, 2], [3, 4, 3, 4]]) def test_tile_one_repetition_on_array_gh4679(self): a = np.arange(5) b = tile(a, 1) b += 2 assert_equal(a, np.arange(5)) def test_empty(self): a = np.array([[[]]]) b = np.array([[], []]) c = tile(b, 2).shape d = tile(a, (3, 2, 5)).shape assert_equal(c, (2, 0)) assert_equal(d, (3, 2, 0)) def test_kroncompare(self): from numpy.random import randint reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] for s in shape: b = randint(0, 10, size=s) for r in reps: a = np.ones(r, b.dtype) large = tile(b, r) klarge = kron(a, b) assert_equal(large, klarge) class TestMayShareMemory(TestCase): def test_basic(self): d = np.ones((50, 60)) d2 = np.ones((30, 60, 6)) self.assertTrue(np.may_share_memory(d, d)) self.assertTrue(np.may_share_memory(d, d[::-1])) self.assertTrue(np.may_share_memory(d, d[::2])) self.assertTrue(np.may_share_memory(d, d[1:, ::-1])) self.assertFalse(np.may_share_memory(d[::-1], d2)) self.assertFalse(np.may_share_memory(d[::2], d2)) self.assertFalse(np.may_share_memory(d[1:, ::-1], d2)) self.assertTrue(np.may_share_memory(d2[1:, ::-1], d2)) # Utility def compare_results(res, desired): for i in range(len(desired)): assert_array_equal(res[i], desired[i]) if __name__ == "__main__": run_module_suite()
hellofreedom/ansible-modules-core
refs/heads/devel
database/postgresql/postgresql_privs.py
144
#!/usr/bin/python # -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = """ --- module: postgresql_privs version_added: "1.2" short_description: Grant or revoke privileges on PostgreSQL database objects. description: - Grant or revoke privileges on PostgreSQL database objects. - This module is basically a wrapper around most of the functionality of PostgreSQL's GRANT and REVOKE statements with detection of changes (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)) options: database: description: - Name of database to connect to. - 'Alias: I(db)' required: yes state: description: - If C(present), the specified privileges are granted, if C(absent) they are revoked. required: no default: present choices: [present, absent] privs: description: - Comma separated list of privileges to grant/revoke. - 'Alias: I(priv)' required: no type: description: - Type of database object to set privileges on. required: no default: table choices: [table, sequence, function, database, schema, language, tablespace, group] objs: description: - Comma separated list of database objects to set privileges on. - If I(type) is C(table) or C(sequence), the special value C(ALL_IN_SCHEMA) can be provided instead to specify all database objects of type I(type) in the schema specified via I(schema). (This also works with PostgreSQL < 9.0.) 
- If I(type) is C(database), this parameter can be omitted, in which case privileges are set for the database specified via I(database). - 'If I(type) is I(function), colons (":") in object names will be replaced with commas (needed to specify function signatures, see examples)' - 'Alias: I(obj)' required: no schema: description: - Schema that contains the database objects specified via I(objs). - May only be provided if I(type) is C(table), C(sequence) or C(function). Defaults to C(public) in these cases. required: no roles: description: - Comma separated list of role (user/group) names to set permissions for. - The special value C(PUBLIC) can be provided instead to set permissions for the implicitly defined PUBLIC group. - 'Alias: I(role)' required: yes grant_option: description: - Whether C(role) may grant/revoke the specified privileges/group memberships to others. - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes. - I(grant_option) only has an effect if I(state) is C(present). - 'Alias: I(admin_option)' required: no choices: ['yes', 'no'] host: description: - Database host address. If unspecified, connect via Unix socket. - 'Alias: I(login_host)' default: null required: no port: description: - Database port to connect to. required: no default: 5432 unix_socket: description: - Path to a Unix domain socket for local connections. - 'Alias: I(login_unix_socket)' required: false default: null login: description: - The username to authenticate with. - 'Alias: I(login_user)' default: postgres password: description: - The password to authenticate with. - 'Alias: I(login_password))' default: null required: no notes: - Default authentication assumes that postgresql_privs is run by the C(postgres) user on the remote host. (Ansible's C(user) or C(sudo-user)). - This module requires Python package I(psycopg2) to be installed on the remote host. 
In the default case of the remote host also being the PostgreSQL server, PostgreSQL has to be installed there as well, obviously. For Debian/Ubuntu-based systems, install packages I(postgresql) and I(python-psycopg2). - Parameters that accept comma separated lists (I(privs), I(objs), I(roles)) have singular alias names (I(priv), I(obj), I(role)). - To revoke only C(GRANT OPTION) for a specific object, set I(state) to C(present) and I(grant_option) to C(no) (see examples). - Note that when revoking privileges from a role R, this role may still have access via privileges granted to any role R is a member of including C(PUBLIC). - Note that when revoking privileges from a role R, you do so as the user specified via I(login). If R has been granted the same privileges by another user also, R can still access database objects via these privileges. - When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs). requirements: [psycopg2] author: "Bernhard Weitzhofer (@b6d)" """ EXAMPLES = """ # On database "library": # GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors # TO librarian, reader WITH GRANT OPTION - postgresql_privs: > database=library state=present privs=SELECT,INSERT,UPDATE type=table objs=books,authors schema=public roles=librarian,reader grant_option=yes # Same as above leveraging default values: - postgresql_privs: > db=library privs=SELECT,INSERT,UPDATE objs=books,authors roles=librarian,reader grant_option=yes # REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader # Note that role "reader" will be *granted* INSERT privilege itself if this # isn't already the case (since state=present). - postgresql_privs: > db=library state=present priv=INSERT obj=books role=reader grant_option=no # REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader # "public" is the default schema. This also works for PostgreSQL 8.x. 
- postgresql_privs: > db=library state=absent privs=INSERT,UPDATE objs=ALL_IN_SCHEMA role=reader # GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian - postgresql_privs: > db=library privs=ALL type=schema objs=public,math role=librarian # GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader # Note the separation of arguments with colons. - postgresql_privs: > db=library privs=ALL type=function obj=add(int:int) schema=math roles=librarian,reader # GRANT librarian, reader TO alice, bob WITH ADMIN OPTION # Note that group role memberships apply cluster-wide and therefore are not # restricted to database "library" here. - postgresql_privs: > db=library type=group objs=librarian,reader roles=alice,bob admin_option=yes # GRANT ALL PRIVILEGES ON DATABASE library TO librarian # Note that here "db=postgres" specifies the database to connect to, not the # database to grant privileges on (which is specified via the "objs" param) - postgresql_privs: > db=postgres privs=ALL type=database obj=library role=librarian # GRANT ALL PRIVILEGES ON DATABASE library TO librarian # If objs is omitted for type "database", it defaults to the database # to which the connection is established - postgresql_privs: > db=library privs=ALL type=database role=librarian """ try: import psycopg2 import psycopg2.extensions except ImportError: psycopg2 = None VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL', 'USAGE')) class Error(Exception): pass # We don't have functools.partial in Python < 2.5 def partial(f, *args, **kwargs): """Partial function application""" def g(*g_args, **g_kwargs): new_kwargs = kwargs.copy() new_kwargs.update(g_kwargs) return f(*(args + g_args), **g_kwargs) g.f = f g.args = args g.kwargs = kwargs return g class Connection(object): """Wrapper around a psycopg2 connection with some convenience methods""" def __init__(self, params): 
self.database = params.database # To use defaults values, keyword arguments must be absent, so # check which values are empty and don't include in the **kw # dictionary params_map = { "host":"host", "login":"user", "password":"password", "port":"port", "database": "database", } kw = dict( (params_map[k], getattr(params, k)) for k in params_map if getattr(params, k) != '' ) # If a unix_socket is specified, incorporate it here. is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" if is_localhost and params.unix_socket != "": kw["host"] = params.unix_socket self.connection = psycopg2.connect(**kw) self.cursor = self.connection.cursor() def commit(self): self.connection.commit() def rollback(self): self.connection.rollback() @property def encoding(self): """Connection encoding in Python-compatible form""" return psycopg2.extensions.encodings[self.connection.encoding] ### Methods for querying database objects # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like # phrases in GRANT or REVOKE statements, therefore alternative methods are # provided here. def schema_exists(self, schema): query = """SELECT count(*) FROM pg_catalog.pg_namespace WHERE nspname = %s""" self.cursor.execute(query, (schema,)) return self.cursor.fetchone()[0] > 0 def get_all_tables_in_schema(self, schema): if not self.schema_exists(schema): raise Error('Schema "%s" does not exist.' % schema) query = """SELECT relname FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind in ('r', 'v')""" self.cursor.execute(query, (schema,)) return [t[0] for t in self.cursor.fetchall()] def get_all_sequences_in_schema(self, schema): if not self.schema_exists(schema): raise Error('Schema "%s" does not exist.' 
% schema) query = """SELECT relname FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind = 'S'""" self.cursor.execute(query, (schema,)) return [t[0] for t in self.cursor.fetchall()] ### Methods for getting access control lists and group membership info # To determine whether anything has changed after granting/revoking # privileges, we compare the access control lists of the specified database # objects before and afterwards. Python's list/string comparison should # suffice for change detection, we should not actually have to parse ACLs. # The same should apply to group membership information. def get_table_acls(self, schema, tables): query = """SELECT relacl FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind = 'r' AND relname = ANY (%s) ORDER BY relname""" self.cursor.execute(query, (schema, tables)) return [t[0] for t in self.cursor.fetchall()] def get_sequence_acls(self, schema, sequences): query = """SELECT relacl FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s) ORDER BY relname""" self.cursor.execute(query, (schema, sequences)) return [t[0] for t in self.cursor.fetchall()] def get_function_acls(self, schema, function_signatures): funcnames = [f.split('(', 1)[0] for f in function_signatures] query = """SELECT proacl FROM pg_catalog.pg_proc p JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace WHERE nspname = %s AND proname = ANY (%s) ORDER BY proname, proargtypes""" self.cursor.execute(query, (schema, funcnames)) return [t[0] for t in self.cursor.fetchall()] def get_schema_acls(self, schemas): query = """SELECT nspacl FROM pg_catalog.pg_namespace WHERE nspname = ANY (%s) ORDER BY nspname""" self.cursor.execute(query, (schemas,)) return [t[0] for t in self.cursor.fetchall()] def get_language_acls(self, languages): query = """SELECT lanacl 
FROM pg_catalog.pg_language WHERE lanname = ANY (%s) ORDER BY lanname""" self.cursor.execute(query, (languages,)) return [t[0] for t in self.cursor.fetchall()] def get_tablespace_acls(self, tablespaces): query = """SELECT spcacl FROM pg_catalog.pg_tablespace WHERE spcname = ANY (%s) ORDER BY spcname""" self.cursor.execute(query, (tablespaces,)) return [t[0] for t in self.cursor.fetchall()] def get_database_acls(self, databases): query = """SELECT datacl FROM pg_catalog.pg_database WHERE datname = ANY (%s) ORDER BY datname""" self.cursor.execute(query, (databases,)) return [t[0] for t in self.cursor.fetchall()] def get_group_memberships(self, groups): query = """SELECT roleid, grantor, member, admin_option FROM pg_catalog.pg_auth_members am JOIN pg_catalog.pg_roles r ON r.oid = am.roleid WHERE r.rolname = ANY(%s) ORDER BY roleid, grantor, member""" self.cursor.execute(query, (groups,)) return self.cursor.fetchall() ### Manipulating privileges def manipulate_privs(self, obj_type, privs, objs, roles, state, grant_option, schema_qualifier=None): """Manipulate database object privileges. :param obj_type: Type of database object to grant/revoke privileges for. :param privs: Either a list of privileges to grant/revoke or None if type is "group". :param objs: List of database objects to grant/revoke privileges for. :param roles: Either a list of role names or "PUBLIC" for the implicitly defined "PUBLIC" group :param state: "present" to grant privileges, "absent" to revoke. :param grant_option: Only for state "present": If True, set grant/admin option. If False, revoke it. If None, don't change grant option. :param schema_qualifier: Some object types ("TABLE", "SEQUENCE", "FUNCTION") must be qualified by schema. Ignored for other Types. 
""" # get_status: function to get current status if obj_type == 'table': get_status = partial(self.get_table_acls, schema_qualifier) elif obj_type == 'sequence': get_status = partial(self.get_sequence_acls, schema_qualifier) elif obj_type == 'function': get_status = partial(self.get_function_acls, schema_qualifier) elif obj_type == 'schema': get_status = self.get_schema_acls elif obj_type == 'language': get_status = self.get_language_acls elif obj_type == 'tablespace': get_status = self.get_tablespace_acls elif obj_type == 'database': get_status = self.get_database_acls elif obj_type == 'group': get_status = self.get_group_memberships else: raise Error('Unsupported database object type "%s".' % obj_type) # Return False (nothing has changed) if there are no objs to work on. if not objs: return False # obj_ids: quoted db object identifiers (sometimes schema-qualified) if obj_type == 'function': obj_ids = [] for obj in objs: try: f, args = obj.split('(', 1) except: raise Error('Illegal function signature: "%s".' 
% obj) obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args)) elif obj_type in ['table', 'sequence']: obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs] else: obj_ids = ['"%s"' % o for o in objs] # set_what: SQL-fragment specifying what to set for the target roles: # Either group membership or privileges on objects of a certain type if obj_type == 'group': set_what = ','.join(pg_quote_identifier(i, 'role') for i in obj_ids) else: # function types are already quoted above if obj_type != 'function': obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids] # Note: obj_type has been checked against a set of string literals # and privs was escaped when it was parsed set_what = '%s ON %s %s' % (','.join(privs), obj_type, ','.join(obj_ids)) # for_whom: SQL-fragment specifying for whom to set the above if roles == 'PUBLIC': for_whom = 'PUBLIC' else: for_whom = ','.join(pg_quote_identifier(r, 'role') for r in roles) status_before = get_status(objs) if state == 'present': if grant_option: if obj_type == 'group': query = 'GRANT %s TO %s WITH ADMIN OPTION' else: query = 'GRANT %s TO %s WITH GRANT OPTION' else: query = 'GRANT %s TO %s' self.cursor.execute(query % (set_what, for_whom)) # Only revoke GRANT/ADMIN OPTION if grant_option actually is False. 
if grant_option == False: if obj_type == 'group': query = 'REVOKE ADMIN OPTION FOR %s FROM %s' else: query = 'REVOKE GRANT OPTION FOR %s FROM %s' self.cursor.execute(query % (set_what, for_whom)) else: query = 'REVOKE %s FROM %s' self.cursor.execute(query % (set_what, for_whom)) status_after = get_status(objs) return status_before != status_after def main(): module = AnsibleModule( argument_spec = dict( database=dict(required=True, aliases=['db']), state=dict(default='present', choices=['present', 'absent']), privs=dict(required=False, aliases=['priv']), type=dict(default='table', choices=['table', 'sequence', 'function', 'database', 'schema', 'language', 'tablespace', 'group']), objs=dict(required=False, aliases=['obj']), schema=dict(required=False), roles=dict(required=True, aliases=['role']), grant_option=dict(required=False, type='bool', aliases=['admin_option']), host=dict(default='', aliases=['login_host']), port=dict(type='int', default=5432), unix_socket=dict(default='', aliases=['login_unix_socket']), login=dict(default='postgres', aliases=['login_user']), password=dict(default='', aliases=['login_password']) ), supports_check_mode = True ) # Create type object as namespace for module params p = type('Params', (), module.params) # param "schema": default, allowed depends on param "type" if p.type in ['table', 'sequence', 'function']: p.schema = p.schema or 'public' elif p.schema: module.fail_json(msg='Argument "schema" is not allowed ' 'for type "%s".' % p.type) # param "objs": default, required depends on param "type" if p.type == 'database': p.objs = p.objs or p.database elif not p.objs: module.fail_json(msg='Argument "objs" is required ' 'for type "%s".' % p.type) # param "privs": allowed, required depends on param "type" if p.type == 'group': if p.privs: module.fail_json(msg='Argument "privs" is not allowed ' 'for type "group".') elif not p.privs: module.fail_json(msg='Argument "privs" is required ' 'for type "%s".' 
% p.type) # Connect to Database if not psycopg2: module.fail_json(msg='Python module "psycopg2" must be installed.') try: conn = Connection(p) except psycopg2.Error, e: module.fail_json(msg='Could not connect to database: %s' % e) try: # privs if p.privs: privs = frozenset(pr.upper() for pr in p.privs.split(',')) if not privs.issubset(VALID_PRIVS): module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS)) else: privs = None # objs: if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA': objs = conn.get_all_tables_in_schema(p.schema) elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA': objs = conn.get_all_sequences_in_schema(p.schema) else: objs = p.objs.split(',') # function signatures are encoded using ':' to separate args if p.type == 'function': objs = [obj.replace(':', ',') for obj in objs] # roles if p.roles == 'PUBLIC': roles = 'PUBLIC' else: roles = p.roles.split(',') changed = conn.manipulate_privs( obj_type = p.type, privs = privs, objs = objs, roles = roles, state = p.state, grant_option = p.grant_option, schema_qualifier=p.schema ) except Error, e: conn.rollback() module.fail_json(msg=e.message) except psycopg2.Error, e: conn.rollback() # psycopg2 errors come in connection encoding, reencode msg = e.message.decode(conn.encoding).encode(sys.getdefaultencoding(), 'replace') module.fail_json(msg=msg) if module.check_mode: conn.rollback() else: conn.commit() module.exit_json(changed=changed) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.database import * if __name__ == '__main__': main()
suvitorg/suvit-odoo
refs/heads/10.0
suvit_database_cleanup/wizards/__init__.py
1
from . import purge_fields from . import database_all_cleanup
vipins/ccccms
refs/heads/master
env/Lib/site-packages/easy_thumbnails/engine.py
4
import os

try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO

try:
    from PIL import Image
except ImportError:
    import Image

from easy_thumbnails import utils
from easy_thumbnails.conf import settings


def _use_default_options(options):
    """Merge *options* over ``THUMBNAIL_DEFAULT_OPTIONS``.

    Explicitly passed options win over the configured defaults. When no
    defaults are configured, *options* is returned unchanged.
    """
    if not settings.THUMBNAIL_DEFAULT_OPTIONS:
        return options
    default_options = settings.THUMBNAIL_DEFAULT_OPTIONS.copy()
    default_options.update(options)
    return default_options


def process_image(source, processor_options, processors=None):
    """
    Process a source PIL image through a series of image processors,
    returning the (potentially) altered image.

    When *processors* is ``None``, the processor callables are imported
    from ``settings.THUMBNAIL_PROCESSORS``.
    """
    processor_options = _use_default_options(processor_options)
    if processors is None:
        processors = [utils.dynamic_import(name)
                      for name in settings.THUMBNAIL_PROCESSORS]
    image = source
    for processor in processors:
        image = processor(image, **processor_options)
    return image


def save_image(image, destination=None, filename=None, **options):
    """
    Save a PIL image.

    The output format is derived from *filename*'s extension, falling back
    to JPEG; JPEG output gets a default ``quality`` of 85. Returns the
    destination file-like object (a fresh ``StringIO`` when none is given),
    rewound to the start when it is seekable.
    """
    if destination is None:
        destination = StringIO()
    filename = filename or ''
    # Map the file extension to a PIL format name; default to JPEG.
    # Renamed from ``format`` to avoid shadowing the builtin.
    img_format = Image.EXTENSION.get(os.path.splitext(filename)[1], 'JPEG')
    if img_format == 'JPEG':
        options.setdefault('quality', 85)
    try:
        image.save(destination, format=img_format, optimize=1, **options)
    except IOError:
        # Try again, without optimization (PIL can't optimize an image
        # larger than ImageFile.MAXBLOCK, which is 64k by default).
        # BUGFIX: the fallback save now runs only when the optimized save
        # fails; previously it ran unconditionally, writing the image a
        # second time onto the same destination after a successful save.
        image.save(destination, format=img_format, **options)
    if hasattr(destination, 'seek'):
        destination.seek(0)
    return destination


def generate_source_image(source_file, processor_options, generators=None):
    """
    Processes a source ``File`` through a series of source generators,
    stopping once a generator returns an image.

    The return value is this image instance or ``None`` if no generators
    return an image.

    If the source file cannot be opened, it will be set to ``None`` and
    still passed to the generators.
    """
    processor_options = _use_default_options(processor_options)
    # Remember whether the file started out closed so we can restore that
    # state in the ``finally`` block below.
    was_closed = source_file.closed
    if generators is None:
        generators = [utils.dynamic_import(name)
                      for name in settings.THUMBNAIL_SOURCE_GENERATORS]
    try:
        source = source_file
        try:
            source.open()
        except Exception:
            # Could not (re)open the source: generators are still invoked,
            # but with ``None`` so they can decide how to proceed.
            source = None
            was_closed = False
        for generator in generators:
            image = generator(source, **processor_options)
            if image:
                return image
    finally:
        if was_closed:
            source_file.close()
edx-solutions/edx-platform
refs/heads/master
common/djangoapps/third_party_auth/tests/test_settings.py
4
"""Unit tests for settings.py.""" import unittest from mock import patch from third_party_auth import provider, settings from third_party_auth.tests import testutil _ORIGINAL_AUTHENTICATION_BACKENDS = ['first_authentication_backend'] _ORIGINAL_INSTALLED_APPS = ['first_installed_app'] _ORIGINAL_MIDDLEWARE_CLASSES = ['first_middleware_class'] _ORIGINAL_TEMPLATE_CONTEXT_PROCESSORS = ['first_template_context_preprocessor'] _SETTINGS_MAP = { 'AUTHENTICATION_BACKENDS': _ORIGINAL_AUTHENTICATION_BACKENDS, 'INSTALLED_APPS': _ORIGINAL_INSTALLED_APPS, 'MIDDLEWARE': _ORIGINAL_MIDDLEWARE_CLASSES, 'TEMPLATES': [{ 'OPTIONS': { 'context_processors': _ORIGINAL_TEMPLATE_CONTEXT_PROCESSORS } }], 'FEATURES': {}, } _SETTINGS_MAP['DEFAULT_TEMPLATE_ENGINE'] = _SETTINGS_MAP['TEMPLATES'][0] class SettingsUnitTest(testutil.TestCase): """Unit tests for settings management code.""" # Suppress spurious no-member warning on fakes. # pylint: disable=no-member def setUp(self): super(SettingsUnitTest, self).setUp() self.settings = testutil.FakeDjangoSettings(_SETTINGS_MAP) def test_apply_settings_adds_exception_middleware(self): settings.apply_settings(self.settings) self.assertIn('third_party_auth.middleware.ExceptionMiddleware', self.settings.MIDDLEWARE) def test_apply_settings_adds_fields_stored_in_session(self): settings.apply_settings(self.settings) self.assertEqual(['auth_entry', 'next'], self.settings.FIELDS_STORED_IN_SESSION) @unittest.skipUnless(testutil.AUTH_FEATURE_ENABLED, testutil.AUTH_FEATURES_KEY + ' not enabled') def test_apply_settings_enables_no_providers_by_default(self): # Providers are only enabled via ConfigurationModels in the database settings.apply_settings(self.settings) self.assertEqual([], provider.Registry.enabled()) def test_apply_settings_turns_off_raising_social_exceptions(self): # Guard against submitting a conf change that's convenient in dev but # bad in prod. 
settings.apply_settings(self.settings) self.assertFalse(self.settings.SOCIAL_AUTH_RAISE_EXCEPTIONS) def test_apply_settings_turns_off_redirect_sanitization(self): settings.apply_settings(self.settings) self.assertFalse(self.settings.SOCIAL_AUTH_SANITIZE_REDIRECTS) def test_apply_settings_avoids_default_username_check(self): # Avoid the default username check where non-ascii characters are not # allowed when unicode username is enabled settings.apply_settings(self.settings) self.assertTrue(self.settings.SOCIAL_AUTH_CLEAN_USERNAMES) # verify default behavior with patch.dict('django.conf.settings.FEATURES', {'ENABLE_UNICODE_USERNAME': True}): settings.apply_settings(self.settings) self.assertFalse(self.settings.SOCIAL_AUTH_CLEAN_USERNAMES)
kcarnold/counterfactual-lm
refs/heads/master
code/simulation.py
1
import pickle import numpy as np from scipy.optimize import minimize import matplotlib.pyplot as plt import string import pandas as pd import seaborn as sns import tqdm from joblib import Memory import util import decoder from paths import paths from util import logsumexp from counterfactual import get_features, Objective, contextual_expected_reward_samples #%% mem = Memory('cache') #%% Load docs print("Loading docs") docsets = {k: pickle.load(open(paths.parent + '/yelp_preproc/{}_data.pkl'.format(k), 'rb')) for k in ['train', 'valid', 'test']} #%% Load model all_model = decoder.Model.from_basename(paths.model_basename('yelp_all_as_sents')) all_model.prune_bigrams() #%% num_samples = 2000 num_suggestions_per_context = 3 num_words_per_suggestion = 5 num_suggestions_offered = num_samples * num_suggestions_per_context #%% @mem.cache() def gen_suggs(num_contexts, docset, rand_seed, **params): params.setdefault('temperature', .5) docs = docsets[docset] rs = np.random.RandomState(rand_seed) np.random.seed(rand_seed) sampled_docids = rs.choice(len(docs), num_contexts, replace=False) sampled_texts = docs.tokenized.iloc[sampled_docids] contexts = [] suggestions = [] for text in tqdm.tqdm(sampled_texts): while True: sent = rs.choice(text.split('\n')).lower().split() loc = rs.choice(len(sent)) context = sent[:loc] try: suggs = decoder.generate_diverse_phrases(all_model, ['<s>'] + context, num_suggestions_per_context, num_words_per_suggestion, **params) if len(suggs) < 3: continue except decoder.GenerationFailedException: continue contexts.append(context) suggestions.append(suggs) break return contexts, suggestions #%% print("Generate initial suggestions") contexts, ref_suggestions = gen_suggs(num_contexts=num_samples, docset='train', rand_seed=0) #%% Define some desirability functions def desire_null(context, phrase): return 0. 
def desire_longword(context, phrase, length_bonus_min_length=6): word_lengths = np.array([len(w) >= length_bonus_min_length if w[0] in string.ascii_letters else 0 for w in phrase]) return np.mean(word_lengths) #%% def log_left_over(xx): return np.log1p(-np.sum(np.exp(xx))) #%% Simulate acceptance behavior # Define actions actions = [(None, 0)] + [(sug_no, num_words) for sug_no in range(num_suggestions_per_context) for num_words in range(1, num_words_per_suggestion + 1)] act_which_sugg, act_num_words = zip(*actions) act_which_sugg = np.array(act_which_sugg) act_num_words = np.array(act_num_words) #%% from collections import namedtuple class Policy(namedtuple('Policy', 'desire gain bias after_first_word_scaling')): @classmethod def make(cls, desire, gain=1., bias=0., after_first_word_scaling=1.): return cls(desire, gain, bias, after_first_word_scaling) def action_scores(context, suggs, policy, base_model=all_model): '''Compute logits for possible actions for a batch of suggestions. suggs: [(words, gen_logprobs)] desirability_fn: words -> [desirability(accept up to word i) for i in 1..|W|] ''' desirability_fn = policy.desire gain = policy.gain bias = policy.bias after_first_word_scaling = policy.after_first_word_scaling state = base_model.get_state(context, bos=True)[0] sugg_words = [words for words, _ in suggs] assert all(len(words) == num_words_per_suggestion for words in sugg_words) predictive_logprobs = [base_model.score_seq_by_word(state, words) for words in sugg_words] # Desirability is per _suggestion_ desirabilities = gain * np.array([desirability_fn(context, words) for words in sugg_words]) + bias assert len(desirabilities) == num_suggestions_per_context # Pick a suggestion, or null, according to the desirability-biased probability of its first word. 
raw_logprobs = [plp[0] for plp in predictive_logprobs] assert len(raw_logprobs) == num_suggestions_per_context everything_else_logprob = log_left_over(raw_logprobs) fw_option_logprobs = np.r_[everything_else_logprob, raw_logprobs + desirabilities] fw_option_logprobs -= logsumexp(fw_option_logprobs) # Continue it according to the conditional distribution. logprobs_by_action = [fw_option_logprobs[0]] # null action for sug_idx, (baseline_logprob, words, plp, desired) in enumerate(zip(fw_option_logprobs[1:], sugg_words, predictive_logprobs, desirabilities)): # Conditional on this suggestion being chosen, what's the prob of picking n words? # Consider stopping at each word in succession for word_idx, word in enumerate(words[1:], start=1): logprob = plp[word_idx] everything_else_logprob = np.log1p(-np.exp(logprob)) option_logprobs = np.r_[everything_else_logprob, logprob + desired * after_first_word_scaling] option_logprobs -= logsumexp(option_logprobs) option_logprobs += baseline_logprob # The probability of accepting n words is the probability of stopping at word n+1 logprobs_by_action.append(option_logprobs[0]) baseline_logprob = option_logprobs[1] logprobs_by_action.append(baseline_logprob) assert len(logprobs_by_action) == len(actions) assert np.isclose(logsumexp(np.array(logprobs_by_action)), 0.) 
return logprobs_by_action def all_action_scores(contexts, suggestions, policy, base_model=all_model): return np.array([action_scores(context, suggs, policy, base_model=base_model) for context, suggs in zip(contexts, suggestions)]) null_policy = Policy.make(desire_null) null_scores = all_action_scores(contexts, ref_suggestions, null_policy) longword_policy = Policy.make(desire_longword, gain=10) longword_scores = all_action_scores(contexts, ref_suggestions, longword_policy) #%% print("The two policies agree on which suggestion to take {:.1%}".format( np.mean(act_which_sugg[np.argmax(null_scores, axis=1)] == act_which_sugg[np.argmax(longword_scores, axis=1)]))) #%% def summarize_policy(name, logprobs): probs = np.exp(logprobs) expected_numwords = np.sum(act_num_words * probs, axis=1) print("{} would accept {:.1%} +- {:.1%}, expect {:.1f} words per suggestion +- {:.2f}".format( name, np.sum(1-probs[:, 0]) / num_suggestions_offered, np.std(1 - probs[:, 0]) / np.sqrt(num_suggestions_offered), np.sum(expected_numwords)/num_suggestions_offered, np.std(expected_numwords) / np.sqrt(num_suggestions_offered))) summarize_policy('null', null_scores) summarize_policy('longword', longword_scores) #%% def get_all_features(contexts, suggestions, base_model=all_model): return [[get_features(base_model, context, sugg_words) for sugg_words, _ in suggs] for context, suggs in zip(tqdm.tqdm(contexts), suggestions)] print("Computing features for ref suggestions") ref_feats = get_all_features(contexts, ref_suggestions) #%% def collect_acceptances(suggestions, features, action_choices, base_model=all_model): suggss = [] all_features = [] accepted_features = [] generation_logprobs = [] chosens = [] observed_rewards = [] for suggs, feats, choice in zip(suggestions, features, action_choices): if choice == 0: continue sug_no, num_words = actions[choice] sugg_words, gen_probs = suggs[sug_no] generation_logprobs.append(gen_probs) suggss.append(suggs) all_features.append(feats) 
accepted_features.append(feats[sug_no]) chosens.append(sug_no) observed_rewards.append(num_words) return suggss, all_features, accepted_features, generation_logprobs, chosens, observed_rewards #%% rs = np.random.RandomState(100) longword_action_choices = np.array([rs.choice(len(p), p=np.exp(p)) for p in longword_scores]) print('Accepted {:.1%} of suggestions, {} words, avg {:.2f} words per sugg offered'.format( np.sum(longword_action_choices > 0) / num_suggestions_offered, np.sum(act_num_words[longword_action_choices]), np.sum(act_num_words[longword_action_choices]) / num_suggestions_offered)) #%% suggss, all_features, features_chosen, generation_logprobs, chosens, observed_rewards = collect_acceptances( ref_suggestions, ref_feats, longword_action_choices) #%% #xx = np.random.standard_normal(2000) #samps = xx[np.random.randint(len(xx), size=(len(xx), 10000))] #means = np.mean(samps, axis=0) ##low_percentile = np.percentile(samps, 2.5, axis=0) #sns.distplot(means) #(1/np.std(means))**2 #%% #%% Compute TIP-estimated expected reward given a dataset of suggestion -> acceptances # NOTE: temperature is the *inverse* of weights[0]. 
ref_weights= np.r_[2., 0., np.zeros(12)] NUM_WEIGHTS = len(ref_weights) #%% def plot_expected_reward_estimate(cers, Ms, orig_scores, new_scores, filt=None): qs, ps, rewards = np.array(cers).T importance_ratio = np.exp(ps-qs) estimated_reward = rewards[:,None] * np.minimum(importance_ratio[:,None], Ms) if filt is not None: estimated_reward = estimated_reward[filt] frac_accepted = len(estimated_reward) / len(orig_scores) estimated_reward *= frac_accepted mean = np.mean(estimated_reward, axis=0) sem = .1*Ms # sem = np.std(estimated_reward, axis=0) / np.sqrt(len(ps)) plt.fill_between(Ms, mean - sem, mean + sem, color=[.8,.8,.8]) plt.plot(Ms, mean) # plt.errorbar(Ms, [np.mean((rewards * np.minimum(M, np.exp(ps-qs)))) for M in Ms], # yerr=[2*np.std(rewards * np.minimum(M, np.exp(ps-qs)))/np.sqrt(len(ps)) for M in Ms]) # plt.plot(Ms, [np.mean((rewards * np.minimum(M, np.exp(ps-qs)))) - 2*np.std(rewards * np.minimum(M, np.exp(ps-qs)))/np.sqrt(len(ps)) for M in Ms]) if orig_scores is not None: original_expected = np.mean(np.sum(act_num_words * np.exp(orig_scores), axis=1)) plt.hlines(original_expected, Ms.min(), Ms.max(), color='gray') if new_scores is not None: new_expected = np.mean(np.sum(act_num_words * np.exp(new_scores), axis=1)) plt.hlines(new_expected, Ms.min(), Ms.max()) plt.xlabel('M') plt.ylabel('estimated expected reward, by context') #%% Generate a new set of suggestions under the new suggestion policy def weights_to_model_params(weights): return dict( temperature=1./weights[0], length_bonus_amt=weights[1], pos_weights=weights[2:]) #%% def actual_expected_reward_from_weights(weights, accept_policy, num_contexts=500): model_params = weights_to_model_params(weights) contexts, suggestions = gen_suggs(num_contexts=num_contexts, docset='valid', rand_seed=0, **model_params) scores_under_policy = all_action_scores(contexts, suggestions, accept_policy) expected_num_words = np.sum(act_num_words * np.exp(scores_under_policy), axis=1) return np.mean(expected_num_words), 
np.std(expected_num_words) / np.sqrt(num_contexts) #%% Vary amount of data and M, train model, compute actual expected reward under new suggestion policy. def subsample_trial(i, num_train, M, ref_suggestions, ref_feats, ref_scores, accept_policy, val_num_contexts=500): print("Starting trial", i, num_train, M) # Subsample. assert num_train <= len(ref_suggestions) rs = np.random.RandomState(i) train_samples = rs.choice(len(ref_suggestions), num_train, replace=False) ref_suggestions = [ref_suggestions[i] for i in train_samples] ref_feats = [ref_feats[i] for i in train_samples] ref_scores = [ref_scores[i] for i in train_samples] # Pick actions. action_choices = np.array([rs.choice(len(p), p=np.exp(p)) for p in ref_scores]) suggs, features, features_chosen, generation_logprobs, chosens, observed_rewards = collect_acceptances( ref_suggestions, ref_feats, action_choices) # Fit weights obj = Objective(num_suggestions_offered, features_chosen, generation_logprobs, observed_rewards, M=M, regularization=0.) x0 = ref_weights while True: res = minimize(obj, x0, jac=True, options=dict(disp=True)) assert res.x[0] > 0 if res.nit < 2: print("WARNING: SMALL # OF ITERATIONS, retry", i, num_train, M, repr(res.x)) x0 = rs.standard_normal(NUM_WEIGHTS) x0[0] = 1. 
continue break weights = res.x # Expected reward cers = contextual_expected_reward_samples(weights, suggs, features, chosens, observed_rewards) # Evaluate actual reward mean, sem = actual_expected_reward_from_weights(weights, accept_policy, num_contexts=val_num_contexts) return weights, cers, mean, sem #%% def trial_wrapper(args): import traceback basename, i, num_train, M, scores, policy = args try: res = subsample_trial(i=i, num_train=num_train, ref_scores=scores, M=M, ref_suggestions=ref_suggestions, ref_feats=ref_feats, accept_policy=policy) with open('{}-subsample-trial-{}.pkl'.format(basename, i), 'wb') as f: pickle.dump(dict(args=args, res=res), f, -1) return res except Exception: traceback.print_exc() #%% from joblib import Parallel, delayed def run_subsample_trial(basename, num_trains, Ms, policy): scores = all_action_scores(contexts, ref_suggestions, policy) params = [(num_train, M) for num_train in num_trains for M in Ms] trials = Parallel(n_jobs=-1, verbose=10, backend='multiprocessing')( delayed(trial_wrapper)((basename, i, num_train, M, scores, policy)) for i, (num_train, M) in enumerate(params)) with open(basename+'-subsample_trials.pkl', 'wb') as f: pickle.dump(dict(params=params, trials=trials, baseline_scores=scores), f, -1) NAME = 'longword5' run_subsample_trial( NAME, # num_trains=np.logspace(0, np.log10(1000),10).astype(int),#np.logspace(2, np.log10(2000), 5).astype(int), num_trains=np.linspace(10, 2000, 20).astype(int), Ms=np.ones(10) * 10., policy=longword_policy) #%% def bounded_estimate_reward(cers, orig_num_suggestions, M): qs, ps, rewards = np.array(cers).T importance_ratio = np.exp(ps-qs) return np.sum(rewards * np.minimum(importance_ratio, M)) / orig_num_suggestions #%% def plot_results(data, name): subsample_trial_params = data['params'] subsample_trials = data['trials'] baseline_scores = data['baseline_scores'] processed_data = [] for params, trial_data in zip(subsample_trial_params, subsample_trials): if trial_data is None: continue 
num_train, M = params weights, cers, mean, sem = trial_data processed_data.append(dict( num_train=num_train, M=M, weights=weights, cers=cers, mean=mean, sem=sem, reward_estimate=bounded_estimate_reward(cers, M=M, orig_num_suggestions=num_train))) df = pd.DataFrame(processed_data) with util.fig(f"{name}_simulation_reward_by_training_set_size"): sns.set_style('whitegrid') for color, (M, data) in zip(sns.color_palette(), df.groupby('M')): if M < 3: continue data_mean = data.groupby('num_train').mean().reset_index() data_sem = data.groupby('num_train').sem().reset_index() plt.errorbar(data_mean['num_train'], data_mean['reward_estimate'], yerr=data_sem['reward_estimate'], linestyle=':', label='counterfactual estimate from training'.format(M), color=color) plt.errorbar(data_mean['num_train'], data_mean['mean'], yerr=data_sem['mean'], label='actual performance on testing'.format(M), color=color) orig_reward = np.mean(np.sum(act_num_words * np.exp(baseline_scores), axis=1)) plt.hlines(orig_reward, *plt.xlim(), label='logging policy $h_0$') plt.legend(loc='best') plt.xlabel("# training samples") plt.ylabel("Reward (# words accepted per suggestion)") plt.ylim([0, 4]) plt.tight_layout() d = pickle.load(open(f'{NAME}-subsample_trials.pkl', 'rb')) plot_results(d, NAME) #%% Try varying: individual variation in propensity to accept, varying M, variyng amount of data
davidyezsetz/kuma
refs/heads/master
vendor/packages/Babel/babel/messages/tests/data/project/file2.py
63
# -*- coding: utf-8 -*- # file2.py for tests from gettext import ngettext def foo(): # Note: This will have the TRANSLATOR: tag but shouldn't # be included on the extracted stuff print ngettext('foobar', 'foobars', 1)
Stryn/citationhunt
refs/heads/fin
chstrings/__init__.py
1
import flask import os import json def _link(url, title): return flask.Markup( '<a target="_blank" href=%s>%s</a>' % (url, title)) def _preprocess_variables(config, strings): strings['in_page'] = \ flask.Markup(strings['in_page']) % _link('%s', '%s') if config.lead_section_policy_link: strings['lead_section_hint'] = \ flask.Markup(strings['lead_section_hint']) % _link( config.lead_section_policy_link, config.lead_section_policy_link_title) else: strings['lead_section_hint'] = '' beginners_hint_link = _link( config.beginners_link, config.beginners_link_title) strings['beginners_hint'] = \ flask.Markup(strings['beginners_hint']) % beginners_hint_link if '404' not in config.flagged_off: strings['page_not_found_text'] = \ flask.Markup(strings['page_not_found_text']) % _link( config.lang_code, 'Citation Hunt') strings.setdefault('instructions_goal', '') strings.setdefault('instructions_details', '') if strings['instructions_details']: strings['instructions_details'] = flask.Markup( strings['instructions_details']) % ( flask.Markup('<b>' + strings['button_wikilink'] + '</b>'), flask.Markup('<b>' + strings['button_next'] + '</b>'), beginners_hint_link) strings.setdefault('footer', '') if strings['footer']: # We replace the URLs in the template itself strings['footer'] = flask.Markup(strings['footer']) % ( _link('%s', 'Tools Labs'), _link('%s', 'translatewiki.net')) return strings def get_localized_strings(config, lang_code): strings_dir = os.path.dirname(__file__) strings = json.load(file(os.path.join(strings_dir, lang_code + '.json'))) return _preprocess_variables(config, strings)
areski/django
refs/heads/master
django/contrib/staticfiles/apps.py
473
from django.apps import AppConfig from django.utils.translation import ugettext_lazy as _ class StaticFilesConfig(AppConfig): name = 'django.contrib.staticfiles' verbose_name = _("Static Files")
siosio/intellij-community
refs/heads/master
python/testData/inspections/PyUnresolvedReferencesInspection/comprehensionScope27.py
83
xs = [x for x in range(10)] x #pass
kohout/djangocms-getaweb-personlist
refs/heads/master
djangocms_personlist/cms_app.py
1
from cms.app_base import CMSApp from cms.apphook_pool import apphook_pool from django.utils.translation import ugettext_lazy as _ from .menu import TeamsMenu class TeamApp(CMSApp): name = _('Team Module') urls = ['djangocms_personlist.urls'] app_name = 'khw-team' menus = [TeamsMenu] apphook_pool.register(TeamApp)
chyeh727/django
refs/heads/master
tests/template_tests/syntax_tests/test_list_index.py
521
"""Tests for the ``{{ var.N }}`` numeric-index template variable syntax."""

from django.test import SimpleTestCase

from ..utils import setup


class ListIndexTests(SimpleTestCase):
    # NOTE(review): the ``setup`` decorator appears to register the given
    # templates and provide ``self.engine`` — confirm against ..utils.

    @setup({'list-index01': '{{ var.1 }}'})
    def test_list_index01(self):
        """
        List-index syntax allows a template to access a certain item of a
        subscriptable object.
        """
        output = self.engine.render_to_string('list-index01', {'var': ['first item', 'second item']})
        self.assertEqual(output, 'second item')

    @setup({'list-index02': '{{ var.5 }}'})
    def test_list_index02(self):
        """
        Fail silently when the list index is out of range.
        """
        output = self.engine.render_to_string('list-index02', {'var': ['first item', 'second item']})
        # string_if_invalid controls what an unresolvable lookup renders as.
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')

    @setup({'list-index03': '{{ var.1 }}'})
    def test_list_index03(self):
        """
        Fail silently when the variable is not subscriptable (None).
        """
        output = self.engine.render_to_string('list-index03', {'var': None})
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')

    @setup({'list-index04': '{{ var.1 }}'})
    def test_list_index04(self):
        """
        Fail silently when variable is a dict without the specified key.
        """
        output = self.engine.render_to_string('list-index04', {'var': {}})
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')

    @setup({'list-index05': '{{ var.1 }}'})
    def test_list_index05(self):
        """
        Dictionary lookup wins out when dict's key is a string.
        """
        output = self.engine.render_to_string('list-index05', {'var': {'1': "hello"}})
        self.assertEqual(output, 'hello')

    @setup({'list-index06': '{{ var.1 }}'})
    def test_list_index06(self):
        """
        But list-index lookup wins out when dict's key is an int, which
        behind the scenes is really a dictionary lookup (for a dict)
        after converting the key to an int.
        """
        output = self.engine.render_to_string('list-index06', {"var": {1: "hello"}})
        self.assertEqual(output, 'hello')

    @setup({'list-index07': '{{ var.1 }}'})
    def test_list_index07(self):
        """
        Dictionary lookup wins out when there is a string and int version of
        the key.
        """
        output = self.engine.render_to_string('list-index07', {"var": {'1': "hello", 1: "world"}})
        self.assertEqual(output, 'hello')
MicroWorldwide/namebench
refs/heads/master
nb_third_party/dns/rdtypes/ANY/RP.py
248
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import dns.exception
import dns.rdata
import dns.name


class RP(dns.rdata.Rdata):
    """RP record

    @ivar mbox: The responsible person's mailbox
    @type mbox: dns.name.Name object
    @ivar txt: The owner name of a node with TXT records, or the root name
    if no TXT records are associated with this RP.
    @type txt: dns.name.Name object
    @see: RFC 1183"""

    __slots__ = ['mbox', 'txt']

    def __init__(self, rdclass, rdtype, mbox, txt):
        super(RP, self).__init__(rdclass, rdtype)
        self.mbox = mbox
        self.txt = txt

    def to_text(self, origin=None, relativize=True, **kw):
        # Presentation format: "<mbox> <txt>", both names rendered relative
        # to origin when relativize is set.
        mbox = self.mbox.choose_relativity(origin, relativize)
        txt = self.txt.choose_relativity(origin, relativize)
        return "%s %s" % (str(mbox), str(txt))

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Parse "<mbox> <txt>" from the tokenizer, then normalize the
        # names' relativity before constructing the rdata.
        mbox = tok.get_name()
        txt = tok.get_name()
        mbox = mbox.choose_relativity(origin, relativize)
        txt = txt.choose_relativity(origin, relativize)
        tok.get_eol()
        return cls(rdclass, rdtype, mbox, txt)

    # Python 2-era idiom: classmethod registration without decorator syntax.
    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # Both names are written without a compression map (second arg None).
        self.mbox.to_wire(file, None, origin)
        self.txt.to_wire(file, None, origin)

    def to_digestable(self, origin = None):
        return self.mbox.to_digestable(origin) + \
            self.txt.to_digestable(origin)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        # Parse the mbox name; cused is the number of wire bytes consumed.
        (mbox, cused) = dns.name.from_wire(wire[: current + rdlen], current)
        current += cused
        rdlen -= cused
        # There must be bytes left over for the txt name.
        if rdlen <= 0:
            raise dns.exception.FormError
        # The txt name must consume exactly the remaining rdata bytes.
        (txt, cused) = dns.name.from_wire(wire[: current + rdlen], current)
        if cused != rdlen:
            raise dns.exception.FormError
        if not origin is None:
            mbox = mbox.relativize(origin)
            txt = txt.relativize(origin)
        return cls(rdclass, rdtype, mbox, txt)

    from_wire = classmethod(from_wire)

    def choose_relativity(self, origin = None, relativize = True):
        # Mutates in place: rewrites both names relative/absolute to origin.
        self.mbox = self.mbox.choose_relativity(origin, relativize)
        self.txt = self.txt.choose_relativity(origin, relativize)

    def _cmp(self, other):
        # Python 2 only (uses the builtin cmp): order by mbox first, then
        # by txt as a tie-breaker.
        v = cmp(self.mbox, other.mbox)
        if v == 0:
            v = cmp(self.txt, other.txt)
        return v
elviejo79/scrum_developer
refs/heads/master
practicas/lettuce/myCalculatorApp/tests/features/steps.py
1
"""Lettuce step definitions for the calculator feature tests."""

from lettuce import *
from nose.tools import assert_equals
from calculator import Calculator


@step(u'Given: I am using the calculator')
def i_am_using_the_calculator(step):
    # Shared Calculator instance used by all subsequent steps via `world`.
    print ('Attempting to use calculator...')
    world.calc = Calculator()


#Scenario: Calculate 2 plus 2 on our calculator (verbose)
@step('Given: I have the first number (\d+)')
def have_the_number(step, x):
    world.number1 = int(x)

# NOTE(review): redefines have_the_number (and shadows the one above at
# module level); lettuce registers steps at decoration time, so both step
# patterns appear to remain registered — confirm before renaming.
@step('And: I have the second number (\d+)')
def have_the_number(step, y):
    world.number2 = int(y)

@step(u'When: I input (\d+) add (\d+)')
def i_input_add(step, x, y):
    # Uses the numbers stashed by the two steps above, not the regex groups.
    world.result = world.calc.add(world.number1 , world.number2)

@step(u'Then: I should see (\d+)')
def I_should_see(step, expected_result):
    actual_result = world.result
    assert_equals(int(expected_result), actual_result)


#Scenario: Calculate 2 plus 2 on our calculator
@step(u'I input "([^"]*)" add "([^"]*)"')
def given_i_input_group1_add_group1(step, x, y):
    world.result = world.calc.add(int(x), int(y))

@step(u'I should see "([^"]+)"')
def result(step, expected_result):
    actual_result = world.result
    assert_equals(int(expected_result), actual_result)


#Scenario: Calculate 1 minsinweeks on our calculator
@step('Given: I have the week number (\d+)')
def have_the_number(step, x):
    # NOTE(review): third redefinition of have_the_number in this module.
    world.number = int(x)

@step(u'When: I input (\d+) minsinweeks')
def i_input_add(step, x):
    # NOTE(review): redefines i_input_add defined earlier.
    world.result = world.calc.minsinweeks(world.number)

# NOTE(review): duplicate step pattern u'Then: I should see (\d+)' —
# identical to the one registered above; likely copy-paste.
@step(u'Then: I should see (\d+)')
def I_should_see(step, expected_result):
    actual_result = world.result
    assert_equals(int(expected_result), actual_result)


#Scenario: Pythagorean theorem
@step(u'Given: Un triangulo rectangulo')
def given_un_triangulo_rectangulo(step):
    assert True;

@step(u'When: Primer cateto es "(\d+)" y el segundo es "(\d+)"')
def when_primer_cateto_es_group1_y_el_segundo_es_group2(step, a, b):
    # Stash both legs of the right triangle for the Then step.
    world.a = int(a)
    world.b = int(b)
    assert True

@step(u'Then: Entonces la hipotenusa es "(\d+)"')
def then_entonces_la_hipotenusa_es_group1(step, c):
    assert int(c) == world.calc.teorema_de_pitagoras(world.a,world.b), 'calculo mal la hipotenusa'
nilmini20s/gem5-2016-08-13
refs/heads/master
src/mem/slicc/ast/OperatorExprAST.py
30
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood # Copyright (c) 2009 The Hewlett-Packard Development Company # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from slicc.ast.ExprAST import ExprAST from slicc.symbols import Type class InfixOperatorExprAST(ExprAST): def __init__(self, slicc, left, op, right): super(InfixOperatorExprAST, self).__init__(slicc) self.left = left self.op = op self.right = right def __repr__(self): return "[InfixExpr: %r %s %r]" % (self.left, self.op, self.right) def generate(self, code): lcode = self.slicc.codeFormatter() rcode = self.slicc.codeFormatter() ltype = self.left.generate(lcode) rtype = self.right.generate(rcode) # Figure out what the input and output types should be if self.op in ("==", "!=", ">=", "<=", ">", "<"): output = "bool" if (ltype != rtype): self.error("Type mismatch: left and right operands of " + "operator '%s' must be the same type. " + "left: '%s', right: '%s'", self.op, ltype, rtype) else: expected_types = [] output = None if self.op in ("&&", "||"): # boolean inputs and output expected_types = [("bool", "bool", "bool")] elif self.op in ("<<", ">>"): expected_types = [("int", "int", "int"), ("Cycles", "int", "Cycles")] elif self.op in ("+", "-", "*", "/"): expected_types = [("int", "int", "int"), ("Cycles", "Cycles", "Cycles"), ("Tick", "Tick", "Tick"), ("Cycles", "int", "Cycles"), ("Scalar", "int", "Scalar"), ("int", "bool", "int"), ("bool", "int", "int"), ("int", "Cycles", "Cycles")] else: self.error("No operator matched with {0}!" .format(self.op)) for expected_type in expected_types: left_input_type = self.symtab.find(expected_type[0], Type) right_input_type = self.symtab.find(expected_type[1], Type) if (left_input_type == ltype) and (right_input_type == rtype): output = expected_type[2] if output == None: self.error("Type mismatch: operands ({0}, {1}) for operator " \ "'{2}' failed to match with the expected types" . 
format(ltype, rtype, self.op)) # All is well fix = code.nofix() code("($lcode ${{self.op}} $rcode)") code.fix(fix) return self.symtab.find(output, Type) class PrefixOperatorExprAST(ExprAST): def __init__(self, slicc, op, operand): super(PrefixOperatorExprAST, self).__init__(slicc) self.op = op self.operand = operand def __repr__(self): return "[PrefixExpr: %s %r]" % (self.op, self.operand) def generate(self, code): opcode = self.slicc.codeFormatter() optype = self.operand.generate(opcode) # Figure out what the input and output types should be opmap = {"!": "bool", "-": "int", "++": "Scalar"} if self.op in opmap: output = opmap[self.op] type_in_symtab = self.symtab.find(opmap[self.op], Type) if (optype != type_in_symtab): self.error("Type mismatch: right operand of " + "unary operator '%s' must be of type '%s'. ", self.op, type_in_symtab) else: self.error("Invalid prefix operator '%s'", self.op) # All is well fix = code.nofix() code("(${{self.op}} $opcode)") code.fix(fix) return self.symtab.find(output, Type)
skoslowski/gnuradio
refs/heads/master
grc/core/blocks/dummy.py
1
# Copyright 2016 Free Software Foundation, Inc. # This file is part of GNU Radio # # SPDX-License-Identifier: GPL-2.0-or-later # from __future__ import absolute_import from . import Block, register_build_in from ._build import build_params @register_build_in class DummyBlock(Block): is_dummy_block = True label = "Missing Block" key = "_dummy" def __init__(self, parent, missing_block_id, parameters, **_): self.key = missing_block_id self.parameters_data = build_params([], False, False, self.flags, self.key) super(DummyBlock, self).__init__(parent=parent) param_factory = self.parent_platform.make_param for param_id in parameters: self.params.setdefault( param_id, param_factory(parent=self, id=param_id, dtype="string") ) def is_valid(self): return False @property def enabled(self): return False def add_missing_port(self, port_id, direction): port = self.parent_platform.make_port( parent=self, direction=direction, id=port_id, name="?", dtype="", ) if port.is_source: self.sources.append(port) else: self.sinks.append(port) return port
Edraak/edx-platform
refs/heads/master
common/test/acceptance/pages/lms/course_wiki.py
146
""" Wiki tab on courses """ from .course_page import CoursePage from ...pages.studio.utils import type_in_codemirror class CourseWikiPage(CoursePage): """ Course wiki navigation and objects. """ url_path = "wiki" def is_browser_on_page(self): """ Browser is on the wiki page if the wiki breadcrumb is present """ return self.q(css='.breadcrumb').present def open_editor(self): """ Replace content of a wiki article with new content """ edit_button = self.q(css='.fa-pencil') edit_button.click() @property def article_name(self): """ Return the name of the article """ return str(self.q(css='.main-article h1').text[0]) class CourseWikiEditPage(CoursePage): """ Editor page """ def __init__(self, browser, course_id, course_info): """ Course ID is currently of the form "edx/999/2013_Spring" but this format could change. """ super(CourseWikiEditPage, self).__init__(browser, course_id) self.course_id = course_id self.course_info = course_info self.article_name = "{org}.{course_number}.{course_run}".format( org=self.course_info['org'], course_number=self.course_info['number'], course_run=self.course_info['run'] ) @property def url_path(self): """ Construct a URL to the page within the course. """ return "/wiki/" + self.article_name + "/_edit" def is_browser_on_page(self): """ The wiki page editor """ return self.q(css='.CodeMirror-scroll').present def replace_wiki_content(self, content): """ Editor must be open already. This will replace any content in the editor with new content """ type_in_codemirror(self, 0, content) def save_wiki_content(self): """ When the editor is open, click save """ self.q(css='button[name="save"]').click() self.wait_for_element_presence('.alert-success', 'wait for the article to be saved')
bnaul/scikit-learn
refs/heads/master
sklearn/model_selection/_split.py
2
""" The :mod:`sklearn.model_selection._split` module includes classes and functions to split the data based on a preset strategy. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>, # Gael Varoquaux <gael.varoquaux@normalesup.org>, # Olivier Grisel <olivier.grisel@ensta.org> # Raghav RV <rvraghav93@gmail.com> # License: BSD 3 clause from collections.abc import Iterable import warnings from itertools import chain, combinations from math import ceil, floor import numbers from abc import ABCMeta, abstractmethod from inspect import signature import numpy as np from scipy.special import comb from ..utils import indexable, check_random_state, _safe_indexing from ..utils import _approximate_mode from ..utils.validation import _num_samples, column_or_1d from ..utils.validation import check_array from ..utils.validation import _deprecate_positional_args from ..utils.multiclass import type_of_target from ..base import _pprint __all__ = ['BaseCrossValidator', 'KFold', 'GroupKFold', 'LeaveOneGroupOut', 'LeaveOneOut', 'LeavePGroupsOut', 'LeavePOut', 'RepeatedStratifiedKFold', 'RepeatedKFold', 'ShuffleSplit', 'GroupShuffleSplit', 'StratifiedKFold', 'StratifiedShuffleSplit', 'PredefinedSplit', 'train_test_split', 'check_cv'] class BaseCrossValidator(metaclass=ABCMeta): """Base class for all cross-validators Implementations must define `_iter_test_masks` or `_iter_test_indices`. """ def split(self, X, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like of shape (n_samples,) The target variable for supervised learning problems. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. 
test : ndarray The testing set indices for that split. """ X, y, groups = indexable(X, y, groups) indices = np.arange(_num_samples(X)) for test_index in self._iter_test_masks(X, y, groups): train_index = indices[np.logical_not(test_index)] test_index = indices[test_index] yield train_index, test_index # Since subclasses must implement either _iter_test_masks or # _iter_test_indices, neither can be abstract. def _iter_test_masks(self, X=None, y=None, groups=None): """Generates boolean masks corresponding to test sets. By default, delegates to _iter_test_indices(X, y, groups) """ for test_index in self._iter_test_indices(X, y, groups): test_mask = np.zeros(_num_samples(X), dtype=bool) test_mask[test_index] = True yield test_mask def _iter_test_indices(self, X=None, y=None, groups=None): """Generates integer indices corresponding to test sets.""" raise NotImplementedError @abstractmethod def get_n_splits(self, X=None, y=None, groups=None): """Returns the number of splitting iterations in the cross-validator""" def __repr__(self): return _build_repr(self) class LeaveOneOut(BaseCrossValidator): """Leave-One-Out cross-validator Provides train/test indices to split data in train/test sets. Each sample is used once as a test set (singleton) while the remaining samples form the training set. Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and ``LeavePOut(p=1)`` where ``n`` is the number of samples. Due to the high number of test sets (which is the same as the number of samples) this cross-validation method can be very costly. For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit` or :class:`StratifiedKFold`. Read more in the :ref:`User Guide <cross_validation>`. 
Examples -------- >>> import numpy as np >>> from sklearn.model_selection import LeaveOneOut >>> X = np.array([[1, 2], [3, 4]]) >>> y = np.array([1, 2]) >>> loo = LeaveOneOut() >>> loo.get_n_splits(X) 2 >>> print(loo) LeaveOneOut() >>> for train_index, test_index in loo.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) TRAIN: [1] TEST: [0] [[3 4]] [[1 2]] [2] [1] TRAIN: [0] TEST: [1] [[1 2]] [[3 4]] [1] [2] See also -------- LeaveOneGroupOut For splitting the data according to explicit, domain-specific stratification of the dataset. GroupKFold: K-fold iterator variant with non-overlapping groups. """ def _iter_test_indices(self, X, y=None, groups=None): n_samples = _num_samples(X) if n_samples <= 1: raise ValueError( 'Cannot perform LeaveOneOut with n_samples={}.'.format( n_samples) ) return range(n_samples) def get_n_splits(self, X, y=None, groups=None): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator. """ if X is None: raise ValueError("The 'X' parameter should not be None.") return _num_samples(X) class LeavePOut(BaseCrossValidator): """Leave-P-Out cross-validator Provides train/test indices to split data in train/test sets. This results in testing on all distinct samples of size p, while the remaining n - p samples form the training set in each iteration. Note: ``LeavePOut(p)`` is NOT equivalent to ``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets. 
Due to the high number of iterations which grows combinatorically with the number of samples this cross-validation method can be very costly. For large datasets one should favor :class:`KFold`, :class:`StratifiedKFold` or :class:`ShuffleSplit`. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- p : int Size of the test sets. Must be strictly less than the number of samples. Examples -------- >>> import numpy as np >>> from sklearn.model_selection import LeavePOut >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 3, 4]) >>> lpo = LeavePOut(2) >>> lpo.get_n_splits(X) 6 >>> print(lpo) LeavePOut(p=2) >>> for train_index, test_index in lpo.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [1 3] TEST: [0 2] TRAIN: [1 2] TEST: [0 3] TRAIN: [0 3] TEST: [1 2] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 1] TEST: [2 3] """ def __init__(self, p): self.p = p def _iter_test_indices(self, X, y=None, groups=None): n_samples = _num_samples(X) if n_samples <= self.p: raise ValueError( 'p={} must be strictly less than the number of ' 'samples={}'.format(self.p, n_samples) ) for combination in combinations(range(n_samples), self.p): yield np.array(combination) def get_n_splits(self, X, y=None, groups=None): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. 
""" if X is None: raise ValueError("The 'X' parameter should not be None.") return int(comb(_num_samples(X), self.p, exact=True)) class _BaseKFold(BaseCrossValidator, metaclass=ABCMeta): """Base class for KFold, GroupKFold, and StratifiedKFold""" @abstractmethod @_deprecate_positional_args def __init__(self, n_splits, *, shuffle, random_state): if not isinstance(n_splits, numbers.Integral): raise ValueError('The number of folds must be of Integral type. ' '%s of type %s was passed.' % (n_splits, type(n_splits))) n_splits = int(n_splits) if n_splits <= 1: raise ValueError( "k-fold cross-validation requires at least one" " train/test split by setting n_splits=2 or more," " got n_splits={0}.".format(n_splits)) if not isinstance(shuffle, bool): raise TypeError("shuffle must be True or False;" " got {0}".format(shuffle)) if not shuffle and random_state is not None: # None is the default raise ValueError( 'Setting a random_state has no effect since shuffle is ' 'False. You should leave ' 'random_state to its default (None), or set shuffle=True.', ) self.n_splits = n_splits self.shuffle = shuffle self.random_state = random_state def split(self, X, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. 
""" X, y, groups = indexable(X, y, groups) n_samples = _num_samples(X) if self.n_splits > n_samples: raise ValueError( ("Cannot have number of splits n_splits={0} greater" " than the number of samples: n_samples={1}.") .format(self.n_splits, n_samples)) for train, test in super().split(X, y, groups): yield train, test def get_n_splits(self, X=None, y=None, groups=None): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator. """ return self.n_splits class KFold(_BaseKFold): """K-Folds cross-validator Provides train/test indices to split data in train/test sets. Split dataset into k consecutive folds (without shuffling by default). Each fold is then used once as a validation while the k - 1 remaining folds form the training set. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n_splits : int, default=5 Number of folds. Must be at least 2. .. versionchanged:: 0.22 ``n_splits`` default value changed from 3 to 5. shuffle : bool, default=False Whether to shuffle the data before splitting into batches. Note that the samples within each split will not be shuffled. random_state : int or RandomState instance, default=None When `shuffle` is True, `random_state` affects the ordering of the indices, which controls the randomness of each fold. Otherwise, this parameter has no effect. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. 
Examples -------- >>> import numpy as np >>> from sklearn.model_selection import KFold >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([1, 2, 3, 4]) >>> kf = KFold(n_splits=2) >>> kf.get_n_splits(X) 2 >>> print(kf) KFold(n_splits=2, random_state=None, shuffle=False) >>> for train_index, test_index in kf.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [0 1] TEST: [2 3] Notes ----- The first ``n_samples % n_splits`` folds have size ``n_samples // n_splits + 1``, other folds have size ``n_samples // n_splits``, where ``n_samples`` is the number of samples. Randomized CV splitters may return different results for each call of split. You can make the results identical by setting `random_state` to an integer. See also -------- StratifiedKFold Takes group information into account to avoid building folds with imbalanced class distributions (for binary or multiclass classification tasks). GroupKFold: K-fold iterator variant with non-overlapping groups. RepeatedKFold: Repeats K-Fold n times. """ @_deprecate_positional_args def __init__(self, n_splits=5, *, shuffle=False, random_state=None): super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state) def _iter_test_indices(self, X, y=None, groups=None): n_samples = _num_samples(X) indices = np.arange(n_samples) if self.shuffle: check_random_state(self.random_state).shuffle(indices) n_splits = self.n_splits fold_sizes = np.full(n_splits, n_samples // n_splits, dtype=int) fold_sizes[:n_samples % n_splits] += 1 current = 0 for fold_size in fold_sizes: start, stop = current, current + fold_size yield indices[start:stop] current = stop class GroupKFold(_BaseKFold): """K-fold iterator variant with non-overlapping groups. 
The same group will not appear in two different folds (the number of distinct groups has to be at least equal to the number of folds). The folds are approximately balanced in the sense that the number of distinct groups is approximately the same in each fold. Parameters ---------- n_splits : int, default=5 Number of folds. Must be at least 2. .. versionchanged:: 0.22 ``n_splits`` default value changed from 3 to 5. Examples -------- >>> import numpy as np >>> from sklearn.model_selection import GroupKFold >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 3, 4]) >>> groups = np.array([0, 0, 2, 2]) >>> group_kfold = GroupKFold(n_splits=2) >>> group_kfold.get_n_splits(X, y, groups) 2 >>> print(group_kfold) GroupKFold(n_splits=2) >>> for train_index, test_index in group_kfold.split(X, y, groups): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) ... TRAIN: [0 1] TEST: [2 3] [[1 2] [3 4]] [[5 6] [7 8]] [1 2] [3 4] TRAIN: [2 3] TEST: [0 1] [[5 6] [7 8]] [[1 2] [3 4]] [3 4] [1 2] See also -------- LeaveOneGroupOut For splitting the data according to explicit domain-specific stratification of the dataset. """ def __init__(self, n_splits=5): super().__init__(n_splits, shuffle=False, random_state=None) def _iter_test_indices(self, X, y, groups): if groups is None: raise ValueError("The 'groups' parameter should not be None.") groups = check_array(groups, ensure_2d=False, dtype=None) unique_groups, groups = np.unique(groups, return_inverse=True) n_groups = len(unique_groups) if self.n_splits > n_groups: raise ValueError("Cannot have number of splits n_splits=%d greater" " than the number of groups: %d." 
% (self.n_splits, n_groups)) # Weight groups by their number of occurrences n_samples_per_group = np.bincount(groups) # Distribute the most frequent groups first indices = np.argsort(n_samples_per_group)[::-1] n_samples_per_group = n_samples_per_group[indices] # Total weight of each fold n_samples_per_fold = np.zeros(self.n_splits) # Mapping from group index to fold index group_to_fold = np.zeros(len(unique_groups)) # Distribute samples by adding the largest weight to the lightest fold for group_index, weight in enumerate(n_samples_per_group): lightest_fold = np.argmin(n_samples_per_fold) n_samples_per_fold[lightest_fold] += weight group_to_fold[indices[group_index]] = lightest_fold indices = group_to_fold[groups] for f in range(self.n_splits): yield np.where(indices == f)[0] def split(self, X, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ return super().split(X, y, groups) class StratifiedKFold(_BaseKFold): """Stratified K-Folds cross-validator Provides train/test indices to split data in train/test sets. This cross-validation object is a variation of KFold that returns stratified folds. The folds are made by preserving the percentage of samples for each class. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n_splits : int, default=5 Number of folds. Must be at least 2. .. versionchanged:: 0.22 ``n_splits`` default value changed from 3 to 5. 
shuffle : bool, default=False Whether to shuffle each class's samples before splitting into batches. Note that the samples within each split will not be shuffled. random_state : int or RandomState instance, default=None When `shuffle` is True, `random_state` affects the ordering of the indices, which controls the randomness of each fold for each class. Otherwise, leave `random_state` as `None`. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Examples -------- >>> import numpy as np >>> from sklearn.model_selection import StratifiedKFold >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> skf = StratifiedKFold(n_splits=2) >>> skf.get_n_splits(X, y) 2 >>> print(skf) StratifiedKFold(n_splits=2, random_state=None, shuffle=False) >>> for train_index, test_index in skf.split(X, y): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 3] TEST: [0 2] TRAIN: [0 2] TEST: [1 3] Notes ----- The implementation is designed to: * Generate test sets such that all contain the same distribution of classes, or as close as possible. * Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to ``y = [1, 0]`` should not change the indices generated. * Preserve order dependencies in the dataset ordering, when ``shuffle=False``: all samples from class k in some test set were contiguous in y, or separated in y by samples from classes other than k. * Generate test sets where the smallest and largest differ by at most one sample. .. versionchanged:: 0.22 The previous implementation did not follow the last constraint. See also -------- RepeatedStratifiedKFold: Repeats Stratified K-Fold n times. 
""" @_deprecate_positional_args def __init__(self, n_splits=5, *, shuffle=False, random_state=None): super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state) def _make_test_folds(self, X, y=None): rng = check_random_state(self.random_state) y = np.asarray(y) type_of_target_y = type_of_target(y) allowed_target_types = ('binary', 'multiclass') if type_of_target_y not in allowed_target_types: raise ValueError( 'Supported target types are: {}. Got {!r} instead.'.format( allowed_target_types, type_of_target_y)) y = column_or_1d(y) _, y_idx, y_inv = np.unique(y, return_index=True, return_inverse=True) # y_inv encodes y according to lexicographic order. We invert y_idx to # map the classes so that they are encoded by order of appearance: # 0 represents the first label appearing in y, 1 the second, etc. _, class_perm = np.unique(y_idx, return_inverse=True) y_encoded = class_perm[y_inv] n_classes = len(y_idx) y_counts = np.bincount(y_encoded) min_groups = np.min(y_counts) if np.all(self.n_splits > y_counts): raise ValueError("n_splits=%d cannot be greater than the" " number of members in each class." % (self.n_splits)) if self.n_splits > min_groups: warnings.warn(("The least populated class in y has only %d" " members, which is less than n_splits=%d." % (min_groups, self.n_splits)), UserWarning) # Determine the optimal number of samples from each class in each fold, # using round robin over the sorted y. (This can be done direct from # counts, but that code is unreadable.) y_order = np.sort(y_encoded) allocation = np.asarray( [np.bincount(y_order[i::self.n_splits], minlength=n_classes) for i in range(self.n_splits)]) # To maintain the data order dependencies as best as possible within # the stratification constraint, we assign samples from each class in # blocks (and then mess that up when shuffle=True). 
test_folds = np.empty(len(y), dtype='i') for k in range(n_classes): # since the kth column of allocation stores the number of samples # of class k in each test set, this generates blocks of fold # indices corresponding to the allocation for class k. folds_for_class = np.arange(self.n_splits).repeat(allocation[:, k]) if self.shuffle: rng.shuffle(folds_for_class) test_folds[y_encoded == k] = folds_for_class return test_folds def _iter_test_masks(self, X, y=None, groups=None): test_folds = self._make_test_folds(X, y) for i in range(self.n_splits): yield test_folds == i def split(self, X, y, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. Note that providing ``y`` is sufficient to generate the splits and hence ``np.zeros(n_samples)`` may be used as a placeholder for ``X`` instead of actual training data. y : array-like of shape (n_samples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. Notes ----- Randomized CV splitters may return different results for each call of split. You can make the results identical by setting `random_state` to an integer. """ y = check_array(y, ensure_2d=False, dtype=None) return super().split(X, y, groups) class TimeSeriesSplit(_BaseKFold): """Time Series cross-validator .. versionadded:: 0.18 Provides train/test indices to split time series data samples that are observed at fixed time intervals, in train/test sets. In each split, test indices must be higher than before, and thus shuffling in cross validator is inappropriate. This cross-validation object is a variation of :class:`KFold`. 
In the kth split, it returns first k folds as train set and the (k+1)th fold as test set. Note that unlike standard cross-validation methods, successive training sets are supersets of those that come before them. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n_splits : int, default=5 Number of splits. Must be at least 2. .. versionchanged:: 0.22 ``n_splits`` default value changed from 3 to 5. max_train_size : int, default=None Maximum size for a single training set. test_size : int, default=None Used to limit the size of the test set. Defaults to ``n_samples // (n_splits + 1)``, which is the maximum allowed value with ``gap=0``. gap : int, default=0 Number of samples to exclude from the end of each train set before the test set. Examples -------- >>> import numpy as np >>> from sklearn.model_selection import TimeSeriesSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([1, 2, 3, 4, 5, 6]) >>> tscv = TimeSeriesSplit() >>> print(tscv) TimeSeriesSplit(gap=0, max_train_size=None, n_splits=5, test_size=None) >>> for train_index, test_index in tscv.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [0] TEST: [1] TRAIN: [0 1] TEST: [2] TRAIN: [0 1 2] TEST: [3] TRAIN: [0 1 2 3] TEST: [4] TRAIN: [0 1 2 3 4] TEST: [5] >>> # Fix test_size to 2 with 12 samples >>> X = np.random.randn(12, 2) >>> y = np.random.randint(0, 2, 12) >>> tscv = TimeSeriesSplit(n_splits=3, test_size=2) >>> for train_index, test_index in tscv.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... 
y_train, y_test = y[train_index], y[test_index] TRAIN: [0 1 2 3 4 5] TEST: [6 7] TRAIN: [0 1 2 3 4 5 6 7] TEST: [8 9] TRAIN: [0 1 2 3 4 5 6 7 8 9] TEST: [10 11] >>> # Add in a 2 period gap >>> tscv = TimeSeriesSplit(n_splits=3, test_size=2, gap=2) >>> for train_index, test_index in tscv.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [0 1 2 3] TEST: [6 7] TRAIN: [0 1 2 3 4 5] TEST: [8 9] TRAIN: [0 1 2 3 4 5 6 7] TEST: [10 11] Notes ----- The training set has size ``i * n_samples // (n_splits + 1) + n_samples % (n_splits + 1)`` in the ``i``th split, with a test set of size ``n_samples//(n_splits + 1)`` by default, where ``n_samples`` is the number of samples. """ @_deprecate_positional_args def __init__(self, n_splits=5, *, max_train_size=None, test_size=None, gap=0): super().__init__(n_splits, shuffle=False, random_state=None) self.max_train_size = max_train_size self.test_size = test_size self.gap = gap def split(self, X, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like of shape (n_samples,) Always ignored, exists for compatibility. groups : array-like of shape (n_samples,) Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. 
""" X, y, groups = indexable(X, y, groups) n_samples = _num_samples(X) n_splits = self.n_splits n_folds = n_splits + 1 gap = self.gap test_size = self.test_size if self.test_size is not None \ else n_samples // n_folds # Make sure we have enough samples for the given split parameters if n_folds > n_samples: raise ValueError( (f"Cannot have number of folds={n_folds} greater" f" than the number of samples={n_samples}.")) if n_samples - gap - (test_size * n_splits) <= 0: raise ValueError( (f"Too many splits={n_splits} for number of samples" f"={n_samples} with test_size={test_size} and gap={gap}.")) indices = np.arange(n_samples) test_starts = range(n_samples - n_splits * test_size, n_samples, test_size) for test_start in test_starts: train_end = test_start - gap if self.max_train_size and self.max_train_size < train_end: yield (indices[train_end - self.max_train_size:train_end], indices[test_start:test_start + test_size]) else: yield (indices[:train_end], indices[test_start:test_start + test_size]) class LeaveOneGroupOut(BaseCrossValidator): """Leave One Group Out cross-validator Provides train/test indices to split data according to a third-party provided group. This group information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the groups could be the year of collection of the samples and thus allow for cross-validation against time-based splits. Read more in the :ref:`User Guide <cross_validation>`. Examples -------- >>> import numpy as np >>> from sklearn.model_selection import LeaveOneGroupOut >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 1, 2]) >>> groups = np.array([1, 1, 2, 2]) >>> logo = LeaveOneGroupOut() >>> logo.get_n_splits(X, y, groups) 2 >>> logo.get_n_splits(groups=groups) # 'groups' is always required 2 >>> print(logo) LeaveOneGroupOut() >>> for train_index, test_index in logo.split(X, y, groups): ... print("TRAIN:", train_index, "TEST:", test_index) ... 
X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) TRAIN: [2 3] TEST: [0 1] [[5 6] [7 8]] [[1 2] [3 4]] [1 2] [1 2] TRAIN: [0 1] TEST: [2 3] [[1 2] [3 4]] [[5 6] [7 8]] [1 2] [1 2] """ def _iter_test_masks(self, X, y, groups): if groups is None: raise ValueError("The 'groups' parameter should not be None.") # We make a copy of groups to avoid side-effects during iteration groups = check_array(groups, copy=True, ensure_2d=False, dtype=None) unique_groups = np.unique(groups) if len(unique_groups) <= 1: raise ValueError( "The groups parameter contains fewer than 2 unique groups " "(%s). LeaveOneGroupOut expects at least 2." % unique_groups) for i in unique_groups: yield groups == i def get_n_splits(self, X=None, y=None, groups=None): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. This 'groups' parameter must always be specified to calculate the number of splits, though the other parameters can be omitted. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator. """ if groups is None: raise ValueError("The 'groups' parameter should not be None.") groups = check_array(groups, ensure_2d=False, dtype=None) return len(np.unique(groups)) def split(self, X, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. 
groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ return super().split(X, y, groups) class LeavePGroupsOut(BaseCrossValidator): """Leave P Group(s) Out cross-validator Provides train/test indices to split data according to a third-party provided group. This group information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the groups could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between LeavePGroupsOut and LeaveOneGroupOut is that the former builds the test sets with all the samples assigned to ``p`` different values of the groups while the latter uses samples all assigned the same groups. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n_groups : int Number of groups (``p``) to leave out in the test split. Examples -------- >>> import numpy as np >>> from sklearn.model_selection import LeavePGroupsOut >>> X = np.array([[1, 2], [3, 4], [5, 6]]) >>> y = np.array([1, 2, 1]) >>> groups = np.array([1, 2, 3]) >>> lpgo = LeavePGroupsOut(n_groups=2) >>> lpgo.get_n_splits(X, y, groups) 3 >>> lpgo.get_n_splits(groups=groups) # 'groups' is always required 3 >>> print(lpgo) LeavePGroupsOut(n_groups=2) >>> for train_index, test_index in lpgo.split(X, y, groups): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) TRAIN: [2] TEST: [0 1] [[5 6]] [[1 2] [3 4]] [1] [1 2] TRAIN: [1] TEST: [0 2] [[3 4]] [[1 2] [5 6]] [2] [1 1] TRAIN: [0] TEST: [1 2] [[1 2]] [[3 4] [5 6]] [1] [2 1] See also -------- GroupKFold: K-fold iterator variant with non-overlapping groups. 
    """
    def __init__(self, n_groups):
        # ``n_groups`` is the number of distinct groups held out per split.
        self.n_groups = n_groups

    def _iter_test_masks(self, X, y, groups):
        # Yield one boolean test mask per combination of ``n_groups`` groups.
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        # copy=True: the array is indexed repeatedly below; avoid aliasing
        # side-effects on the caller's array.
        groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
        unique_groups = np.unique(groups)
        # Need strictly more unique groups than n_groups, otherwise some
        # split would have an empty training set.
        if self.n_groups >= len(unique_groups):
            raise ValueError(
                "The groups parameter contains fewer than (or equal to) "
                "n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
                "expects that at least n_groups + 1 (%d) unique groups be "
                "present" % (self.n_groups, unique_groups, self.n_groups + 1))
        # Enumerate every subset of ``n_groups`` group indices; each subset
        # defines one test set.
        combi = combinations(range(len(unique_groups)), self.n_groups)
        for indices in combi:
            test_index = np.zeros(_num_samples(X), dtype=bool)
            # Mark all samples belonging to any of the held-out groups.
            for l in unique_groups[np.array(indices)]:
                test_index[groups == l] = True
            yield test_index

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : array-like of shape (n_samples,)
            Group labels for the samples used while splitting the dataset into
            train/test set. This 'groups' parameter must always be specified to
            calculate the number of splits, though the other parameters can be
            omitted.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        groups = check_array(groups, ensure_2d=False, dtype=None)
        # Number of splits is the binomial coefficient
        # C(n_unique_groups, n_groups); exact=True keeps it an integer.
        return int(comb(len(np.unique(groups)), self.n_groups, exact=True))

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ return super().split(X, y, groups) class _RepeatedSplits(metaclass=ABCMeta): """Repeated splits for an arbitrary randomized CV splitter. Repeats splits for cross-validators n times with different randomization in each repetition. Parameters ---------- cv : callable Cross-validator class. n_repeats : int, default=10 Number of times cross-validator needs to be repeated. random_state : int or RandomState instance, default=None Passes `random_state` to the arbitrary repeating cross validator. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. **cvargs : additional params Constructor parameters for cv. Must not contain random_state and shuffle. """ @_deprecate_positional_args def __init__(self, cv, *, n_repeats=10, random_state=None, **cvargs): if not isinstance(n_repeats, numbers.Integral): raise ValueError("Number of repetitions must be of Integral type.") if n_repeats <= 0: raise ValueError("Number of repetitions must be greater than 0.") if any(key in cvargs for key in ('random_state', 'shuffle')): raise ValueError( "cvargs must not contain random_state or shuffle.") self.cv = cv self.n_repeats = n_repeats self.random_state = random_state self.cvargs = cvargs def split(self, X, y=None, groups=None): """Generates indices to split data into training and test set. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like of length n_samples The target variable for supervised learning problems. 
        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        n_repeats = self.n_repeats
        rng = check_random_state(self.random_state)

        for idx in range(n_repeats):
            # The same RandomState instance is passed to every repetition, so
            # each repetition consumes fresh draws from the shared stream and
            # produces a different shuffle.
            cv = self.cv(random_state=rng, shuffle=True,
                         **self.cvargs)
            for train_index, test_index in cv.split(X, y, groups):
                yield train_index, test_index

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.
            ``np.zeros(n_samples)`` may be used as a placeholder.

        y : object
            Always ignored, exists for compatibility.
            ``np.zeros(n_samples)`` may be used as a placeholder.

        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        rng = check_random_state(self.random_state)
        # Instantiate one splitter only to query its per-repetition split
        # count; total splits = per-repetition splits * n_repeats.
        cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)
        return cv.get_n_splits(X, y, groups) * self.n_repeats

    def __repr__(self):
        return _build_repr(self)


class RepeatedKFold(_RepeatedSplits):
    """Repeated K-Fold cross validator.

    Repeats K-Fold n times with different randomization in each repetition.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int, default=5
        Number of folds. Must be at least 2.

    n_repeats : int, default=10
        Number of times cross-validator needs to be repeated.

    random_state : int or RandomState instance, default=None
        Controls the randomness of each repeated cross-validation instance.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
Examples -------- >>> import numpy as np >>> from sklearn.model_selection import RepeatedKFold >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124) >>> for train_index, test_index in rkf.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... TRAIN: [0 1] TEST: [2 3] TRAIN: [2 3] TEST: [0 1] TRAIN: [1 2] TEST: [0 3] TRAIN: [0 3] TEST: [1 2] Notes ----- Randomized CV splitters may return different results for each call of split. You can make the results identical by setting `random_state` to an integer. See also -------- RepeatedStratifiedKFold: Repeats Stratified K-Fold n times. """ @_deprecate_positional_args def __init__(self, *, n_splits=5, n_repeats=10, random_state=None): super().__init__( KFold, n_repeats=n_repeats, random_state=random_state, n_splits=n_splits) class RepeatedStratifiedKFold(_RepeatedSplits): """Repeated Stratified K-Fold cross validator. Repeats Stratified K-Fold n times with different randomization in each repetition. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n_splits : int, default=5 Number of folds. Must be at least 2. n_repeats : int, default=10 Number of times cross-validator needs to be repeated. random_state : int or RandomState instance, default=None Controls the generation of the random states for each repetition. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Examples -------- >>> import numpy as np >>> from sklearn.model_selection import RepeatedStratifiedKFold >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2, ... random_state=36851234) >>> for train_index, test_index in rskf.split(X, y): ... 
print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... TRAIN: [1 2] TEST: [0 3] TRAIN: [0 3] TEST: [1 2] TRAIN: [1 3] TEST: [0 2] TRAIN: [0 2] TEST: [1 3] Notes ----- Randomized CV splitters may return different results for each call of split. You can make the results identical by setting `random_state` to an integer. See also -------- RepeatedKFold: Repeats K-Fold n times. """ @_deprecate_positional_args def __init__(self, *, n_splits=5, n_repeats=10, random_state=None): super().__init__( StratifiedKFold, n_repeats=n_repeats, random_state=random_state, n_splits=n_splits) class BaseShuffleSplit(metaclass=ABCMeta): """Base class for ShuffleSplit and StratifiedShuffleSplit""" @_deprecate_positional_args def __init__(self, n_splits=10, *, test_size=None, train_size=None, random_state=None): self.n_splits = n_splits self.test_size = test_size self.train_size = train_size self.random_state = random_state self._default_test_size = 0.1 def split(self, X, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like of shape (n_samples,) The target variable for supervised learning problems. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. Notes ----- Randomized CV splitters may return different results for each call of split. You can make the results identical by setting `random_state` to an integer. 
        """
        # Convert inputs to indexable, length-consistent structures before
        # delegating index generation to the concrete subclass.
        X, y, groups = indexable(X, y, groups)
        for train, test in self._iter_indices(X, y, groups):
            yield train, test

    @abstractmethod
    def _iter_indices(self, X, y=None, groups=None):
        """Generate (train, test) indices"""

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator

        Parameters
        ----------
        X : object
            Always ignored, exists for compatibility.

        y : object
            Always ignored, exists for compatibility.

        groups : object
            Always ignored, exists for compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        # Unlike group-based splitters, the split count is fixed at
        # construction time and does not depend on the data.
        return self.n_splits

    def __repr__(self):
        return _build_repr(self)


class ShuffleSplit(BaseShuffleSplit):
    """Random permutation cross-validator

    Yields indices to split data into training and test sets.

    Note: contrary to other cross-validation strategies, random splits
    do not guarantee that all folds will be different, although this is
    still very likely for sizeable datasets.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    n_splits : int, default=10
        Number of re-shuffling & splitting iterations.

    test_size : float or int, default=None
        If float, should be between 0.0 and 1.0 and represent the proportion
        of the dataset to include in the test split. If int, represents the
        absolute number of test samples. If None, the value is set to the
        complement of the train size. If ``train_size`` is also None, it will
        be set to 0.1.

    train_size : float or int, default=None
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.

    random_state : int or RandomState instance, default=None
        Controls the randomness of the training and testing indices produced.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
Examples -------- >>> import numpy as np >>> from sklearn.model_selection import ShuffleSplit >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [3, 4], [5, 6]]) >>> y = np.array([1, 2, 1, 2, 1, 2]) >>> rs = ShuffleSplit(n_splits=5, test_size=.25, random_state=0) >>> rs.get_n_splits(X) 5 >>> print(rs) ShuffleSplit(n_splits=5, random_state=0, test_size=0.25, train_size=None) >>> for train_index, test_index in rs.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) TRAIN: [1 3 0 4] TEST: [5 2] TRAIN: [4 0 2 5] TEST: [1 3] TRAIN: [1 2 4 0] TEST: [3 5] TRAIN: [3 4 1 0] TEST: [5 2] TRAIN: [3 5 1 0] TEST: [2 4] >>> rs = ShuffleSplit(n_splits=5, train_size=0.5, test_size=.25, ... random_state=0) >>> for train_index, test_index in rs.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) TRAIN: [1 3 0] TEST: [5 2] TRAIN: [4 0 2] TEST: [1 3] TRAIN: [1 2 4] TEST: [3 5] TRAIN: [3 4 1] TEST: [5 2] TRAIN: [3 5 1] TEST: [2 4] """ @_deprecate_positional_args def __init__(self, n_splits=10, *, test_size=None, train_size=None, random_state=None): super().__init__( n_splits=n_splits, test_size=test_size, train_size=train_size, random_state=random_state) self._default_test_size = 0.1 def _iter_indices(self, X, y=None, groups=None): n_samples = _num_samples(X) n_train, n_test = _validate_shuffle_split( n_samples, self.test_size, self.train_size, default_test_size=self._default_test_size) rng = check_random_state(self.random_state) for i in range(self.n_splits): # random partition permutation = rng.permutation(n_samples) ind_test = permutation[:n_test] ind_train = permutation[n_test:(n_test + n_train)] yield ind_train, ind_test class GroupShuffleSplit(ShuffleSplit): '''Shuffle-Group(s)-Out cross-validation iterator Provides randomized train/test indices to split data according to a third-party provided group. This group information can be used to encode arbitrary domain specific stratifications of the samples as integers. 
For instance the groups could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between LeavePGroupsOut and GroupShuffleSplit is that the former generates splits using all subsets of size ``p`` unique groups, whereas GroupShuffleSplit generates a user-determined number of random test splits, each with a user-determined fraction of unique groups. For example, a less computationally intensive alternative to ``LeavePGroupsOut(p=10)`` would be ``GroupShuffleSplit(test_size=10, n_splits=100)``. Note: The parameters ``test_size`` and ``train_size`` refer to groups, and not to samples, as in ShuffleSplit. Parameters ---------- n_splits : int, default=5 Number of re-shuffling & splitting iterations. test_size : float, int, default=0.2 If float, should be between 0.0 and 1.0 and represent the proportion of groups to include in the test split (rounded up). If int, represents the absolute number of test groups. If None, the value is set to the complement of the train size. The default will change in version 0.21. It will remain 0.2 only if ``train_size`` is unspecified, otherwise it will complement the specified ``train_size``. train_size : float or int, default=None If float, should be between 0.0 and 1.0 and represent the proportion of the groups to include in the train split. If int, represents the absolute number of train groups. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState instance, default=None Controls the randomness of the training and testing indices produced. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. 
Examples -------- >>> import numpy as np >>> from sklearn.model_selection import GroupShuffleSplit >>> X = np.ones(shape=(8, 2)) >>> y = np.ones(shape=(8, 1)) >>> groups = np.array([1, 1, 2, 2, 2, 3, 3, 3]) >>> print(groups.shape) (8,) >>> gss = GroupShuffleSplit(n_splits=2, train_size=.7, random_state=42) >>> gss.get_n_splits() 2 >>> for train_idx, test_idx in gss.split(X, y, groups): ... print("TRAIN:", train_idx, "TEST:", test_idx) TRAIN: [2 3 4 5 6 7] TEST: [0 1] TRAIN: [0 1 5 6 7] TEST: [2 3 4] ''' @_deprecate_positional_args def __init__(self, n_splits=5, *, test_size=None, train_size=None, random_state=None): super().__init__( n_splits=n_splits, test_size=test_size, train_size=train_size, random_state=random_state) self._default_test_size = 0.2 def _iter_indices(self, X, y, groups): if groups is None: raise ValueError("The 'groups' parameter should not be None.") groups = check_array(groups, ensure_2d=False, dtype=None) classes, group_indices = np.unique(groups, return_inverse=True) for group_train, group_test in super()._iter_indices(X=classes): # these are the indices of classes in the partition # invert them into data indices train = np.flatnonzero(np.in1d(group_indices, group_train)) test = np.flatnonzero(np.in1d(group_indices, group_test)) yield train, test def split(self, X, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. 
Notes ----- Randomized CV splitters may return different results for each call of split. You can make the results identical by setting `random_state` to an integer. """ return super().split(X, y, groups) class StratifiedShuffleSplit(BaseShuffleSplit): """Stratified ShuffleSplit cross-validator Provides train/test indices to split data in train/test sets. This cross-validation object is a merge of StratifiedKFold and ShuffleSplit, which returns stratified randomized folds. The folds are made by preserving the percentage of samples for each class. Note: like the ShuffleSplit strategy, stratified random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n_splits : int, default=10 Number of re-shuffling & splitting iterations. test_size : float or int, default=None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is set to the complement of the train size. If ``train_size`` is also None, it will be set to 0.1. train_size : float or int, default=None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState instance, default=None Controls the randomness of the training and testing indices produced. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. 
Examples -------- >>> import numpy as np >>> from sklearn.model_selection import StratifiedShuffleSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 0, 1, 1, 1]) >>> sss = StratifiedShuffleSplit(n_splits=5, test_size=0.5, random_state=0) >>> sss.get_n_splits(X, y) 5 >>> print(sss) StratifiedShuffleSplit(n_splits=5, random_state=0, ...) >>> for train_index, test_index in sss.split(X, y): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [5 2 3] TEST: [4 1 0] TRAIN: [5 1 4] TEST: [0 2 3] TRAIN: [5 0 2] TEST: [4 3 1] TRAIN: [4 1 0] TEST: [2 3 5] TRAIN: [0 5 1] TEST: [3 4 2] """ @_deprecate_positional_args def __init__(self, n_splits=10, *, test_size=None, train_size=None, random_state=None): super().__init__( n_splits=n_splits, test_size=test_size, train_size=train_size, random_state=random_state) self._default_test_size = 0.1 def _iter_indices(self, X, y, groups=None): n_samples = _num_samples(X) y = check_array(y, ensure_2d=False, dtype=None) n_train, n_test = _validate_shuffle_split( n_samples, self.test_size, self.train_size, default_test_size=self._default_test_size) if y.ndim == 2: # for multi-label y, map each distinct row to a string repr # using join because str(row) uses an ellipsis if len(row) > 1000 y = np.array([' '.join(row.astype('str')) for row in y]) classes, y_indices = np.unique(y, return_inverse=True) n_classes = classes.shape[0] class_counts = np.bincount(y_indices) if np.min(class_counts) < 2: raise ValueError("The least populated class in y has only 1" " member, which is too few. 
The minimum" " number of groups for any class cannot" " be less than 2.") if n_train < n_classes: raise ValueError('The train_size = %d should be greater or ' 'equal to the number of classes = %d' % (n_train, n_classes)) if n_test < n_classes: raise ValueError('The test_size = %d should be greater or ' 'equal to the number of classes = %d' % (n_test, n_classes)) # Find the sorted list of instances for each class: # (np.unique above performs a sort, so code is O(n logn) already) class_indices = np.split(np.argsort(y_indices, kind='mergesort'), np.cumsum(class_counts)[:-1]) rng = check_random_state(self.random_state) for _ in range(self.n_splits): # if there are ties in the class-counts, we want # to make sure to break them anew in each iteration n_i = _approximate_mode(class_counts, n_train, rng) class_counts_remaining = class_counts - n_i t_i = _approximate_mode(class_counts_remaining, n_test, rng) train = [] test = [] for i in range(n_classes): permutation = rng.permutation(class_counts[i]) perm_indices_class_i = class_indices[i].take(permutation, mode='clip') train.extend(perm_indices_class_i[:n_i[i]]) test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]]) train = rng.permutation(train) test = rng.permutation(test) yield train, test def split(self, X, y, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. Note that providing ``y`` is sufficient to generate the splits and hence ``np.zeros(n_samples)`` may be used as a placeholder for ``X`` instead of actual training data. y : array-like of shape (n_samples,) or (n_samples, n_labels) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. 
test : ndarray The testing set indices for that split. Notes ----- Randomized CV splitters may return different results for each call of split. You can make the results identical by setting `random_state` to an integer. """ y = check_array(y, ensure_2d=False, dtype=None) return super().split(X, y, groups) def _validate_shuffle_split(n_samples, test_size, train_size, default_test_size=None): """ Validation helper to check if the test/test sizes are meaningful wrt to the size of the data (n_samples) """ if test_size is None and train_size is None: test_size = default_test_size test_size_type = np.asarray(test_size).dtype.kind train_size_type = np.asarray(train_size).dtype.kind if (test_size_type == 'i' and (test_size >= n_samples or test_size <= 0) or test_size_type == 'f' and (test_size <= 0 or test_size >= 1)): raise ValueError('test_size={0} should be either positive and smaller' ' than the number of samples {1} or a float in the ' '(0, 1) range'.format(test_size, n_samples)) if (train_size_type == 'i' and (train_size >= n_samples or train_size <= 0) or train_size_type == 'f' and (train_size <= 0 or train_size >= 1)): raise ValueError('train_size={0} should be either positive and smaller' ' than the number of samples {1} or a float in the ' '(0, 1) range'.format(train_size, n_samples)) if train_size is not None and train_size_type not in ('i', 'f'): raise ValueError("Invalid value for train_size: {}".format(train_size)) if test_size is not None and test_size_type not in ('i', 'f'): raise ValueError("Invalid value for test_size: {}".format(test_size)) if (train_size_type == 'f' and test_size_type == 'f' and train_size + test_size > 1): raise ValueError( 'The sum of test_size and train_size = {}, should be in the (0, 1)' ' range. Reduce test_size and/or train_size.' 
.format(train_size + test_size)) if test_size_type == 'f': n_test = ceil(test_size * n_samples) elif test_size_type == 'i': n_test = float(test_size) if train_size_type == 'f': n_train = floor(train_size * n_samples) elif train_size_type == 'i': n_train = float(train_size) if train_size is None: n_train = n_samples - n_test elif test_size is None: n_test = n_samples - n_train if n_train + n_test > n_samples: raise ValueError('The sum of train_size and test_size = %d, ' 'should be smaller than the number of ' 'samples %d. Reduce test_size and/or ' 'train_size.' % (n_train + n_test, n_samples)) n_train, n_test = int(n_train), int(n_test) if n_train == 0: raise ValueError( 'With n_samples={}, test_size={} and train_size={}, the ' 'resulting train set will be empty. Adjust any of the ' 'aforementioned parameters.'.format(n_samples, test_size, train_size) ) return n_train, n_test class PredefinedSplit(BaseCrossValidator): """Predefined split cross-validator Provides train/test indices to split data into train/test sets using a predefined scheme specified by the user with the ``test_fold`` parameter. Read more in the :ref:`User Guide <cross_validation>`. .. versionadded:: 0.16 Parameters ---------- test_fold : array-like of shape (n_samples,) The entry ``test_fold[i]`` represents the index of the test set that sample ``i`` belongs to. It is possible to exclude sample ``i`` from any test set (i.e. include sample ``i`` in every training set) by setting ``test_fold[i]`` equal to -1. Examples -------- >>> import numpy as np >>> from sklearn.model_selection import PredefinedSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> test_fold = [0, 1, -1, 1] >>> ps = PredefinedSplit(test_fold) >>> ps.get_n_splits() 2 >>> print(ps) PredefinedSplit(test_fold=array([ 0, 1, -1, 1])) >>> for train_index, test_index in ps.split(): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... 
y_train, y_test = y[train_index], y[test_index] TRAIN: [1 2 3] TEST: [0] TRAIN: [0 2] TEST: [1 3] """ def __init__(self, test_fold): self.test_fold = np.array(test_fold, dtype=int) self.test_fold = column_or_1d(self.test_fold) self.unique_folds = np.unique(self.test_fold) self.unique_folds = self.unique_folds[self.unique_folds != -1] def split(self, X=None, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ ind = np.arange(len(self.test_fold)) for test_index in self._iter_test_masks(): train_index = ind[np.logical_not(test_index)] test_index = ind[test_index] yield train_index, test_index def _iter_test_masks(self): """Generates boolean masks corresponding to test sets.""" for f in self.unique_folds: test_index = np.where(self.test_fold == f)[0] test_mask = np.zeros(len(self.test_fold), dtype=bool) test_mask[test_index] = True yield test_mask def get_n_splits(self, X=None, y=None, groups=None): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator. """ return len(self.unique_folds) class _CVIterableWrapper(BaseCrossValidator): """Wrapper class for old style cv objects and iterables.""" def __init__(self, cv): self.cv = list(cv) def get_n_splits(self, X=None, y=None, groups=None): """Returns the number of splitting iterations in the cross-validator Parameters ---------- X : object Always ignored, exists for compatibility. 
y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator. """ return len(self.cv) def split(self, X=None, y=None, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. """ for train, test in self.cv: yield train, test @_deprecate_positional_args def check_cv(cv=5, y=None, *, classifier=False): """Input checker utility for building a cross-validator Parameters ---------- cv : int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds. - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if classifier is True and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value changed from 3-fold to 5-fold. y : array-like, default=None The target variable for supervised learning problems. classifier : bool, default=False Whether the task is a classification task, in which case stratified KFold will be used. Returns ------- checked_cv : a cross-validator instance. The return value is a cross-validator which generates the train/test splits via the ``split`` method. 
""" cv = 5 if cv is None else cv if isinstance(cv, numbers.Integral): if (classifier and (y is not None) and (type_of_target(y) in ('binary', 'multiclass'))): return StratifiedKFold(cv) else: return KFold(cv) if not hasattr(cv, 'split') or isinstance(cv, str): if not isinstance(cv, Iterable) or isinstance(cv, str): raise ValueError("Expected cv as an integer, cross-validation " "object (from sklearn.model_selection) " "or an iterable. Got %s." % cv) return _CVIterableWrapper(cv) return cv # New style cv objects are passed without any modification def train_test_split(*arrays, test_size=None, train_size=None, random_state=None, shuffle=True, stratify=None): """Split arrays or matrices into random train and test subsets Quick utility that wraps input validation and ``next(ShuffleSplit().split(X, y))`` and application to input data into a single call for splitting (and optionally subsampling) data in a oneliner. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- *arrays : sequence of indexables with same length / shape[0] Allowed inputs are lists, numpy arrays, scipy-sparse matrices or pandas dataframes. test_size : float or int, default=None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is set to the complement of the train size. If ``train_size`` is also None, it will be set to 0.25. train_size : float or int, default=None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState instance, default=None Controls the shuffling applied to the data before applying the split. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. 
shuffle : bool, default=True Whether or not to shuffle the data before splitting. If shuffle=False then stratify must be None. stratify : array-like, default=None If not None, data is split in a stratified fashion, using this as the class labels. Returns ------- splitting : list, length=2 * len(arrays) List containing train-test split of inputs. .. versionadded:: 0.16 If the input is sparse, the output will be a ``scipy.sparse.csr_matrix``. Else, output type is the same as the input type. Examples -------- >>> import numpy as np >>> from sklearn.model_selection import train_test_split >>> X, y = np.arange(10).reshape((5, 2)), range(5) >>> X array([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]) >>> list(y) [0, 1, 2, 3, 4] >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, test_size=0.33, random_state=42) ... >>> X_train array([[4, 5], [0, 1], [6, 7]]) >>> y_train [2, 0, 3] >>> X_test array([[2, 3], [8, 9]]) >>> y_test [1, 4] >>> train_test_split(y, shuffle=False) [[0, 1, 2], [3, 4]] """ n_arrays = len(arrays) if n_arrays == 0: raise ValueError("At least one array required as input") arrays = indexable(*arrays) n_samples = _num_samples(arrays[0]) n_train, n_test = _validate_shuffle_split(n_samples, test_size, train_size, default_test_size=0.25) if shuffle is False: if stratify is not None: raise ValueError( "Stratified train/test split is not implemented for " "shuffle=False") train = np.arange(n_train) test = np.arange(n_train, n_train + n_test) else: if stratify is not None: CVClass = StratifiedShuffleSplit else: CVClass = ShuffleSplit cv = CVClass(test_size=n_test, train_size=n_train, random_state=random_state) train, test = next(cv.split(X=arrays[0], y=stratify)) return list(chain.from_iterable((_safe_indexing(a, train), _safe_indexing(a, test)) for a in arrays)) # Tell nose that train_test_split is not a test. # (Needed for external libraries that may use nose.) # Use setattr to avoid mypy errors when monkeypatching. 
setattr(train_test_split, '__test__', False) def _build_repr(self): # XXX This is copied from BaseEstimator's get_params cls = self.__class__ init = getattr(cls.__init__, 'deprecated_original', cls.__init__) # Ignore varargs, kw and default values and pop self init_signature = signature(init) # Consider the constructor parameters excluding 'self' if init is object.__init__: args = [] else: args = sorted([p.name for p in init_signature.parameters.values() if p.name != 'self' and p.kind != p.VAR_KEYWORD]) class_name = self.__class__.__name__ params = dict() for key in args: # We need deprecation warnings to always be on in order to # catch deprecated param values. # This is set in utils/__init__.py but it gets overwritten # when running under python3 somehow. warnings.simplefilter("always", FutureWarning) try: with warnings.catch_warnings(record=True) as w: value = getattr(self, key, None) if value is None and hasattr(self, 'cvargs'): value = self.cvargs.get(key, None) if len(w) and w[0].category == FutureWarning: # if the parameter is deprecated, don't show it continue finally: warnings.filters.pop(0) params[key] = value return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
gcode-mirror/audacity
refs/heads/master
lib-src/lv2/sratom/waflib/Task.py
148
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file import os,shutil,re,tempfile from waflib import Utils,Logs,Errors NOT_RUN=0 MISSING=1 CRASHED=2 EXCEPTION=3 SKIPPED=8 SUCCESS=9 ASK_LATER=-1 SKIP_ME=-2 RUN_ME=-3 COMPILE_TEMPLATE_SHELL=''' def f(tsk): env = tsk.env gen = tsk.generator bld = gen.bld wd = getattr(tsk, 'cwd', None) p = env.get_flat tsk.last_cmd = cmd = \'\'\' %s \'\'\' % s return tsk.exec_command(cmd, cwd=wd, env=env.env or None) ''' COMPILE_TEMPLATE_NOSHELL=''' def f(tsk): env = tsk.env gen = tsk.generator bld = gen.bld wd = getattr(tsk, 'cwd', None) def to_list(xx): if isinstance(xx, str): return [xx] return xx tsk.last_cmd = lst = [] %s lst = [x for x in lst if x] return tsk.exec_command(lst, cwd=wd, env=env.env or None) ''' def cache_outputs(cls): m1=cls.run def run(self): bld=self.generator.bld if bld.cache_global and not bld.nocache: if self.can_retrieve_cache(): return 0 return m1(self) cls.run=run m2=cls.post_run def post_run(self): bld=self.generator.bld ret=m2(self) if bld.cache_global and not bld.nocache: self.put_files_cache() return ret cls.post_run=post_run return cls classes={} class store_task_type(type): def __init__(cls,name,bases,dict): super(store_task_type,cls).__init__(name,bases,dict) name=cls.__name__ if name.endswith('_task'): name=name.replace('_task','') if name!='evil'and name!='TaskBase': global classes if getattr(cls,'run_str',None): (f,dvars)=compile_fun(cls.run_str,cls.shell) cls.hcode=cls.run_str cls.run_str=None cls.run=f cls.vars=list(set(cls.vars+dvars)) cls.vars.sort() elif getattr(cls,'run',None)and not'hcode'in cls.__dict__: cls.hcode=Utils.h_fun(cls.run) if not getattr(cls,'nocache',None): cls=cache_outputs(cls) getattr(cls,'register',classes)[name]=cls evil=store_task_type('evil',(object,),{}) class TaskBase(evil): color='GREEN' ext_in=[] ext_out=[] before=[] after=[] hcode='' def __init__(self,*k,**kw): 
self.hasrun=NOT_RUN try: self.generator=kw['generator'] except KeyError: self.generator=self def __repr__(self): return'\n\t{task %r: %s %s}'%(self.__class__.__name__,id(self),str(getattr(self,'fun',''))) def __str__(self): if hasattr(self,'fun'): return'executing: %s\n'%self.fun.__name__ return self.__class__.__name__+'\n' def __hash__(self): return id(self) def exec_command(self,cmd,**kw): bld=self.generator.bld try: if not kw.get('cwd',None): kw['cwd']=bld.cwd except AttributeError: bld.cwd=kw['cwd']=bld.variant_dir return bld.exec_command(cmd,**kw) def runnable_status(self): return RUN_ME def process(self): m=self.master if m.stop: m.out.put(self) return try: del self.generator.bld.task_sigs[self.uid()] except KeyError: pass try: self.generator.bld.returned_tasks.append(self) self.log_display(self.generator.bld) ret=self.run() except Exception: self.err_msg=Utils.ex_stack() self.hasrun=EXCEPTION m.error_handler(self) m.out.put(self) return if ret: self.err_code=ret self.hasrun=CRASHED else: try: self.post_run() except Errors.WafError: pass except Exception: self.err_msg=Utils.ex_stack() self.hasrun=EXCEPTION else: self.hasrun=SUCCESS if self.hasrun!=SUCCESS: m.error_handler(self) m.out.put(self) def run(self): if hasattr(self,'fun'): return self.fun(self) return 0 def post_run(self): pass def log_display(self,bld): bld.to_log(self.display()) def display(self): col1=Logs.colors(self.color) col2=Logs.colors.NORMAL master=self.master def cur(): tmp=-1 if hasattr(master,'ready'): tmp-=master.ready.qsize() return master.processed+tmp if self.generator.bld.progress_bar==1: return self.generator.bld.progress_line(cur(),master.total,col1,col2) if self.generator.bld.progress_bar==2: ela=str(self.generator.bld.timer) try: ins=','.join([n.name for n in self.inputs]) except AttributeError: ins='' try: outs=','.join([n.name for n in self.outputs]) except AttributeError: outs='' return'|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n'%(master.total,cur(),ins,outs,ela) 
s=str(self) if not s: return None total=master.total n=len(str(total)) fs='[%%%dd/%%%dd] %%s%%s%%s'%(n,n) return fs%(cur(),total,col1,s,col2) def attr(self,att,default=None): ret=getattr(self,att,self) if ret is self:return getattr(self.__class__,att,default) return ret def hash_constraints(self): cls=self.__class__ tup=(str(cls.before),str(cls.after),str(cls.ext_in),str(cls.ext_out),cls.__name__,cls.hcode) h=hash(tup) return h def format_error(self): msg=getattr(self,'last_cmd','') name=getattr(self.generator,'name','') if getattr(self,"err_msg",None): return self.err_msg elif not self.hasrun: return'task in %r was not executed for some reason: %r'%(name,self) elif self.hasrun==CRASHED: try: return' -> task in %r failed (exit status %r): %r\n%r'%(name,self.err_code,self,msg) except AttributeError: return' -> task in %r failed: %r\n%r'%(name,self,msg) elif self.hasrun==MISSING: return' -> missing files in %r: %r\n%r'%(name,self,msg) else: return'invalid status for task in %r: %r'%(name,self.hasrun) def colon(self,var1,var2): tmp=self.env[var1] if isinstance(var2,str): it=self.env[var2] else: it=var2 if isinstance(tmp,str): return[tmp%x for x in it] else: if Logs.verbose and not tmp and it: Logs.warn('Missing env variable %r for task %r (generator %r)'%(var1,self,self.generator)) lst=[] for y in it: lst.extend(tmp) lst.append(y) return lst class Task(TaskBase): vars=[] shell=False def __init__(self,*k,**kw): TaskBase.__init__(self,*k,**kw) self.env=kw['env'] self.inputs=[] self.outputs=[] self.dep_nodes=[] self.run_after=set([]) def __str__(self): env=self.env src_str=' '.join([a.nice_path()for a in self.inputs]) tgt_str=' '.join([a.nice_path()for a in self.outputs]) if self.outputs:sep=' -> ' else:sep='' return'%s: %s%s%s\n'%(self.__class__.__name__.replace('_task',''),src_str,sep,tgt_str) def __repr__(self): try: ins=",".join([x.name for x in self.inputs]) outs=",".join([x.name for x in self.outputs]) except AttributeError: ins=",".join([str(x)for x in 
self.inputs]) outs=",".join([str(x)for x in self.outputs]) return"".join(['\n\t{task %r: '%id(self),self.__class__.__name__," ",ins," -> ",outs,'}']) def uid(self): try: return self.uid_ except AttributeError: m=Utils.md5() up=m.update up(self.__class__.__name__) for x in self.inputs+self.outputs: up(x.abspath()) self.uid_=m.digest() return self.uid_ def set_inputs(self,inp): if isinstance(inp,list):self.inputs+=inp else:self.inputs.append(inp) def set_outputs(self,out): if isinstance(out,list):self.outputs+=out else:self.outputs.append(out) def set_run_after(self,task): assert isinstance(task,TaskBase) self.run_after.add(task) def signature(self): try:return self.cache_sig except AttributeError:pass self.m=Utils.md5() self.m.update(self.hcode) self.sig_explicit_deps() self.sig_vars() if self.scan: try: self.sig_implicit_deps() except Errors.TaskRescan: return self.signature() ret=self.cache_sig=self.m.digest() return ret def runnable_status(self): for t in self.run_after: if not t.hasrun: return ASK_LATER bld=self.generator.bld try: new_sig=self.signature() except Errors.TaskNotReady: return ASK_LATER key=self.uid() try: prev_sig=bld.task_sigs[key] except KeyError: Logs.debug("task: task %r must run as it was never run before or the task code changed"%self) return RUN_ME for node in self.outputs: try: if node.sig!=new_sig: return RUN_ME except AttributeError: Logs.debug("task: task %r must run as the output nodes do not exist"%self) return RUN_ME if new_sig!=prev_sig: return RUN_ME return SKIP_ME def post_run(self): bld=self.generator.bld sig=self.signature() for node in self.outputs: try: os.stat(node.abspath()) except OSError: self.hasrun=MISSING self.err_msg='-> missing file: %r'%node.abspath() raise Errors.WafError(self.err_msg) node.sig=sig bld.task_sigs[self.uid()]=self.cache_sig def sig_explicit_deps(self): bld=self.generator.bld upd=self.m.update for x in self.inputs+self.dep_nodes: try: upd(x.get_bld_sig()) except(AttributeError,TypeError): raise 
Errors.WafError('Missing node signature for %r (required by %r)'%(x,self)) if bld.deps_man: additional_deps=bld.deps_man for x in self.inputs+self.outputs: try: d=additional_deps[id(x)] except KeyError: continue for v in d: if isinstance(v,bld.root.__class__): try: v=v.get_bld_sig() except AttributeError: raise Errors.WafError('Missing node signature for %r (required by %r)'%(v,self)) elif hasattr(v,'__call__'): v=v() upd(v) return self.m.digest() def sig_vars(self): bld=self.generator.bld env=self.env upd=self.m.update act_sig=bld.hash_env_vars(env,self.__class__.vars) upd(act_sig) dep_vars=getattr(self,'dep_vars',None) if dep_vars: upd(bld.hash_env_vars(env,dep_vars)) return self.m.digest() scan=None def sig_implicit_deps(self): bld=self.generator.bld key=self.uid() prev=bld.task_sigs.get((key,'imp'),[]) if prev: try: if prev==self.compute_sig_implicit_deps(): return prev except Exception: for x in bld.node_deps.get(self.uid(),[]): if x.is_child_of(bld.srcnode): try: os.stat(x.abspath()) except OSError: try: del x.parent.children[x.name] except KeyError: pass del bld.task_sigs[(key,'imp')] raise Errors.TaskRescan('rescan') (nodes,names)=self.scan() if Logs.verbose: Logs.debug('deps: scanner for %s returned %s %s'%(str(self),str(nodes),str(names))) bld.node_deps[key]=nodes bld.raw_deps[key]=names self.are_implicit_nodes_ready() try: bld.task_sigs[(key,'imp')]=sig=self.compute_sig_implicit_deps() except Exception: if Logs.verbose: for k in bld.node_deps.get(self.uid(),[]): try: k.get_bld_sig() except Exception: Logs.warn('Missing signature for node %r (may cause rebuilds)'%k) else: return sig def compute_sig_implicit_deps(self): upd=self.m.update bld=self.generator.bld self.are_implicit_nodes_ready() for k in bld.node_deps.get(self.uid(),[]): upd(k.get_bld_sig()) return self.m.digest() def are_implicit_nodes_ready(self): bld=self.generator.bld try: cache=bld.dct_implicit_nodes except AttributeError: bld.dct_implicit_nodes=cache={} try: dct=cache[bld.cur] except 
KeyError: dct=cache[bld.cur]={} for tsk in bld.cur_tasks: for x in tsk.outputs: dct[x]=tsk modified=False for x in bld.node_deps.get(self.uid(),[]): if x in dct: self.run_after.add(dct[x]) modified=True if modified: for tsk in self.run_after: if not tsk.hasrun: raise Errors.TaskNotReady('not ready') def can_retrieve_cache(self): if not getattr(self,'outputs',None): return None sig=self.signature() ssig=Utils.to_hex(self.uid())+Utils.to_hex(sig) dname=os.path.join(self.generator.bld.cache_global,ssig) try: t1=os.stat(dname).st_mtime except OSError: return None for node in self.outputs: orig=os.path.join(dname,node.name) try: shutil.copy2(orig,node.abspath()) os.utime(orig,None) except(OSError,IOError): Logs.debug('task: failed retrieving file') return None try: t2=os.stat(dname).st_mtime except OSError: return None if t1!=t2: return None for node in self.outputs: node.sig=sig if self.generator.bld.progress_bar<1: self.generator.bld.to_log('restoring from cache %r\n'%node.abspath()) self.cached=True return True def put_files_cache(self): if getattr(self,'cached',None): return None if not getattr(self,'outputs',None): return None sig=self.signature() ssig=Utils.to_hex(self.uid())+Utils.to_hex(sig) dname=os.path.join(self.generator.bld.cache_global,ssig) tmpdir=tempfile.mkdtemp(prefix=self.generator.bld.cache_global+os.sep+'waf') try: shutil.rmtree(dname) except Exception: pass try: for node in self.outputs: dest=os.path.join(tmpdir,node.name) shutil.copy2(node.abspath(),dest) except(OSError,IOError): try: shutil.rmtree(tmpdir) except Exception: pass else: try: os.rename(tmpdir,dname) except OSError: try: shutil.rmtree(tmpdir) except Exception: pass else: try: os.chmod(dname,Utils.O755) except Exception: pass def is_before(t1,t2): to_list=Utils.to_list for k in to_list(t2.ext_in): if k in to_list(t1.ext_out): return 1 if t1.__class__.__name__ in to_list(t2.after): return 1 if t2.__class__.__name__ in to_list(t1.before): return 1 return 0 def 
set_file_constraints(tasks): ins=Utils.defaultdict(set) outs=Utils.defaultdict(set) for x in tasks: for a in getattr(x,'inputs',[])+getattr(x,'dep_nodes',[]): ins[id(a)].add(x) for a in getattr(x,'outputs',[]): outs[id(a)].add(x) links=set(ins.keys()).intersection(outs.keys()) for k in links: for a in ins[k]: a.run_after.update(outs[k]) def set_precedence_constraints(tasks): cstr_groups=Utils.defaultdict(list) for x in tasks: h=x.hash_constraints() cstr_groups[h].append(x) keys=list(cstr_groups.keys()) maxi=len(keys) for i in range(maxi): t1=cstr_groups[keys[i]][0] for j in range(i+1,maxi): t2=cstr_groups[keys[j]][0] if is_before(t1,t2): a=i b=j elif is_before(t2,t1): a=j b=i else: continue aval=set(cstr_groups[keys[a]]) for x in cstr_groups[keys[b]]: x.run_after.update(aval) def funex(c): dc={} exec(c,dc) return dc['f'] reg_act=re.compile(r"(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})",re.M) def compile_fun_shell(line): extr=[] def repl(match): g=match.group if g('dollar'):return"$" elif g('backslash'):return'\\\\' elif g('subst'):extr.append((g('var'),g('code')));return"%s" return None line=reg_act.sub(repl,line)or line parm=[] dvars=[] app=parm.append for(var,meth)in extr: if var=='SRC': if meth:app('tsk.inputs%s'%meth) else:app('" ".join([a.path_from(bld.bldnode) for a in tsk.inputs])') elif var=='TGT': if meth:app('tsk.outputs%s'%meth) else:app('" ".join([a.path_from(bld.bldnode) for a in tsk.outputs])') elif meth: if meth.startswith(':'): m=meth[1:] if m=='SRC': m='[a.path_from(bld.bldnode) for a in tsk.inputs]' elif m=='TGT': m='[a.path_from(bld.bldnode) for a in tsk.outputs]' elif m[:3]not in('tsk','gen','bld'): dvars.extend([var,meth[1:]]) m='%r'%m app('" ".join(tsk.colon(%r, %s))'%(var,m)) else: app('%s%s'%(var,meth)) else: if not var in dvars:dvars.append(var) app("p('%s')"%var) if parm:parm="%% (%s) "%(',\n\t\t'.join(parm)) else:parm='' c=COMPILE_TEMPLATE_SHELL%(line,parm) Logs.debug('action: %s'%c.strip().splitlines()) 
return(funex(c),dvars) def compile_fun_noshell(line): extr=[] def repl(match): g=match.group if g('dollar'):return"$" elif g('subst'):extr.append((g('var'),g('code')));return"<<|@|>>" return None line2=reg_act.sub(repl,line) params=line2.split('<<|@|>>') assert(extr) buf=[] dvars=[] app=buf.append for x in range(len(extr)): params[x]=params[x].strip() if params[x]: app("lst.extend(%r)"%params[x].split()) (var,meth)=extr[x] if var=='SRC': if meth:app('lst.append(tsk.inputs%s)'%meth) else:app("lst.extend([a.path_from(bld.bldnode) for a in tsk.inputs])") elif var=='TGT': if meth:app('lst.append(tsk.outputs%s)'%meth) else:app("lst.extend([a.path_from(bld.bldnode) for a in tsk.outputs])") elif meth: if meth.startswith(':'): m=meth[1:] if m=='SRC': m='[a.path_from(bld.bldnode) for a in tsk.inputs]' elif m=='TGT': m='[a.path_from(bld.bldnode) for a in tsk.outputs]' elif m[:3]not in('tsk','gen','bld'): dvars.extend([var,m]) m='%r'%m app('lst.extend(tsk.colon(%r, %s))'%(var,m)) else: app('lst.extend(gen.to_list(%s%s))'%(var,meth)) else: app('lst.extend(to_list(env[%r]))'%var) if not var in dvars:dvars.append(var) if extr: if params[-1]: app("lst.extend(%r)"%params[-1].split()) fun=COMPILE_TEMPLATE_NOSHELL%"\n\t".join(buf) Logs.debug('action: %s'%fun.strip().splitlines()) return(funex(fun),dvars) def compile_fun(line,shell=False): if line.find('<')>0 or line.find('>')>0 or line.find('&&')>0: shell=True if shell: return compile_fun_shell(line) else: return compile_fun_noshell(line) def task_factory(name,func=None,vars=None,color='GREEN',ext_in=[],ext_out=[],before=[],after=[],shell=False,scan=None): params={'vars':vars or[],'color':color,'name':name,'ext_in':Utils.to_list(ext_in),'ext_out':Utils.to_list(ext_out),'before':Utils.to_list(before),'after':Utils.to_list(after),'shell':shell,'scan':scan,} if isinstance(func,str): params['run_str']=func else: params['run']=func cls=type(Task)(name,(Task,),params) global classes classes[name]=cls return cls def always_run(cls): 
old=cls.runnable_status def always(self): ret=old(self) if ret==SKIP_ME: ret=RUN_ME return ret cls.runnable_status=always return cls def update_outputs(cls): old_post_run=cls.post_run def post_run(self): old_post_run(self) for node in self.outputs: node.sig=Utils.h_file(node.abspath()) self.generator.bld.task_sigs[node.abspath()]=self.uid() cls.post_run=post_run old_runnable_status=cls.runnable_status def runnable_status(self): status=old_runnable_status(self) if status!=RUN_ME: return status try: bld=self.generator.bld prev_sig=bld.task_sigs[self.uid()] if prev_sig==self.signature(): for x in self.outputs: if not x.sig or bld.task_sigs[x.abspath()]!=self.uid(): return RUN_ME return SKIP_ME except KeyError: pass except IndexError: pass except AttributeError: pass return RUN_ME cls.runnable_status=runnable_status return cls