| repo_name | ref | path | copies | content |
|---|---|---|---|---|
hortonworks/hortonworks-sandbox
|
refs/heads/master
|
desktop/core/ext-py/Twisted/twisted/words/tap.py
|
54
|
# -*- test-case-name: twisted.words.test.test_tap -*-
# Copyright (c) 2001-2005 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Shiny new words service maker
"""
import sys, socket
from twisted.application import strports
from twisted.application.service import MultiService
from twisted.python import usage
from twisted import plugin
from twisted.words import iwords, service
from twisted.cred import checkers, credentials, portal, strcred
class Options(usage.Options, strcred.AuthOptionMixin):
supportedInterfaces = [credentials.IUsernamePassword]
optParameters = [
('hostname', None, socket.gethostname(),
         'Name of this server; purely informative')]
interfacePlugins = {}
plg = None
for plg in plugin.getPlugins(iwords.IProtocolPlugin):
assert plg.name not in interfacePlugins
interfacePlugins[plg.name] = plg
optParameters.append((
plg.name + '-port',
None, None,
'strports description of the port to bind for the ' + plg.name + ' server'))
del plg
def __init__(self, *a, **kw):
usage.Options.__init__(self, *a, **kw)
self['groups'] = []
def opt_group(self, name):
"""Specify a group which should exist
"""
self['groups'].append(name.decode(sys.stdin.encoding))
def opt_passwd(self, filename):
"""
Name of a passwd-style file. (This is for
backwards-compatibility only; you should use the --auth
command instead.)
"""
self.addChecker(checkers.FilePasswordDB(filename))
def makeService(config):
credCheckers = config.get('credCheckers', [])
wordsRealm = service.InMemoryWordsRealm(config['hostname'])
wordsPortal = portal.Portal(wordsRealm, credCheckers)
msvc = MultiService()
# XXX Attribute lookup on config is kind of bad - hrm.
for plgName in config.interfacePlugins:
port = config.get(plgName + '-port')
if port is not None:
factory = config.interfacePlugins[plgName].getFactory(wordsRealm, wordsPortal)
svc = strports.service(port, factory)
svc.setServiceParent(msvc)
# This is bogus. createGroup is async. makeService must be
# allowed to return a Deferred or some crap.
for g in config['groups']:
wordsRealm.createGroup(g)
return msvc
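# A hypothetical invocation sketch (not part of this module): with the words
# twistd plugin registered, these Options are consumed on the command line, e.g.
#   twistd words --hostname=chat.example.com --irc-port=tcp:6667
# where 'irc-port' assumes an IProtocolPlugin named 'irc' was discovered by
# the getPlugins() loop above.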
|
ctlajoie/ansible-modules-core
|
refs/heads/devel
|
utilities/logic/set_fact.py
|
115
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
author: "Dag Wieers (@dagwieers)"
module: set_fact
short_description: Set host facts from a task
description:
- This module allows setting new variables. Variables are set on a host-by-host basis
just like facts discovered by the setup module.
- These variables will survive between plays.
options:
key_value:
description:
- The C(set_fact) module takes key=value pairs as variables to set
        in the playbook scope. Alternatively, it accepts complex arguments
        using the C(args:) statement.
required: true
default: null
version_added: "1.2"
'''
EXAMPLES = '''
# Example setting host facts using key=value pairs
- set_fact: one_fact="something" other_fact="{{ local_var }}"
# Example setting host facts using complex arguments
- set_fact:
one_fact: something
other_fact: "{{ local_var * 2 }}"
another_fact: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
# As of 1.8, Ansible will convert boolean strings ('true', 'false', 'yes', 'no')
# to proper boolean values when using the key=value syntax, however it is still
# recommended that booleans be set using the complex argument style:
- set_fact:
one_fact: true
other_fact: false
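# A hypothetical follow-up task (illustration only, not from the module docs)
# showing a fact set above being consumed later in the same play:
- debug:
    msg: "one_fact is {{ one_fact }}"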
'''
|
msmolens/VTK
|
refs/heads/slicer-v6.3.0-2015-07-21-426987d
|
Examples/Rendering/Python/TPlane.py
|
42
|
#!/usr/bin/env python
# This simple example shows how to do basic texture mapping.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Load in the texture map. A texture is any unsigned char image. If it
# is not of this type, you will have to map it through a lookup table
# or by using vtkImageShiftScale.
bmpReader = vtk.vtkBMPReader()
bmpReader.SetFileName(VTK_DATA_ROOT + "/Data/masonry.bmp")
atext = vtk.vtkTexture()
atext.SetInputConnection(bmpReader.GetOutputPort())
atext.InterpolateOn()
# Create a plane source and actor. The vtkPlaneSource generates
# texture coordinates.
plane = vtk.vtkPlaneSource()
planeMapper = vtk.vtkPolyDataMapper()
planeMapper.SetInputConnection(plane.GetOutputPort())
planeActor = vtk.vtkActor()
planeActor.SetMapper(planeMapper)
planeActor.SetTexture(atext)
# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(planeActor)
ren.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(500, 500)
ren.ResetCamera()
cam1 = ren.GetActiveCamera()
cam1.Elevation(-30)
cam1.Roll(-20)
ren.ResetCameraClippingRange()
iren.Initialize()
renWin.Render()
iren.Start()
|
mayank-johri/LearnSeleniumUsingPython
|
refs/heads/master
|
Section 2 - Advance Python/Chapter S2.11 Multiprocessing and Threading/code/1_threading.py
|
2
|
import threading
def worker():
"""thread worker function"""
    print('> Worker')
return
threads = []
for i in range(5):
t = threading.Thread(target=worker)
threads.append(t)
t.start()
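# A minimal extension sketch (assumption: appended to this same script): wait
# for every worker to finish before the interpreter exits.
for t in threads:
    t.join()
print('> All workers done')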
|
shermanng10/superathletebuilder
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/conf/locale/bg/formats.py
|
619
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' ' # Non-breaking space
# NUMBER_GROUPING =
|
shams169/pythonProject
|
refs/heads/master
|
env/lib/python3.6/site-packages/pip/_vendor/requests/packages/chardet/universaldetector.py
|
1775
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
            # If the data starts with a BOM, we know it is UTF
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
            sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
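# A usage sketch under this module's own API (the file name is hypothetical):
# feed chunks until the detector settles, then read the result dict.
if __name__ == '__main__':
    detector = UniversalDetector()
    with open('unknown.txt', 'rb') as fp:
        for chunk in iter(lambda: fp.read(4096), b''):
            detector.feed(chunk)
            if detector.done:
                break
    detector.close()
    print(detector.result)  # e.g. {'encoding': 'UTF-8-SIG', 'confidence': 1.0}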
|
ciraxwe/cherrypy-app-engine
|
refs/heads/master
|
cherrypy/test/test_sessionauthenticate.py
|
22
|
import cherrypy
from cherrypy.test import helper
class SessionAuthenticateTest(helper.CPWebCase):
def setup_server():
def check(username, password):
# Dummy check_username_and_password function
if username != 'test' or password != 'password':
return 'Wrong login/password'
def augment_params():
# A simple tool to add some things to request.params
# This is to check to make sure that session_auth can handle
# request params (ticket #780)
cherrypy.request.params["test"] = "test"
cherrypy.tools.augment_params = cherrypy.Tool(
'before_handler', augment_params, None, priority=30)
class Test:
_cp_config = {
'tools.sessions.on': True,
'tools.session_auth.on': True,
'tools.session_auth.check_username_and_password': check,
'tools.augment_params.on': True,
}
def index(self, **kwargs):
return "Hi %s, you are logged in" % cherrypy.request.login
index.exposed = True
cherrypy.tree.mount(Test())
setup_server = staticmethod(setup_server)
def testSessionAuthenticate(self):
# request a page and check for login form
self.getPage('/')
self.assertInBody('<form method="post" action="do_login">')
# setup credentials
login_body = 'username=test&password=password&from_page=/'
# attempt a login
self.getPage('/do_login', method='POST', body=login_body)
self.assertStatus((302, 303))
# get the page now that we are logged in
self.getPage('/', self.cookies)
self.assertBody('Hi test, you are logged in')
# do a logout
self.getPage('/do_logout', self.cookies, method='POST')
self.assertStatus((302, 303))
# verify we are logged out
self.getPage('/', self.cookies)
self.assertInBody('<form method="post" action="do_login">')
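# A standalone-run sketch (hypothetical: Test is defined inside setup_server
# above, so outside the test harness you would lift it to module level first):
#   cherrypy.config.update({'server.socket_port': 8080})
#   cherrypy.quickstart(Test(), '/')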
|
40223112/2015cd_midterm
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/_sre.py
|
622
|
# NOT_RPYTHON
"""
A pure Python reimplementation of the _sre module from CPython 2.4
Copyright 2005 Nik Haldimann, licensed under the MIT license
This code is based on material licensed under CNRI's Python 1.6 license and
copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB
"""
MAXREPEAT = 2147483648
#import array
import operator, sys
from sre_constants import ATCODES, OPCODES, CHCODES
from sre_constants import SRE_INFO_PREFIX, SRE_INFO_LITERAL
from sre_constants import SRE_FLAG_UNICODE, SRE_FLAG_LOCALE
# Identifying as _sre from Python 2.3 or 2.4
#if sys.version_info[:2] >= (2, 4):
MAGIC = 20031017
#else:
# MAGIC = 20030419
# In _sre.c this is the byte size of the code word type of the C implementation.
# There it's 2 for normal Python builds and more for wide unicode builds (large
# enough to hold a 32-bit UCS-4 encoded character). Since here in pure Python
# we only see re bytecodes as Python longs, we shouldn't have to care about the
# codesize. But sre_compile will compile some stuff differently depending on the
# codesize (e.g., charsets).
# starting with python 3.3 CODESIZE is 4
#if sys.maxunicode == 65535:
# CODESIZE = 2
#else:
CODESIZE = 4
copyright = "_sre.py 2.4c Copyright 2005 by Nik Haldimann"
def getcodesize():
return CODESIZE
def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
"""Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern
object. Actual compilation to opcodes happens in sre_compile."""
return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup)
def getlower(char_ord, flags):
if (char_ord < 128) or (flags & SRE_FLAG_UNICODE) \
or (flags & SRE_FLAG_LOCALE and char_ord < 256):
#return ord(unichr(char_ord).lower())
return ord(chr(char_ord).lower())
else:
return char_ord
class SRE_Pattern:
def __init__(self, pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
self.pattern = pattern
self.flags = flags
self.groups = groups
self.groupindex = groupindex # Maps group names to group indices
self._indexgroup = indexgroup # Maps indices to group names
self._code = code
def match(self, string, pos=0, endpos=sys.maxsize):
"""If zero or more characters at the beginning of string match this
regular expression, return a corresponding MatchObject instance. Return
None if the string does not match the pattern."""
state = _State(string, pos, endpos, self.flags)
if state.match(self._code):
return SRE_Match(self, state)
return None
def search(self, string, pos=0, endpos=sys.maxsize):
"""Scan through string looking for a location where this regular
expression produces a match, and return a corresponding MatchObject
instance. Return None if no position in the string matches the
pattern."""
state = _State(string, pos, endpos, self.flags)
if state.search(self._code):
return SRE_Match(self, state)
else:
return None
def findall(self, string, pos=0, endpos=sys.maxsize):
"""Return a list of all non-overlapping matches of pattern in string."""
matchlist = []
state = _State(string, pos, endpos, self.flags)
while state.start <= state.end:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
match = SRE_Match(self, state)
if self.groups == 0 or self.groups == 1:
item = match.group(self.groups)
else:
item = match.groups("")
matchlist.append(item)
if state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
return matchlist
def _subx(self, template, string, count=0, subn=False):
filter = template
if not callable(template) and "\\" in template:
            # handle non-literal strings; hand it over to the template compiler
#import sre #sre was renamed to re
#fix me brython
#print("possible issue at _sre.py line 116")
import re as sre
filter = sre._subx(self, template)
state = _State(string, 0, sys.maxsize, self.flags)
sublist = []
n = last_pos = 0
while not count or n < count:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
if last_pos < state.start:
sublist.append(string[last_pos:state.start])
if not (last_pos == state.start and
last_pos == state.string_position and n > 0):
# the above ignores empty matches on latest position
if callable(filter):
sublist.append(filter(SRE_Match(self, state)))
else:
sublist.append(filter)
last_pos = state.string_position
n += 1
if state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
if last_pos < state.end:
sublist.append(string[last_pos:state.end])
item = "".join(sublist)
if subn:
return item, n
else:
return item
def sub(self, repl, string, count=0):
"""Return the string obtained by replacing the leftmost non-overlapping
occurrences of pattern in string by the replacement repl."""
return self._subx(repl, string, count, False)
def subn(self, repl, string, count=0):
"""Return the tuple (new_string, number_of_subs_made) found by replacing
the leftmost non-overlapping occurrences of pattern with the replacement
repl."""
return self._subx(repl, string, count, True)
def split(self, string, maxsplit=0):
"""Split string by the occurrences of pattern."""
splitlist = []
state = _State(string, 0, sys.maxsize, self.flags)
n = 0
last = state.start
while not maxsplit or n < maxsplit:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
if state.start == state.string_position: # zero-width match
if last == state.end: # or end of string
break
state.start += 1
continue
splitlist.append(string[last:state.start])
# add groups (if any)
if self.groups:
match = SRE_Match(self, state)
splitlist.extend(list(match.groups(None)))
n += 1
last = state.start = state.string_position
splitlist.append(string[last:state.end])
return splitlist
def finditer(self, string, pos=0, endpos=sys.maxsize):
"""Return a list of all non-overlapping matches of pattern in string."""
        #scanner = self.scanner(string, pos, endpos)
        _list = []
        _re = SRE_Scanner(self, string, pos, endpos)
        _m = _re.search()
while _m:
_list.append(_m)
_m=_re.search()
return _list
#return iter(scanner.search, None)
def scanner(self, string, start=0, end=sys.maxsize):
return SRE_Scanner(self, string, start, end)
def __copy__(self):
raise TypeError("cannot copy this pattern object")
    def __deepcopy__(self, memo):
raise TypeError("cannot copy this pattern object")
class SRE_Scanner:
"""Undocumented scanner interface of sre."""
def __init__(self, pattern, string, start, end):
self.pattern = pattern
self._state = _State(string, start, end, self.pattern.flags)
def _match_search(self, matcher):
state = self._state
state.reset()
state.string_position = state.start
match = None
if matcher(self.pattern._code):
match = SRE_Match(self.pattern, state)
if match is None or state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
return match
def match(self):
return self._match_search(self._state.match)
def search(self):
return self._match_search(self._state.search)
class SRE_Match:
def __init__(self, pattern, state):
self.re = pattern
self.string = state.string
self.pos = state.pos
self.endpos = state.end
self.lastindex = state.lastindex
if self.lastindex < 0:
self.lastindex = None
self.regs = self._create_regs(state)
#statement below is not valid under python3 ( 0 <= None)
#if pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
if self.lastindex is not None and pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
# The above upper-bound check should not be necessary, as the re
# compiler is supposed to always provide an _indexgroup list long
# enough. But the re.Scanner class seems to screw up something
# there, test_scanner in test_re won't work without upper-bound
# checking. XXX investigate this and report bug to CPython.
self.lastgroup = pattern._indexgroup[self.lastindex]
else:
self.lastgroup = None
def _create_regs(self, state):
"""Creates a tuple of index pairs representing matched groups."""
regs = [(state.start, state.string_position)]
for group in range(self.re.groups):
mark_index = 2 * group
if mark_index + 1 < len(state.marks) \
and state.marks[mark_index] is not None \
and state.marks[mark_index + 1] is not None:
regs.append((state.marks[mark_index], state.marks[mark_index + 1]))
else:
regs.append((-1, -1))
return tuple(regs)
def _get_index(self, group):
if isinstance(group, int):
if group >= 0 and group <= self.re.groups:
return group
else:
if group in self.re.groupindex:
return self.re.groupindex[group]
raise IndexError("no such group")
def _get_slice(self, group, default):
group_indices = self.regs[group]
if group_indices[0] >= 0:
return self.string[group_indices[0]:group_indices[1]]
else:
return default
def start(self, group=0):
"""Returns the indices of the start of the substring matched by group;
group defaults to zero (meaning the whole matched substring). Returns -1
if group exists but did not contribute to the match."""
return self.regs[self._get_index(group)][0]
def end(self, group=0):
"""Returns the indices of the end of the substring matched by group;
group defaults to zero (meaning the whole matched substring). Returns -1
if group exists but did not contribute to the match."""
return self.regs[self._get_index(group)][1]
def span(self, group=0):
"""Returns the 2-tuple (m.start(group), m.end(group))."""
return self.start(group), self.end(group)
def expand(self, template):
"""Return the string obtained by doing backslash substitution and
resolving group references on template."""
import sre
return sre._expand(self.re, self, template)
def groups(self, default=None):
"""Returns a tuple containing all the subgroups of the match. The
default argument is used for groups that did not participate in the
match (defaults to None)."""
groups = []
for indices in self.regs[1:]:
if indices[0] >= 0:
groups.append(self.string[indices[0]:indices[1]])
else:
groups.append(default)
return tuple(groups)
def groupdict(self, default=None):
"""Return a dictionary containing all the named subgroups of the match.
The default argument is used for groups that did not participate in the
match (defaults to None)."""
groupdict = {}
for key, value in self.re.groupindex.items():
groupdict[key] = self._get_slice(value, default)
return groupdict
def group(self, *args):
"""Returns one or more subgroups of the match. Each argument is either a
group index or a group name."""
if len(args) == 0:
args = (0,)
grouplist = []
for group in args:
grouplist.append(self._get_slice(self._get_index(group), None))
if len(grouplist) == 1:
return grouplist[0]
else:
return tuple(grouplist)
    def __copy__(self):
        raise TypeError("cannot copy this match object")
    def __deepcopy__(self, memo):
        raise TypeError("cannot copy this match object")
class _State:
def __init__(self, string, start, end, flags):
self.string = string
if start < 0:
start = 0
if end > len(string):
end = len(string)
self.start = start
self.string_position = self.start
self.end = end
self.pos = start
self.flags = flags
self.reset()
def reset(self):
self.marks = []
self.lastindex = -1
self.marks_stack = []
self.context_stack = []
self.repeat = None
def match(self, pattern_codes):
# Optimization: Check string length. pattern_codes[3] contains the
# minimum length for a string to possibly match.
# brython.. the optimization doesn't work
#if pattern_codes[0] == OPCODES["info"] and pattern_codes[3]:
# if self.end - self.string_position < pattern_codes[3]:
# #_log("reject (got %d chars, need %d)"
# # % (self.end - self.string_position, pattern_codes[3]))
# return False
dispatcher = _OpcodeDispatcher()
self.context_stack.append(_MatchContext(self, pattern_codes))
has_matched = None
while len(self.context_stack) > 0:
context = self.context_stack[-1]
has_matched = dispatcher.match(context)
if has_matched is not None: # don't pop if context isn't done
self.context_stack.pop()
return has_matched
def search(self, pattern_codes):
flags = 0
if pattern_codes[0] == OPCODES["info"]:
# optimization info block
# <INFO> <1=skip> <2=flags> <3=min> <4=max> <5=prefix info>
if pattern_codes[2] & SRE_INFO_PREFIX and pattern_codes[5] > 1:
return self.fast_search(pattern_codes)
flags = pattern_codes[2]
pattern_codes = pattern_codes[pattern_codes[1] + 1:]
string_position = self.start
if pattern_codes[0] == OPCODES["literal"]:
# Special case: Pattern starts with a literal character. This is
# used for short prefixes
character = pattern_codes[1]
while True:
while string_position < self.end \
and ord(self.string[string_position]) != character:
string_position += 1
if string_position >= self.end:
return False
self.start = string_position
string_position += 1
self.string_position = string_position
if flags & SRE_INFO_LITERAL:
return True
if self.match(pattern_codes[2:]):
return True
return False
# General case
while string_position <= self.end:
self.reset()
self.start = self.string_position = string_position
if self.match(pattern_codes):
return True
string_position += 1
return False
def fast_search(self, pattern_codes):
"""Skips forward in a string as fast as possible using information from
an optimization info block."""
# pattern starts with a known prefix
# <5=length> <6=skip> <7=prefix data> <overlap data>
flags = pattern_codes[2]
prefix_len = pattern_codes[5]
prefix_skip = pattern_codes[6] # don't really know what this is good for
prefix = pattern_codes[7:7 + prefix_len]
overlap = pattern_codes[7 + prefix_len - 1:pattern_codes[1] + 1]
pattern_codes = pattern_codes[pattern_codes[1] + 1:]
i = 0
string_position = self.string_position
while string_position < self.end:
while True:
if ord(self.string[string_position]) != prefix[i]:
if i == 0:
break
else:
i = overlap[i]
else:
i += 1
if i == prefix_len:
# found a potential match
self.start = string_position + 1 - prefix_len
self.string_position = string_position + 1 \
- prefix_len + prefix_skip
if flags & SRE_INFO_LITERAL:
return True # matched all of pure literal pattern
if self.match(pattern_codes[2 * prefix_skip:]):
return True
i = overlap[i]
break
string_position += 1
return False
def set_mark(self, mark_nr, position):
if mark_nr & 1:
# This id marks the end of a group.
            # fix python 3 division incompatibility
#self.lastindex = mark_nr / 2 + 1
self.lastindex = mark_nr // 2 + 1
if mark_nr >= len(self.marks):
self.marks.extend([None] * (mark_nr - len(self.marks) + 1))
self.marks[mark_nr] = position
def get_marks(self, group_index):
marks_index = 2 * group_index
if len(self.marks) > marks_index + 1:
return self.marks[marks_index], self.marks[marks_index + 1]
else:
return None, None
def marks_push(self):
self.marks_stack.append((self.marks[:], self.lastindex))
def marks_pop(self):
self.marks, self.lastindex = self.marks_stack.pop()
def marks_pop_keep(self):
self.marks, self.lastindex = self.marks_stack[-1]
def marks_pop_discard(self):
self.marks_stack.pop()
def lower(self, char_ord):
return getlower(char_ord, self.flags)
class _MatchContext:
def __init__(self, state, pattern_codes):
self.state = state
self.pattern_codes = pattern_codes
self.string_position = state.string_position
self.code_position = 0
self.has_matched = None
def push_new_context(self, pattern_offset):
"""Creates a new child context of this context and pushes it on the
        stack. pattern_offset is the offset from the current code position to
start interpreting from."""
child_context = _MatchContext(self.state,
self.pattern_codes[self.code_position + pattern_offset:])
#print("_sre.py:517:pushing new context") #, child_context.has_matched)
#print(self.state.string_position)
#print(self.pattern_codes[self.code_position + pattern_offset:])
#print(pattern_offset)
self.state.context_stack.append(child_context)
return child_context
def peek_char(self, peek=0):
return self.state.string[self.string_position + peek]
def skip_char(self, skip_count):
self.string_position += skip_count
def remaining_chars(self):
return self.state.end - self.string_position
def peek_code(self, peek=0):
return self.pattern_codes[self.code_position + peek]
def skip_code(self, skip_count):
self.code_position += skip_count
def remaining_codes(self):
return len(self.pattern_codes) - self.code_position
def at_beginning(self):
return self.string_position == 0
def at_end(self):
return self.string_position == self.state.end
def at_linebreak(self):
return not self.at_end() and _is_linebreak(self.peek_char())
def at_boundary(self, word_checker):
if self.at_beginning() and self.at_end():
return False
that = not self.at_beginning() and word_checker(self.peek_char(-1))
this = not self.at_end() and word_checker(self.peek_char())
return this != that
class _RepeatContext(_MatchContext):
def __init__(self, context):
_MatchContext.__init__(self, context.state,
context.pattern_codes[context.code_position:])
self.count = -1
#print('569:repeat', context.state.repeat)
self.previous = context.state.repeat
self.last_position = None
class _Dispatcher:
DISPATCH_TABLE = None
def dispatch(self, code, context):
method = self.DISPATCH_TABLE.get(code, self.__class__.unknown)
return method(self, context)
def unknown(self, code, ctx):
raise NotImplementedError()
def build_dispatch_table(cls, code_dict, method_prefix):
if cls.DISPATCH_TABLE is not None:
return
table = {}
for key, value in code_dict.items():
if hasattr(cls, "%s%s" % (method_prefix, key)):
table[value] = getattr(cls, "%s%s" % (method_prefix, key))
cls.DISPATCH_TABLE = table
build_dispatch_table = classmethod(build_dispatch_table)
class _OpcodeDispatcher(_Dispatcher):
def __init__(self):
self.executing_contexts = {}
self.at_dispatcher = _AtcodeDispatcher()
self.ch_dispatcher = _ChcodeDispatcher()
self.set_dispatcher = _CharsetDispatcher()
def match(self, context):
"""Returns True if the current context matches, False if it doesn't and
        None if matching is not finished, i.e. it must be resumed after child
contexts have been matched."""
while context.remaining_codes() > 0 and context.has_matched is None:
opcode = context.peek_code()
if not self.dispatch(opcode, context):
return None
if context.has_matched is None:
context.has_matched = False
return context.has_matched
def dispatch(self, opcode, context):
"""Dispatches a context on a given opcode. Returns True if the context
is done matching, False if it must be resumed when next encountered."""
#if self.executing_contexts.has_key(id(context)):
if id(context) in self.executing_contexts:
generator = self.executing_contexts[id(context)]
del self.executing_contexts[id(context)]
has_finished = next(generator)
else:
method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown)
has_finished = method(self, context)
if hasattr(has_finished, "__next__"): # avoid using the types module
generator = has_finished
has_finished = next(generator)
if not has_finished:
self.executing_contexts[id(context)] = generator
return has_finished
def op_success(self, ctx):
# end of pattern
#self._log(ctx, "SUCCESS")
ctx.state.string_position = ctx.string_position
ctx.has_matched = True
return True
def op_failure(self, ctx):
# immediate failure
#self._log(ctx, "FAILURE")
ctx.has_matched = False
return True
def general_op_literal(self, ctx, compare, decorate=lambda x: x):
#print(ctx.peek_char())
if ctx.at_end() or not compare(decorate(ord(ctx.peek_char())),
decorate(ctx.peek_code(1))):
ctx.has_matched = False
ctx.skip_code(2)
ctx.skip_char(1)
def op_literal(self, ctx):
# match literal string
# <LITERAL> <code>
#self._log(ctx, "LITERAL", ctx.peek_code(1))
self.general_op_literal(ctx, operator.eq)
return True
def op_not_literal(self, ctx):
# match anything that is not the given literal character
# <NOT_LITERAL> <code>
#self._log(ctx, "NOT_LITERAL", ctx.peek_code(1))
self.general_op_literal(ctx, operator.ne)
return True
def op_literal_ignore(self, ctx):
# match literal regardless of case
# <LITERAL_IGNORE> <code>
#self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
self.general_op_literal(ctx, operator.eq, ctx.state.lower)
return True
def op_not_literal_ignore(self, ctx):
# match literal regardless of case
# <LITERAL_IGNORE> <code>
#self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
self.general_op_literal(ctx, operator.ne, ctx.state.lower)
return True
def op_at(self, ctx):
# match at given position
# <AT> <code>
#self._log(ctx, "AT", ctx.peek_code(1))
if not self.at_dispatcher.dispatch(ctx.peek_code(1), ctx):
ctx.has_matched = False
#print('_sre.py:line693, update context.has_matched variable')
return True
ctx.skip_code(2)
return True
def op_category(self, ctx):
# match at given category
# <CATEGORY> <code>
#self._log(ctx, "CATEGORY", ctx.peek_code(1))
if ctx.at_end() or not self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
ctx.has_matched = False
#print('_sre.py:line703, update context.has_matched variable')
return True
ctx.skip_code(2)
ctx.skip_char(1)
return True
def op_any(self, ctx):
# match anything (except a newline)
# <ANY>
#self._log(ctx, "ANY")
if ctx.at_end() or ctx.at_linebreak():
ctx.has_matched = False
#print('_sre.py:line714, update context.has_matched variable')
return True
ctx.skip_code(1)
ctx.skip_char(1)
return True
def op_any_all(self, ctx):
# match anything
# <ANY_ALL>
#self._log(ctx, "ANY_ALL")
if ctx.at_end():
ctx.has_matched = False
#print('_sre.py:line725, update context.has_matched variable')
return True
ctx.skip_code(1)
ctx.skip_char(1)
return True
def general_op_in(self, ctx, decorate=lambda x: x):
#self._log(ctx, "OP_IN")
#print('general_op_in')
if ctx.at_end():
ctx.has_matched = False
#print('_sre.py:line734, update context.has_matched variable')
return
skip = ctx.peek_code(1)
ctx.skip_code(2) # set op pointer to the set code
#print(ctx.peek_char(), ord(ctx.peek_char()),
# decorate(ord(ctx.peek_char())))
if not self.check_charset(ctx, decorate(ord(ctx.peek_char()))):
#print('_sre.py:line738, update context.has_matched variable')
ctx.has_matched = False
return
ctx.skip_code(skip - 1)
ctx.skip_char(1)
#print('end:general_op_in')
def op_in(self, ctx):
# match set member (or non_member)
# <IN> <skip> <set>
#self._log(ctx, "OP_IN")
self.general_op_in(ctx)
return True
def op_in_ignore(self, ctx):
# match set member (or non_member), disregarding case of current char
# <IN_IGNORE> <skip> <set>
#self._log(ctx, "OP_IN_IGNORE")
self.general_op_in(ctx, ctx.state.lower)
return True
def op_jump(self, ctx):
# jump forward
# <JUMP> <offset>
#self._log(ctx, "JUMP", ctx.peek_code(1))
ctx.skip_code(ctx.peek_code(1) + 1)
return True
# skip info
# <INFO> <skip>
op_info = op_jump
def op_mark(self, ctx):
# set mark
# <MARK> <gid>
#self._log(ctx, "OP_MARK", ctx.peek_code(1))
ctx.state.set_mark(ctx.peek_code(1), ctx.string_position)
ctx.skip_code(2)
return True
def op_branch(self, ctx):
# alternation
# <BRANCH> <0=skip> code <JUMP> ... <NULL>
#self._log(ctx, "BRANCH")
ctx.state.marks_push()
ctx.skip_code(1)
current_branch_length = ctx.peek_code(0)
while current_branch_length:
# The following tries to shortcut branches starting with a
# (unmatched) literal. _sre.c also shortcuts charsets here.
if not (ctx.peek_code(1) == OPCODES["literal"] and \
(ctx.at_end() or ctx.peek_code(2) != ord(ctx.peek_char()))):
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(1)
#print("_sre.py:803:op_branch")
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.state.marks_pop_keep()
ctx.skip_code(current_branch_length)
current_branch_length = ctx.peek_code(0)
ctx.state.marks_pop_discard()
ctx.has_matched = False
#print('_sre.py:line805, update context.has_matched variable')
yield True
def op_repeat_one(self, ctx):
# match repeated sequence (maximizing).
# this operator only works if the repeated item is exactly one character
# wide, and we're not already collecting backtracking points.
# <REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
mincount = ctx.peek_code(2)
maxcount = ctx.peek_code(3)
#print("repeat one", mincount, maxcount)
#self._log(ctx, "REPEAT_ONE", mincount, maxcount)
if ctx.remaining_chars() < mincount:
ctx.has_matched = False
yield True
ctx.state.string_position = ctx.string_position
count = self.count_repetitions(ctx, maxcount)
ctx.skip_char(count)
if count < mincount:
ctx.has_matched = False
yield True
if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
# tail is empty. we're finished
ctx.state.string_position = ctx.string_position
ctx.has_matched = True
yield True
ctx.state.marks_push()
if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["literal"]:
# Special case: Tail starts with a literal. Skip positions where
# the rest of the pattern cannot possibly match.
char = ctx.peek_code(ctx.peek_code(1) + 2)
while True:
while count >= mincount and \
(ctx.at_end() or ord(ctx.peek_char()) != char):
ctx.skip_char(-1)
count -= 1
if count < mincount:
break
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
#print("_sre.py:856:push_new_context")
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.skip_char(-1)
count -= 1
ctx.state.marks_pop_keep()
else:
# General case: backtracking
while count >= mincount:
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.skip_char(-1)
count -= 1
ctx.state.marks_pop_keep()
ctx.state.marks_pop_discard()
ctx.has_matched = False
#ctx.has_matched = True # <== this should be True (so match object gets returned to program)
yield True
def op_min_repeat_one(self, ctx):
# match repeated sequence (minimizing)
# <MIN_REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
mincount = ctx.peek_code(2)
maxcount = ctx.peek_code(3)
#self._log(ctx, "MIN_REPEAT_ONE", mincount, maxcount)
if ctx.remaining_chars() < mincount:
ctx.has_matched = False
yield True
ctx.state.string_position = ctx.string_position
if mincount == 0:
count = 0
else:
count = self.count_repetitions(ctx, mincount)
if count < mincount:
ctx.has_matched = False
#print('_sre.py:line891, update context.has_matched variable')
yield True
ctx.skip_char(count)
if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
# tail is empty. we're finished
ctx.state.string_position = ctx.string_position
ctx.has_matched = True
yield True
ctx.state.marks_push()
while maxcount == MAXREPEAT or count <= maxcount:
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
#print('_sre.py:916:push new context')
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.state.string_position = ctx.string_position
if self.count_repetitions(ctx, 1) == 0:
break
ctx.skip_char(1)
count += 1
ctx.state.marks_pop_keep()
ctx.state.marks_pop_discard()
ctx.has_matched = False
yield True
def op_repeat(self, ctx):
# create repeat context. all the hard work is done by the UNTIL
# operator (MAX_UNTIL, MIN_UNTIL)
# <REPEAT> <skip> <1=min> <2=max> item <UNTIL> tail
#self._log(ctx, "REPEAT", ctx.peek_code(2), ctx.peek_code(3))
#if ctx.state.repeat is None:
# print("951:ctx.state.repeat is None")
# #ctx.state.repeat=_RepeatContext(ctx)
repeat = _RepeatContext(ctx)
ctx.state.repeat = repeat
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
#print("_sre.py:941:push new context", id(child_context))
#print(child_context.state.repeat)
#print(ctx.state.repeat)
# are these two yields causing the issue?
yield False
ctx.state.repeat = repeat.previous
ctx.has_matched = child_context.has_matched
yield True
def op_max_until(self, ctx):
# maximizing repeat
# <REPEAT> <skip> <1=min> <2=max> item <MAX_UNTIL> tail
repeat = ctx.state.repeat
#print("op_max_until") #, id(ctx.state.repeat))
if repeat is None:
#print(id(ctx), id(ctx.state))
raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.")
mincount = repeat.peek_code(2)
maxcount = repeat.peek_code(3)
ctx.state.string_position = ctx.string_position
count = repeat.count + 1
#self._log(ctx, "MAX_UNTIL", count)
if count < mincount:
# not enough matches
repeat.count = count
child_context = repeat.push_new_context(4)
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
yield True
if (count < maxcount or maxcount == MAXREPEAT) \
and ctx.state.string_position != repeat.last_position:
# we may have enough matches, if we can match another item, do so
repeat.count = count
ctx.state.marks_push()
save_last_position = repeat.last_position # zero-width match protection
repeat.last_position = ctx.state.string_position
child_context = repeat.push_new_context(4)
yield False
repeat.last_position = save_last_position
if child_context.has_matched:
ctx.state.marks_pop_discard()
ctx.has_matched = True
yield True
ctx.state.marks_pop()
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
# cannot match more repeated items here. make sure the tail matches
ctx.state.repeat = repeat.previous
child_context = ctx.push_new_context(1)
#print("_sre.py:987:op_max_until")
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
ctx.state.repeat = repeat
ctx.state.string_position = ctx.string_position
yield True
def op_min_until(self, ctx):
# minimizing repeat
# <REPEAT> <skip> <1=min> <2=max> item <MIN_UNTIL> tail
repeat = ctx.state.repeat
if repeat is None:
raise RuntimeError("Internal re error: MIN_UNTIL without REPEAT.")
mincount = repeat.peek_code(2)
maxcount = repeat.peek_code(3)
ctx.state.string_position = ctx.string_position
count = repeat.count + 1
#self._log(ctx, "MIN_UNTIL", count)
if count < mincount:
# not enough matches
repeat.count = count
child_context = repeat.push_new_context(4)
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
yield True
# see if the tail matches
ctx.state.marks_push()
ctx.state.repeat = repeat.previous
child_context = ctx.push_new_context(1)
#print('_sre.py:1022:push new context')
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.state.repeat = repeat
ctx.state.string_position = ctx.string_position
ctx.state.marks_pop()
# match more until tail matches
if count >= maxcount and maxcount != MAXREPEAT:
ctx.has_matched = False
#print('_sre.py:line1022, update context.has_matched variable')
yield True
repeat.count = count
child_context = repeat.push_new_context(4)
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
yield True
def general_op_groupref(self, ctx, decorate=lambda x: x):
group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
if group_start is None or group_end is None or group_end < group_start:
ctx.has_matched = False
return True
while group_start < group_end:
if ctx.at_end() or decorate(ord(ctx.peek_char())) \
!= decorate(ord(ctx.state.string[group_start])):
ctx.has_matched = False
#print('_sre.py:line1042, update context.has_matched variable')
return True
group_start += 1
ctx.skip_char(1)
ctx.skip_code(2)
return True
def op_groupref(self, ctx):
# match backreference
# <GROUPREF> <zero-based group index>
#self._log(ctx, "GROUPREF", ctx.peek_code(1))
return self.general_op_groupref(ctx)
def op_groupref_ignore(self, ctx):
# match backreference case-insensitive
# <GROUPREF_IGNORE> <zero-based group index>
#self._log(ctx, "GROUPREF_IGNORE", ctx.peek_code(1))
return self.general_op_groupref(ctx, ctx.state.lower)
def op_groupref_exists(self, ctx):
# <GROUPREF_EXISTS> <group> <skip> codeyes <JUMP> codeno ...
#self._log(ctx, "GROUPREF_EXISTS", ctx.peek_code(1))
group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
if group_start is None or group_end is None or group_end < group_start:
ctx.skip_code(ctx.peek_code(2) + 1)
else:
ctx.skip_code(3)
return True
def op_assert(self, ctx):
# assert subpattern
# <ASSERT> <skip> <back> <pattern>
#self._log(ctx, "ASSERT", ctx.peek_code(2))
ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
if ctx.state.string_position < 0:
ctx.has_matched = False
yield True
child_context = ctx.push_new_context(3)
yield False
if child_context.has_matched:
ctx.skip_code(ctx.peek_code(1) + 1)
else:
ctx.has_matched = False
yield True
def op_assert_not(self, ctx):
# assert not subpattern
# <ASSERT_NOT> <skip> <back> <pattern>
#self._log(ctx, "ASSERT_NOT", ctx.peek_code(2))
ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
if ctx.state.string_position >= 0:
child_context = ctx.push_new_context(3)
yield False
if child_context.has_matched:
ctx.has_matched = False
yield True
ctx.skip_code(ctx.peek_code(1) + 1)
yield True
def unknown(self, ctx):
#self._log(ctx, "UNKNOWN", ctx.peek_code())
raise RuntimeError("Internal re error. Unknown opcode: %s" % ctx.peek_code())
def check_charset(self, ctx, char):
"""Checks whether a character matches set of arbitrary length. Assumes
the code pointer is at the first member of the set."""
self.set_dispatcher.reset(char)
save_position = ctx.code_position
result = None
while result is None:
result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx)
ctx.code_position = save_position
#print("_sre.py:1123:check_charset", result)
return result
def count_repetitions(self, ctx, maxcount):
"""Returns the number of repetitions of a single item, starting from the
current string position. The code pointer is expected to point to a
REPEAT_ONE operation (with the repeated 4 ahead)."""
count = 0
real_maxcount = ctx.state.end - ctx.string_position
if maxcount < real_maxcount and maxcount != MAXREPEAT:
real_maxcount = maxcount
# XXX could special case every single character pattern here, as in C.
        # This is a general solution, a bit hackish, but works and should be
# efficient.
code_position = ctx.code_position
string_position = ctx.string_position
ctx.skip_code(4)
reset_position = ctx.code_position
while count < real_maxcount:
# this works because the single character pattern is followed by
# a success opcode
ctx.code_position = reset_position
self.dispatch(ctx.peek_code(), ctx)
#print("count_repetitions", ctx.has_matched, count)
if ctx.has_matched is False: # could be None as well
break
count += 1
ctx.has_matched = None
ctx.code_position = code_position
ctx.string_position = string_position
return count
def _log(self, context, opname, *args):
arg_string = ("%s " * len(args)) % args
_log("|%s|%s|%s %s" % (context.pattern_codes,
context.string_position, opname, arg_string))
_OpcodeDispatcher.build_dispatch_table(OPCODES, "op_")
class _CharsetDispatcher(_Dispatcher):
def __init__(self):
self.ch_dispatcher = _ChcodeDispatcher()
def reset(self, char):
self.char = char
self.ok = True
def set_failure(self, ctx):
return not self.ok
def set_literal(self, ctx):
# <LITERAL> <code>
if ctx.peek_code(1) == self.char:
return self.ok
else:
ctx.skip_code(2)
def set_category(self, ctx):
# <CATEGORY> <code>
if self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
return self.ok
else:
ctx.skip_code(2)
def set_charset(self, ctx):
# <CHARSET> <bitmap> (16 bits per code word)
char_code = self.char
ctx.skip_code(1) # point to beginning of bitmap
if CODESIZE == 2:
if char_code < 256 and ctx.peek_code(char_code >> 4) \
& (1 << (char_code & 15)):
return self.ok
ctx.skip_code(16) # skip bitmap
else:
if char_code < 256 and ctx.peek_code(char_code >> 5) \
& (1 << (char_code & 31)):
return self.ok
ctx.skip_code(8) # skip bitmap
def set_range(self, ctx):
# <RANGE> <lower> <upper>
if ctx.peek_code(1) <= self.char <= ctx.peek_code(2):
return self.ok
ctx.skip_code(3)
def set_negate(self, ctx):
self.ok = not self.ok
ctx.skip_code(1)
#fixme brython. array module doesn't exist
def set_bigcharset(self, ctx):
raise NotImplementationError("_sre.py: set_bigcharset, array not implemented")
# <BIGCHARSET> <blockcount> <256 blockindices> <blocks>
char_code = self.char
count = ctx.peek_code(1)
ctx.skip_code(2)
if char_code < 65536:
block_index = char_code >> 8
# NB: there are CODESIZE block indices per bytecode
a = array.array("B")
a.fromstring(array.array(CODESIZE == 2 and "H" or "I",
[ctx.peek_code(block_index // CODESIZE)]).tostring())
block = a[block_index % CODESIZE]
ctx.skip_code(256 // CODESIZE) # skip block indices
block_value = ctx.peek_code(block * (32 // CODESIZE)
+ ((char_code & 255) >> (CODESIZE == 2 and 4 or 5)))
if block_value & (1 << (char_code & ((8 * CODESIZE) - 1))):
return self.ok
else:
ctx.skip_code(256 // CODESIZE) # skip block indices
ctx.skip_code(count * (32 // CODESIZE)) # skip blocks
def unknown(self, ctx):
return False
_CharsetDispatcher.build_dispatch_table(OPCODES, "set_")
class _AtcodeDispatcher(_Dispatcher):
def at_beginning(self, ctx):
return ctx.at_beginning()
at_beginning_string = at_beginning
def at_beginning_line(self, ctx):
return ctx.at_beginning() or _is_linebreak(ctx.peek_char(-1))
def at_end(self, ctx):
return (ctx.remaining_chars() == 1 and ctx.at_linebreak()) or ctx.at_end()
def at_end_line(self, ctx):
return ctx.at_linebreak() or ctx.at_end()
def at_end_string(self, ctx):
return ctx.at_end()
def at_boundary(self, ctx):
return ctx.at_boundary(_is_word)
def at_non_boundary(self, ctx):
return not ctx.at_boundary(_is_word)
def at_loc_boundary(self, ctx):
return ctx.at_boundary(_is_loc_word)
def at_loc_non_boundary(self, ctx):
return not ctx.at_boundary(_is_loc_word)
def at_uni_boundary(self, ctx):
return ctx.at_boundary(_is_uni_word)
def at_uni_non_boundary(self, ctx):
return not ctx.at_boundary(_is_uni_word)
def unknown(self, ctx):
return False
_AtcodeDispatcher.build_dispatch_table(ATCODES, "")
class _ChcodeDispatcher(_Dispatcher):
def category_digit(self, ctx):
return _is_digit(ctx.peek_char())
def category_not_digit(self, ctx):
return not _is_digit(ctx.peek_char())
def category_space(self, ctx):
return _is_space(ctx.peek_char())
def category_not_space(self, ctx):
return not _is_space(ctx.peek_char())
def category_word(self, ctx):
return _is_word(ctx.peek_char())
def category_not_word(self, ctx):
return not _is_word(ctx.peek_char())
def category_linebreak(self, ctx):
return _is_linebreak(ctx.peek_char())
def category_not_linebreak(self, ctx):
return not _is_linebreak(ctx.peek_char())
def category_loc_word(self, ctx):
return _is_loc_word(ctx.peek_char())
def category_loc_not_word(self, ctx):
return not _is_loc_word(ctx.peek_char())
def category_uni_digit(self, ctx):
return ctx.peek_char().isdigit()
def category_uni_not_digit(self, ctx):
return not ctx.peek_char().isdigit()
def category_uni_space(self, ctx):
return ctx.peek_char().isspace()
def category_uni_not_space(self, ctx):
return not ctx.peek_char().isspace()
def category_uni_word(self, ctx):
return _is_uni_word(ctx.peek_char())
def category_uni_not_word(self, ctx):
return not _is_uni_word(ctx.peek_char())
def category_uni_linebreak(self, ctx):
return ord(ctx.peek_char()) in _uni_linebreaks
def category_uni_not_linebreak(self, ctx):
return ord(ctx.peek_char()) not in _uni_linebreaks
def unknown(self, ctx):
return False
_ChcodeDispatcher.build_dispatch_table(CHCODES, "")
_ascii_char_info = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2,
2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0,
0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 ]
def _is_digit(char):
code = ord(char)
return code < 128 and _ascii_char_info[code] & 1
def _is_space(char):
code = ord(char)
return code < 128 and _ascii_char_info[code] & 2
def _is_word(char):
# NB: non-ASCII chars aren't words according to _sre.c
code = ord(char)
return code < 128 and _ascii_char_info[code] & 16
def _is_loc_word(char):
return (not (ord(char) & ~255) and char.isalnum()) or char == '_'
def _is_uni_word(char):
# not valid in python 3
#return unichr(ord(char)).isalnum() or char == '_'
return chr(ord(char)).isalnum() or char == '_'
def _is_linebreak(char):
return char == "\n"
# Static list of all unicode codepoints reported by Py_UNICODE_ISLINEBREAK.
_uni_linebreaks = [10, 13, 28, 29, 30, 133, 8232, 8233]
def _log(message):
if 0:
print(message)
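# A tiny self-check sketch for getlower (illustration only): code points below
# 128 are lowered regardless of flags, per the branch in getlower() above.
if __name__ == '__main__':
    assert getlower(ord('A'), 0) == ord('a')
    assert getlower(ord('A'), SRE_FLAG_UNICODE) == ord('a')
    assert getlower(0xC0, 0) == 0xC0  # 'À' untouched without UNICODE/LOCALE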
|
Queuer/queue-vision
|
refs/heads/master
|
recognition/utils/rect.py
|
1
|
class Rect(object):
def __init__(self, cx, cy, width, height, confidence):
self.cx = cx
self.cy = cy
self.width = width
self.height = height
self.confidence = confidence
self.true_confidence = confidence
def overlaps(self, other):
if abs(self.cx - other.cx) > (self.width + other.width) / 1.5:
return False
elif abs(self.cy - other.cy) > (self.height + other.height) / 2.0:
return False
else:
return True
def distance(self, other):
return sum(map(abs, [self.cx - other.cx, self.cy - other.cy,
self.width - other.width, self.height - other.height]))
def intersection(self, other):
left = max(self.cx - self.width / 2., other.cx - other.width / 2.)
right = min(self.cx + self.width / 2., other.cx + other.width / 2.)
width = max(right - left, 0)
top = max(self.cy - self.height / 2., other.cy - other.height / 2.)
bottom = min(self.cy + self.height / 2., other.cy + other.height / 2.)
height = max(bottom - top, 0)
return width * height
def area(self):
return self.height * self.width
def union(self, other):
return self.area() + other.area() - self.intersection(other)
def iou(self, other):
return self.intersection(other) / self.union(other)
def __eq__(self, other):
return (self.cx == other.cx and
self.cy == other.cy and
self.width == other.width and
self.height == other.height and
self.confidence == other.confidence)
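# A small usage sketch (values are illustrative): two unit squares offset by
# half a side overlap with IoU 1/3.
if __name__ == '__main__':
    a = Rect(0.0, 0.0, 1.0, 1.0, confidence=1.0)
    b = Rect(0.5, 0.0, 1.0, 1.0, confidence=1.0)
    print(a.intersection(b))  # 0.5
    print(a.iou(b))           # 0.5 / (1 + 1 - 0.5) = 1/3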
|
mrkm4ntr/incubator-airflow
|
refs/heads/master
|
airflow/providers/google/cloud/example_dags/example_cloud_sql_query.py
|
10
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that performs a query in a Cloud SQL instance.
This DAG relies on the following OS environment variables
* GCP_PROJECT_ID - Google Cloud project for the Cloud SQL instance
* GCP_REGION - Google Cloud region where the database is created
*
* GCSQL_POSTGRES_INSTANCE_NAME - Name of the postgres Cloud SQL instance
* GCSQL_POSTGRES_USER - Name of the postgres database user
* GCSQL_POSTGRES_PASSWORD - Password of the postgres database user
* GCSQL_POSTGRES_PUBLIC_IP - Public IP of the Postgres database
* GCSQL_POSTGRES_PUBLIC_PORT - Port of the postgres database
*
* GCSQL_MYSQL_INSTANCE_NAME - Name of the mysql Cloud SQL instance
* GCSQL_MYSQL_USER - Name of the mysql database user
* GCSQL_MYSQL_PASSWORD - Password of the mysql database user
* GCSQL_MYSQL_PUBLIC_IP - Public IP of the mysql database
* GCSQL_MYSQL_PUBLIC_PORT - Port of the mysql database
"""
import os
import subprocess
from os.path import expanduser
from urllib.parse import quote_plus
from airflow import models
from airflow.providers.google.cloud.operators.cloud_sql import CloudSQLExecuteQueryOperator
from airflow.utils.dates import days_ago
GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'example-project')
GCP_REGION = os.environ.get('GCP_REGION', 'europe-west-1b')
GCSQL_POSTGRES_INSTANCE_NAME_QUERY = os.environ.get('GCSQL_POSTGRES_INSTANCE_NAME_QUERY', 'testpostgres')
GCSQL_POSTGRES_DATABASE_NAME = os.environ.get('GCSQL_POSTGRES_DATABASE_NAME', 'postgresdb')
GCSQL_POSTGRES_USER = os.environ.get('GCSQL_POSTGRES_USER', 'postgres_user')
GCSQL_POSTGRES_PASSWORD = os.environ.get('GCSQL_POSTGRES_PASSWORD', 'password')
GCSQL_POSTGRES_PUBLIC_IP = os.environ.get('GCSQL_POSTGRES_PUBLIC_IP', '0.0.0.0')
GCSQL_POSTGRES_PUBLIC_PORT = os.environ.get('GCSQL_POSTGRES_PUBLIC_PORT', 5432)
GCSQL_POSTGRES_CLIENT_CERT_FILE = os.environ.get(
'GCSQL_POSTGRES_CLIENT_CERT_FILE', ".key/postgres-client-cert.pem"
)
GCSQL_POSTGRES_CLIENT_KEY_FILE = os.environ.get(
'GCSQL_POSTGRES_CLIENT_KEY_FILE', ".key/postgres-client-key.pem"
)
GCSQL_POSTGRES_SERVER_CA_FILE = os.environ.get('GCSQL_POSTGRES_SERVER_CA_FILE', ".key/postgres-server-ca.pem")
GCSQL_MYSQL_INSTANCE_NAME_QUERY = os.environ.get('GCSQL_MYSQL_INSTANCE_NAME_QUERY', 'testmysql')
GCSQL_MYSQL_DATABASE_NAME = os.environ.get('GCSQL_MYSQL_DATABASE_NAME', 'mysqldb')
GCSQL_MYSQL_USER = os.environ.get('GCSQL_MYSQL_USER', 'mysql_user')
GCSQL_MYSQL_PASSWORD = os.environ.get('GCSQL_MYSQL_PASSWORD', 'password')
GCSQL_MYSQL_PUBLIC_IP = os.environ.get('GCSQL_MYSQL_PUBLIC_IP', '0.0.0.0')
GCSQL_MYSQL_PUBLIC_PORT = os.environ.get('GCSQL_MYSQL_PUBLIC_PORT', 3306)
GCSQL_MYSQL_CLIENT_CERT_FILE = os.environ.get('GCSQL_MYSQL_CLIENT_CERT_FILE', ".key/mysql-client-cert.pem")
GCSQL_MYSQL_CLIENT_KEY_FILE = os.environ.get('GCSQL_MYSQL_CLIENT_KEY_FILE', ".key/mysql-client-key.pem")
GCSQL_MYSQL_SERVER_CA_FILE = os.environ.get('GCSQL_MYSQL_SERVER_CA_FILE', ".key/mysql-server-ca.pem")
SQL = [
'CREATE TABLE IF NOT EXISTS TABLE_TEST (I INTEGER)',
'CREATE TABLE IF NOT EXISTS TABLE_TEST (I INTEGER)', # shows warnings logged
'INSERT INTO TABLE_TEST VALUES (0)',
'CREATE TABLE IF NOT EXISTS TABLE_TEST2 (I INTEGER)',
'DROP TABLE TABLE_TEST',
'DROP TABLE TABLE_TEST2',
]
# [START howto_operator_cloudsql_query_connections]
HOME_DIR = expanduser("~")
def get_absolute_path(path):
"""
    Return the absolute path, resolving relative paths against the home directory.
"""
if path.startswith("/"):
return path
else:
return os.path.join(HOME_DIR, path)
postgres_kwargs = dict(
user=quote_plus(GCSQL_POSTGRES_USER),
password=quote_plus(GCSQL_POSTGRES_PASSWORD),
public_port=GCSQL_POSTGRES_PUBLIC_PORT,
public_ip=quote_plus(GCSQL_POSTGRES_PUBLIC_IP),
project_id=quote_plus(GCP_PROJECT_ID),
location=quote_plus(GCP_REGION),
instance=quote_plus(GCSQL_POSTGRES_INSTANCE_NAME_QUERY),
database=quote_plus(GCSQL_POSTGRES_DATABASE_NAME),
client_cert_file=quote_plus(get_absolute_path(GCSQL_POSTGRES_CLIENT_CERT_FILE)),
client_key_file=quote_plus(get_absolute_path(GCSQL_POSTGRES_CLIENT_KEY_FILE)),
server_ca_file=quote_plus(get_absolute_path(GCSQL_POSTGRES_SERVER_CA_FILE)),
)
# The connections below are created using one of the standard approaches - via environment
# variables named AIRFLOW_CONN_* . The connections can also be created in the database
# of AIRFLOW (using command line or UI).
# Postgres: connect via proxy over TCP
os.environ['AIRFLOW_CONN_PROXY_POSTGRES_TCP'] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=postgres&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=True&"
"sql_proxy_use_tcp=True".format(**postgres_kwargs)
)
# Postgres: connect via proxy over UNIX socket (specific proxy version)
os.environ['AIRFLOW_CONN_PROXY_POSTGRES_SOCKET'] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=postgres&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=True&"
"sql_proxy_version=v1.13&"
"sql_proxy_use_tcp=False".format(**postgres_kwargs)
)
# Postgres: connect directly via TCP (non-SSL)
os.environ['AIRFLOW_CONN_PUBLIC_POSTGRES_TCP'] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=postgres&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=False&"
"use_ssl=False".format(**postgres_kwargs)
)
# Postgres: connect directly via TCP (SSL)
os.environ['AIRFLOW_CONN_PUBLIC_POSTGRES_TCP_SSL'] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=postgres&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=False&"
"use_ssl=True&"
"sslcert={client_cert_file}&"
"sslkey={client_key_file}&"
"sslrootcert={server_ca_file}".format(**postgres_kwargs)
)
mysql_kwargs = dict(
user=quote_plus(GCSQL_MYSQL_USER),
password=quote_plus(GCSQL_MYSQL_PASSWORD),
public_port=GCSQL_MYSQL_PUBLIC_PORT,
public_ip=quote_plus(GCSQL_MYSQL_PUBLIC_IP),
project_id=quote_plus(GCP_PROJECT_ID),
location=quote_plus(GCP_REGION),
instance=quote_plus(GCSQL_MYSQL_INSTANCE_NAME_QUERY),
database=quote_plus(GCSQL_MYSQL_DATABASE_NAME),
client_cert_file=quote_plus(get_absolute_path(GCSQL_MYSQL_CLIENT_CERT_FILE)),
client_key_file=quote_plus(get_absolute_path(GCSQL_MYSQL_CLIENT_KEY_FILE)),
server_ca_file=quote_plus(get_absolute_path(GCSQL_MYSQL_SERVER_CA_FILE)),
)
# MySQL: connect via proxy over TCP (specific proxy version)
os.environ['AIRFLOW_CONN_PROXY_MYSQL_TCP'] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=mysql&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=True&"
"sql_proxy_version=v1.13&"
"sql_proxy_use_tcp=True".format(**mysql_kwargs)
)
# MySQL: connect via proxy over UNIX socket using pre-downloaded Cloud SQL Proxy binary
try:
sql_proxy_binary_path = subprocess.check_output(['which', 'cloud_sql_proxy']).decode('utf-8').rstrip()
except subprocess.CalledProcessError:
sql_proxy_binary_path = "/tmp/anyhow_download_cloud_sql_proxy"
os.environ['AIRFLOW_CONN_PROXY_MYSQL_SOCKET'] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=mysql&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=True&"
"sql_proxy_binary_path={sql_proxy_binary_path}&"
"sql_proxy_use_tcp=False".format(sql_proxy_binary_path=quote_plus(sql_proxy_binary_path), **mysql_kwargs)
)
# MySQL: connect directly via TCP (non-SSL)
os.environ['AIRFLOW_CONN_PUBLIC_MYSQL_TCP'] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=mysql&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=False&"
"use_ssl=False".format(**mysql_kwargs)
)
# MySQL: connect directly via TCP (SSL) and with fixed Cloud SQL Proxy binary path
os.environ['AIRFLOW_CONN_PUBLIC_MYSQL_TCP_SSL'] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=mysql&"
"project_id={project_id}&"
"location={location}&"
"instance={instance}&"
"use_proxy=False&"
"use_ssl=True&"
"sslcert={client_cert_file}&"
"sslkey={client_key_file}&"
"sslrootcert={server_ca_file}".format(**mysql_kwargs)
)
# Special case: MySQL: connect directly via TCP (SSL) and with fixed Cloud SQL
# Proxy binary path AND with missing project_id
os.environ['AIRFLOW_CONN_PUBLIC_MYSQL_TCP_SSL_NO_PROJECT_ID'] = (
"gcpcloudsql://{user}:{password}@{public_ip}:{public_port}/{database}?"
"database_type=mysql&"
"location={location}&"
"instance={instance}&"
"use_proxy=False&"
"use_ssl=True&"
"sslcert={client_cert_file}&"
"sslkey={client_key_file}&"
"sslrootcert={server_ca_file}".format(**mysql_kwargs)
)
# [END howto_operator_cloudsql_query_connections]
# [START howto_operator_cloudsql_query_operators]
connection_names = [
"proxy_postgres_tcp",
"proxy_postgres_socket",
"public_postgres_tcp",
"public_postgres_tcp_ssl",
"proxy_mysql_tcp",
"proxy_mysql_socket",
"public_mysql_tcp",
"public_mysql_tcp_ssl",
"public_mysql_tcp_ssl_no_project_id",
]
tasks = []
with models.DAG(
dag_id='example_gcp_sql_query',
schedule_interval=None,
start_date=days_ago(1),
tags=['example'],
) as dag:
prev_task = None
for connection_name in connection_names:
task = CloudSQLExecuteQueryOperator(
gcp_cloudsql_conn_id=connection_name, task_id="example_gcp_sql_task_" + connection_name, sql=SQL
)
tasks.append(task)
if prev_task:
prev_task >> task
prev_task = task
# [END howto_operator_cloudsql_query_operators]
|
FabioRosado/opsdroid
|
refs/heads/master
|
opsdroid/testing/const.py
|
3
|
"""Constants for use in testing."""
MINIMAL_CONFIG = {
"connectors": {
"mock": {"module": "opsdroid.testing.mockmodules.connectors.mocked"}
},
"skills": {"hello": {"module": "opsdroid.testing.mockmodules.skills.hello"}},
}
|
kater169/libcloud
|
refs/heads/trunk
|
docs/examples/compute/cloudwatt/create_volume.py
|
55
|
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
Cloudwatt = get_driver(Provider.CLOUDWATT)
driver = Cloudwatt('your_email', 'your_password', 'your_tenant_id',
tenant_name='your_tenant_name')
node = driver.list_nodes()[0]
volume = driver.create_volume(10, 'your_volume')
driver.attach_volume(node, volume)
|
duducosmos/pgs4a
|
refs/heads/master
|
python-install/lib/python2.7/test/test_StringIO.py
|
14
|
# Tests StringIO and cStringIO
import unittest
import StringIO
import cStringIO
import types
from test import test_support
class TestGenericStringIO(unittest.TestCase):
# use a class variable MODULE to define which module is being tested
# Line of data to test as string
_line = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!'
# Constructor to use for the test data (._line is passed to this
# constructor)
constructor = str
def setUp(self):
self._line = self.constructor(self._line)
self._lines = self.constructor((self._line + '\n') * 5)
self._fp = self.MODULE.StringIO(self._lines)
def test_reads(self):
eq = self.assertEqual
self.assertRaises(TypeError, self._fp.seek)
eq(self._fp.read(10), self._line[:10])
eq(self._fp.readline(), self._line[10:] + '\n')
eq(len(self._fp.readlines(60)), 2)
self._fp.seek(0)
eq(self._fp.readline(-1), self._line + '\n')
def test_writes(self):
f = self.MODULE.StringIO()
self.assertRaises(TypeError, f.seek)
f.write(self._line[:6])
f.seek(3)
f.write(self._line[20:26])
f.write(self._line[52])
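        # 'abcdef' with 'uvwxyz' overwritten from offset 3 and '!' appended
        # at offset 9 leaves 'abcuvwxyz!'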
self.assertEqual(f.getvalue(), 'abcuvwxyz!')
def test_writelines(self):
f = self.MODULE.StringIO()
f.writelines([self._line[0], self._line[1], self._line[2]])
f.seek(0)
self.assertEqual(f.getvalue(), 'abc')
def test_writelines_error(self):
def errorGen():
yield 'a'
raise KeyboardInterrupt()
f = self.MODULE.StringIO()
self.assertRaises(KeyboardInterrupt, f.writelines, errorGen())
def test_truncate(self):
eq = self.assertEqual
f = self.MODULE.StringIO()
f.write(self._lines)
f.seek(10)
f.truncate()
eq(f.getvalue(), 'abcdefghij')
f.truncate(5)
eq(f.getvalue(), 'abcde')
f.write('xyz')
eq(f.getvalue(), 'abcdexyz')
self.assertRaises(IOError, f.truncate, -1)
f.close()
self.assertRaises(ValueError, f.write, 'frobnitz')
def test_closed_flag(self):
f = self.MODULE.StringIO()
self.assertEqual(f.closed, False)
f.close()
self.assertEqual(f.closed, True)
f = self.MODULE.StringIO("abc")
self.assertEqual(f.closed, False)
f.close()
self.assertEqual(f.closed, True)
def test_isatty(self):
f = self.MODULE.StringIO()
self.assertRaises(TypeError, f.isatty, None)
self.assertEqual(f.isatty(), False)
f.close()
self.assertRaises(ValueError, f.isatty)
def test_iterator(self):
eq = self.assertEqual
unless = self.assertTrue
eq(iter(self._fp), self._fp)
# Does this object support the iteration protocol?
unless(hasattr(self._fp, '__iter__'))
unless(hasattr(self._fp, 'next'))
i = 0
for line in self._fp:
eq(line, self._line + '\n')
i += 1
eq(i, 5)
self._fp.close()
self.assertRaises(ValueError, self._fp.next)
def test_getvalue(self):
self._fp.close()
self.assertRaises(ValueError, self._fp.getvalue)
class TestStringIO(TestGenericStringIO):
MODULE = StringIO
def test_unicode(self):
if not test_support.have_unicode: return
# The StringIO module also supports concatenating Unicode
# snippets to larger Unicode strings. This is tested by this
# method. Note that cStringIO does not support this extension.
f = self.MODULE.StringIO()
f.write(self._line[:6])
f.seek(3)
f.write(unicode(self._line[20:26]))
f.write(unicode(self._line[52]))
s = f.getvalue()
self.assertEqual(s, unicode('abcuvwxyz!'))
self.assertEqual(type(s), types.UnicodeType)
class TestcStringIO(TestGenericStringIO):
MODULE = cStringIO
import sys
if sys.platform.startswith('java'):
# Jython doesn't have a buffer object, so we just do a useless
# fake of the buffer tests.
buffer = str
class TestBufferStringIO(TestStringIO):
constructor = buffer
class TestBuffercStringIO(TestcStringIO):
constructor = buffer
def test_main():
test_support.run_unittest(TestStringIO, TestcStringIO)
with test_support.check_py3k_warnings(("buffer.. not supported",
DeprecationWarning)):
test_support.run_unittest(TestBufferStringIO, TestBuffercStringIO)
if __name__ == '__main__':
test_main()
|
leorochael/odoo
|
refs/heads/8.0
|
addons/web_api/__openerp__.py
|
384
|
{
'name': 'OpenERP Web API',
'category': 'Hidden',
'description': """
OpenERP Web API.
================
""",
'version': '2.0',
'depends': ['web'],
'installable': True,
'auto_install': False,
}
|
gdgellatly/OCB1
|
refs/heads/7.0
|
addons/hr_attendance/__init__.py
|
434
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_attendance
import wizard
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
slank/ansible
|
refs/heads/devel
|
test/sanity/validate-modules/test_validate_modules_regex.py
|
19
|
#!/usr/bin/env python
# This is a standalone test for the regex inside validate-modules
# It is not suitable to add to the make tests target because the
# file under test is outside the test's sys.path AND has a hyphen
# in the name making it unimportable.
#
# To execute this by hand:
# 1) cd <checkoutdir>
# 2) source hacking/env-setup
# 3) PYTHONPATH=./lib nosetests -d -w test -v --nocapture sanity/validate-modules
import os
import re
from ansible.compat.tests import unittest
#TYPE_REGEX = re.compile(r'.*\stype\(.*')
#TYPE_REGEX = re.compile(r'.*(if|or)\stype\(.*')
#TYPE_REGEX = re.compile(r'.*(if|or)(\s+.*|\s+)type\(.*')
#TYPE_REGEX = re.compile(r'.*(if|or)(\s+.*|\s+)type\(.*')
#TYPE_REGEX = re.compile(r'.*(if|\sor)(\s+.*|\s+)type\(.*')
#TYPE_REGEX = re.compile(r'.*(if|\sor)(\s+.*|\s+)(?<!_)type\(.*')
TYPE_REGEX = re.compile(r'.*(if|or)(\s+.*|\s+)(?<!_)(?<!str\()type\(.*')
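# The negative lookbehinds keep helper calls such as get_interface_type(...)
# and casts such as str(type(...)) from matching, while bare type()
# comparisons after "if"/"or" still match.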
class TestValidateModulesRegex(unittest.TestCase):
def test_type_regex(self):
# each of these examples needs to be matched or not matched
checks = [
['if type(foo) is Bar', True],
['if Bar is type(foo)', True],
['if type(foo) is not Bar', True],
['if Bar is not type(foo)', True],
['if type(foo) == Bar', True],
['if Bar == type(foo)', True],
['if type(foo)==Bar', True],
['if Bar==type(foo)', True],
['if type(foo) != Bar', True],
['if Bar != type(foo)', True],
['if type(foo)!=Bar', True],
['if Bar!=type(foo)', True],
['if foo or type(bar) != Bar', True],
['x = type(foo)', False],
["error = err.message + ' ' + str(err) + ' - ' + str(type(err))", False],
#cloud/amazon/ec2_group.py
["module.fail_json(msg='Invalid rule parameter type [%s].' % type(rule))", False],
#files/patch.py
["p = type('Params', (), module.params)", False], #files/patch.py
#system/osx_defaults.py
["if self.current_value is not None and not isinstance(self.current_value, type(self.value)):", True],
#system/osx_defaults.py
['raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)', False],
#network/nxos/nxos_interface.py
["if get_interface_type(interface) == 'svi':", False],
]
        for idc, check in enumerate(checks):
cstring = check[0]
cexpected = check[1]
match = TYPE_REGEX.match(cstring)
if cexpected and not match:
assert False, "%s should have matched" % cstring
elif not cexpected and match:
assert False, "%s should not have matched" % cstring
|
cainmatt/django
|
refs/heads/master
|
django/core/management/sql.py
|
399
|
from __future__ import unicode_literals
from django.apps import apps
from django.db import models
def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True, include_views=False)
else:
tables = connection.introspection.table_names(include_views=False)
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade)
return statements
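# Illustrative call (assumes a configured default database connection):
#   from django.core.management.color import no_style
#   from django.db import connection
#   statements = sql_flush(no_style(), connection)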
def emit_pre_migrate_signal(verbosity, interactive, db):
# Emit the pre_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running pre-migrate handlers for application %s" % app_config.label)
models.signals.pre_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
def emit_post_migrate_signal(verbosity, interactive, db):
# Emit the post_migrate signal for every application.
for app_config in apps.get_app_configs():
if app_config.models_module is None:
continue
if verbosity >= 2:
print("Running post-migrate handlers for application %s" % app_config.label)
models.signals.post_migrate.send(
sender=app_config,
app_config=app_config,
verbosity=verbosity,
interactive=interactive,
using=db)
|
Tomtomgo/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/suggestnominations.py
|
119
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Copyright (c) 2011 Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from optparse import make_option
import re
from webkitpy.common.checkout.changelog import ChangeLogEntry
from webkitpy.common.config.committers import CommitterList
from webkitpy.tool.grammar import join_with_separators
from webkitpy.tool.multicommandtool import Command
class CommitLogError(Exception):
def __init__(self):
Exception.__init__(self)
class CommitLogMissingReviewer(CommitLogError):
def __init__(self):
CommitLogError.__init__(self)
class AbstractCommitLogCommand(Command):
_leading_indent_regexp = re.compile(r"^[ ]{4}", re.MULTILINE)
_reviewed_by_regexp = re.compile(ChangeLogEntry.reviewed_by_regexp, re.MULTILINE)
_patch_by_regexp = re.compile(r'^Patch by (?P<name>.+?)\s+<(?P<email>[^<>]+)> on (?P<date>\d{4}-\d{2}-\d{2})$', re.MULTILINE)
_committer_regexp = re.compile(r'^Author: (?P<email>\S+)\s+<[^>]+>$', re.MULTILINE)
_date_regexp = re.compile(r'^Date: (?P<date>\d{4}-\d{2}-\d{2}) (?P<time>\d{2}:\d{2}:\d{2}) [\+\-]\d{4}$', re.MULTILINE)
_revision_regexp = re.compile(r'^git-svn-id: http://svn.webkit.org/repository/webkit/trunk@(?P<svnid>\d+) (?P<gitid>[0-9a-f\-]{36})$', re.MULTILINE)
def __init__(self, options=None):
options = options or []
options += [
make_option("--max-commit-age", action="store", dest="max_commit_age", type="int", default=9, help="Specify maximum commit age to consider (in months)."),
]
options = sorted(options, cmp=lambda a, b: cmp(a._long_opts, b._long_opts))
super(AbstractCommitLogCommand, self).__init__(options=options)
# FIXME: This should probably be on the tool somewhere.
self._committer_list = CommitterList()
def _init_options(self, options):
self.verbose = options.verbose
self.max_commit_age = options.max_commit_age
# FIXME: This should move to scm.py
def _recent_commit_messages(self):
git_log = self._tool.executive.run_command(['git', 'log', '--date=iso', '--since="%s months ago"' % self.max_commit_age])
messages = re.compile(r"^commit \w{40}$", re.MULTILINE).split(git_log)[1:] # Ignore the first message which will be empty.
for message in messages:
# Unindent all the lines
(message, _) = self._leading_indent_regexp.subn("", message)
yield message.lstrip() # Remove any leading newlines from the log message.
def _author_name_from_email(self, email):
contributor = self._committer_list.contributor_by_email(email)
return contributor.full_name if contributor else None
def _contributor_from_email(self, email):
contributor = self._committer_list.contributor_by_email(email)
return contributor if contributor else None
def _parse_commit_message(self, commit_message):
committer_match = self._committer_regexp.search(commit_message)
if not committer_match:
raise CommitLogError
committer_email = committer_match.group('email')
if not committer_email:
raise CommitLogError
committer = self._contributor_from_email(committer_email)
if not committer:
raise CommitLogError
commit_date_match = self._date_regexp.search(commit_message)
if not commit_date_match:
raise CommitLogError
commit_date = commit_date_match.group('date')
revision_match = self._revision_regexp.search(commit_message)
if not revision_match:
raise CommitLogError
revision = revision_match.group('svnid')
# Look for "Patch by" line first, which is used for non-committer contributors;
# otherwise, use committer info determined above.
author_match = self._patch_by_regexp.search(commit_message)
if not author_match:
author_match = committer_match
author_email = author_match.group('email')
if not author_email:
author_email = committer_email
author_name = author_match.group('name') if 'name' in author_match.groupdict() else None
if not author_name:
author_name = self._author_name_from_email(author_email)
if not author_name:
raise CommitLogError
contributor = self._contributor_from_email(author_email)
if contributor and author_name != contributor.full_name and contributor.full_name:
author_name = contributor.full_name
reviewer_match = self._reviewed_by_regexp.search(commit_message)
if not reviewer_match:
raise CommitLogMissingReviewer
reviewers = reviewer_match.group('reviewer')
return {
'committer': committer,
'commit_date': commit_date,
'revision': revision,
'author_email': author_email,
'author_name': author_name,
'contributor': contributor,
'reviewers': reviewers,
}
class SuggestNominations(AbstractCommitLogCommand):
name = "suggest-nominations"
help_text = "Suggest contributors for committer/reviewer nominations"
def __init__(self):
options = [
make_option("--committer-minimum", action="store", dest="committer_minimum", type="int", default=10, help="Specify minimum patch count for Committer nominations."),
make_option("--reviewer-minimum", action="store", dest="reviewer_minimum", type="int", default=80, help="Specify minimum patch count for Reviewer nominations."),
make_option("--show-commits", action="store_true", dest="show_commits", default=False, help="Show commit history with nomination suggestions."),
]
super(SuggestNominations, self).__init__(options=options)
def _init_options(self, options):
super(SuggestNominations, self)._init_options(options)
self.committer_minimum = options.committer_minimum
self.reviewer_minimum = options.reviewer_minimum
self.show_commits = options.show_commits
def _count_commit(self, commit, analysis):
author_name = commit['author_name']
author_email = commit['author_email']
revision = commit['revision']
commit_date = commit['commit_date']
# See if we already have a contributor with this author_name or email
counter_by_name = analysis['counters_by_name'].get(author_name)
counter_by_email = analysis['counters_by_email'].get(author_email)
if counter_by_name:
if counter_by_email:
if counter_by_name != counter_by_email:
                    # Merge these two counters. This is for the case where we had
                    # John Smith (jsmith@gmail.com) and Jonathan Smith (jsmith@apple.com)
                    # and just found a John Smith (jsmith@apple.com). Now we know the
                    # two names refer to the same person.
counter_by_name['names'] |= counter_by_email['names']
counter_by_name['emails'] |= counter_by_email['emails']
counter_by_name['count'] += counter_by_email.get('count', 0)
analysis['counters_by_email'][author_email] = counter_by_name
else:
# Add email to the existing counter
analysis['counters_by_email'][author_email] = counter_by_name
counter_by_name['emails'] |= set([author_email])
else:
if counter_by_email:
# Add name to the existing counter
analysis['counters_by_name'][author_name] = counter_by_email
counter_by_email['names'] |= set([author_name])
else:
# Create new counter
new_counter = {'names': set([author_name]), 'emails': set([author_email]), 'latest_name': author_name, 'latest_email': author_email, 'commits': ""}
analysis['counters_by_name'][author_name] = new_counter
analysis['counters_by_email'][author_email] = new_counter
assert(analysis['counters_by_name'][author_name] == analysis['counters_by_email'][author_email])
counter = analysis['counters_by_name'][author_name]
counter['count'] = counter.get('count', 0) + 1
if revision.isdigit():
revision = "http://trac.webkit.org/changeset/" + revision
counter['commits'] += " commit: %s on %s by %s (%s)\n" % (revision, commit_date, author_name, author_email)
def _count_recent_patches(self):
analysis = {
'counters_by_name': {},
'counters_by_email': {},
}
for commit_message in self._recent_commit_messages():
try:
self._count_commit(self._parse_commit_message(commit_message), analysis)
            except CommitLogError:
continue
return analysis['counters_by_email']
def _collect_nominations(self, counters_by_email):
nominations = []
for author_email, counter in counters_by_email.items():
if author_email != counter['latest_email']:
continue
roles = []
contributor = self._committer_list.contributor_by_email(author_email)
author_name = counter['latest_name']
patch_count = counter['count']
if patch_count >= self.committer_minimum and (not contributor or not contributor.can_commit):
roles.append("committer")
if patch_count >= self.reviewer_minimum and contributor and contributor.can_commit and not contributor.can_review:
roles.append("reviewer")
if roles:
nominations.append({
'roles': roles,
'author_name': author_name,
'author_email': author_email,
'patch_count': patch_count,
})
return nominations
def _print_nominations(self, nominations, counters_by_email):
def nomination_cmp(a_nomination, b_nomination):
roles_result = cmp(a_nomination['roles'], b_nomination['roles'])
if roles_result:
return -roles_result
count_result = cmp(a_nomination['patch_count'], b_nomination['patch_count'])
if count_result:
return -count_result
return cmp(a_nomination['author_name'], b_nomination['author_name'])
for nomination in sorted(nominations, nomination_cmp):
            # This is a little bit of a hack, but it's convenient to just pass the nomination dictionary to the formatting operator.
nomination['roles_string'] = join_with_separators(nomination['roles']).upper()
print "%(roles_string)s: %(author_name)s (%(author_email)s) has %(patch_count)s reviewed patches" % nomination
counter = counters_by_email[nomination['author_email']]
if self.show_commits:
print counter['commits']
def _print_counts(self, counters_by_email):
def counter_cmp(a_tuple, b_tuple):
# split the tuples
# the second element is the "counter" structure
_, a_counter = a_tuple
_, b_counter = b_tuple
count_result = cmp(a_counter['count'], b_counter['count'])
if count_result:
return -count_result
return cmp(a_counter['latest_name'].lower(), b_counter['latest_name'].lower())
for author_email, counter in sorted(counters_by_email.items(), counter_cmp):
if author_email != counter['latest_email']:
continue
contributor = self._committer_list.contributor_by_email(author_email)
author_name = counter['latest_name']
patch_count = counter['count']
counter['names'] = counter['names'] - set([author_name])
counter['emails'] = counter['emails'] - set([author_email])
alias_list = []
for alias in counter['names']:
alias_list.append(alias)
for alias in counter['emails']:
alias_list.append(alias)
if alias_list:
print "CONTRIBUTOR: %s (%s) has %d reviewed patches %s" % (author_name, author_email, patch_count, "(aliases: " + ", ".join(alias_list) + ")")
else:
print "CONTRIBUTOR: %s (%s) has %d reviewed patches" % (author_name, author_email, patch_count)
return
def execute(self, options, args, tool):
self._init_options(options)
patch_counts = self._count_recent_patches()
nominations = self._collect_nominations(patch_counts)
self._print_nominations(nominations, patch_counts)
if self.verbose:
self._print_counts(patch_counts)
if __name__ == "__main__":
SuggestNominations()
|
walterreade/scikit-learn
|
refs/heads/master
|
sklearn/linear_model/tests/test_omp.py
|
272
|
# Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
    gamma_empty_gram = ignore_warnings(orthogonal_mp_gram)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
|
nthiep/global-ssh-server
|
refs/heads/master
|
lib/python2.7/site-packages/django/db/backends/mysql/creation.py
|
210
|
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
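    # For example, a CharField with max_length=100 interpolates to
    # 'varchar(100)':  'varchar(%(max_length)s)' % {'max_length': 100}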
data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'GenericIPAddressField': 'char(39)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
}
def sql_table_creation_suffix(self):
suffix = []
if self.connection.settings_dict['TEST_CHARSET']:
suffix.append('CHARACTER SET %s' % self.connection.settings_dict['TEST_CHARSET'])
if self.connection.settings_dict['TEST_COLLATION']:
suffix.append('COLLATE %s' % self.connection.settings_dict['TEST_COLLATION'])
return ' '.join(suffix)
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"All inline references are pending under MySQL"
return [], True
def sql_for_inline_many_to_many_references(self, model, field, style):
from django.db import models
opts = model._meta
qn = self.connection.ops.quote_name
table_output = [
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_column_name())),
style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL')),
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_reverse_name())),
style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL'))
]
deferred = [
(field.m2m_db_table(), field.m2m_column_name(), opts.db_table,
opts.pk.column),
(field.m2m_db_table(), field.m2m_reverse_name(),
field.rel.to._meta.db_table, field.rel.to._meta.pk.column)
]
return table_output, deferred
|
fighterlyt/bite-project
|
refs/heads/master
|
deps/gdata-python-client/tests/gdata_tests/contentforshopping_test.py
|
39
|
#!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Content API for Shopping tests"""
__author__ = 'afshar (Ali Afshar)'
import unittest
from gdata.contentforshopping import client, data
class CFSClientTest(unittest.TestCase):
def test_uri_missing_account_id(self):
c = client.ContentForShoppingClient()
self.assertRaises(ValueError, c._create_uri,
account_id=None, projection=None, resource='a/b')
def test_uri_bad_projection(self):
c = client.ContentForShoppingClient()
self.assertRaises(ValueError, c._create_uri,
account_id='123', projection='banana', resource='a/b')
def test_good_default_account_id(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id=None, projection=None, resource='a/b')
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/a/b/generic')
def test_override_request_account_id(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id='321', projection=None, resource='a/b')
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/321/a/b/generic')
def test_default_projection(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id=None, projection=None, resource='a/b')
self.assertEqual(c.cfs_projection, 'generic')
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/a/b/generic')
def test_default_projection_change(self):
c = client.ContentForShoppingClient(account_id='123', projection='schema')
uri = c._create_uri(account_id=None, projection=None, resource='a/b')
self.assertEqual(c.cfs_projection, 'schema')
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/a/b/schema')
def test_request_projection(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id=None, projection='schema', resource='a/b')
self.assertEqual(c.cfs_projection, 'generic')
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/a/b/schema')
def test_request_resource(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id=None, projection=None, resource='x/y/z')
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/x/y/z/generic')
def test_path_single(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id=None, projection=None, resource='r',
path=['1'])
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/r/generic/1')
def test_path_multiple(self):
c = client.ContentForShoppingClient(account_id='123')
uri = c._create_uri(account_id=None, projection=None, resource='r',
path=['1', '2'])
self.assertEqual(uri,
'https://content.googleapis.com/content/v1/123/r/generic/1/2')
if __name__ == '__main__':
unittest.main()
|
jrrembert/django
|
refs/heads/master
|
django/contrib/admin/templatetags/log.py
|
499
|
from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
def __init__(self, limit, varname, user):
self.limit, self.varname, self.user = limit, varname, user
def __repr__(self):
return "<GetAdminLog Node>"
def render(self, context):
if self.user is None:
entries = LogEntry.objects.all()
else:
user_id = self.user
if not user_id.isdigit():
user_id = context[self.user].pk
entries = LogEntry.objects.filter(user__pk=user_id)
context[self.varname] = entries.select_related('content_type', 'user')[:int(self.limit)]
return ''
@register.tag
def get_admin_log(parser, token):
"""
Populates a template variable with the admin log for the given criteria.
Usage::
{% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}
Examples::
{% get_admin_log 10 as admin_log for_user 23 %}
{% get_admin_log 10 as admin_log for_user user %}
{% get_admin_log 10 as admin_log %}
Note that ``context_var_containing_user_obj`` can be a hard-coded integer
(user ID) or the name of a template context variable containing the user
object whose ID you want.
"""
tokens = token.contents.split()
if len(tokens) < 4:
raise template.TemplateSyntaxError(
"'get_admin_log' statements require two arguments")
if not tokens[1].isdigit():
raise template.TemplateSyntaxError(
"First argument to 'get_admin_log' must be an integer")
if tokens[2] != 'as':
raise template.TemplateSyntaxError(
"Second argument to 'get_admin_log' must be 'as'")
if len(tokens) > 4:
if tokens[4] != 'for_user':
raise template.TemplateSyntaxError(
"Fourth argument to 'get_admin_log' must be 'for_user'")
return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(tokens[5] if len(tokens) > 5 else None))
|
nitzmahone/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/lambda.py
|
12
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda
short_description: Manage AWS Lambda functions
description:
- Allows for the management of Lambda functions.
version_added: '2.2'
requirements: [ boto3 ]
options:
name:
description:
- The name you want to assign to the function you are uploading. Cannot be changed.
required: true
state:
description:
- Create or delete Lambda function
default: present
choices: [ 'present', 'absent' ]
runtime:
description:
- The runtime environment for the Lambda function you are uploading. Required when creating a function. Use parameters as described in boto3 docs.
Current example runtime environments are nodejs, nodejs4.3, java8 or python2.7
- Required when C(state=present)
role:
description:
- The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
resources. You may use the bare ARN if the role belongs to the same AWS account.
- Required when C(state=present)
handler:
description:
- The function within your code that Lambda calls to begin execution
zip_file:
description:
- A .zip file containing your deployment package
- If C(state=present) then either zip_file or s3_bucket must be present.
aliases: [ 'src' ]
s3_bucket:
description:
- Amazon S3 bucket name where the .zip file containing your deployment package is stored
- If C(state=present) then either zip_file or s3_bucket must be present.
- s3_bucket and s3_key are required together
s3_key:
description:
- The Amazon S3 object (the deployment package) key name you want to upload
- s3_bucket and s3_key are required together
s3_object_version:
description:
- The Amazon S3 object (the deployment package) version you want to upload.
description:
description:
- A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
timeout:
description:
- The function maximum execution time in seconds after which Lambda should terminate the function.
default: 3
memory_size:
description:
- The amount of memory, in MB, your Lambda function is given
default: 128
vpc_subnet_ids:
description:
- List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run
the function in a VPC.
vpc_security_group_ids:
description:
- List of VPC security group IDs to associate with the Lambda function. Required when vpc_subnet_ids is used.
environment_variables:
description:
- A dictionary of environment variables the Lambda function is given.
aliases: [ 'environment' ]
version_added: "2.3"
dead_letter_arn:
description:
- The parent object that contains the target Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
version_added: "2.3"
tags:
description:
- tag dict to apply to the function (requires botocore 1.5.40 or above)
version_added: "2.5"
author:
- 'Steyn Huizinga (@steynovich)'
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create Lambda functions
- name: looped creation
lambda:
name: '{{ item.name }}'
state: present
zip_file: '{{ item.zip_file }}'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
vpc_subnet_ids:
- subnet-123abcde
- subnet-edcba321
vpc_security_group_ids:
- sg-123abcde
- sg-edcba321
environment_variables: '{{ item.env_vars }}'
tags:
key1: 'value1'
loop:
- name: HelloWorld
zip_file: hello-code.zip
env_vars:
key1: "first"
key2: "second"
- name: ByeBye
zip_file: bye-code.zip
env_vars:
key1: "1"
key2: "2"
# To remove previously added tags pass an empty dict
- name: remove tags
lambda:
name: 'Lambda function'
state: present
zip_file: 'code.zip'
runtime: 'python2.7'
role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
handler: 'hello_python.my_handler'
tags: {}
# Basic Lambda function deletion
- name: Delete Lambda functions HelloWorld and ByeBye
lambda:
name: '{{ item }}'
state: absent
loop:
- HelloWorld
- ByeBye
'''
RETURN = '''
code:
description: the lambda function location returned by get_function in boto3
returned: success
type: dict
sample:
{
'location': 'a presigned S3 URL',
'repository_type': 'S3',
}
configuration:
description: the lambda function metadata returned by get_function in boto3
returned: success
type: dict
sample:
{
'code_sha256': 'SHA256 hash',
'code_size': 123,
'description': 'My function',
'environment': {
'variables': {
'key': 'value'
}
},
'function_arn': 'arn:aws:lambda:us-east-1:123456789012:function:myFunction:1',
'function_name': 'myFunction',
'handler': 'index.handler',
'last_modified': '2017-08-01T00:00:00.000+0000',
'memory_size': 128,
'role': 'arn:aws:iam::123456789012:role/lambda_basic_execution',
'runtime': 'nodejs6.10',
'timeout': 3,
'version': '1',
'vpc_config': {
'security_group_ids': [],
'subnet_ids': []
}
}
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict
from ansible.module_utils.ec2 import compare_aws_tags
import base64
import hashlib
import re
import traceback
try:
from botocore.exceptions import ClientError, BotoCoreError, ValidationError, ParamValidationError
except ImportError:
pass # protected by AnsibleAWSModule
def get_account_id(module, region=None, endpoint=None, **aws_connect_kwargs):
"""return the account id we are currently working on
    get_account_id tries to find out the account that we are working
    on. It's not guaranteed that this will be easy so we try in
    several different ways. Giving either IAM or STS privileges to
the account should be enough to permit this.
"""
account_id = None
try:
sts_client = boto3_conn(module, conn_type='client', resource='sts',
region=region, endpoint=endpoint, **aws_connect_kwargs)
account_id = sts_client.get_caller_identity().get('Account')
except ClientError:
try:
iam_client = boto3_conn(module, conn_type='client', resource='iam',
region=region, endpoint=endpoint, **aws_connect_kwargs)
account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
except ClientError as e:
if (e.response['Error']['Code'] == 'AccessDenied'):
except_msg = to_native(e.message)
                account_id = re.search(r"arn:aws:iam::([0-9]{12,32}):\w+/", except_msg).group(1)
if account_id is None:
module.fail_json_aws(e, msg="getting account information")
except Exception as e:
module.fail_json_aws(e, msg="getting account information")
return account_id
def get_current_function(connection, function_name, qualifier=None):
try:
if qualifier is not None:
return connection.get_function(FunctionName=function_name, Qualifier=qualifier)
return connection.get_function(FunctionName=function_name)
except ClientError as e:
try:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
return None
except (KeyError, AttributeError):
pass
raise e
def sha256sum(filename):
hasher = hashlib.sha256()
with open(filename, 'rb') as f:
hasher.update(f.read())
code_hash = hasher.digest()
code_b64 = base64.b64encode(code_hash)
    base64_digest = code_b64.decode('utf-8')
    return base64_digest
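# Illustrative use (hypothetical path): the returned value is the
# base64-encoded digest Lambda reports as CodeSha256, e.g.
#   sha256sum('/tmp/lambda.zip') == current_function['Configuration']['CodeSha256']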
def set_tag(client, module, tags, function):
if not hasattr(client, "list_tags"):
module.fail_json(msg="Using tags requires botocore 1.5.40 or above")
changed = False
arn = function['Configuration']['FunctionArn']
try:
current_tags = client.list_tags(Resource=arn).get('Tags', {})
except ClientError as e:
module.fail_json(msg="Unable to list tags: {0}".format(to_native(e)),
exception=traceback.format_exc())
tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=True)
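    # e.g. current_tags={'a': '1', 'b': '2'} and tags={'a': '1', 'c': '3'} with
    # purge_tags=True gives tags_to_add={'c': '3'} and tags_to_remove=['b']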
try:
if tags_to_remove:
client.untag_resource(
Resource=arn,
TagKeys=tags_to_remove
)
changed = True
if tags_to_add:
client.tag_resource(
Resource=arn,
Tags=tags_to_add
)
changed = True
except ClientError as e:
module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
to_native(e)), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
module.fail_json(msg="Unable to tag resource {0}: {1}".format(arn,
to_native(e)), exception=traceback.format_exc())
return changed
def main():
argument_spec = dict(
name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
runtime=dict(),
role=dict(),
handler=dict(),
zip_file=dict(aliases=['src']),
s3_bucket=dict(),
s3_key=dict(),
s3_object_version=dict(),
description=dict(default=''),
timeout=dict(type='int', default=3),
memory_size=dict(type='int', default=128),
vpc_subnet_ids=dict(type='list'),
vpc_security_group_ids=dict(type='list'),
environment_variables=dict(type='dict'),
dead_letter_arn=dict(),
tags=dict(type='dict'),
)
mutually_exclusive = [['zip_file', 's3_key'],
['zip_file', 's3_bucket'],
['zip_file', 's3_object_version']]
required_together = [['s3_key', 's3_bucket'],
['vpc_subnet_ids', 'vpc_security_group_ids']]
required_if = [['state', 'present', ['runtime', 'handler', 'role']]]
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
required_if=required_if)
name = module.params.get('name')
state = module.params.get('state').lower()
runtime = module.params.get('runtime')
role = module.params.get('role')
handler = module.params.get('handler')
s3_bucket = module.params.get('s3_bucket')
s3_key = module.params.get('s3_key')
s3_object_version = module.params.get('s3_object_version')
zip_file = module.params.get('zip_file')
description = module.params.get('description')
timeout = module.params.get('timeout')
memory_size = module.params.get('memory_size')
vpc_subnet_ids = module.params.get('vpc_subnet_ids')
vpc_security_group_ids = module.params.get('vpc_security_group_ids')
environment_variables = module.params.get('environment_variables')
dead_letter_arn = module.params.get('dead_letter_arn')
tags = module.params.get('tags')
check_mode = module.check_mode
changed = False
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg='region must be specified')
try:
client = boto3_conn(module, conn_type='client', resource='lambda',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
except (ClientError, ValidationError) as e:
module.fail_json_aws(e, msg="Trying to connect to AWS")
if state == 'present':
if role.startswith('arn:aws:iam'):
role_arn = role
else:
# get account ID and assemble ARN
account_id = get_account_id(module, region=region, endpoint=ec2_url, **aws_connect_kwargs)
role_arn = 'arn:aws:iam::{0}:role/{1}'.format(account_id, role)
# Get function configuration if present, None otherwise
current_function = get_current_function(client, name)
# Update existing Lambda function
if state == 'present' and current_function:
# Get current state
current_config = current_function['Configuration']
current_version = None
# Update function configuration
func_kwargs = {'FunctionName': name}
# Update configuration if needed
if role_arn and current_config['Role'] != role_arn:
func_kwargs.update({'Role': role_arn})
if handler and current_config['Handler'] != handler:
func_kwargs.update({'Handler': handler})
if description and current_config['Description'] != description:
func_kwargs.update({'Description': description})
if timeout and current_config['Timeout'] != timeout:
func_kwargs.update({'Timeout': timeout})
if memory_size and current_config['MemorySize'] != memory_size:
func_kwargs.update({'MemorySize': memory_size})
if (environment_variables is not None) and (current_config.get(
'Environment', {}).get('Variables', {}) != environment_variables):
func_kwargs.update({'Environment': {'Variables': environment_variables}})
if dead_letter_arn is not None:
if current_config.get('DeadLetterConfig'):
if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn:
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
else:
if dead_letter_arn != "":
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
# Check for unsupported mutation
if current_config['Runtime'] != runtime:
module.fail_json(msg='Cannot change runtime. Please recreate the function')
# If VPC configuration is desired
if vpc_subnet_ids or vpc_security_group_ids:
if not vpc_subnet_ids or not vpc_security_group_ids:
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
if 'VpcConfig' in current_config:
# Compare VPC config with current config
current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']
subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)
if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
new_vpc_config = {'SubnetIds': vpc_subnet_ids,
'SecurityGroupIds': vpc_security_group_ids}
func_kwargs.update({'VpcConfig': new_vpc_config})
else:
# No VPC configuration is desired; ensure the VPC config is emptied if present in the current config
if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'):
func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})
# Upload new configuration if configuration has changed
if len(func_kwargs) > 1:
try:
if not check_mode:
response = client.update_function_configuration(**func_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to update lambda configuration")
# Update code configuration
code_kwargs = {'FunctionName': name, 'Publish': True}
# Update S3 location
if s3_bucket and s3_key:
# If the function is stored on S3, always update the code
code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})
# If S3 Object Version is given
if s3_object_version:
code_kwargs.update({'S3ObjectVersion': s3_object_version})
# Compare local checksum, update remote code when different
elif zip_file:
local_checksum = sha256sum(zip_file)
remote_checksum = current_config['CodeSha256']
# Only upload new code when the local code differs from the remote code
if local_checksum != remote_checksum:
try:
with open(zip_file, 'rb') as f:
encoded_zip = f.read()
code_kwargs.update({'ZipFile': encoded_zip})
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
# Tag Function
if tags is not None:
if set_tag(client, module, tags, current_function):
changed = True
# Upload new code if needed (e.g. code checksum has changed)
if len(code_kwargs) > 2:
try:
if not check_mode:
response = client.update_function_code(**code_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to upload new code")
# Describe function code and configuration
response = get_current_function(client, name, qualifier=current_version)
if not response:
module.fail_json(msg='Unable to get function information after updating')
# We're done
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
# Function doesn't exist, create new Lambda function
elif state == 'present':
if s3_bucket and s3_key:
# If function is stored on S3
code = {'S3Bucket': s3_bucket,
'S3Key': s3_key}
if s3_object_version:
code.update({'S3ObjectVersion': s3_object_version})
elif zip_file:
# If function is stored in local zipfile
try:
with open(zip_file, 'rb') as f:
zip_content = f.read()
code = {'ZipFile': zip_content}
except IOError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
else:
module.fail_json(msg='Either S3 object or path to zipfile required')
func_kwargs = {'FunctionName': name,
'Publish': True,
'Runtime': runtime,
'Role': role_arn,
'Code': code,
'Timeout': timeout,
'MemorySize': memory_size,
}
if description is not None:
func_kwargs.update({'Description': description})
if handler is not None:
func_kwargs.update({'Handler': handler})
if environment_variables:
func_kwargs.update({'Environment': {'Variables': environment_variables}})
if dead_letter_arn:
func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
# If VPC configuration is given
if vpc_subnet_ids or vpc_security_group_ids:
if not vpc_subnet_ids or not vpc_security_group_ids:
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
'SecurityGroupIds': vpc_security_group_ids}})
# Finally try to create function
current_version = None
try:
if not check_mode:
response = client.create_function(**func_kwargs)
current_version = response['Version']
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to create function")
# Tag Function
if tags is not None:
if set_tag(client, module, tags, get_current_function(client, name)):
changed = True
response = get_current_function(client, name, qualifier=current_version)
if not response:
module.fail_json(msg='Unable to get function information after creating')
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
# Delete existing Lambda function
if state == 'absent' and current_function:
try:
if not check_mode:
client.delete_function(FunctionName=name)
changed = True
except (ParamValidationError, ClientError) as e:
module.fail_json_aws(e, msg="Trying to delete Lambda function")
module.exit_json(changed=changed)
# Function already absent, do nothing
elif state == 'absent':
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
|
allenlavoie/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/sparse_grad.py
|
7
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in sparse_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable("SparseAddGrad")
ops.NotDifferentiable("SparseConcat")
ops.NotDifferentiable("SparseToDense")
@ops.RegisterGradient("SparseReorder")
def _SparseReorderGrad(op, unused_output_indices_grad, output_values_grad):
"""Gradients for the SparseReorder op.
Args:
op: the SparseReorder op
unused_output_indices_grad: the incoming gradients of the output indices
output_values_grad: the incoming gradients of the output values
Returns:
Gradient for each of the 3 input tensors:
(input_indices, input_values, input_shape)
The gradients for input_indices and input_shape are None.
"""
input_indices = op.inputs[0]
input_shape = op.inputs[2]
num_entries = array_ops.shape(input_indices)[0]
entry_indices = math_ops.range(num_entries)
sp_unordered = sparse_tensor.SparseTensor(
input_indices, entry_indices, input_shape)
sp_ordered = sparse_ops.sparse_reorder(sp_unordered)
inverted_permutation = array_ops.invert_permutation(sp_ordered.values)
return (None,
array_ops.gather(output_values_grad, inverted_permutation),
None)
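# Illustrative numpy sketch (added; not part of TensorFlow): _SparseReorderGrad
# recovers the permutation applied by sparse_reorder by pushing the entry ids
# [0, n) through the same reorder, inverting that permutation, and gathering
# the output gradients back to their original slots.
def _example_reorder_grad(perm, output_values_grad):
    """Sketch: perm[j] is the original position of the entry now at slot j."""
    import numpy as np
    inverted = np.empty_like(perm)
    inverted[perm] = np.arange(len(perm))   # invert_permutation
    return output_values_grad[inverted]     # gather(output_values_grad, inverted)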
@ops.RegisterGradient("SparseAdd")
def _SparseAddGrad(op, *grads):
"""The backward operator for the SparseAdd op.
The SparseAdd op calculates A + B, where A, B, and the sum are all represented
as `SparseTensor` objects. This op takes in the upstream gradient w.r.t.
non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
values of A and B.
Args:
op: the SparseAdd op
*grads: the incoming gradients, one element per output of `op`
Returns:
Gradient for each of the 7 input tensors of SparseAdd:
(a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)
The gradients for the indices, shapes, and the threshold are None.
"""
val_grad = grads[1]
a_indices = op.inputs[0]
b_indices = op.inputs[3]
sum_indices = op.outputs[0]
# NOTE: we do not need to take `thresh` into account, since it simply affects
# the non-zero elements of the sum, and we will peek into `sum_indices` in the
# gradient op.
a_val_grad, b_val_grad = gen_sparse_ops.sparse_add_grad(
val_grad, a_indices, b_indices, sum_indices)
a_val_grad.set_shape(op.inputs[1].get_shape())
b_val_grad.set_shape(op.inputs[4].get_shape())
# (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)
return (None, a_val_grad, None, None, b_val_grad, None, None)
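# Illustrative numpy sketch (added; not TensorFlow's kernel): sparse_add_grad
# routes the sum's value gradient back to A's non-empty slots by matching each
# index of A against the indices of the sum; entries dropped by `thresh` get 0.
def _example_sparse_add_grad(val_grad, a_indices, sum_indices):
    """Sketch: gradient w.r.t. A's non-empty values."""
    import numpy as np
    lookup = dict((tuple(idx), g) for idx, g in zip(sum_indices, val_grad))
    return np.array([lookup.get(tuple(idx), 0.0) for idx in a_indices])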
@ops.RegisterGradient("SparseTensorDenseAdd")
def _SparseTensorDenseAddGrad(op, out_grad):
sp_indices = op.inputs[0]
# (sparse_indices, sparse_values, sparse_shape, dense)
return (None, array_ops.gather_nd(out_grad, sp_indices), None, out_grad)
@ops.RegisterGradient("SparseReduceSum")
def _SparseReduceSumGrad(op, out_grad):
"""Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
sp_indices = op.inputs[0]
sp_shape = op.inputs[2]
output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)
# (sparse_indices, sparse_values, sparse_shape, reduction_axes)
return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),
None, None)
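# Illustrative numpy sketch (added; not TensorFlow's kernel): reduced axes have
# size 1 in output_shape_kept_dims, so sp_shape // kept_shape is > 1 exactly on
# those axes, and integer-dividing each non-zero's index by that scale maps it
# onto the cell of the reshaped output gradient it contributed to.
def _example_reduce_sum_grad(out_grad_kept, sp_indices, sp_shape):
    """Sketch: per-value gradient for a sparse reduce_sum."""
    import numpy as np
    scale = np.array(sp_shape) // np.array(out_grad_kept.shape)
    return np.array([out_grad_kept[tuple(idx // scale)] for idx in sp_indices])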
@ops.RegisterGradient("SparseTensorDenseMatMul")
def _SparseTensorDenseMatMulGrad(op, grad):
"""Gradients for the dense tensor in the SparseTensorDenseMatMul op.
If either input is complex, no gradient is provided.
Args:
op: the SparseTensorDenseMatMul op
grad: the incoming gradient
Returns:
Gradient for each of the 4 input tensors:
(sparse_indices, sparse_values, sparse_shape, dense_tensor)
The gradients for indices and shape are None.
Raises:
TypeError: When the two operands don't have the same type.
"""
a_indices, a_values, a_shape = op.inputs[:3]
b = op.inputs[3]
adj_a = op.get_attr("adjoint_a")
adj_b = op.get_attr("adjoint_b")
a_type = a_values.dtype.base_dtype
b_type = b.dtype.base_dtype
if a_type != b_type:
raise TypeError("SparseTensorDenseMatMul op received operands with "
"different types: ", a_type, " and ", b_type)
if a_type in (ops.dtypes.complex64, ops.dtypes.complex128):
raise NotImplementedError("SparseTensorDenseMatMul op does not support "
"complex gradients.")
# gradient w.r.t. dense
b_grad = gen_sparse_ops.sparse_tensor_dense_mat_mul(
a_indices, a_values, a_shape, grad, adjoint_a=not adj_a)
if adj_b:
b_grad = array_ops.transpose(b_grad)
# gradient w.r.t. sparse values
rows = a_indices[:, 0]
cols = a_indices[:, 1]
# TODO(zongheng, ebrevdo): add conjugates in the right places when complex
# values are allowed.
# TODO(zongheng): these gather calls could potentially duplicate rows/cols in
# memory. If there is a need, we should look into implementing this more
# intelligently to avoid duplicating data.
parts_a = array_ops.gather(grad, rows if not adj_a else cols)
parts_b = array_ops.gather(b if not adj_b else array_ops.transpose(b),
cols if not adj_a else rows)
a_values_grad = math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1)
# gradients w.r.t. (a_indices, a_values, a_shape, b)
return (None, a_values_grad, None, b_grad)
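# Illustrative numpy sketch (added; not TensorFlow's kernel) of the a_values
# gradient above for adjoint_a=adjoint_b=False: with C = A @ B, the gradient of
# the entry at [rows[k], cols[k]] is the dot product of row rows[k] of grad
# with row cols[k] of B -- exactly the parts_a * parts_b reduction above.
def _example_matmul_values_grad(grad, b, rows, cols):
    """Sketch: d a_values[k] = sum_j grad[rows[k], j] * b[cols[k], j]."""
    import numpy as np
    return np.sum(grad[rows] * b[cols], axis=1)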
@ops.RegisterGradient("SparseDenseCwiseAdd")
def _SparseDenseCwiseAddGrad(unused_op, unused_grad):
raise NotImplementedError("Gradient for SparseDenseCwiseAdd is currently not"
" implemented yet.")
def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
"""Common code for SparseDenseCwise{Mul,Div} gradients."""
x_indices = op.inputs[0]
x_shape = op.inputs[2]
y = op.inputs[3]
y_shape = math_ops.to_int64(array_ops.shape(y))
num_added_dims = array_ops.expand_dims(
array_ops.size(x_shape) - array_ops.size(y_shape), 0)
augmented_y_shape = array_ops.concat(
[array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)
scaling = x_shape // augmented_y_shape
scaled_indices = x_indices // scaling
scaled_indices = array_ops.slice(scaled_indices,
array_ops.concat([[0], num_added_dims], 0),
[-1, -1])
dense_vals = array_ops.gather_nd(y, scaled_indices)
if is_mul:
dx = grad * dense_vals
dy_val = grad * op.inputs[1]
else:
dx = grad / dense_vals
dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
# indices can repeat after scaling, so we can't use sparse_to_dense().
dy = sparse_ops.sparse_add(
array_ops.zeros_like(y),
sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))
# (sp_indices, sp_vals, sp_shape, dense)
return (None, dx, None, dy)
@ops.RegisterGradient("SparseDenseCwiseMul")
def _SparseDenseCwiseMulGrad(op, grad):
"""Gradients for SparseDenseCwiseMul."""
return _SparseDenseCwiseMulOrDivGrad(op, grad, True)
@ops.RegisterGradient("SparseDenseCwiseDiv")
def _SparseDenseCwiseDivGrad(op, grad):
"""Gradients for SparseDenseCwiseDiv."""
return _SparseDenseCwiseMulOrDivGrad(op, grad, False)
@ops.RegisterGradient("SparseSoftmax")
def _SparseSoftmaxGrad(op, grad):
"""Gradients for SparseSoftmax.
The calculation is the same as SoftmaxGrad:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
where we now only operate on the non-zero values present in the SparseTensors.
Args:
op: the SparseSoftmax op.
grad: the upstream gradient w.r.t. the non-zero SparseSoftmax output values.
Returns:
Gradients w.r.t. the input (sp_indices, sp_values, sp_shape).
"""
indices, shape = op.inputs[0], op.inputs[2]
out_vals = op.outputs[0]
sp_output = sparse_tensor.SparseTensor(indices, out_vals, shape)
sp_grad = sparse_tensor.SparseTensor(indices, grad, shape)
sp_product = sparse_tensor.SparseTensor(
indices, sp_output.values * sp_grad.values, shape)
# [..., B, 1], dense.
sum_reduced = -sparse_ops.sparse_reduce_sum(sp_product, [-1], keep_dims=True)
# sparse [..., B, C] + dense [..., B, 1] with broadcast; outputs sparse.
sp_sum = sparse_ops.sparse_dense_cwise_add(sp_grad, sum_reduced)
grad_x = sp_sum.values * sp_output.values
return [None, grad_x, None]
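# Illustrative numpy sketch (added; not TensorFlow's kernel) of the identity
# used by _SparseSoftmaxGrad, applied densely along the last axis:
def _example_softmax_grad(grad, softmax_out):
    """Sketch: grad_x = (grad - sum(grad * softmax)) * softmax, per row."""
    import numpy as np
    summed = np.sum(grad * softmax_out, axis=-1, keepdims=True)
    return (grad - summed) * softmax_out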
@ops.RegisterGradient("SparseSparseMaximum")
def _SparseSparseMaximumGrad(unused_op, unused_grad):
raise NotImplementedError("Gradient for SparseSparseMaximum is currently not"
" implemented yet.")
@ops.RegisterGradient("SparseSparseMinimum")
def _SparseSparseMinimumGrad(unused_op, unused_grad):
raise NotImplementedError("Gradient for SparseSparseMinimum is currently not"
" implemented yet.")
@ops.RegisterGradient("SparseFillEmptyRows")
def _SparseFillEmptyRowsGrad(op, unused_grad_output_indices, output_grad_values,
unused_grad_empty_row_indicator,
unused_grad_reverse_index_map):
"""Gradients for SparseFillEmptyRows."""
reverse_index_map = op.outputs[3]
d_values, d_default_value = gen_sparse_ops.sparse_fill_empty_rows_grad(
reverse_index_map=reverse_index_map, grad_values=output_grad_values)
# d_indices, d_values, d_dense_shape, d_default_value.
return [None, d_values, None, d_default_value]
|
DeveloperJose/Vision-Rat-Brain
|
refs/heads/master
|
feature_matching_v2/asift.py
|
1
|
# -*- coding: utf-8 -*-
# Affine SIFT (ASIFT)
# http://www.ipol.im/pub/algo/my_affine_sift/
# Currently unused in program
import numpy as np
import cv2
import sift
from multiprocessing.pool import ThreadPool
def extract_asift(img, mask=None, cpus=4, start=0, end=180, inc=72.0):
'''
extract_asift(img, mask=None, cpus=4, start=0, end=180, inc=72.0) -> keypoints, descrs
Apply a set of affine transformations to the image, detect keypoints and
reproject them into the initial image coordinates.
See http://www.ipol.im/pub/algo/my_affine_sift/ for the details.
A thread pool of `cpus` workers is used to speed up the computation.
'''
detector = sift.SIFT
params = [(1.0, 0.0)]
for t in 2**(0.5*np.arange(1,6)):
for phi in np.arange(start, end, inc / t):
params.append((t, phi))
def f(p):
t, phi = p
timg, tmask, Ai = __affine_skew(t, phi, img)
keypoints, descrs = detector.detectAndCompute(timg, tmask)
for kp in keypoints:
x, y = kp.pt
kp.pt = tuple( np.dot(Ai, (x, y, 1)) )
if descrs is None:
descrs = []
return keypoints, descrs
keypoints, descrs = [], []
pool = ThreadPool(cpus)
ires = pool.map(f, params)
pool.close()
for i, (k, d) in enumerate(ires):
keypoints.extend(k)
descrs.extend(d)
return keypoints, np.array(descrs)
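# Usage sketch (added; hypothetical image path):
#   img = cv2.imread('slice.png', cv2.IMREAD_GRAYSCALE)
#   keypoints, descriptors = extract_asift(img, cpus=4)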
def __affine_skew(tilt, phi, img, mask=None):
'''
affine_skew(tilt, phi, img, mask=None) -> skew_img, skew_mask, Ai
Ai - is an affine transform matrix from skew_img to img
'''
h, w = img.shape[:2]
if mask is None:
mask = np.zeros((h, w), np.uint8)
mask[:] = 255
A = np.float32([[1, 0, 0], [0, 1, 0]])
if phi != 0.0:
phi = np.deg2rad(phi)
s, c = np.sin(phi), np.cos(phi)
A = np.float32([[c,-s], [ s, c]])
corners = [[0, 0], [w, 0], [w, h], [0, h]]
tcorners = np.int32( np.dot(corners, A.T) )
x, y, w, h = cv2.boundingRect(tcorners.reshape(1,-1,2))
A = np.hstack([A, [[-x], [-y]]])
img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
if tilt != 1.0:
# Anti-aliasing blur before subsampling along x: sigma = 0.8*sqrt(t^2 - 1), per the ASIFT paper
s = 0.8*np.sqrt(tilt*tilt-1)
img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
img = cv2.resize(img, (0, 0), fx=1.0/tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
A[0] /= tilt
if phi != 0.0 or tilt != 1.0:
h, w = img.shape[:2]
mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
Ai = cv2.invertAffineTransform(A)
return img, mask, Ai
|
tklovett/MaudeMiner
|
refs/heads/master
|
MaudeMiner/querier/__init__.py
|
1
|
from MaudeMiner import interactive
from MaudeMiner.querier import keyword_search
from MaudeMiner.querier import sql
def run(args):
interactive.start("querier", 1, commands)
commands = {
"keywords": keyword_search.run,
"sql": sql.run,
}
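# Note (added): `commands` maps sub-command names typed at the interactive
# prompt to their handlers. Defining it after run() works because Python
# resolves the module-level name when run() executes, not when it is defined.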
|
robbinfan/thirdparty
|
refs/heads/master
|
gmock-1.7.0/scripts/generator/cpp/ast.py
|
268
|
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
if not hasattr(builtins, 'reversed'):
# Support Python 2.3 and earlier.
def reversed(seq):
for i in range(len(seq)-1, -1, -1):
yield seq[i]
if not hasattr(builtins, 'next'):
# Support Python 2.5 and earlier.
def next(obj):
return obj.next()
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
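# Illustrative sketch (added; not in the original): the FUNCTION_* values are
# bit flags, so the parser combines them with | and callers test them with &.
def _example_is_pure_virtual(modifiers):
    """Sketch: a pure virtual method carries both VIRTUAL and PURE_VIRTUAL."""
    return bool(modifiers & FUNCTION_VIRTUAL) and bool(modifiers & FUNCTION_PURE_VIRTUAL)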
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
__contains__ = lambda self, item: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
"""Base AST node."""
def __init__(self, start, end):
self.start = start
self.end = end
def IsDeclaration(self):
"""Returns bool if this node is a declaration."""
return False
def IsDefinition(self):
"""Returns bool if this node is a definition."""
return False
def IsExportable(self):
"""Returns bool if this node exportable from a header file."""
return False
def Requires(self, node):
"""Does this AST node require the definition of the node passed in?"""
return False
def XXX__str__(self):
return self._StringHelper(self.__class__.__name__, '')
def _StringHelper(self, name, suffix):
if not utils.DEBUG:
return '%s(%s)' % (name, suffix)
return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
def __repr__(self):
return str(self)
class Define(Node):
def __init__(self, start, end, name, definition):
Node.__init__(self, start, end)
self.name = name
self.definition = definition
def __str__(self):
value = '%s %s' % (self.name, self.definition)
return self._StringHelper(self.__class__.__name__, value)
class Include(Node):
def __init__(self, start, end, filename, system):
Node.__init__(self, start, end)
self.filename = filename
self.system = system
def __str__(self):
fmt = '"%s"'
if self.system:
fmt = '<%s>'
return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
def __init__(self, start, end, label):
Node.__init__(self, start, end)
self.label = label
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.label))
class Expr(Node):
def __init__(self, start, end, expr):
Node.__init__(self, start, end)
self.expr = expr
def Requires(self, node):
# TODO(nnorwitz): impl.
return False
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.expr))
class Return(Expr):
pass
class Delete(Expr):
pass
class Friend(Expr):
def __init__(self, start, end, expr, namespace):
Expr.__init__(self, start, end, expr)
self.namespace = namespace[:]
class Using(Node):
def __init__(self, start, end, names):
Node.__init__(self, start, end)
self.names = names
def __str__(self):
return self._StringHelper(self.__class__.__name__, str(self.names))
class Parameter(Node):
def __init__(self, start, end, name, parameter_type, default):
Node.__init__(self, start, end)
self.name = name
self.type = parameter_type
self.default = default
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def __str__(self):
name = str(self.type)
suffix = '%s %s' % (name, self.name)
if self.default:
suffix += ' = ' + ''.join([d.name for d in self.default])
return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
def __init__(self, start, end, name, namespace):
Node.__init__(self, start, end)
self.name = name
self.namespace = namespace[:]
def FullName(self):
prefix = ''
if self.namespace and self.namespace[-1]:
prefix = '::'.join(self.namespace) + '::'
return prefix + self.name
def _TypeStringHelper(self, suffix):
if self.namespace:
names = [n or '<anonymous>' for n in self.namespace]
suffix += ' in ' + '::'.join(names)
return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
def __init__(self, start, end, name, var_type, initial_value, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.type = var_type
self.initial_value = initial_value
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
return self.type.name == node.name
def ToString(self):
"""Return a string that tries to reconstitute the variable decl."""
suffix = '%s %s' % (self.type, self.name)
if self.initial_value:
suffix += ' = ' + self.initial_value
return suffix
def __str__(self):
return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
def __init__(self, start, end, name, alias, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.alias = alias
def IsDefinition(self):
return True
def IsExportable(self):
return True
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
name = node.name
for token in self.alias:
if token is not None and name == token.name:
return True
return False
def __str__(self):
suffix = '%s, %s' % (self.name, self.alias)
return self._TypeStringHelper(suffix)
class _NestedType(_GenericDeclaration):
def __init__(self, start, end, name, fields, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.fields = fields
def IsDefinition(self):
return True
def IsExportable(self):
return True
def __str__(self):
suffix = '%s, {%s}' % (self.name, self.fields)
return self._TypeStringHelper(suffix)
class Union(_NestedType):
pass
class Enum(_NestedType):
pass
class Class(_GenericDeclaration):
def __init__(self, start, end, name, bases, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
self.bases = bases
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.bases is None and self.body is None
def IsDefinition(self):
return not self.IsDeclaration()
def IsExportable(self):
return not self.IsDeclaration()
def Requires(self, node):
# TODO(nnorwitz): handle namespaces, etc.
if self.bases:
for token_list in self.bases:
# TODO(nnorwitz): bases are tokens, do name comparison.
for token in token_list:
if token.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
name = self.name
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = '%s, %s, %s' % (name, self.bases, self.body)
return self._TypeStringHelper(suffix)
class Struct(Class):
pass
class Function(_GenericDeclaration):
def __init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace):
_GenericDeclaration.__init__(self, start, end, name, namespace)
converter = TypeConverter(namespace)
self.return_type = converter.CreateReturnType(return_type)
self.parameters = converter.ToParameters(parameters)
self.modifiers = modifiers
self.body = body
self.templated_types = templated_types
def IsDeclaration(self):
return self.body is None
def IsDefinition(self):
return self.body is not None
def IsExportable(self):
if self.return_type and 'static' in self.return_type.modifiers:
return False
return None not in self.namespace
def Requires(self, node):
if self.parameters:
# TODO(nnorwitz): parameters are tokens, do name comparison.
for p in self.parameters:
if p.name == node.name:
return True
# TODO(nnorwitz): search in body too.
return False
def __str__(self):
# TODO(nnorwitz): add templated_types.
suffix = ('%s %s(%s), 0x%02x, %s' %
(self.return_type, self.name, self.parameters,
self.modifiers, self.body))
return self._TypeStringHelper(suffix)
class Method(Function):
def __init__(self, start, end, name, in_class, return_type, parameters,
modifiers, templated_types, body, namespace):
Function.__init__(self, start, end, name, return_type, parameters,
modifiers, templated_types, body, namespace)
# TODO(nnorwitz): in_class could also be a namespace which can
# mess up finding functions properly.
self.in_class = in_class
class Type(_GenericDeclaration):
"""Type used for any variable (eg class, primitive, struct, etc)."""
def __init__(self, start, end, name, templated_types, modifiers,
reference, pointer, array):
"""
Args:
name: str name of main type
templated_types: [Class (Type?)] template type info between <>
modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
reference, pointer, array: bools
"""
_GenericDeclaration.__init__(self, start, end, name, [])
self.templated_types = templated_types
if not name and modifiers:
self.name = modifiers.pop()
self.modifiers = modifiers
self.reference = reference
self.pointer = pointer
self.array = array
def __str__(self):
prefix = ''
if self.modifiers:
prefix = ' '.join(self.modifiers) + ' '
name = str(self.name)
if self.templated_types:
name += '<%s>' % self.templated_types
suffix = prefix + name
if self.reference:
suffix += '&'
if self.pointer:
suffix += '*'
if self.array:
suffix += '[]'
return self._TypeStringHelper(suffix)
# By definition, Is* are always False. A Type can only exist in
# some sort of variable declaration, parameter, or return value.
def IsDeclaration(self):
return False
def IsDefinition(self):
return False
def IsExportable(self):
return False
class TypeConverter(object):
def __init__(self, namespace_stack):
self.namespace_stack = namespace_stack
def _GetTemplateEnd(self, tokens, start):
count = 1
end = start
while 1:
token = tokens[end]
end += 1
if token.name == '<':
count += 1
elif token.name == '>':
count -= 1
if count == 0:
break
return tokens[start:end-1], end
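# Illustrative note (added): for parts like "pair < int , vector < int > > x",
# _GetTemplateEnd is entered just past the opening '<'; it tracks nesting depth
# and returns the tokens up to (but excluding) the matching '>' along with the
# index one past that '>'.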
def ToType(self, tokens):
"""Convert [Token,...] to [Class(...), ] useful for base classes.
For example, code like class Foo : public Bar<x, y> { ... };
the "Bar<x, y>" portion gets converted to an AST.
Returns:
[Class(...), ...]
"""
result = []
name_tokens = []
reference = pointer = array = False
def AddType(templated_types):
# Partition tokens into name and modifier tokens.
names = []
modifiers = []
for t in name_tokens:
if keywords.IsKeyword(t.name):
modifiers.append(t.name)
else:
names.append(t.name)
name = ''.join(names)
result.append(Type(name_tokens[0].start, name_tokens[-1].end,
name, templated_types, modifiers,
reference, pointer, array))
del name_tokens[:]
i = 0
end = len(tokens)
while i < end:
token = tokens[i]
if token.name == '<':
new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
AddType(self.ToType(new_tokens))
# If there is a comma after the template, we need to consume
# that here, otherwise it becomes part of the name.
i = new_end
reference = pointer = array = False
elif token.name == ',':
AddType([])
reference = pointer = array = False
elif token.name == '*':
pointer = True
elif token.name == '&':
reference = True
elif token.name == '[':
pointer = True
elif token.name == ']':
pass
else:
name_tokens.append(token)
i += 1
if name_tokens:
# No '<' in the tokens, just a simple name and no template.
AddType([])
return result
def DeclarationToParts(self, parts, needs_name_removed):
name = None
default = []
if needs_name_removed:
# Handle default (initial) values properly.
for i, t in enumerate(parts):
if t.name == '=':
default = parts[i+1:]
name = parts[i-1].name
if name == ']' and parts[i-2].name == '[':
name = parts[i-3].name
i -= 1
parts = parts[:i-1]
break
else:
if parts[-1].token_type == tokenize.NAME:
name = parts.pop().name
else:
# TODO(nnorwitz): this is a hack that happens for code like
# Register(Foo<T>); where it thinks this is a function call
# but it's actually a declaration.
name = '???'
modifiers = []
type_name = []
other_tokens = []
templated_types = []
i = 0
end = len(parts)
while i < end:
p = parts[i]
if keywords.IsKeyword(p.name):
modifiers.append(p.name)
elif p.name == '<':
templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
templated_types = self.ToType(templated_tokens)
i = new_end - 1
# Don't add a spurious :: to data members being initialized.
next_index = i + 1
if next_index < end and parts[next_index].name == '::':
i += 1
elif p.name in ('[', ']', '='):
# These are handled elsewhere.
other_tokens.append(p)
elif p.name not in ('*', '&', '>'):
# Ensure that names have a space between them.
if (type_name and type_name[-1].token_type == tokenize.NAME and
p.token_type == tokenize.NAME):
type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
type_name.append(p)
else:
other_tokens.append(p)
i += 1
type_name = ''.join([t.name for t in type_name])
return name, type_name, templated_types, modifiers, default, other_tokens
def ToParameters(self, tokens):
if not tokens:
return []
result = []
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
def AddParameter():
if default:
del default[0] # Remove flag.
end = type_modifiers[-1].end
parts = self.DeclarationToParts(type_modifiers, True)
(name, type_name, templated_types, modifiers,
unused_default, unused_other_tokens) = parts
parameter_type = Type(first_token.start, first_token.end,
type_name, templated_types, modifiers,
reference, pointer, array)
p = Parameter(first_token.start, end, name,
parameter_type, default)
result.append(p)
template_count = 0
for s in tokens:
if not first_token:
first_token = s
if s.name == '<':
template_count += 1
elif s.name == '>':
template_count -= 1
if template_count > 0:
type_modifiers.append(s)
continue
if s.name == ',':
AddParameter()
name = type_name = ''
type_modifiers = []
pointer = reference = array = False
first_token = None
default = []
elif s.name == '*':
pointer = True
elif s.name == '&':
reference = True
elif s.name == '[':
array = True
elif s.name == ']':
pass # Just don't add to type_modifiers.
elif s.name == '=':
# Got a default value. Add any value (None) as a flag.
default.append(None)
elif default:
default.append(s)
else:
type_modifiers.append(s)
AddParameter()
return result
def CreateReturnType(self, return_type_seq):
if not return_type_seq:
return None
start = return_type_seq[0].start
end = return_type_seq[-1].end
_, name, templated_types, modifiers, default, other_tokens = \
self.DeclarationToParts(return_type_seq, False)
names = [n.name for n in other_tokens]
reference = '&' in names
pointer = '*' in names
array = '[' in names
return Type(start, end, name, templated_types, modifiers,
reference, pointer, array)
def GetTemplateIndices(self, names):
# names is a list of strings.
start = names.index('<')
end = len(names) - 1
while end > 0:
if names[end] == '>':
break
end -= 1
return start, end+1
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
namespace_stack=[]):
self.tokens = token_stream
self.filename = filename
# TODO(nnorwitz): use a better data structure (deque) for the queue.
# Switching directions of the "queue" improved perf by about 25%.
# Using a deque should be even better since we access from both sides.
self.token_queue = []
self.namespace_stack = namespace_stack[:]
self.in_class = in_class
if in_class is None:
self.in_class_name_only = None
else:
self.in_class_name_only = in_class.split('::')[-1]
self.visibility = visibility
self.in_function = False
self.current_token = None
# Keep the state whether we are currently handling a typedef or not.
self._handling_typedef = False
self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
printable_queue = list(reversed(self.token_queue[-20:]))
sys.stderr.write('Got %s in %s @ %s %s\n' %
(msg, self.filename, token, printable_queue))
def Generate(self):
while 1:
token = self._GetNextToken()
if not token:
break
# Get the next token.
self.current_token = token
# Dispatch on the next token type.
if token.token_type == _INTERNAL_TOKEN:
if token.name == _NAMESPACE_POP:
self.namespace_stack.pop()
continue
try:
result = self._GenerateOne(token)
if result is not None:
yield result
except:
self.HandleError('exception', token)
raise
def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
ref_pointer_name_seq, templated_types, value=None):
reference = '&' in ref_pointer_name_seq
pointer = '*' in ref_pointer_name_seq
array = '[' in ref_pointer_name_seq
var_type = Type(pos_token.start, pos_token.end, type_name,
templated_types, type_modifiers,
reference, pointer, array)
return VariableDeclaration(pos_token.start, pos_token.end,
name, var_type, value, self.namespace_stack)
def _GenerateOne(self, token):
if token.token_type == tokenize.NAME:
if (keywords.IsKeyword(token.name) and
not keywords.IsBuiltinType(token.name)):
method = getattr(self, 'handle_' + token.name)
return method()
elif token.name == self.in_class_name_only:
# The token name is the same as the class, so it must be a ctor if
# there is a paren. Otherwise, it's the return type.
# Peek ahead to get the next token to figure out which.
next = self._GetNextToken()
self._AddBackToken(next)
if next.token_type == tokenize.SYNTAX and next.name == '(':
return self._GetMethod([token], FUNCTION_CTOR, None, True)
# Fall through--handle like any other method.
# Handle data or function declaration/definition.
syntax = tokenize.SYNTAX
temp_tokens, last_token = \
self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
temp_tokens.insert(0, token)
if last_token.name == '(':
# If there is an assignment before the paren,
# this is an expression, not a method.
expr = bool([e for e in temp_tokens if e.name == '='])
if expr:
new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.append(last_token)
temp_tokens.extend(new_temp)
last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)
if last_token.name == '[':
# Handle array, this isn't a method, unless it's an operator.
# TODO(nnorwitz): keep the size somewhere.
# unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
temp_tokens.append(last_token)
if temp_tokens[-2].name == 'operator':
temp_tokens.append(self._GetNextToken())
else:
temp_tokens2, last_token = \
self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
temp_tokens.extend(temp_tokens2)
if last_token.name == ';':
# Handle data, this isn't a method.
parts = self.converter.DeclarationToParts(temp_tokens, True)
(name, type_name, templated_types, modifiers, default,
unused_other_tokens) = parts
t0 = temp_tokens[0]
names = [t.name for t in temp_tokens]
if templated_types:
start, end = self.converter.GetTemplateIndices(names)
names = names[:start] + names[end:]
default = ''.join([t.name for t in default])
return self._CreateVariable(t0, name, type_name, modifiers,
names, templated_types, default)
if last_token.name == '{':
self._AddBackTokens(temp_tokens[1:])
self._AddBackToken(last_token)
method_name = temp_tokens[0].name
method = getattr(self, 'handle_' + method_name, None)
if not method:
# Must be declaring a variable.
# TODO(nnorwitz): handle the declaration.
return None
return method()
return self._GetMethod(temp_tokens, 0, None, False)
elif token.token_type == tokenize.SYNTAX:
if token.name == '~' and self.in_class:
# Must be a dtor (probably not in method body).
token = self._GetNextToken()
# self.in_class can contain A::Name, but the dtor will only
# be Name. Make sure to compare against the right value.
if (token.token_type == tokenize.NAME and
token.name == self.in_class_name_only):
return self._GetMethod([token], FUNCTION_DTOR, None, True)
# TODO(nnorwitz): handle a lot more syntax.
elif token.token_type == tokenize.PREPROCESSOR:
# TODO(nnorwitz): handle more preprocessor directives.
# token starts with a #, so remove it and strip whitespace.
name = token.name[1:].lstrip()
if name.startswith('include'):
# Remove "include".
name = name[7:].strip()
assert name
# Handle #include \<newline> "header-on-second-line.h".
if name.startswith('\\'):
name = name[1:].strip()
assert name[0] in '<"', token
assert name[-1] in '>"', token
system = name[0] == '<'
filename = name[1:-1]
return Include(token.start, token.end, filename, system)
if name.startswith('define'):
# Remove "define".
name = name[6:].strip()
assert name
value = ''
for i, c in enumerate(name):
if c.isspace():
value = name[i:].lstrip()
name = name[:i]
break
return Define(token.start, token.end, name, value)
if name.startswith('if') and name[2:3].isspace():
condition = name[3:].strip()
if condition.startswith('0') or condition.startswith('(0)'):
self._SkipIf0Blocks()
return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
# TODO(nnorwitz): remove _IgnoreUpTo(); it shouldn't be necessary.
def _IgnoreUpTo(self, token_type, token):
unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
count = 1
while 1:
token = self._GetNextToken()
if token.token_type != tokenize.PREPROCESSOR:
continue
name = token.name[1:].lstrip()
if name.startswith('endif'):
count -= 1
if count == 0:
break
elif name.startswith('if'):
count += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
if GetNextToken is None:
GetNextToken = self._GetNextToken
# Assumes the current token is open_paren and we will consume
# and return up to the close_paren.
count = 1
token = GetNextToken()
while 1:
if token.token_type == tokenize.SYNTAX:
if token.name == open_paren:
count += 1
elif token.name == close_paren:
count -= 1
if count == 0:
break
yield token
token = GetNextToken()
yield token
def _GetParameters(self):
return self._GetMatchingChar('(', ')')
def GetScope(self):
return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
if token.whence == tokenize.WHENCE_STREAM:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue.insert(0, token)
else:
assert token.whence == tokenize.WHENCE_QUEUE, token
self.token_queue.append(token)
def _AddBackTokens(self, tokens):
if tokens:
if tokens[-1].whence == tokenize.WHENCE_STREAM:
for token in tokens:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue[:0] = reversed(tokens)
else:
assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
self.token_queue.extend(reversed(tokens))
def GetName(self, seq=None):
"""Returns ([tokens], next_token_info)."""
GetNextToken = self._GetNextToken
if seq is not None:
it = iter(seq)
GetNextToken = lambda: next(it)
next_token = GetNextToken()
tokens = []
last_token_was_name = False
while (next_token.token_type == tokenize.NAME or
(next_token.token_type == tokenize.SYNTAX and
next_token.name in ('::', '<'))):
# Two NAMEs in a row means the identifier should terminate.
# It's probably some sort of variable declaration.
if last_token_was_name and next_token.token_type == tokenize.NAME:
break
last_token_was_name = next_token.token_type == tokenize.NAME
tokens.append(next_token)
# Handle templated names.
if next_token.name == '<':
tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
last_token_was_name = True
next_token = GetNextToken()
return tokens, next_token
def GetMethod(self, modifiers, templated_types):
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
assert len(return_type_and_name) >= 1
return self._GetMethod(return_type_and_name, modifiers, templated_types,
False)
def _GetMethod(self, return_type_and_name, modifiers, templated_types,
get_paren):
template_portion = None
if get_paren:
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
if token.name == '<':
# Handle templatized dtors.
template_portion = [token]
template_portion.extend(self._GetMatchingChar('<', '>'))
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '(', token
name = return_type_and_name.pop()
# Handle templatized ctors.
if name.name == '>':
index = 1
while return_type_and_name[index].name != '<':
index += 1
template_portion = return_type_and_name[index:] + [name]
del return_type_and_name[index:]
name = return_type_and_name.pop()
elif name.name == ']':
rt = return_type_and_name
assert rt[-1].name == '[', return_type_and_name
assert rt[-2].name == 'operator', return_type_and_name
name_seq = return_type_and_name[-2:]
del return_type_and_name[-2:]
name = tokenize.Token(tokenize.NAME, 'operator[]',
name_seq[0].start, name.end)
# Get the open paren so _GetParameters() below works.
unused_open_paren = self._GetNextToken()
# TODO(nnorwitz): store template_portion.
return_type = return_type_and_name
indices = name
if return_type:
indices = return_type[0]
# Force ctor for templatized ctors.
if name.name == self.in_class and not modifiers:
modifiers |= FUNCTION_CTOR
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
# Handling operator() is especially weird.
if name.name == 'operator' and not parameters:
token = self._GetNextToken()
assert token.name == '(', token
parameters = list(self._GetParameters())
del parameters[-1] # Remove trailing ')'.
token = self._GetNextToken()
while token.token_type == tokenize.NAME:
modifier_token = token
token = self._GetNextToken()
if modifier_token.name == 'const':
modifiers |= FUNCTION_CONST
elif modifier_token.name == '__attribute__':
# TODO(nnorwitz): handle more __attribute__ details.
modifiers |= FUNCTION_ATTRIBUTE
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == 'throw':
modifiers |= FUNCTION_THROW
assert token.name == '(', token
# Consume everything between the (parens).
unused_tokens = list(self._GetMatchingChar('(', ')'))
token = self._GetNextToken()
elif modifier_token.name == modifier_token.name.upper():
# HACK(nnorwitz): assume that all upper-case names
# are some macro we aren't expanding.
modifiers |= FUNCTION_UNKNOWN_ANNOTATION
else:
self.HandleError('unexpected token', modifier_token)
assert token.token_type == tokenize.SYNTAX, token
# Handle ctor initializers.
if token.name == ':':
# TODO(nnorwitz): anything else to handle for initializer list?
while token.name != ';' and token.name != '{':
token = self._GetNextToken()
# Handle pointer to functions that are really data but look
# like method declarations.
if token.name == '(':
if parameters[0].name == '*':
# name contains the return type.
name = parameters.pop()
# parameters contains the name of the data.
modifiers = [p.name for p in parameters]
# Already at the ( to open the parameter list.
function_parameters = list(self._GetMatchingChar('(', ')'))
del function_parameters[-1] # Remove trailing ')'.
# TODO(nnorwitz): store the function_parameters.
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
return self._CreateVariable(indices, name.name, indices.name,
modifiers, '', None)
# At this point, we got something like:
# return_type (type::*name_)(params);
# This is a data member called name_ that is a function pointer.
# With this code: void (sq_type::*field_)(string&);
# We get: name=void return_type=[] parameters=sq_type ... field_
# TODO(nnorwitz): is return_type always empty?
# TODO(nnorwitz): this isn't even close to being correct.
# Just put in something so we don't crash and can move on.
real_name = parameters[-1]
modifiers = [p.name for p in self._GetParameters()]
del modifiers[-1] # Remove trailing ')'.
return self._CreateVariable(indices, real_name.name, indices.name,
modifiers, '', None)
if token.name == '{':
body = list(self.GetScope())
del body[-1] # Remove trailing '}'.
else:
body = None
if token.name == '=':
token = self._GetNextToken()
assert token.token_type == tokenize.CONSTANT, token
assert token.name == '0', token
modifiers |= FUNCTION_PURE_VIRTUAL
token = self._GetNextToken()
if token.name == '[':
# TODO(nnorwitz): store tokens and improve parsing.
# template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
tokens = list(self._GetMatchingChar('[', ']'))
token = self._GetNextToken()
assert token.name == ';', (token, return_type_and_name, parameters)
# Looks like we got a method, not a function.
if len(return_type) > 2 and return_type[-1].name == '::':
return_type, in_class = \
self._GetReturnTypeAndClassName(return_type)
return Method(indices.start, indices.end, name.name, in_class,
return_type, parameters, modifiers, templated_types,
body, self.namespace_stack)
return Function(indices.start, indices.end, name.name, return_type,
parameters, modifiers, templated_types, body,
self.namespace_stack)
def _GetReturnTypeAndClassName(self, token_seq):
# Splitting the return type from the class name in a method
# can be tricky. For example, Return::Type::Is::Hard::To::Find().
# Where is the return type and where is the class name?
# The heuristic used is to pull the last name as the class name.
# This includes all the templated type info.
# TODO(nnorwitz): if there is only one name, like in the
# example above, punt and assume the last bit is the class name.
# Ignore a :: prefix, if it exists, so we can find the first real name.
i = 0
if token_seq[0].name == '::':
i = 1
# Ignore a :: suffix, if it exists.
end = len(token_seq) - 1
if token_seq[end-1].name == '::':
end -= 1
# Make a copy of the sequence so we can append a sentinel
# value. This is required because GetName has to have some
# terminating condition beyond the last name.
seq_copy = token_seq[i:end]
seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
names = []
while i < end:
# Iterate through the sequence parsing out each name.
new_name, next = self.GetName(seq_copy[i:])
assert new_name, 'Got empty new_name, next=%s' % next
# We got a pointer or ref. Add it to the name.
if next and next.token_type == tokenize.SYNTAX:
new_name.append(next)
names.append(new_name)
i += len(new_name)
# Now that we have the names, it's time to undo what we did.
# Remove the sentinel value.
names[-1].pop()
# Flatten the token sequence for the return type.
return_type = [e for seq in names[:-1] for e in seq]
# The class name is the last name.
class_name = names[-1]
return return_type, class_name
def handle_bool(self):
pass
def handle_char(self):
pass
def handle_int(self):
pass
def handle_long(self):
pass
def handle_short(self):
pass
def handle_double(self):
pass
def handle_float(self):
pass
def handle_void(self):
pass
def handle_wchar_t(self):
pass
def handle_unsigned(self):
pass
def handle_signed(self):
pass
def _GetNestedType(self, ctor):
name = None
name_tokens, token = self.GetName()
if name_tokens:
name = ''.join([t.name for t in name_tokens])
# Handle forward declarations.
if token.token_type == tokenize.SYNTAX and token.name == ';':
return ctor(token.start, token.end, name, None,
self.namespace_stack)
if token.token_type == tokenize.NAME and self._handling_typedef:
self._AddBackToken(token)
return ctor(token.start, token.end, name, None,
self.namespace_stack)
# Must be the type declaration.
fields = list(self._GetMatchingChar('{', '}'))
del fields[-1] # Remove trailing '}'.
if token.token_type == tokenize.SYNTAX and token.name == '{':
next = self._GetNextToken()
new_type = ctor(token.start, token.end, name, fields,
self.namespace_stack)
# A name means this is an anonymous type and the name
# is the variable declaration.
if next.token_type != tokenize.NAME:
return new_type
name = new_type
token = next
# Must be variable declaration using the type prefixed with keyword.
assert token.token_type == tokenize.NAME, token
return self._CreateVariable(token, token.name, name, [], '', None)
def handle_struct(self):
# Special case the handling typedef/aliasing of structs here.
# It would be a pain to handle in the class code.
name_tokens, var_token = self.GetName()
if name_tokens:
next_token = self._GetNextToken()
is_syntax = (var_token.token_type == tokenize.SYNTAX and
var_token.name[0] in '*&')
is_variable = (var_token.token_type == tokenize.NAME and
next_token.name == ';')
variable = var_token
if is_syntax and not is_variable:
variable = next_token
temp = self._GetNextToken()
if temp.token_type == tokenize.SYNTAX and temp.name == '(':
# Handle methods declared to return a struct.
t0 = name_tokens[0]
struct = tokenize.Token(tokenize.NAME, 'struct',
t0.start-7, t0.start-2)
type_and_name = [struct]
type_and_name.extend(name_tokens)
type_and_name.extend((var_token, next_token))
return self._GetMethod(type_and_name, 0, None, False)
assert temp.name == ';', (temp, name_tokens, var_token)
if is_syntax or (is_variable and not self._handling_typedef):
modifiers = ['struct']
type_name = ''.join([t.name for t in name_tokens])
position = name_tokens[0]
return self._CreateVariable(position, variable.name, type_name,
modifiers, var_token.name, None)
name_tokens.extend((var_token, next_token))
self._AddBackTokens(name_tokens)
else:
self._AddBackToken(var_token)
return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
def handle_union(self):
return self._GetNestedType(Union)
def handle_enum(self):
return self._GetNestedType(Enum)
def handle_auto(self):
# TODO(nnorwitz): warn about using auto? Probably not since it
# will be reclaimed and useful for C++0x.
pass
def handle_register(self):
pass
def handle_const(self):
pass
def handle_inline(self):
pass
def handle_extern(self):
pass
def handle_static(self):
pass
def handle_virtual(self):
# What follows must be a method.
token = token2 = self._GetNextToken()
if token.name == 'inline':
# HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
token2 = self._GetNextToken()
if token2.token_type == tokenize.SYNTAX and token2.name == '~':
return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
assert token.token_type == tokenize.NAME or token.name == '::', token
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
return_type_and_name.insert(0, token)
if token2 is not token:
return_type_and_name.insert(1, token2)
return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
None, False)
def handle_volatile(self):
pass
def handle_mutable(self):
pass
def handle_public(self):
assert self.in_class
self.visibility = VISIBILITY_PUBLIC
def handle_protected(self):
assert self.in_class
self.visibility = VISIBILITY_PROTECTED
def handle_private(self):
assert self.in_class
self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
t0 = tokens[0]
return Friend(t0.start, t0.end, tokens, self.namespace_stack)
def handle_static_cast(self):
pass
def handle_const_cast(self):
pass
def handle_dynamic_cast(self):
pass
def handle_reinterpret_cast(self):
pass
def handle_new(self):
pass
def handle_delete(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Delete(tokens[0].start, tokens[0].end, tokens)
def handle_typedef(self):
token = self._GetNextToken()
if (token.token_type == tokenize.NAME and
keywords.IsKeyword(token.name)):
# Token must be struct/enum/union/class.
method = getattr(self, 'handle_' + token.name)
self._handling_typedef = True
tokens = [method()]
self._handling_typedef = False
else:
tokens = [token]
# Get the remainder of the typedef up to the semi-colon.
tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))
# TODO(nnorwitz): clean all this up.
assert tokens
name = tokens.pop()
indices = name
if tokens:
indices = tokens[0]
if not indices:
indices = token
if name.name == ')':
# HACK(nnorwitz): Handle pointers to functions "properly".
if (len(tokens) >= 4 and
tokens[1].name == '(' and tokens[2].name == '*'):
tokens.append(name)
name = tokens[3]
elif name.name == ']':
# HACK(nnorwitz): Handle arrays properly.
if len(tokens) >= 2:
tokens.append(name)
name = tokens[1]
new_type = tokens
if tokens and isinstance(tokens[0], tokenize.Token):
new_type = self.converter.ToType(tokens)[0]
return Typedef(indices.start, indices.end, name.name,
new_type, self.namespace_stack)
def handle_typeid(self):
pass # Not needed yet.
def handle_typename(self):
pass # Not needed yet.
def _GetTemplatedTypes(self):
result = {}
tokens = list(self._GetMatchingChar('<', '>'))
len_tokens = len(tokens) - 1 # Ignore trailing '>'.
i = 0
while i < len_tokens:
key = tokens[i].name
i += 1
if keywords.IsKeyword(key) or key == ',':
continue
type_name = default = None
if i < len_tokens:
i += 1
if tokens[i-1].name == '=':
assert i < len_tokens, '%s %s' % (i, tokens)
default, unused_next_token = self.GetName(tokens[i:])
i += len(default)
else:
if tokens[i-1].name != ',':
# We got something like: Type variable.
# Re-adjust the key (variable) and type_name (Type).
key = tokens[i-1].name
type_name = tokens[i-2]
result[key] = (type_name, default)
return result
def handle_template(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '<', token
templated_types = self._GetTemplatedTypes()
# TODO(nnorwitz): for now, just ignore the template params.
token = self._GetNextToken()
if token.token_type == tokenize.NAME:
if token.name == 'class':
return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
elif token.name == 'struct':
return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
elif token.name == 'friend':
return self.handle_friend()
self._AddBackToken(token)
tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
tokens.append(last)
self._AddBackTokens(tokens)
if last.name == '(':
return self.GetMethod(FUNCTION_NONE, templated_types)
# Must be a variable definition.
return None
def handle_true(self):
pass # Nothing to do.
def handle_false(self):
pass # Nothing to do.
def handle_asm(self):
pass # Not needed yet.
def handle_class(self):
return self._GetClass(Class, VISIBILITY_PRIVATE, None)
def _GetBases(self):
# Get base classes.
bases = []
while 1:
token = self._GetNextToken()
assert token.token_type == tokenize.NAME, token
# TODO(nnorwitz): store kind of inheritance...maybe.
if token.name not in ('public', 'protected', 'private'):
# If inheritance type is not specified, it is private.
# Just put the token back so we can form a name.
# TODO(nnorwitz): it would be good to warn about this.
self._AddBackToken(token)
else:
# Check for virtual inheritance.
token = self._GetNextToken()
if token.name != 'virtual':
self._AddBackToken(token)
else:
# TODO(nnorwitz): store that we got virtual for this base.
pass
base, next_token = self.GetName()
bases_ast = self.converter.ToType(base)
assert len(bases_ast) == 1, bases_ast
bases.append(bases_ast[0])
assert next_token.token_type == tokenize.SYNTAX, next_token
if next_token.name == '{':
token = next_token
break
# Support multiple inheritance.
assert next_token.name == ',', next_token
return bases, token
def _GetClass(self, class_type, visibility, templated_types):
class_name = None
class_token = self._GetNextToken()
if class_token.token_type != tokenize.NAME:
assert class_token.token_type == tokenize.SYNTAX, class_token
token = class_token
else:
# Skip any macro (e.g. storage class specifiers) after the
# 'class' keyword.
next_token = self._GetNextToken()
if next_token.token_type == tokenize.NAME:
self._AddBackToken(next_token)
else:
self._AddBackTokens([class_token, next_token])
name_tokens, token = self.GetName()
class_name = ''.join([t.name for t in name_tokens])
bases = None
if token.token_type == tokenize.SYNTAX:
if token.name == ';':
# Forward declaration.
return class_type(class_token.start, class_token.end,
class_name, None, templated_types, None,
self.namespace_stack)
if token.name in '*&':
# Inline forward declaration. Could be method or data.
name_token = self._GetNextToken()
next_token = self._GetNextToken()
if next_token.name == ';':
# Handle data
modifiers = ['class']
return self._CreateVariable(class_token, name_token.name,
class_name,
modifiers, token.name, None)
else:
# Assume this is a method.
tokens = (class_token, token, name_token, next_token)
self._AddBackTokens(tokens)
return self.GetMethod(FUNCTION_NONE, None)
if token.name == ':':
bases, token = self._GetBases()
body = None
if token.token_type == tokenize.SYNTAX and token.name == '{':
assert token.token_type == tokenize.SYNTAX, token
assert token.name == '{', token
ast = AstBuilder(self.GetScope(), self.filename, class_name,
visibility, self.namespace_stack)
body = list(ast.Generate())
if not self._handling_typedef:
token = self._GetNextToken()
if token.token_type != tokenize.NAME:
assert token.token_type == tokenize.SYNTAX, token
assert token.name == ';', token
else:
new_class = class_type(class_token.start, class_token.end,
class_name, bases, None,
body, self.namespace_stack)
modifiers = []
return self._CreateVariable(class_token,
token.name, new_class,
modifiers, token.name, None)
else:
if not self._handling_typedef:
self.HandleError('non-typedef token', token)
self._AddBackToken(token)
return class_type(class_token.start, class_token.end, class_name,
bases, templated_types, body, self.namespace_stack)
def handle_namespace(self):
token = self._GetNextToken()
# Support anonymous namespaces.
name = None
if token.token_type == tokenize.NAME:
name = token.name
token = self._GetNextToken()
self.namespace_stack.append(name)
assert token.token_type == tokenize.SYNTAX, token
# Create an internal token that denotes when the namespace is complete.
internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
None, None)
internal_token.whence = token.whence
if token.name == '=':
# TODO(nnorwitz): handle aliasing namespaces.
name, next_token = self.GetName()
assert next_token.name == ';', next_token
self._AddBackToken(internal_token)
else:
assert token.name == '{', token
tokens = list(self.GetScope())
# Replace the trailing } with the internal namespace pop token.
tokens[-1] = internal_token
# Handle namespace with nothing in it.
self._AddBackTokens(tokens)
return None
def handle_using(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Using(tokens[0].start, tokens[0].end, tokens)
def handle_explicit(self):
assert self.in_class
# Nothing much to do.
# TODO(nnorwitz): maybe verify the method name == class name.
# This must be a ctor.
return self.GetMethod(FUNCTION_CTOR, None)
def handle_this(self):
pass # Nothing to do.
def handle_operator(self):
# Pull off the next token(s?) and make that part of the method name.
pass
def handle_sizeof(self):
pass
def handle_case(self):
pass
def handle_switch(self):
pass
def handle_default(self):
token = self._GetNextToken()
assert token.token_type == tokenize.SYNTAX
assert token.name == ':'
def handle_if(self):
pass
def handle_else(self):
pass
def handle_return(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
if not tokens:
return Return(self.current_token.start, self.current_token.end, None)
return Return(tokens[0].start, tokens[0].end, tokens)
def handle_goto(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert len(tokens) == 1, str(tokens)
return Goto(tokens[0].start, tokens[0].end, tokens[0].name)
def handle_try(self):
pass # Not needed yet.
def handle_catch(self):
pass # Not needed yet.
def handle_throw(self):
pass # Not needed yet.
def handle_while(self):
pass
def handle_do(self):
pass
def handle_for(self):
pass
def handle_break(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def handle_continue(self):
self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
"""Utility method that returns an AstBuilder from source code.
Args:
source: 'C++ source code'
filename: 'file1'
Returns:
AstBuilder
"""
return AstBuilder(tokenize.GetTokens(source), filename)
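# Usage sketch (illustrative, not part of the original module): build an AST
# for an in-memory code snippet and walk the generated nodes.
#   builder = BuilderFromSource('class Foo {};', '<string>')
#   nodes = list(builder.Generate())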
def PrintIndentifiers(filename, should_print):
"""Prints all identifiers for a C++ source file.
Args:
filename: 'file1'
should_print: predicate with signature: bool Function(token)
"""
source = utils.ReadFile(filename, False)
if source is None:
sys.stderr.write('Unable to find: %s\n' % filename)
return
#print('Processing %s' % actual_filename)
builder = BuilderFromSource(source, filename)
try:
for node in builder.Generate():
if should_print(node):
print(node.name)
except KeyboardInterrupt:
return
except:
pass
def PrintAllIndentifiers(filenames, should_print):
"""Prints all identifiers for each C++ source file in filenames.
Args:
filenames: ['file1', 'file2', ...]
should_print: predicate with signature: bool Function(token)
"""
for path in filenames:
PrintIndentifiers(path, should_print)
def main(argv):
for filename in argv[1:]:
source = utils.ReadFile(filename)
if source is None:
continue
print('Processing %s' % filename)
builder = BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
except KeyboardInterrupt:
return
except:
# Already printed a warning, print the traceback and continue.
traceback.print_exc()
else:
if utils.DEBUG:
for ast in entire_ast:
print(ast)
if __name__ == '__main__':
main(sys.argv)
|
rburgoyne/repos-search
|
refs/heads/master
|
hook/changehandler.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class ReposSearchChangeHandler(object):
'''
Interface for indexing handlers, based on "svnlook changed" change log.
'''
def onRevisionBegin(self, rev):
pass
def onRevisionComplete(self, rev):
pass
def onBatchComplete(self):
'''
Called when the current indexing batch operation has completed.
Can be after a single revision or a range. Useful for commit/optimize etc.
Handlers may also use the revision counter to detect need for commit.
Commit, drop and optimize can be implemented independently or using ReposSolr.
'''
pass
def onStartOver(self):
'''
        Called to order a drop of all indexed documents for the current repo.
Make sure it is only the current repo.
Dropping of an entire core's data is a manual admin operation.
'''
pass
def onOptimize(self):
'''
Called to recommend optimize on a core.
'''
pass
def onFolderCopyBegin(self, copyFromPath, path):
'''
        Return False to skip handling a folder added with copy-from info
        as onAdd invocations for the folder and all of its sub-items.
Note: may not be called if option foldercopy is 'nobranch' or 'no'.
Paths start with slash.
'''
pass
def onFolderCopyComplete(self, copyFromPath, path):
pass
def onFolderDeleteBegin(self, path):
'''
Called after the onDelete for the actual folder that svn operated on.
Return True to receive separate onDelete for every item in folder.
        Return False to get only onDelete for the actual folder. In most schemas
        recursive delete can be accomplished using a wildcard query when path.isFolder().
'''
pass
def onFolderDeleteComplete(self, path):
pass
# --- below are the actual item change events
def onAdd(self, path, copyFromPath):
'''
Invoked both for svn status A(dd) and R(eplace),
because R is not represented in svnlook.
For R onDelete is invoked followed by onAdd.
Path is the path from repository root, type ChangePath, starting with slash,
ending with slash for folders but not for files.
CopyFromPath is None unless add is a copy. For contents of folder copy
it is also None because only the folder entry is a real svn copy.
        Note that files and folders may be added with svn properties,
but onChangeProps will never be called for Add.
'''
pass
def onChange(self, path, hasPropChanges):
'''
Invoked for 'U'. If hasPropChanges is True this call will be
followed by onChangeProps for the same item.
'''
pass
def onChangeProps(self, path):
'''
Invoked for 'UU' and '_U'. For the former it is called after onChange.
'''
pass
def onDelete(self, path):
'''
Invoked for 'D' and each item in deleted folder.
'''
pass
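# Illustrative sketch (not part of repos-search): a minimal concrete handler
# that only counts item change events, showing the shape of an implementation
# of the interface above.
class CountingChangeHandler(ReposSearchChangeHandler):
    def __init__(self):
        self.adds = 0
        self.changes = 0
        self.deletes = 0
    def onAdd(self, path, copyFromPath):
        self.adds += 1
    def onChange(self, path, hasPropChanges):
        self.changes += 1
    def onDelete(self, path):
        self.deletes += 1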
|
raajitr/django_hangman
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/contrib/gis/db/backends/oracle/adapter.py
|
273
|
from cx_Oracle import CLOB
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.geos import GeometryCollection, Polygon
from django.utils.six.moves import range
class OracleSpatialAdapter(WKTAdapter):
input_size = CLOB
def __init__(self, geom):
"""
Oracle requires that polygon rings are in proper orientation. This
affects spatial operations and an invalid orientation may cause
failures. Correct orientations are:
* Outer ring - counter clockwise
* Inner ring(s) - clockwise
"""
if isinstance(geom, Polygon):
self._fix_polygon(geom)
elif isinstance(geom, GeometryCollection):
self._fix_geometry_collection(geom)
self.wkt = geom.wkt
self.srid = geom.srid
def _fix_polygon(self, poly):
# Fix single polygon orientation as described in __init__()
if self._isClockwise(poly.exterior_ring):
poly.exterior_ring = list(reversed(poly.exterior_ring))
for i in range(1, len(poly)):
if not self._isClockwise(poly[i]):
poly[i] = list(reversed(poly[i]))
return poly
def _fix_geometry_collection(self, coll):
# Fix polygon orientations in geometry collections as described in
# __init__()
for i, geom in enumerate(coll):
if isinstance(geom, Polygon):
coll[i] = self._fix_polygon(geom)
def _isClockwise(self, coords):
# A modified shoelace algorithm to determine polygon orientation.
# See https://en.wikipedia.org/wiki/Shoelace_formula
n = len(coords)
area = 0.0
for i in range(n):
j = (i + 1) % n
area += coords[i][0] * coords[j][1]
area -= coords[j][0] * coords[i][1]
return area < 0.0
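# Worked example (illustrative, not part of Django): for the counter-clockwise
# unit square ring (0,0), (1,0), (1,1), (0,1), (0,0), the shoelace sum is
# (0*0 - 1*0) + (1*1 - 1*0) + (1*1 - 0*1) + (0*0 - 0*1) + (0*0 - 0*0) = 2.
# The signed area is positive, so _isClockwise() returns False and the ring
# already satisfies Oracle's outer-ring orientation rule.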
|
HyperloopTeam/FullOpenMDAO
|
refs/heads/master
|
lib/python2.7/site-packages/pycrypto-2.3-py2.7-macosx-10.10-x86_64.egg/Crypto/Random/OSRNG/rng_base.py
|
11
|
#
# Random/OSRNG/rng_base.py : Base class for OSRNG
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
from Crypto.Util.python_compat import *
class BaseRNG(object):
def __init__(self):
self.closed = False
self._selftest()
def __del__(self):
self.close()
def _selftest(self):
# Test that urandom can return data
data = self.read(16)
if len(data) != 16:
raise AssertionError("read truncated")
# Test that we get different data every time (if we don't, the RNG is
# probably malfunctioning)
data2 = self.read(16)
if data == data2:
raise AssertionError("OS RNG returned duplicate data")
# PEP 343: Support for the "with" statement
    def __enter__(self):
        # Return the RNG so "with" binds a usable object instead of None.
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """PEP 343 support: __exit__ must accept the exception triple."""
        self.close()
def close(self):
if not self.closed:
self._close()
self.closed = True
def flush(self):
pass
def read(self, N=-1):
"""Return N bytes from the RNG."""
if self.closed:
raise ValueError("I/O operation on closed file")
if not isinstance(N, (long, int)):
raise TypeError("an integer is required")
if N < 0:
raise ValueError("cannot read to end of infinite stream")
elif N == 0:
return ""
data = self._read(N)
if len(data) != N:
raise AssertionError("%s produced truncated output (requested %d, got %d)" % (self.name, N, len(data)))
return data
def _close(self):
raise NotImplementedError("child class must implement this")
def _read(self, N):
raise NotImplementedError("child class must implement this")
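# Illustrative sketch (not part of PyCrypto): a minimal concrete subclass
# backed by os.urandom, showing the two hooks (_read and _close) a child
# class must implement to satisfy the self-test and read() contract above.
if __name__ == '__main__':
    import os
    class _UrandomRNG(BaseRNG):
        name = "<os.urandom>"   # used in read()'s truncation error message
        def _read(self, N):
            # Delegate to the OS entropy source.
            return os.urandom(N)
        def _close(self):
            pass
    rng = _UrandomRNG()
    assert len(rng.read(16)) == 16
    rng.close()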
# vim:set ts=4 sw=4 sts=4 expandtab:
|
chinmaygarde/depot_tools
|
refs/heads/master
|
third_party/protobuf26/internal/cpp_message.py
|
26
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains helper functions used to create protocol message classes from
Descriptor objects at runtime backed by the protocol buffer C++ API.
"""
__author__ = 'petar@google.com (Petar Petrov)'
import copy_reg
import operator
from protobuf26.internal import _net_proto2___python
from protobuf26.internal import enum_type_wrapper
from protobuf26 import message
_LABEL_REPEATED = _net_proto2___python.LABEL_REPEATED
_LABEL_OPTIONAL = _net_proto2___python.LABEL_OPTIONAL
_CPPTYPE_MESSAGE = _net_proto2___python.CPPTYPE_MESSAGE
_TYPE_MESSAGE = _net_proto2___python.TYPE_MESSAGE
def GetDescriptorPool():
"""Creates a new DescriptorPool C++ object."""
return _net_proto2___python.NewCDescriptorPool()
_pool = GetDescriptorPool()
def GetFieldDescriptor(full_field_name):
"""Searches for a field descriptor given a full field name."""
return _pool.FindFieldByName(full_field_name)
def BuildFile(content):
"""Registers a new proto file in the underlying C++ descriptor pool."""
_net_proto2___python.BuildFile(content)
def GetExtensionDescriptor(full_extension_name):
"""Searches for extension descriptor given a full field name."""
return _pool.FindExtensionByName(full_extension_name)
def NewCMessage(full_message_name):
"""Creates a new C++ protocol message by its name."""
return _net_proto2___python.NewCMessage(full_message_name)
def ScalarProperty(cdescriptor):
"""Returns a scalar property for the given descriptor."""
def Getter(self):
return self._cmsg.GetScalar(cdescriptor)
def Setter(self, value):
self._cmsg.SetScalar(cdescriptor, value)
return property(Getter, Setter)
def CompositeProperty(cdescriptor, message_type):
"""Returns a Python property the given composite field."""
def Getter(self):
sub_message = self._composite_fields.get(cdescriptor.name, None)
if sub_message is None:
cmessage = self._cmsg.NewSubMessage(cdescriptor)
sub_message = message_type._concrete_class(__cmessage=cmessage)
self._composite_fields[cdescriptor.name] = sub_message
return sub_message
return property(Getter)
class RepeatedScalarContainer(object):
"""Container for repeated scalar fields."""
__slots__ = ['_message', '_cfield_descriptor', '_cmsg']
def __init__(self, msg, cfield_descriptor):
self._message = msg
self._cmsg = msg._cmsg
self._cfield_descriptor = cfield_descriptor
def append(self, value):
self._cmsg.AddRepeatedScalar(
self._cfield_descriptor, value)
def extend(self, sequence):
for element in sequence:
self.append(element)
def insert(self, key, value):
values = self[slice(None, None, None)]
values.insert(key, value)
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def remove(self, value):
values = self[slice(None, None, None)]
values.remove(value)
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def __setitem__(self, key, value):
values = self[slice(None, None, None)]
values[key] = value
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def __getitem__(self, key):
return self._cmsg.GetRepeatedScalar(self._cfield_descriptor, key)
def __delitem__(self, key):
self._cmsg.DeleteRepeatedField(self._cfield_descriptor, key)
def __len__(self):
return len(self[slice(None, None, None)])
def __eq__(self, other):
if self is other:
return True
if not operator.isSequenceType(other):
raise TypeError(
'Can only compare repeated scalar fields against sequences.')
# We are presumably comparing against some other sequence type.
return other == self[slice(None, None, None)]
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
def sort(self, *args, **kwargs):
# Maintain compatibility with the previous interface.
if 'sort_function' in kwargs:
kwargs['cmp'] = kwargs.pop('sort_function')
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor,
sorted(self, *args, **kwargs))
def RepeatedScalarProperty(cdescriptor):
"""Returns a Python property the given repeated scalar field."""
def Getter(self):
container = self._composite_fields.get(cdescriptor.name, None)
if container is None:
container = RepeatedScalarContainer(self, cdescriptor)
self._composite_fields[cdescriptor.name] = container
return container
def Setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % cdescriptor.name)
doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
return property(Getter, Setter, doc=doc)
class RepeatedCompositeContainer(object):
"""Container for repeated composite fields."""
__slots__ = ['_message', '_subclass', '_cfield_descriptor', '_cmsg']
def __init__(self, msg, cfield_descriptor, subclass):
self._message = msg
self._cmsg = msg._cmsg
self._subclass = subclass
self._cfield_descriptor = cfield_descriptor
def add(self, **kwargs):
cmessage = self._cmsg.AddMessage(self._cfield_descriptor)
return self._subclass(__cmessage=cmessage, __owner=self._message, **kwargs)
def extend(self, elem_seq):
"""Extends by appending the given sequence of elements of the same type
as this one, copying each individual message.
"""
for message in elem_seq:
self.add().MergeFrom(message)
def remove(self, value):
# TODO(protocol-devel): This is inefficient as it needs to generate a
# message pointer for each message only to do index(). Move this to a C++
# extension function.
self.__delitem__(self[slice(None, None, None)].index(value))
def MergeFrom(self, other):
for message in other[:]:
self.add().MergeFrom(message)
def __getitem__(self, key):
cmessages = self._cmsg.GetRepeatedMessage(
self._cfield_descriptor, key)
subclass = self._subclass
if not isinstance(cmessages, list):
return subclass(__cmessage=cmessages, __owner=self._message)
return [subclass(__cmessage=m, __owner=self._message) for m in cmessages]
def __delitem__(self, key):
self._cmsg.DeleteRepeatedField(
self._cfield_descriptor, key)
def __len__(self):
return self._cmsg.FieldLength(self._cfield_descriptor)
def __eq__(self, other):
"""Compares the current instance with another one."""
if self is other:
return True
if not isinstance(other, self.__class__):
raise TypeError('Can only compare repeated composite fields against '
'other repeated composite fields.')
messages = self[slice(None, None, None)]
other_messages = other[slice(None, None, None)]
return messages == other_messages
def __hash__(self):
raise TypeError('unhashable object')
def sort(self, cmp=None, key=None, reverse=False, **kwargs):
# Maintain compatibility with the old interface.
if cmp is None and 'sort_function' in kwargs:
cmp = kwargs.pop('sort_function')
# The cmp function, if provided, is passed the results of the key function,
# so we only need to wrap one of them.
if key is None:
index_key = self.__getitem__
else:
index_key = lambda i: key(self[i])
# Sort the list of current indexes by the underlying object.
indexes = range(len(self))
indexes.sort(cmp=cmp, key=index_key, reverse=reverse)
# Apply the transposition.
for dest, src in enumerate(indexes):
if dest == src:
continue
self._cmsg.SwapRepeatedFieldElements(self._cfield_descriptor, dest, src)
# Don't swap the same value twice.
indexes[src] = src
def RepeatedCompositeProperty(cdescriptor, message_type):
"""Returns a Python property for the given repeated composite field."""
def Getter(self):
container = self._composite_fields.get(cdescriptor.name, None)
if container is None:
container = RepeatedCompositeContainer(
self, cdescriptor, message_type._concrete_class)
self._composite_fields[cdescriptor.name] = container
return container
def Setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % cdescriptor.name)
doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
return property(Getter, Setter, doc=doc)
class ExtensionDict(object):
"""Extension dictionary added to each protocol message."""
def __init__(self, msg):
self._message = msg
self._cmsg = msg._cmsg
self._values = {}
def __setitem__(self, extension, value):
from protobuf26 import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
cdescriptor = extension._cdescriptor
if (cdescriptor.label != _LABEL_OPTIONAL or
cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
raise TypeError('Extension %r is repeated and/or a composite type.' % (
extension.full_name,))
self._cmsg.SetScalar(cdescriptor, value)
self._values[extension] = value
def __getitem__(self, extension):
from protobuf26 import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
cdescriptor = extension._cdescriptor
if (cdescriptor.label != _LABEL_REPEATED and
cdescriptor.cpp_type != _CPPTYPE_MESSAGE):
return self._cmsg.GetScalar(cdescriptor)
ext = self._values.get(extension, None)
if ext is not None:
return ext
ext = self._CreateNewHandle(extension)
self._values[extension] = ext
return ext
def ClearExtension(self, extension):
from protobuf26 import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
self._cmsg.ClearFieldByDescriptor(extension._cdescriptor)
if extension in self._values:
del self._values[extension]
def HasExtension(self, extension):
from protobuf26 import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
return self._cmsg.HasFieldByDescriptor(extension._cdescriptor)
def _FindExtensionByName(self, name):
"""Tries to find a known extension with the specified name.
Args:
name: Extension full name.
Returns:
Extension field descriptor.
"""
return self._message._extensions_by_name.get(name, None)
def _CreateNewHandle(self, extension):
cdescriptor = extension._cdescriptor
if (cdescriptor.label != _LABEL_REPEATED and
cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
cmessage = self._cmsg.NewSubMessage(cdescriptor)
return extension.message_type._concrete_class(__cmessage=cmessage)
if cdescriptor.label == _LABEL_REPEATED:
if cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
return RepeatedCompositeContainer(
self._message, cdescriptor, extension.message_type._concrete_class)
else:
return RepeatedScalarContainer(self._message, cdescriptor)
# This shouldn't happen!
assert False
return None
def NewMessage(bases, message_descriptor, dictionary):
"""Creates a new protocol message *class*."""
_AddClassAttributesForNestedExtensions(message_descriptor, dictionary)
_AddEnumValues(message_descriptor, dictionary)
_AddDescriptors(message_descriptor, dictionary)
return bases
def InitMessage(message_descriptor, cls):
"""Constructs a new message instance (called before instance's __init__)."""
cls._extensions_by_name = {}
_AddInitMethod(message_descriptor, cls)
_AddMessageMethods(message_descriptor, cls)
_AddPropertiesForExtensions(message_descriptor, cls)
copy_reg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
def _AddDescriptors(message_descriptor, dictionary):
"""Sets up a new protocol message class dictionary.
Args:
message_descriptor: A Descriptor instance describing this message type.
dictionary: Class dictionary to which we'll add a '__slots__' entry.
"""
dictionary['__descriptors'] = {}
for field in message_descriptor.fields:
dictionary['__descriptors'][field.name] = GetFieldDescriptor(
field.full_name)
dictionary['__slots__'] = list(dictionary['__descriptors'].iterkeys()) + [
'_cmsg', '_owner', '_composite_fields', 'Extensions', '_HACK_REFCOUNTS']
def _AddEnumValues(message_descriptor, dictionary):
"""Sets class-level attributes for all enum fields defined in this message.
Args:
message_descriptor: Descriptor object for this message type.
dictionary: Class dictionary that should be populated.
"""
for enum_type in message_descriptor.enum_types:
dictionary[enum_type.name] = enum_type_wrapper.EnumTypeWrapper(enum_type)
for enum_value in enum_type.values:
dictionary[enum_value.name] = enum_value.number
def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary):
"""Adds class attributes for the nested extensions."""
extension_dict = message_descriptor.extensions_by_name
for extension_name, extension_field in extension_dict.iteritems():
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
def _AddInitMethod(message_descriptor, cls):
"""Adds an __init__ method to cls."""
# Create and attach message field properties to the message class.
# This can be done just once per message class, since property setters and
# getters are passed the message instance.
# This makes message instantiation extremely fast, and at the same time it
# doesn't require the creation of property objects for each message instance,
# which saves a lot of memory.
for field in message_descriptor.fields:
field_cdescriptor = cls.__descriptors[field.name]
if field.label == _LABEL_REPEATED:
if field.cpp_type == _CPPTYPE_MESSAGE:
value = RepeatedCompositeProperty(field_cdescriptor, field.message_type)
else:
value = RepeatedScalarProperty(field_cdescriptor)
elif field.cpp_type == _CPPTYPE_MESSAGE:
value = CompositeProperty(field_cdescriptor, field.message_type)
else:
value = ScalarProperty(field_cdescriptor)
setattr(cls, field.name, value)
# Attach a constant with the field number.
constant_name = field.name.upper() + '_FIELD_NUMBER'
setattr(cls, constant_name, field.number)
def Init(self, **kwargs):
"""Message constructor."""
cmessage = kwargs.pop('__cmessage', None)
if cmessage:
self._cmsg = cmessage
else:
self._cmsg = NewCMessage(message_descriptor.full_name)
# Keep a reference to the owner, as the owner keeps a reference to the
# underlying protocol buffer message.
owner = kwargs.pop('__owner', None)
if owner:
self._owner = owner
if message_descriptor.is_extendable:
self.Extensions = ExtensionDict(self)
else:
# Reference counting in the C++ code is broken and depends on
# the Extensions reference to keep this object alive during unit
# tests (see b/4856052). Remove this once b/4945904 is fixed.
self._HACK_REFCOUNTS = self
self._composite_fields = {}
for field_name, field_value in kwargs.iteritems():
field_cdescriptor = self.__descriptors.get(field_name, None)
if not field_cdescriptor:
raise ValueError('Protocol message has no "%s" field.' % field_name)
if field_cdescriptor.label == _LABEL_REPEATED:
if field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
field_name = getattr(self, field_name)
for val in field_value:
field_name.add().MergeFrom(val)
else:
getattr(self, field_name).extend(field_value)
elif field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
getattr(self, field_name).MergeFrom(field_value)
else:
setattr(self, field_name, field_value)
Init.__module__ = None
Init.__doc__ = None
cls.__init__ = Init
def _IsMessageSetExtension(field):
"""Checks if a field is a message set extension."""
return (field.is_extension and
field.containing_type.has_options and
field.containing_type.GetOptions().message_set_wire_format and
field.type == _TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == _LABEL_OPTIONAL)
def _AddMessageMethods(message_descriptor, cls):
"""Adds the methods to a protocol message class."""
if message_descriptor.is_extendable:
def ClearExtension(self, extension):
self.Extensions.ClearExtension(extension)
def HasExtension(self, extension):
return self.Extensions.HasExtension(extension)
def HasField(self, field_name):
return self._cmsg.HasField(field_name)
def ClearField(self, field_name):
child_cmessage = None
if field_name in self._composite_fields:
child_field = self._composite_fields[field_name]
del self._composite_fields[field_name]
child_cdescriptor = self.__descriptors[field_name]
# TODO(anuraag): Support clearing repeated message fields as well.
if (child_cdescriptor.label != _LABEL_REPEATED and
child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
child_field._owner = None
child_cmessage = child_field._cmsg
if child_cmessage is not None:
self._cmsg.ClearField(field_name, child_cmessage)
else:
self._cmsg.ClearField(field_name)
def Clear(self):
cmessages_to_release = []
for field_name, child_field in self._composite_fields.iteritems():
child_cdescriptor = self.__descriptors[field_name]
# TODO(anuraag): Support clearing repeated message fields as well.
if (child_cdescriptor.label != _LABEL_REPEATED and
child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
child_field._owner = None
cmessages_to_release.append((child_cdescriptor, child_field._cmsg))
self._composite_fields.clear()
self._cmsg.Clear(cmessages_to_release)
def IsInitialized(self, errors=None):
if self._cmsg.IsInitialized():
return True
if errors is not None:
      errors.extend(self.FindInitializationErrors())
return False
def SerializeToString(self):
if not self.IsInitialized():
raise message.EncodeError(
'Message %s is missing required fields: %s' % (
self._cmsg.full_name, ','.join(self.FindInitializationErrors())))
return self._cmsg.SerializeToString()
def SerializePartialToString(self):
return self._cmsg.SerializePartialToString()
def ParseFromString(self, serialized):
self.Clear()
self.MergeFromString(serialized)
def MergeFromString(self, serialized):
byte_size = self._cmsg.MergeFromString(serialized)
if byte_size < 0:
raise message.DecodeError('Unable to merge from string.')
return byte_size
def MergeFrom(self, msg):
if not isinstance(msg, cls):
raise TypeError(
"Parameter to MergeFrom() must be instance of same class: "
"expected %s got %s." % (cls.__name__, type(msg).__name__))
self._cmsg.MergeFrom(msg._cmsg)
def CopyFrom(self, msg):
self._cmsg.CopyFrom(msg._cmsg)
def ByteSize(self):
return self._cmsg.ByteSize()
def SetInParent(self):
return self._cmsg.SetInParent()
def ListFields(self):
all_fields = []
field_list = self._cmsg.ListFields()
fields_by_name = cls.DESCRIPTOR.fields_by_name
for is_extension, field_name in field_list:
if is_extension:
extension = cls._extensions_by_name[field_name]
all_fields.append((extension, self.Extensions[extension]))
else:
field_descriptor = fields_by_name[field_name]
all_fields.append(
(field_descriptor, getattr(self, field_name)))
all_fields.sort(key=lambda item: item[0].number)
return all_fields
def FindInitializationErrors(self):
return self._cmsg.FindInitializationErrors()
def __str__(self):
return str(self._cmsg)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, self.__class__):
return False
return self.ListFields() == other.ListFields()
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
def __unicode__(self):
# Lazy import to prevent circular import when text_format imports this file.
from protobuf26 import text_format
return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
# Attach the local methods to the message class.
for key, value in locals().copy().iteritems():
if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'):
setattr(cls, key, value)
# Static methods:
def RegisterExtension(extension_handle):
extension_handle.containing_type = cls.DESCRIPTOR
cls._extensions_by_name[extension_handle.full_name] = extension_handle
if _IsMessageSetExtension(extension_handle):
# MessageSet extension. Also register under type name.
cls._extensions_by_name[
extension_handle.message_type.full_name] = extension_handle
cls.RegisterExtension = staticmethod(RegisterExtension)
def FromString(string):
msg = cls()
msg.MergeFromString(string)
return msg
cls.FromString = staticmethod(FromString)
def _AddPropertiesForExtensions(message_descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = message_descriptor.extensions_by_name
for extension_name, extension_field in extension_dict.iteritems():
constant_name = extension_name.upper() + '_FIELD_NUMBER'
setattr(cls, constant_name, extension_field.number)
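# Illustrative sketch (not part of the protobuf sources): the closure-based
# property pattern used by ScalarProperty and friends above, reduced to plain
# Python. Properties are created once per class from a closure over the field
# name, so all instances share them and no per-instance property objects are
# needed.
if __name__ == '__main__':
  def _MakeScalarProperty(name):
    def Getter(self):
      return self._values.get(name)
    def Setter(self, value):
      self._values[name] = value
    return property(Getter, Setter)
  class _Demo(object):
    def __init__(self):
      self._values = {}
  _Demo.x = _MakeScalarProperty('x')
  d = _Demo()
  d.x = 3
  assert d.x == 3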
|
aleontiev/dj
|
refs/heads/master
|
djay/blueprints/command/templates/{{app}}/management/commands/{{name}}.py
|
2
|
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "{{doc}}"
def handle(self, **options):
self.stdout.write("{{name}} called")
|
CMU-Robotics-Club/roboticsclub.org
|
refs/heads/master
|
robocrm/migrations/0046_auto_20150311_1727.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('robocrm', '0045_machine_toolbox_id'),
]
operations = [
migrations.AlterField(
model_name='machine',
name='toolbox_id',
field=models.PositiveIntegerField(default=None, blank=True, null=True, unique=True),
preserve_default=True,
),
]
|
lmazuel/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-network/azure/mgmt/network/v2016_12_01/operations/routes_operations.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class RoutesOperations(object):
"""RoutesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-12-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def _delete_initial(
self, resource_group_name, route_table_name, route_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, route_table_name, route_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}
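    # Usage sketch (illustrative; assumes a configured management client that
    # exposes this operations group, e.g. as `client.routes`):
    #   poller = client.routes.delete('my-rg', 'my-route-table', 'my-route')
    #   poller.result()  # block until the long-running delete completes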
def get(
self, resource_group_name, route_table_name, route_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Route or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2016_12_01.models.Route or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Route', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}
def _create_or_update_initial(
self, resource_group_name, route_table_name, route_name, route_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(route_parameters, 'Route')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Route', response)
if response.status_code == 201:
deserialized = self._deserialize('Route', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, route_table_name, route_name, route_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update
route operation.
:type route_parameters: ~azure.mgmt.network.v2016_12_01.models.Route
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Route or
ClientRawResponse<Route> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2016_12_01.models.Route]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2016_12_01.models.Route]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
route_parameters=route_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Route', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'}
def list(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, **operation_config):
"""Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Route
:rtype:
~azure.mgmt.network.v2016_12_01.models.RoutePaged[~azure.mgmt.network.v2016_12_01.models.Route]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RoutePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RoutePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'}
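    # Usage sketch (illustrative; same `client.routes` assumption as above):
    #   for route in client.routes.list('my-rg', 'my-route-table'):
    #       print(route.name)  # pages are fetched lazily via next_link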
|
MartijnRas/cherrymusic
|
refs/heads/devel
|
cherrymusicserver/api/v1/users.py
|
3
|
# -*- coding: utf-8 -*- #
#
# CherryMusic - a standalone music server
# Copyright (c) 2012-2015 Tom Wallroth & Tilman Boerner
#
# Project page:
# http://fomori.org/cherrymusic/
# Sources on github:
# http://github.com/devsnd/cherrymusic/
#
# CherryMusic is based on
# jPlayer (GPL/MIT license) http://www.jplayer.org/
# CherryPy (BSD license) http://www.cherrypy.org/
#
# licensed under GNU GPL version 3 (or later)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# Python 2.6+ backward compatibility
from __future__ import unicode_literals
import cherrypy
from cherrymusicserver.api.v1.models import Model
from cherrymusicserver.api.v1.resources import Resource
def get_resource():
return users()
class User(Model):
name = Model.Field(None)
roles = Model.Field([])
_userdb = {
    # Commas are required: ('admin' 'user') would concatenate to one string.
    'adm': User(id=1, name='adm', roles=('admin', 'user')),
    'puh': User(id=22, name='puh', roles=('user', 'bear')),
}
class users(Resource):
def GET(self, name=None):
if name:
            if name not in _userdb:
raise cherrypy.NotFound(name)
return _userdb[name]
return sorted(_userdb)
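# Usage sketch (illustrative): with this resource mounted, GET without a name
# lists user names and GET with a name returns that user's model:
#   users().GET()       -> ['adm', 'puh']
#   users().GET('adm')  -> _userdb['adm']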
|
dfolch/pysal
|
refs/heads/master
|
pysal/core/IOHandlers/geobugs_txt.py
|
15
|
import pysal
import os.path
import pysal.core.FileIO as FileIO
from pysal.weights import W
from warnings import warn
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>"
__all__ = ["GeoBUGSTextIO"]
class GeoBUGSTextIO(FileIO.FileIO):
"""
Opens, reads, and writes weights file objects in the text format
used in GeoBUGS. GeoBUGS generates a spatial weights matrix
as an R object and writes it out as an ASCII text representation of
the R object.
An exemplary GeoBUGS text file is as follows.
list([CARD],[ADJ],[WGT],[SUMNUMNEIGH])
where [CARD] and [ADJ] are required but the others are optional.
PySAL assumes [CARD] and [ADJ] always exist in an input text file.
It can read a GeoBUGS text file, even when its content is not written
in the order of [CARD], [ADJ], [WGT], and [SUMNUMNEIGH].
It always writes all of [CARD], [ADJ], [WGT], and [SUMNUMNEIGH].
PySAL does not apply text wrapping during file writing.
In the above example,
[CARD]:
       num=c([a list of comma-separated neighbor cardinalities])
    [ADJ]:
       adj=c([a list of comma-separated neighbor IDs])
       if cardinality is zero, neighbor IDs are skipped.
The ordering of observations is the same in both [CARD] and
[ADJ].
Neighbor IDs are record numbers starting from one.
[WGT]:
       weights=c([a list of comma-separated weights])
The restrictions for [ADJ] also apply to [WGT].
[SUMNUMNEIGH]:
sumNumNeigh=[The total number of neighbor pairs]
    The total number of neighbor pairs is an integer
    value equal to the sum of neighbor cardinalities.
Notes
-----
For the files generated from R spdep nb2WB and dput function,
it is assumed that the value for the control parameter of dput function
is NULL. Please refer to R spdep nb2WB function help file.
References
----------
Thomas, A., Best, N., Lunn, D., Arnold, R., and Spiegelhalter, D.
(2004) GeoBUGS User Manual.
R spdep nb2WB function help file.
"""
FORMATS = ['geobugs_text']
MODES = ['r', 'w']
def __init__(self, *args, **kwargs):
args = args[:2]
FileIO.FileIO.__init__(self, *args, **kwargs)
self.file = open(self.dataPath, self.mode)
def read(self, n=-1):
"""
Reads GeoBUGS text file
Returns
-------
a pysal.weights.weights.W object
Examples
--------
Type 'dir(w)' at the interpreter to see what methods are supported.
Open a GeoBUGS text file and read it into a pysal weights object
>>> w = pysal.open(pysal.examples.get_path('geobugs_scot'),'r','geobugs_text').read()
WARNING: there are 3 disconnected observations
Island ids: [6, 8, 11]
Get the number of observations from the header
>>> w.n
56
Get the mean number of neighbors
>>> w.mean_neighbors
4.1785714285714288
Get neighbor distances for a single observation
>>> w[1]
{9: 1.0, 19: 1.0, 5: 1.0}
"""
self._complain_ifclosed(self.closed)
return self._read()
def seek(self, pos):
if pos == 0:
self.file.seek(0)
self.pos = 0
def _read(self):
if self.pos > 0:
raise StopIteration
fbody = self.file.read()
body_structure = {}
for i in ['num', 'adj', 'weights', 'sumNumNeigh']:
i_loc = fbody.find(i)
if i_loc != -1:
body_structure[i] = (i_loc, i)
body_sequence = sorted(body_structure.values())
body_sequence.append((-1, 'eof'))
for i in range(len(body_sequence) - 1):
part, next_part = body_sequence[i], body_sequence[i + 1]
start, end = part[0], next_part[0]
part_text = fbody[start:end]
part_length, start, end = len(part_text), 0, -1
for c in xrange(part_length):
if part_text[c].isdigit():
start = c
break
for c in xrange(part_length - 1, 0, -1):
if part_text[c].isdigit():
end = c + 1
break
part_text = part_text[start: end]
part_text = part_text.replace('\n', '')
value_type = int
if part[1] == 'weights':
value_type = float
body_structure[part[1]] = [value_type(v)
for v in part_text.split(',')]
cardinalities = body_structure['num']
adjacency = body_structure['adj']
raw_weights = [1.0] * int(sum(cardinalities))
if 'weights' in body_structure and isinstance(body_structure['weights'], list):
raw_weights = body_structure['weights']
no_obs = len(cardinalities)
neighbors = {}
weights = {}
pos = 0
for i in xrange(no_obs):
neighbors[i + 1] = []
weights[i + 1] = []
no_nghs = cardinalities[i]
if no_nghs > 0:
neighbors[i + 1] = adjacency[pos: pos + no_nghs]
weights[i + 1] = raw_weights[pos: pos + no_nghs]
pos += no_nghs
self.pos += 1
return W(neighbors, weights)
def write(self, obj):
"""
Writes a weights object to the opened text file.
Parameters
----------
.write(weightsObject)
accepts a weights object
Returns
        -------
a GeoBUGS text file
Examples
--------
>>> import tempfile, pysal, os
>>> testfile = pysal.open(pysal.examples.get_path('geobugs_scot'),'r','geobugs_text')
>>> w = testfile.read()
WARNING: there are 3 disconnected observations
Island ids: [6, 8, 11]
Create a temporary file for this example
>>> f = tempfile.NamedTemporaryFile(suffix='')
Reassign to new var
>>> fname = f.name
Close the temporary named file
>>> f.close()
Open the new file in write mode
>>> o = pysal.open(fname,'w','geobugs_text')
Write the Weights object into the open file
>>> o.write(w)
>>> o.close()
Read in the newly created text file
>>> wnew = pysal.open(fname,'r','geobugs_text').read()
WARNING: there are 3 disconnected observations
Island ids: [6, 8, 11]
Compare values from old to new
>>> wnew.pct_nonzero == w.pct_nonzero
True
Clean up temporary file created for this example
>>> os.remove(fname)
"""
self._complain_ifclosed(self.closed)
if issubclass(type(obj), W):
cardinalities, neighbors, weights = [], [], []
for i in obj.id_order:
cardinalities.append(obj.cardinalities[i])
neighbors.extend(obj.neighbors[i])
weights.extend(obj.weights[i])
self.file.write('list(')
self.file.write('num=c(%s),' % ','.join(map(str, cardinalities)))
            self.file.write('adj=c(%s),' % ','.join(map(str, neighbors)))
            self.file.write('weights=c(%s),' % ','.join(map(str, weights)))
            self.file.write('sumNumNeigh=%i)' % sum(cardinalities))
self.pos += 1
else:
raise TypeError("Expected a pysal weights object, got: %s" % (
type(obj)))
def close(self):
self.file.close()
FileIO.FileIO.close(self)
|
PeterWangIntel/chromium-crosswalk
|
refs/heads/master
|
chrome/test/chromedriver/test/run_all_tests.py
|
7
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all ChromeDriver end to end tests."""
import optparse
import os
import platform
import shutil
import sys
import tempfile
import traceback
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(_THIS_DIR, os.pardir))
import archive
import chrome_paths
import util
sys.path.insert(0, os.path.join(chrome_paths.GetSrc(), 'build', 'android'))
from pylib import constants
def _GenerateTestCommand(script,
chromedriver,
ref_chromedriver=None,
chrome=None,
chrome_version=None,
android_package=None,
verbose=False):
_, log_path = tempfile.mkstemp(prefix='chromedriver_')
print 'chromedriver server log: %s' % log_path
cmd = [
sys.executable,
os.path.join(_THIS_DIR, script),
'--chromedriver=%s' % chromedriver,
'--log-path=%s' % log_path,
]
if ref_chromedriver:
cmd.append('--reference-chromedriver=' + ref_chromedriver)
if chrome:
cmd.append('--chrome=' + chrome)
if chrome_version:
cmd.append('--chrome-version=' + chrome_version)
if verbose:
cmd.append('--verbose')
if android_package:
cmd = ['xvfb-run', '-a'] + cmd
cmd.append('--android-package=' + android_package)
return cmd
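# Illustrative command this helper builds (all paths and the temp log name
# are made up for the sketch):
#   [sys.executable, '.../test/run_py_tests.py',
#    '--chromedriver=/out/Release/chromedriver',
#    '--log-path=/tmp/chromedriver_abc123',
#    '--reference-chromedriver=/path/to/reference/chromedriver',
#    '--chrome=/path/to/chrome']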
def RunPythonTests(chromedriver, ref_chromedriver,
chrome=None, chrome_version=None,
chrome_version_name=None, android_package=None):
version_info = ''
if chrome_version_name:
version_info = '(%s)' % chrome_version_name
util.MarkBuildStepStart('python_tests%s' % version_info)
code = util.RunCommand(
_GenerateTestCommand('run_py_tests.py',
chromedriver,
ref_chromedriver=ref_chromedriver,
chrome=chrome,
chrome_version=chrome_version,
android_package=android_package))
if code:
util.MarkBuildStepError()
return code
def RunJavaTests(chromedriver, chrome=None, chrome_version=None,
chrome_version_name=None, android_package=None,
verbose=False):
version_info = ''
if chrome_version_name:
version_info = '(%s)' % chrome_version_name
util.MarkBuildStepStart('java_tests%s' % version_info)
code = util.RunCommand(
_GenerateTestCommand('run_java_tests.py',
chromedriver,
ref_chromedriver=None,
chrome=chrome,
chrome_version=chrome_version,
android_package=android_package,
verbose=verbose))
if code:
util.MarkBuildStepError()
return code
def RunCppTests(cpp_tests):
util.MarkBuildStepStart('chromedriver_tests')
code = util.RunCommand([cpp_tests])
if code:
util.MarkBuildStepError()
return code
def DownloadChrome(version_name, revision, download_site):
util.MarkBuildStepStart('download %s' % version_name)
try:
temp_dir = util.MakeTempDir()
return (temp_dir, archive.DownloadChrome(revision, temp_dir, download_site))
except Exception:
traceback.print_exc()
util.AddBuildStepText('Skip Java and Python tests')
util.MarkBuildStepError()
return (None, None)
def _KillChromes():
chrome_map = {
'win': 'chrome.exe',
'mac': 'Chromium',
'linux': 'chrome',
}
if util.IsWindows():
cmd = ['taskkill', '/F', '/IM']
else:
cmd = ['killall', '-9']
cmd.append(chrome_map[util.GetPlatformName()])
util.RunCommand(cmd)
def main():
parser = optparse.OptionParser()
parser.add_option(
'', '--android-packages',
help='Comma separated list of application package names, '
'if running tests on Android.')
# Option 'chrome-version' is for desktop only.
parser.add_option(
'', '--chrome-version',
      help='Version of chrome, e.g., \'HEAD\', \'27\', or \'26\'. '
           'Default is to run tests against all of these versions. '
           'Notice: this option only applies to desktop.')
options, _ = parser.parse_args()
exe_postfix = ''
if util.IsWindows():
exe_postfix = '.exe'
cpp_tests_name = 'chromedriver_tests' + exe_postfix
server_name = 'chromedriver' + exe_postfix
required_build_outputs = [server_name]
if not options.android_packages:
required_build_outputs += [cpp_tests_name]
try:
build_dir = chrome_paths.GetBuildDir(required_build_outputs)
except RuntimeError:
util.MarkBuildStepStart('check required binaries')
traceback.print_exc()
    util.MarkBuildStepError()
    return 1
constants.SetBuildType(os.path.basename(build_dir))
print 'Using build outputs from', build_dir
chromedriver = os.path.join(build_dir, server_name)
platform_name = util.GetPlatformName()
if util.IsLinux() and platform.architecture()[0] == '64bit':
platform_name += '64'
ref_chromedriver = os.path.join(
chrome_paths.GetSrc(),
'chrome', 'test', 'chromedriver', 'third_party', 'java_tests',
'reference_builds',
'chromedriver_%s%s' % (platform_name, exe_postfix))
if options.android_packages:
os.environ['PATH'] += os.pathsep + os.path.join(
_THIS_DIR, os.pardir, 'chrome')
code = 0
for package in options.android_packages.split(','):
code1 = RunPythonTests(chromedriver,
ref_chromedriver,
chrome_version_name=package,
android_package=package)
code2 = RunJavaTests(chromedriver,
chrome_version_name=package,
android_package=package,
verbose=True)
code = code or code1 or code2
return code
else:
latest_snapshot_revision = archive.GetLatestSnapshotVersion()
versions = [
['HEAD', latest_snapshot_revision],
['42', archive.CHROME_42_REVISION],
['41', archive.CHROME_41_REVISION],
['40', archive.CHROME_40_REVISION]
]
code = 0
for version in versions:
if options.chrome_version and version[0] != options.chrome_version:
continue
download_site = archive.Site.CONTINUOUS
version_name = version[0]
if version_name == 'HEAD':
version_name = version[1]
download_site = archive.GetSnapshotDownloadSite()
temp_dir, chrome_path = DownloadChrome(version_name, version[1],
download_site)
if not chrome_path:
code = 1
continue
code1 = RunPythonTests(chromedriver,
ref_chromedriver,
chrome=chrome_path,
chrome_version=version[0],
chrome_version_name='v%s' % version_name)
code2 = RunJavaTests(chromedriver, chrome=chrome_path,
chrome_version=version[0],
chrome_version_name='v%s' % version_name)
code = code or code1 or code2
_KillChromes()
shutil.rmtree(temp_dir)
cpp_tests = os.path.join(build_dir, cpp_tests_name)
return RunCppTests(cpp_tests) or code
if __name__ == '__main__':
sys.exit(main())
|
mlavin/django
|
refs/heads/master
|
django/contrib/gis/gdal/raster/band.py
|
12
|
from ctypes import byref, c_double, c_int, c_void_p
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.gdal.raster.base import GDALRasterBase
from django.contrib.gis.shortcuts import numpy
from django.utils.encoding import force_text
from .const import GDAL_INTEGER_TYPES, GDAL_PIXEL_TYPES, GDAL_TO_CTYPES
class GDALBand(GDALRasterBase):
"""
Wrap a GDAL raster band, needs to be obtained from a GDALRaster object.
"""
def __init__(self, source, index):
self.source = source
self._ptr = capi.get_ds_raster_band(source._ptr, index)
def _flush(self):
"""
Call the flush method on the Band's parent raster and force a refresh
of the statistics attribute when requested the next time.
"""
self.source._flush()
self._stats_refresh = True
@property
def description(self):
"""
Return the description string of the band.
"""
return force_text(capi.get_band_description(self._ptr))
@property
def width(self):
"""
Width (X axis) in pixels of the band.
"""
return capi.get_band_xsize(self._ptr)
@property
def height(self):
"""
Height (Y axis) in pixels of the band.
"""
return capi.get_band_ysize(self._ptr)
@property
def pixel_count(self):
"""
Return the total number of pixels in this band.
"""
return self.width * self.height
_stats_refresh = False
def statistics(self, refresh=False, approximate=False):
"""
Compute statistics on the pixel values of this band.
The return value is a tuple with the following structure:
(minimum, maximum, mean, standard deviation).
If approximate=True, the statistics may be computed based on overviews
or a subset of image tiles.
If refresh=True, the statistics will be computed from the data directly,
and the cache will be updated where applicable.
For empty bands (where all pixel values are nodata), all statistics
values are returned as None.
For raster formats using Persistent Auxiliary Metadata (PAM) services,
the statistics might be cached in an auxiliary file.
"""
# Prepare array with arguments for capi function
smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double()
stats_args = [
self._ptr, c_int(approximate), byref(smin), byref(smax),
byref(smean), byref(sstd), c_void_p(), c_void_p(),
]
if refresh or self._stats_refresh:
func = capi.compute_band_statistics
else:
# Add additional argument to force computation if there is no
# existing PAM file to take the values from.
force = True
stats_args.insert(2, c_int(force))
func = capi.get_band_statistics
# Computation of statistics fails for empty bands.
try:
func(*stats_args)
result = smin.value, smax.value, smean.value, sstd.value
except GDALException:
result = (None, None, None, None)
self._stats_refresh = False
return result
@property
def min(self):
"""
Return the minimum pixel value for this band.
"""
return self.statistics()[0]
@property
def max(self):
"""
Return the maximum pixel value for this band.
"""
return self.statistics()[1]
@property
def mean(self):
"""
Return the mean of all pixel values of this band.
"""
return self.statistics()[2]
@property
def std(self):
"""
Return the standard deviation of all pixel values of this band.
"""
return self.statistics()[3]
@property
def nodata_value(self):
"""
Return the nodata value for this band, or None if it isn't set.
"""
# Get value and nodata exists flag
nodata_exists = c_int()
value = capi.get_band_nodata_value(self._ptr, nodata_exists)
if not nodata_exists:
value = None
# If the pixeltype is an integer, convert to int
elif self.datatype() in GDAL_INTEGER_TYPES:
value = int(value)
return value
@nodata_value.setter
def nodata_value(self, value):
"""
Set the nodata value for this band.
"""
if value is None:
if not capi.delete_band_nodata_value:
raise ValueError('GDAL >= 2.1 required to delete nodata values.')
capi.delete_band_nodata_value(self._ptr)
elif not isinstance(value, (int, float)):
raise ValueError('Nodata value must be numeric or None.')
else:
capi.set_band_nodata_value(self._ptr, value)
self._flush()
def datatype(self, as_string=False):
"""
Return the GDAL Pixel Datatype for this band.
"""
dtype = capi.get_band_datatype(self._ptr)
if as_string:
dtype = GDAL_PIXEL_TYPES[dtype]
return dtype
def data(self, data=None, offset=None, size=None, shape=None, as_memoryview=False):
"""
Read or writes pixel values for this band. Blocks of data can
be accessed by specifying the width, height and offset of the
desired block. The same specification can be used to update
parts of a raster by providing an array of values.
Allowed input data types are bytes, memoryview, list, tuple, and array.
"""
if not offset:
offset = (0, 0)
if not size:
size = (self.width - offset[0], self.height - offset[1])
if not shape:
shape = size
if any(x <= 0 for x in size):
raise ValueError('Offset too big for this raster.')
if size[0] > self.width or size[1] > self.height:
raise ValueError('Size is larger than raster.')
# Create ctypes type array generator
ctypes_array = GDAL_TO_CTYPES[self.datatype()] * (shape[0] * shape[1])
if data is None:
# Set read mode
access_flag = 0
# Prepare empty ctypes array
data_array = ctypes_array()
else:
# Set write mode
access_flag = 1
# Instantiate ctypes array holding the input data
if isinstance(data, (bytes, memoryview)) or (numpy and isinstance(data, numpy.ndarray)):
data_array = ctypes_array.from_buffer_copy(data)
else:
data_array = ctypes_array(*data)
# Access band
capi.band_io(self._ptr, access_flag, offset[0], offset[1],
size[0], size[1], byref(data_array), shape[0],
shape[1], self.datatype(), 0, 0)
# Return data as numpy array if possible, otherwise as list
if data is None:
if as_memoryview:
return memoryview(data_array)
elif numpy:
# reshape() needs a reshape parameter with the height first.
return numpy.frombuffer(
data_array, dtype=numpy.dtype(data_array)
).reshape(tuple(reversed(size)))
else:
return list(data_array)
else:
self._flush()
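# Usage sketch for GDALBand.data (the raster path is an assumption, not part
# of this module): read a 100x100 block of the first band, then write a
# constant block back to the same window.
#
#   from django.contrib.gis.gdal import GDALRaster
#   rst = GDALRaster('/tmp/example.tif', write=True)   # hypothetical file
#   band = rst.bands[0]
#   block = band.data(offset=(0, 0), size=(100, 100))  # numpy array if available
#   band.data(data=[0] * (100 * 100), offset=(0, 0), size=(100, 100))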
class BandList(list):
def __init__(self, source):
self.source = source
list.__init__(self)
def __iter__(self):
for idx in range(1, len(self) + 1):
yield GDALBand(self.source, idx)
def __len__(self):
return capi.get_ds_raster_count(self.source._ptr)
def __getitem__(self, index):
try:
return GDALBand(self.source, index + 1)
except GDALException:
raise GDALException('Unable to get band index %d' % index)
|
belltailjp/scikit-learn
|
refs/heads/master
|
examples/datasets/plot_random_dataset.py
|
348
|
"""
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
|
nimholz/course-exercises
|
refs/heads/master
|
script.py
|
1
|
print('everything is awesome!!')
import test
print('circumference is', test.circumfer(1))
print('area is', test.area(1))
|
Mariaanisimova/pythonintask
|
refs/heads/master
|
BITs/2014/Shmireychik_S_V/task_6_17.py
|
1
|
#Task No. 6. Variant 17
#A program in which the computer picks the name of one of the five Space Shuttle orbiters and the player has to guess it.
#Shmireychik S.V.
#21.03.2016
import random
SpaceShuttles=('Колумбия','Челленджер','Дискавери','Атлантис','Индевор')
SpaceShuttle=random.randint(0,4)
rand=SpaceShuttles[SpaceShuttle]
print("I have picked one of the five Space Shuttle orbiters")
#print (rand)
otvet=0
while otvet != rand:
    otvet=input("Enter the name of one of the Space Shuttle orbiters: ")
    if otvet != rand:
        print("You did not guess it. Try again.")
    else:
        print("You guessed it.")
        break
input("Press Enter to exit")
|
ocefpaf/paegan-transport
|
refs/heads/master
|
tests/test_diel.py
|
2
|
import unittest
from paegan.location4d import Location4D
from paegan.transport.models.behaviors.diel import Diel
from datetime import datetime, timedelta
from paegan.transport.utils.asasuncycles import SunCycles
import pytz
import os
import json
class DielTest(unittest.TestCase):
def test_cycle_object_from_json(self):
data = open(os.path.normpath(os.path.join(os.path.dirname(__file__), "./resources/files/diel_behavior_cycle.json"))).read()
d = Diel(json=data)
assert d.pattern == "cycles"
assert d.plus_or_minus == "+"
assert d.min_depth == -4.0
assert d.max_depth == -5.0
assert d.time_delta == 4
assert d.cycle == "sunrise"
def test_cycle_object_from_dict(self):
data = open(os.path.normpath(os.path.join(os.path.dirname(__file__), "./resources/files/diel_behavior_cycle.json"))).read()
d = Diel(data=json.loads(data))
assert d.pattern == "cycles"
assert d.plus_or_minus == "+"
assert d.min_depth == -4.0
assert d.max_depth == -5.0
assert d.time_delta == 4
assert d.cycle == "sunrise"
def test_specifictime_object_from_json(self):
data = open(os.path.normpath(os.path.join(os.path.dirname(__file__), "./resources/files/diel_behavior_specifictime.json"))).read()
d = Diel(json=data)
t = datetime.utcnow().replace(tzinfo=pytz.utc)
loc = Location4D(time=t, latitude=35, longitude=-76)
assert d.pattern == "specifictime"
assert d.min_depth == -4.0
assert d.max_depth == -5.0
assert d.get_time(loc4d=loc).year == t.year
assert d.get_time(loc4d=loc).month == t.month
assert d.get_time(loc4d=loc).day == t.day
assert d.get_time(loc4d=loc).hour == 17
assert d.get_time(loc4d=loc).minute == 0
assert d.get_time(loc4d=loc).second == 0
assert d.get_time(loc4d=loc).microsecond == 0
def test_specifictime_object_from_dict(self):
data = open(os.path.normpath(os.path.join(os.path.dirname(__file__), "./resources/files/diel_behavior_specifictime.json"))).read()
d = Diel(data=json.loads(data))
t = datetime.utcnow().replace(tzinfo=pytz.utc)
loc = Location4D(time=t, latitude=35, longitude=-76)
assert d.pattern == "specifictime"
assert d.min_depth == -4.0
assert d.max_depth == -5.0
assert d.get_time(loc4d=loc).year == t.year
assert d.get_time(loc4d=loc).month == t.month
assert d.get_time(loc4d=loc).day == t.day
assert d.get_time(loc4d=loc).hour == 17
assert d.get_time(loc4d=loc).minute == 0
assert d.get_time(loc4d=loc).second == 0
assert d.get_time(loc4d=loc).microsecond == 0
def test_cycle(self):
t = datetime.utcnow().replace(tzinfo=pytz.utc)
loc = Location4D(time=t, latitude=35, longitude=-76)
c = SunCycles.cycles(loc=loc)
sunrise = c[SunCycles.RISING]
sunset = c[SunCycles.SETTING]
d = Diel()
d.min_depth = -4
d.max_depth = -10
d.pattern = 'cycles'
d.cycle = 'sunrise'
d.plus_or_minus = '+'
d.time_delta = 4
assert d.get_time(loc4d=loc) == sunrise + timedelta(hours=4)
d = Diel()
d.min_depth = -4
d.max_depth = -10
d.pattern = 'cycles'
d.cycle = 'sunset'
d.plus_or_minus = '-'
d.time_delta = 2
assert d.get_time(loc4d=loc) == sunset - timedelta(hours=2)
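# A hedged sketch of what resources/files/diel_behavior_cycle.json presumably
# looks like, inferred from the assertions above (the exact field names and
# nesting are assumptions, not taken from the fixture itself):
#   {"pattern": "cycles", "cycle": "sunrise", "plus_or_minus": "+",
#    "time_delta": 4, "min": -4.0, "max": -5.0}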
|
sangheestyle/elasticsearch-dsl-py
|
refs/heads/master
|
test_elasticsearch_dsl/test_analysis.py
|
10
|
# coding: utf-8
from elasticsearch_dsl import analysis
def test_analyzer_serializes_as_name():
a = analysis.Analyzer('my_analyzer')
assert 'my_analyzer' == a.to_dict()
def test_analyzer_has_definition():
a = analysis.CustomAnalyzer(
'my_analyzer',
tokenizer='keyword',
filter=['lowercase']
)
assert {
'type': 'custom',
'tokenizer': 'keyword',
'filter': ["lowercase"],
} == a.definition()
def test_tokenizer():
t = analysis.tokenizer('trigram', 'nGram', min_gram=3, max_gram=3)
assert t.to_dict() == 'trigram'
assert {
'type': 'nGram',
'min_gram': 3,
'max_gram': 3
} == t.definition()
assert t == analysis.NGramTokenizer('trigram', min_gram=3, max_gram=3)
def test_custom_analyzer_can_collect_custom_items():
trigram = analysis.tokenizer('trigram', 'nGram', min_gram=3, max_gram=3)
my_stop = analysis.token_filter('my_stop', 'stop', stopwords=['a', 'b'])
umlauts = analysis.char_filter('umlauts', 'pattern_replace', mappings=['ü=>ue'])
a = analysis.analyzer(
'my_analyzer',
tokenizer=trigram,
filter=['lowercase', my_stop],
char_filter=['html_strip', umlauts]
)
assert a.to_dict() == 'my_analyzer'
assert {
'analyzer': {
'my_analyzer': {
'type': 'custom',
'tokenizer': 'trigram',
'filter': ['lowercase', 'my_stop'],
'char_filter': ['html_strip', 'umlauts']
}
},
'tokenizer': {
'trigram': trigram.definition()
},
'filter': {
'my_stop': my_stop.definition()
},
'char_filter': {
'umlauts': umlauts.definition()
}
} == a.get_analysis_definition()
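# Sketch of how the collected definition would typically be used when creating
# an index (the client and index name are assumptions, not part of this test):
#   from elasticsearch import Elasticsearch
#   es = Elasticsearch()
#   es.indices.create(index='my-index', body={
#       'settings': {'analysis': a.get_analysis_definition()}})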
|
Fusion-Devices/android_kernel_mediatek_sprout
|
refs/heads/cm-12.1
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
2013Commons/hue
|
refs/heads/master
|
desktop/core/ext-py/requests-2.0.0/requests/models.py
|
2
|
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import logging
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .exceptions import (
HTTPError, RequestException, MissingSchema, InvalidURL,
ChunkedEncodingError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, json, builtin_str, basestring, IncompleteRead)
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
log = logging.getLogger(__name__)
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
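    # Illustrative behaviour of _encode_params (a sketch, not a doctest that
    # ships with requests):
    #   _encode_params({'q': 'py'})               -> 'q=py'
    #   _encode_params([('k', 'a'), ('k', 'b')])  -> 'k=a&k=b'
    #   _encode_params('already=encoded')         -> 'already=encoded'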
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
else:
fn, fp, ft = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, str):
fp = StringIO(fp)
if isinstance(fp, bytes):
fp = BytesIO(fp)
if ft:
new_v = (fn, fp.read(), ft)
else:
new_v = (fn, fp.read())
new_fields.append((k, new_v))
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach the request. If a dictionary is provided, form-encoding will take place.
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None):
"""Prepares the the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
try:
url = unicode(url)
except NameError:
# We're on Python 3.
url = str(url)
except UnicodeDecodeError:
pass
# Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme:
raise MissingSchema("Invalid URL %r: No schema supplied" % url)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, basestring),
not isinstance(data, list),
not isinstance(data, dict)
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if (content_type) and (not 'content-type' in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = str(l)
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
cookies = cookies
else:
cookies = cookiejar_from_dict(cookies)
if 'cookie' not in self.headers:
cookie_header = get_cookie_header(cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
        #: Requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except RequestException:
return False
return True
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the lovely Charade library
(Thanks, Ian!)."""
from bs4 import UnicodeDammit
return UnicodeDammit(self.content).original_encoding
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
"""
if self._content_consumed:
# simulate reading small chunks of the content
return iter_slices(self._content, chunk_size)
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size,
decode_content=True):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except AttributeError:
# Standard file-like object.
while 1:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
gen = generate()
if decode_unicode:
gen = stream_decode_response_unicode(gen, self)
return gen
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size,
decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
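    # Usage sketch for iter_lines (illustrative; ``url`` and ``handle`` are
    # assumptions):
    #   r = requests.get(url, stream=True)
    #   for line in r.iter_lines():
    #       if line:          # filter out keep-alive new lines
    #           handle(line)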
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
if Response.encoding is None and chardet module is available, encoding
will be guessed.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
return json.loads(self.content.decode(encoding), **kwargs)
return json.loads(self.text or self.content, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Closes the underlying file descriptor and releases the connection
back to the pool.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
|
kawamon/hue
|
refs/heads/master
|
desktop/core/ext-py/boto-2.46.1/boto/dynamodb2/exceptions.py
|
164
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
class ProvisionedThroughputExceededException(JSONResponseError):
pass
class LimitExceededException(JSONResponseError):
pass
class ConditionalCheckFailedException(JSONResponseError):
pass
class ResourceInUseException(JSONResponseError):
pass
class ResourceNotFoundException(JSONResponseError):
pass
class InternalServerError(JSONResponseError):
pass
class ValidationException(JSONResponseError):
pass
class ItemCollectionSizeLimitExceededException(JSONResponseError):
pass
class DynamoDBError(Exception):
pass
class UnknownSchemaFieldError(DynamoDBError):
pass
class UnknownIndexFieldError(DynamoDBError):
pass
class UnknownFilterTypeError(DynamoDBError):
pass
class QueryError(DynamoDBError):
pass
class ItemNotFound(DynamoDBError):
pass
|
MaStanford/PingPongOpenCV
|
refs/heads/master
|
shapes/Circles.py
|
1
|
__author__ = 'm.stanford'
import cv2
import cv2.cv as cv
import numpy as np
class Circles():
accumulator_threshold = 75
minCircleDist = 100
maxCannyThresh = 175
minRadius = 10
maxRadius = 0
def __init__(self):
pass
def getCircles(self, img):
smoothed = cv2.medianBlur(img, 5)
circles = cv2.HoughCircles(smoothed, cv.CV_HOUGH_GRADIENT, 1
, self.minCircleDist
, param1=self.maxCannyThresh
, param2=self.accumulator_threshold
, minRadius=self.minRadius
, maxRadius=self.maxRadius)
if circles is None:
return None
circles = np.uint16(np.around(circles))
return circles
def drawCircles(self, targetFrame, grayFrame):
circles = self.getCircles(grayFrame)
if circles is None:
return
for i in circles[0, :]:
# draw the outer circle
cv2.circle(targetFrame, (i[0], i[1]), i[2], (0, 255, 0), 2)
# draw the center of the circle
cv2.circle(targetFrame, (i[0], i[1]), 2, (0, 0, 255), 3)
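# Usage sketch (the capture loop is an assumption, not part of this module):
#   cap = cv2.VideoCapture(0)
#   finder = Circles()
#   while True:
#       ret, frame = cap.read()
#       if not ret:
#           break
#       gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#       finder.drawCircles(frame, gray)
#       cv2.imshow('circles', frame)
#       if cv2.waitKey(1) & 0xFF == ord('q'):
#           break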
|
Rawtechio/oscar
|
refs/heads/master
|
oscar/collection/views.py
|
1
|
# -*- coding: utf-8 -*-
# Created on 21 Apr 2016 at 11:25
from __future__ import unicode_literals, absolute_import
from django.views.generic import ListView
from django.views.generic.edit import CreateView
from .models import Missed
from .forms import MissedBinForm
class MissedListView(ListView):
model = Missed
def get_queryset(self):
return Missed.objects.filter(reported_by=self.request.user)
class MissedCreateView(CreateView):
model = Missed
form_class = MissedBinForm
def form_valid(self, form):
obj = form.save(commit=False)
obj.reported_by = self.request.user
return super(MissedCreateView, self).form_valid(form)
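# Sketch of wiring these views up (URL patterns are assumptions, not part of
# this app):
#   from django.conf.urls import url
#   from .views import MissedListView, MissedCreateView
#   urlpatterns = [
#       url(r'^missed/$', MissedListView.as_view(), name='missed-list'),
#       url(r'^missed/report/$', MissedCreateView.as_view(), name='missed-report'),
#   ]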
|
adedayo/intellij-community
|
refs/heads/master
|
python/testData/resolve/multiFile/modulePackageCollision/ModulePackageCollision.py
|
83
|
from smname import module1
# <ref>
|
TeamEOS/kernel_oppo_r5
|
refs/heads/lp5.1
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack by drawing a textual but
hierarchical tree of calls. Only the functions' names and the call
time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for some time, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
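# An illustrative input line the regex above is meant to match (a sketch of
# the function tracer's output; exact spacing varies by kernel version):
#   bash-1234  [000]  1234.567890: do_sys_open <-sys_open
# for which parseLine returns ('1234.567890', 'do_sys_open', 'sys_open').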
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
cloudbau/keystone
|
refs/heads/master
|
keystone/contrib/oauth1/migrate_repo/__init__.py
|
24
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
|
solarwinds/orionsdk-python
|
refs/heads/master
|
samples/nta_enable_disable_alert.py
|
1
|
from __future__ import print_function
import re
import requests
import pprint
from orionsdk import SwisClient
def main():
# Connect to SWIS
server = 'localhost'
username = 'admin'
password = ''
swis = SwisClient(server, username, password)
alert_name = 'NTA Alert on machine-hostname'
query_results = swis.query('Select Uri FROM Orion.AlertConfigurations WHERE Name = @alertname_par', alertname_par=alert_name)
uri = query_results['results'][0]['Uri']
# Disable alert
props = {
'Enabled': False
}
swis.update(uri, **props)
# Enable alert
props = {
'Enabled': True
}
swis.update(uri, **props)
if __name__ == '__main__':
main()
|
xiaoyaozi5566/DynamicCache
|
refs/heads/master
|
ext/ply/test/yacc_badargs.py
|
174
|
# -----------------------------------------------------------------------------
# yacc_badargs.py
#
# Rules with wrong # args
# -----------------------------------------------------------------------------
import sys
sys.tracebacklimit = 0
sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t,s):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr():
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
|
chromium/chromium
|
refs/heads/master
|
testing/merge_scripts/standard_isolated_script_merge.py
|
8
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
import merge_api
import results_merger
def StandardIsolatedScriptMerge(output_json, summary_json, jsons_to_merge):
"""Merge the contents of one or more results JSONs into a single JSON.
  Args:
    output_json: A path to a JSON file to which the merged results should be
      written.
    summary_json: A path to the summary.json produced by the swarming client.
    jsons_to_merge: A list of paths to JSON files that should be merged.
  """
# summary.json is produced by swarming client itself. We are mostly interested
# in the number of shards.
try:
with open(summary_json) as f:
summary = json.load(f)
except (IOError, ValueError):
    print >> sys.stderr, (
        'summary.json is missing or can not be read. '
        'Something is seriously wrong with swarming client or the bot.')
return 1
missing_shards = []
shard_results_list = []
for index, result in enumerate(summary['shards']):
output_path = None
if result:
output_path = find_shard_output_path(index, result.get('task_id'),
jsons_to_merge)
if not output_path:
missing_shards.append(index)
continue
with open(output_path) as f:
try:
json_contents = json.load(f)
except ValueError:
        raise ValueError('Failed to parse JSON from %s' % output_path)
shard_results_list.append(json_contents)
merged_results = results_merger.merge_test_results(shard_results_list)
if missing_shards:
merged_results['missing_shards'] = missing_shards
if 'global_tags' not in merged_results:
merged_results['global_tags'] = []
merged_results['global_tags'].append('UNRELIABLE_RESULTS')
with open(output_json, 'w') as f:
json.dump(merged_results, f)
return 0
def find_shard_output_path(index, task_id, jsons_to_merge):
"""Finds the shard matching the index/task-id.
Args:
index: The index of the shard to load data for, this is for old api.
task_id: The directory of the shard to load data for, this is for new api.
jsons_to_merge: A container of file paths for shards that emitted output.
Returns:
* The matching path, or None
"""
# 'output.json' is set in swarming/api.py, gtest_task method.
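  # For illustration (hypothetical paths), jsons_to_merge might contain
  #   /tmp/swarming/0/output.json          -> matched by shard index (old API)
  #   /tmp/swarming/4cc83.../output.json   -> matched by task_id (new API)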
matching_json_files = [
j for j in jsons_to_merge
if (os.path.basename(j) == 'output.json' and
(os.path.basename(os.path.dirname(j)) == str(index) or
os.path.basename(os.path.dirname(j)) == task_id))]
if not matching_json_files:
print >> sys.stderr, 'shard %s test output missing' % index
return None
elif len(matching_json_files) > 1:
print >> sys.stderr, 'duplicate test output for shard %s' % index
return None
return matching_json_files[0]
def main(raw_args):
parser = merge_api.ArgumentParser()
args = parser.parse_args(raw_args)
return StandardIsolatedScriptMerge(
args.output_json, args.summary_json, args.jsons_to_merge)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
shawnsilva/steamwebapi
|
refs/heads/devel
|
steamwebapi/test/test_utils.py
|
1
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Copyright (C) 2013-2015 Shawn Silva
# ------------------------------------
# This file is part of steamwebapi.
#
# steamwebapi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import unittest
from steamwebapi.utils import gid_32_to_64_bit
KNOWN_32_BIT_GID = 1606484
KNOWN_64_BIT_GID = 103582791431127892
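# The two known IDs above differ by 0x0170000000000000, i.e.
# KNOWN_64_BIT_GID == KNOWN_32_BIT_GID + (0x17 << 52). This is an observation
# about the test data, not a documented guarantee of the Steam API.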
class TestUtils(unittest.TestCase):
def test_gid_32_to_64_bit(self):
converted_id = gid_32_to_64_bit(KNOWN_32_BIT_GID)
self.assertEqual(converted_id, KNOWN_64_BIT_GID)
|
boneyao/sentry
|
refs/heads/master
|
tests/functional/test_imports.py
|
24
|
from __future__ import absolute_import
import fnmatch
import os
from subprocess import call
ROOT = os.path.normpath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'src'))
def _find_files(root, pattern='*'):
matches = []
for root, _, filenames in os.walk(root):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return matches
def _generate_tests():
string = 'from __future__ import absolute_import'
kwargs = {
'stdout': open('/dev/null', 'a'),
'stderr': open('/dev/null', 'a'),
}
def make_test(filename, relpath):
def test():
assert not call(['grep', string, filename], **kwargs), \
"Missing %r in %s" % (string, relpath)
        test.__doc__ = relpath
        name = relpath.replace('/', '_')
        if name.endswith('.py'):
            name = name[:-3]
        test.__name__ = 'test_' + name
return test
for filename in _find_files(ROOT, '*.py'):
relpath = filename[len(ROOT) - 3:]
if '/migrations/' in relpath:
continue
if relpath.startswith('src/sentry/static/'):
continue
func = make_test(filename, relpath)
globals()[func.__name__] = func
_generate_tests()
|
dhruvsrivastava/OJ
|
refs/heads/master
|
python/lib/python2.7/site-packages/pip/req/req_file.py
|
239
|
"""
Requirements file parsing
"""
from __future__ import absolute_import
import os
import re
import shlex
import optparse
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves import filterfalse
import pip
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.exceptions import (RequirementsFileParseError)
from pip.utils import normalize_name
from pip import cmdoptions
__all__ = ['parse_requirements']
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
COMMENT_RE = re.compile(r'(^|\s)+#.*$')
SUPPORTED_OPTIONS = [
cmdoptions.constraints,
cmdoptions.editable,
cmdoptions.requirements,
cmdoptions.no_index,
cmdoptions.index_url,
cmdoptions.find_links,
cmdoptions.extra_index_url,
cmdoptions.allow_external,
cmdoptions.allow_all_external,
cmdoptions.no_allow_external,
cmdoptions.allow_unsafe,
cmdoptions.no_allow_unsafe,
cmdoptions.use_wheel,
cmdoptions.no_use_wheel,
cmdoptions.always_unzip,
cmdoptions.no_binary,
cmdoptions.only_binary,
]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
cmdoptions.install_options,
cmdoptions.global_options
]
# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
def parse_requirements(filename, finder=None, comes_from=None, options=None,
session=None, constraint=False, wheel_cache=None):
"""Parse a requirements file and yield InstallRequirement instances.
:param filename: Path or url of requirements file.
:param finder: Instance of pip.index.PackageFinder.
:param comes_from: Origin description of requirements.
:param options: Global options.
:param session: Instance of pip.download.PipSession.
:param constraint: If true, parsing a constraint file rather than
requirements file.
:param wheel_cache: Instance of pip.wheel.WheelCache
"""
if session is None:
raise TypeError(
"parse_requirements() missing 1 required keyword argument: "
"'session'"
)
_, content = get_file_content(
filename, comes_from=comes_from, session=session
)
lines = content.splitlines()
lines = ignore_comments(lines)
lines = join_lines(lines)
lines = skip_regex(lines, options)
for line_number, line in enumerate(lines, 1):
req_iter = process_line(line, filename, line_number, finder,
comes_from, options, session, wheel_cache,
constraint=constraint)
for req in req_iter:
yield req
def process_line(line, filename, line_number, finder=None, comes_from=None,
options=None, session=None, wheel_cache=None,
constraint=False):
"""Process a single requirements line; This can result in creating/yielding
requirements, or updating the finder.
For lines that contain requirements, the only options that have an effect
are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
requirement. Other options from SUPPORTED_OPTIONS may be present, but are
ignored.
For lines that do not contain requirements, the only options that have an
effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all are parsed and
affect the finder.
:param constraint: If True, parsing a constraints file.
"""
parser = build_parser()
defaults = parser.get_default_values()
defaults.index_url = None
if finder:
# `finder.format_control` will be updated during parsing
defaults.format_control = finder.format_control
args_str, options_str = break_args_options(line)
opts, _ = parser.parse_args(shlex.split(options_str), defaults)
# preserve for the nested code path
line_comes_from = '%s %s (line %s)' % (
'-c' if constraint else '-r', filename, line_number)
# yield a line requirement
if args_str:
isolated = options.isolated_mode if options else False
if options:
cmdoptions.check_install_build_global(options, opts)
# get the options that apply to requirements
req_options = {}
for dest in SUPPORTED_OPTIONS_REQ_DEST:
if dest in opts.__dict__ and opts.__dict__[dest]:
req_options[dest] = opts.__dict__[dest]
yield InstallRequirement.from_line(
args_str, line_comes_from, constraint=constraint,
isolated=isolated, options=req_options, wheel_cache=wheel_cache
)
# yield an editable requirement
elif opts.editables:
isolated = options.isolated_mode if options else False
default_vcs = options.default_vcs if options else None
yield InstallRequirement.from_editable(
opts.editables[0], comes_from=line_comes_from,
constraint=constraint, default_vcs=default_vcs, isolated=isolated,
wheel_cache=wheel_cache
)
# parse a nested requirements file
elif opts.requirements or opts.constraints:
if opts.requirements:
req_path = opts.requirements[0]
nested_constraint = False
else:
req_path = opts.constraints[0]
nested_constraint = True
# original file is over http
if SCHEME_RE.search(filename):
# do a url join so relative paths work
req_path = urllib_parse.urljoin(filename, req_path)
# original file and nested file are paths
elif not SCHEME_RE.search(req_path):
# do a join so relative paths work
            req_dir = os.path.dirname(filename)
            req_path = os.path.join(req_dir, req_path)
# TODO: Why not use `comes_from='-r {} (line {})'` here as well?
parser = parse_requirements(
req_path, finder, comes_from, options, session,
constraint=nested_constraint, wheel_cache=wheel_cache
)
for req in parser:
yield req
# set finder options
elif finder:
if opts.index_url:
finder.index_urls = [opts.index_url]
if opts.use_wheel is False:
finder.use_wheel = False
pip.index.fmt_ctl_no_use_wheel(finder.format_control)
if opts.no_index is True:
finder.index_urls = []
if opts.allow_all_external:
finder.allow_all_external = opts.allow_all_external
if opts.extra_index_urls:
finder.index_urls.extend(opts.extra_index_urls)
if opts.allow_external:
finder.allow_external |= set(
[normalize_name(v).lower() for v in opts.allow_external])
if opts.allow_unverified:
# Remove after 7.0
finder.allow_unverified |= set(
[normalize_name(v).lower() for v in opts.allow_unverified])
if opts.find_links:
# FIXME: it would be nice to keep track of the source
# of the find_links: support a find-links local path
# relative to a requirements file.
value = opts.find_links[0]
req_dir = os.path.dirname(os.path.abspath(filename))
relative_to_reqs_file = os.path.join(req_dir, value)
if os.path.exists(relative_to_reqs_file):
value = relative_to_reqs_file
finder.find_links.append(value)
def break_args_options(line):
"""Break up the line into an args and options string. We only want to shlex
(and then optparse) the options, not the args. args can contain markers
which are corrupted by shlex.
"""
tokens = line.split(' ')
args = []
options = tokens[:]
for token in tokens:
        if token.startswith('-'):
break
else:
args.append(token)
options.pop(0)
return ' '.join(args), ' '.join(options)
def build_parser():
"""
Return a parser for parsing requirement lines
"""
parser = optparse.OptionParser(add_help_option=False)
option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
for option_factory in option_factories:
option = option_factory()
parser.add_option(option)
# By default optparse sys.exits on parsing errors. We want to wrap
# that in our own exception.
def parser_exit(self, msg):
raise RequirementsFileParseError(msg)
parser.exit = parser_exit
return parser
def join_lines(iterator):
"""
Joins a line ending in '\' with the previous line.
"""
lines = []
for line in iterator:
if not line.endswith('\\'):
if lines:
lines.append(line)
yield ''.join(lines)
lines = []
else:
yield line
else:
lines.append(line.strip('\\'))
# TODO: handle space after '\'.
# TODO: handle '\' on last line.
def ignore_comments(iterator):
"""
Strips and filters empty or commented lines.
"""
for line in iterator:
line = COMMENT_RE.sub('', line)
line = line.strip()
if line:
yield line
def skip_regex(lines, options):
"""
Optionally exclude lines that match '--skip-requirements-regex'
"""
skip_regex = options.skip_requirements_regex if options else None
if skip_regex:
lines = filterfalse(re.compile(skip_regex).search, lines)
return lines
|
40223226/2015cd_midterm2
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/decimal.py
|
623
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module should be kept in sync with the latest updates of the
# IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is an implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
http://en.wikipedia.org/wiki/IEEE_854-1987
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
Decimal('0.00')).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678')
Decimal('1.2345E+12345680')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print(dig / Decimal(3))
0.333333333
>>> getcontext().prec = 18
>>> print(dig / Decimal(3))
0.333333333333333333
>>> print(dig.sqrt())
1
>>> print(Decimal(3).sqrt())
1.73205080756887729
>>> print(Decimal(3) ** 123)
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print(inf)
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print(neginf)
-Infinity
>>> print(neginf + inf)
NaN
>>> print(neginf * inf)
-Infinity
>>> print(dig / 0)
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print(dig / 0)
Traceback (most recent call last):
...
...
...
decimal.DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> print(c.divide(Decimal(0), Decimal(0)))
Traceback (most recent call last):
...
...
...
decimal.InvalidOperation: 0 / 0
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print(c.divide(Decimal(0), Decimal(0)))
NaN
>>> print(c.flags[InvalidOperation])
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
'FloatOperation',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
# Functions for manipulating contexts
'setcontext', 'getcontext', 'localcontext',
# Limits for the C version for compatibility
'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',
# C version: compile time choice that enables the thread local context
'HAVE_THREADS'
]
__version__ = '1.70' # Highest version of the spec this complies with
# See http://speleotrove.com/decimal/
import copy as _copy
import math as _math
import numbers as _numbers
import sys
try:
from collections import namedtuple as _namedtuple
DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
DecimalTuple = lambda *args: args
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Compatibility with the C version
HAVE_THREADS = True
if sys.maxsize == 2**63-1:
MAX_PREC = 999999999999999999
MAX_EMAX = 999999999999999999
MIN_EMIN = -999999999999999999
else:
MAX_PREC = 425000000
MAX_EMAX = 425000000
MIN_EMIN = -425000000
MIN_ETINY = MIN_EMIN - (MAX_PREC-1)
# Errors
class DecimalException(ArithmeticError):
"""Base exception class.
Used exceptions derive from this.
If an exception derives from another exception besides this (such as
Underflow (Inexact, Rounded, Subnormal) that indicates that it is only
called if the others are present. This isn't actually used for
anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is not set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits are appended to the coefficient ("fold-down").
"""
#brython fixme
pass
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x) , x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
The result of the operation after these is a quiet positive NaN,
except when the cause is a signaling NaN, in which case the result is
also a quiet NaN, but with the original sign, and an optional
diagnostic information.
"""
def handle(self, context, *args):
if args:
ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
return ans._fix_nan(context)
return _NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
This occurs and signals invalid-operation if an string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, *args):
return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
#brython fix me
pass
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
#brython fix me
pass
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
The subnormal signal may be tested (or trapped) to determine if a given
    operation (or sequence of operations) yielded a subnormal result.
"""
#brython fix me
pass
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return _SignedInfinity[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
if sign == 1:
if context.rounding == ROUND_FLOOR:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
#brython fix me
pass
class FloatOperation(DecimalException, TypeError):
"""Enable stricter semantics for mixing floats and Decimals.
If the signal is not trapped (default), mixing floats and Decimals is
permitted in the Decimal() constructor, context.create_decimal() and
all comparison operators. Both conversion and comparisons are exact.
Any occurrence of a mixed operation is silently recorded by setting
FloatOperation in the context flags. Explicit conversions with
Decimal.from_float() or context.create_decimal_from_float() do not
set the flag.
Otherwise (the signal is trapped), only equality comparisons and explicit
conversions are silent. All other mixed operations raise FloatOperation.
"""
#brython fix me
pass
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal, FloatOperation]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
# Valid rounding modes
_rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING,
ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP)
##### Context Functions ##################################################
# The getcontext() and setcontext() functions manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.current_thread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
try:
import threading
except ImportError:
# Python was compiled without threads; create a mock object instead
class MockThreading(object):
def local(self, sys=sys):
return sys.modules[__name__]
threading = MockThreading()
del MockThreading
try:
threading.local
except AttributeError:
# To fix reloading, force it to create a new context
# Old contexts have different exceptions in their dicts, making problems.
if hasattr(threading.current_thread(), '__decimal_context__'):
del threading.current_thread().__decimal_context__
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
threading.current_thread().__decimal_context__ = context
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return threading.current_thread().__decimal_context__
except AttributeError:
context = Context()
threading.current_thread().__decimal_context__ = context
return context
else:
local = threading.local()
if hasattr(local, '__decimal_context__'):
del local.__decimal_context__
def getcontext(_local=local):
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _local.__decimal_context__
except AttributeError:
context = Context()
_local.__decimal_context__ = context
return context
def setcontext(context, _local=local):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_local.__decimal_context__ = context
del threading, local # Don't contaminate the namespace
def localcontext(ctx=None):
"""Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
The returned context manager creates a local decimal context
in a with statement:
def sin(x):
with localcontext() as ctx:
ctx.prec += 2
# Rest of sin calculation algorithm
# uses a precision 2 greater than normal
return +s # Convert result to normal precision
def sin(x):
with localcontext(ExtendedContext):
# Rest of sin calculation algorithm
# uses the Extended Context from the
# General Decimal Arithmetic Specification
return +s # Convert result to normal context
>>> setcontext(DefaultContext)
>>> print(getcontext().prec)
28
>>> with localcontext():
... ctx = getcontext()
... ctx.prec += 2
... print(ctx.prec)
...
30
>>> with localcontext(ExtendedContext):
... print(getcontext().prec)
...
9
>>> print(getcontext().prec)
28
"""
if ctx is None: ctx = getcontext()
return _ContextManager(ctx)
##### Decimal class #######################################################
# Do not subclass Decimal from numbers.Real and do not register it as such
# (because Decimals are not interoperable with floats). See the notes in
# numbers.py for more detail.
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
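        # For example, Decimal('3.14') is stored with _sign=0, _int='314',
        # _exp=-2, so that value == (-1)**_sign * int(_int) * 10**_exp.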
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, str):
value=value.strip().lower()
if value.startswith("-"):
self._sign = 1
value=value[1:]
else:
self._sign = 0
if value in ('', 'nan'):
self._is_special = True
self._int = ''
#if m.group('signal'): #figure out what a signaling NaN is later
# self._exp = 'N'
#else:
# self._exp = 'n'
self._exp='n'
return self
if value in ('inf', 'infinity'):
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
import _jsre as re
_m=re.match("^\d*\.?\d*(e\+?\d*)?$", value)
if not _m:
self._is_special = True
self._int = ''
self._exp='n'
return self
if '.' in value:
intpart, fracpart=value.split('.')
if 'e' in fracpart:
fracpart, exp=fracpart.split('e')
exp=int(exp)
else:
exp=0
#self._int = str(int(intpart+fracpart))
self._int = intpart+fracpart
self._exp = exp - len(fracpart)
self._is_special = False
return self
else:
#is this a pure int?
self._is_special = False
if 'e' in value:
self._int, _exp=value.split('e')
self._exp=int(_exp)
#print(self._int, self._exp)
else:
self._int = value
self._exp = 0
return self
#m = _parser(value.strip())
#if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
#if m.group('sign') == "-":
# self._sign = 1
#else:
# self._sign = 0
#intpart = m.group('int')
#if intpart is not None:
# # finite number
# fracpart = m.group('frac') or ''
# exp = int(m.group('exp') or '0')
# self._int = str(int(intpart+fracpart))
# self._exp = exp - len(fracpart)
# self._is_special = False
#else:
# diag = m.group('diag')
# if diag is not None:
# # NaN
# self._int = str(int(diag or '0')).lstrip('0')
# if m.group('signal'):
# self._exp = 'N'
# else:
# self._exp = 'n'
# else:
# # infinity
# self._int = '0'
# self._exp = 'F'
# self._is_special = True
#return self
# From an integer
if isinstance(value, int):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], int) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, int) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], int):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
if context is None:
context = getcontext()
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are "
"enabled")
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
# @classmethod, but @decorator is not valid Python 2.3 syntax, so
# don't use it (see notes on Py2.3 compatibility at top of file)
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, int): # handle integer inputs
return cls(f)
if not isinstance(f, float):
raise TypeError("argument must be int or float.")
if _math.isinf(f) or _math.isnan(f):
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
result = _dec_from_triple(sign, str(n*5**k), -k)
if cls is Decimal:
return result
else:
return cls(result)
from_float = classmethod(from_float)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __bool__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
def _cmp(self, other):
"""Compare the two non-NaN decimal instances self and other.
Returns -1 if self < other, 0 if self == other and 1
if self > other. This routine is for internal use only."""
if self._is_special or other._is_special:
self_inf = self._isinfinity()
other_inf = other._isinfinity()
if self_inf == other_inf:
return 0
elif self_inf < other_inf:
return -1
else:
return 1
# check for zeros; Decimal('0') == Decimal('-0')
if not self:
if not other:
return 0
else:
return -((-1)**other._sign)
if not other:
return (-1)**self._sign
# If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted:
self_padded = self._int + '0'*(self._exp - other._exp)
other_padded = other._int + '0'*(other._exp - self._exp)
if self_padded == other_padded:
return 0
elif self_padded < other_padded:
return -(-1)**self._sign
else:
return (-1)**self._sign
elif self_adjusted > other_adjusted:
return (-1)**self._sign
else: # self_adjusted < other_adjusted
return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
def __eq__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __ne__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return True
return self._cmp(other) != 0
def __lt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self._cmp(other))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# In order to make sure that the hash of a Decimal instance
# agrees with the hash of a numerically equal integer, float
# or Fraction, we follow the rules for numeric hashes outlined
# in the documentation. (See library docs, 'Built-in Types').
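        # For example, hash(Decimal('1.5')), hash(1.5) and
        # hash(Fraction(3, 2)) must all be equal under these rules.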
if self._is_special:
if self.is_snan():
raise TypeError('Cannot hash a signaling NaN value.')
elif self.is_nan():
return _PyHASH_NAN
else:
if self._sign:
return -_PyHASH_INF
else:
return _PyHASH_INF
if self._exp >= 0:
exp_hash = pow(10, self._exp, _PyHASH_MODULUS)
else:
exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS)
hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS
ans = hash_ if self >= 0 else -hash_
return -2 if ans == -1 else ans
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
def __str__(self, eng=False, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
"""
sign = ['', '-'][self._sign]
if self._is_special:
if self._exp == 'F':
return sign + 'Infinity'
elif self._exp == 'n':
return sign + 'NaN' + self._int
else: # self._exp == 'N'
return sign + 'sNaN' + self._int
# number of digits of self._int to left of decimal point
leftdigits = self._exp + len(self._int)
# dotplace is number of digits of self._int to the left of the
# decimal point in the mantissa of the output string (that is,
# after adjusting the exponent)
if self._exp <= 0 and leftdigits > -6:
# no exponent required
dotplace = leftdigits
elif not eng:
# usual scientific notation: 1 digit on left of the point
dotplace = 1
elif self._int == '0':
# engineering notation, zero
dotplace = (leftdigits + 1) % 3 - 1
else:
# engineering notation, nonzero
dotplace = (leftdigits - 1) % 3 + 1
if dotplace <= 0:
intpart = '0'
fracpart = '.' + '0'*(-dotplace) + self._int
elif dotplace >= len(self._int):
intpart = self._int+'0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace]
fracpart = '.' + self._int[dotplace:]
if leftdigits == dotplace:
exp = ''
else:
if context is None:
context = getcontext()
exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
return sign + intpart + fracpart + exp
def to_eng_string(self, context=None):
"""Convert to engineering-type string.
Engineering notation has an exponent which is a multiple of 3, so there
are up to 3 digits left of the decimal place.
Same rules for when in exponential and when as a value as in __str__.
"""
return self.__str__(eng=True, context=context)
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds, if it has reason.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
Rounds the number (if more then precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
-INF + INF (or the reverse) cause InvalidOperation errors.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
# If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) # Can't both be infinity here
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
# If the answer is 0, the sign should be negative, in this case.
negativezero = 1
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
ans = _dec_from_triple(sign, '0', exp)
ans = ans._fix(context)
return ans
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
ans = _dec_from_triple(negativezero, '0', exp)
ans = ans._fix(context)
return ans
if op1.int < op2.int:
op1, op2 = op2, op1
# OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
# So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
# Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
ans = ans._fix(context)
return ans
__radd__ = __add__
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
def __mul__(self, other, context=None):
"""Return self * other.
(+-) INF * 0 (or its reverse) raise InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return _SignedInfinity[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return _SignedInfinity[resultsign]
resultexp = self._exp + other._exp
# Special case for multiplying by zero
if not self or not other:
ans = _dec_from_triple(resultsign, '0', resultexp)
# Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == '1':
ans = _dec_from_triple(resultsign, other._int, resultexp)
ans = ans._fix(context)
return ans
if other._int == '1':
ans = _dec_from_triple(resultsign, self._int, resultexp)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
ans = ans._fix(context)
return ans
__rmul__ = __mul__
def __truediv__(self, other, context=None):
"""Return self / other."""
other = _convert_other(other)
if other is NotImplemented:
return NotImplemented
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity() and other._isinfinity():
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
return _SignedInfinity[sign]
if other._isinfinity():
context._raise_error(Clamped, 'Division by infinity')
return _dec_from_triple(sign, '0', context.Etiny())
# Special cases for zeroes
if not other:
if not self:
return context._raise_error(DivisionUndefined, '0 / 0')
return context._raise_error(DivisionByZero, 'x / 0', sign)
if not self:
exp = self._exp - other._exp
coeff = 0
else:
# OK, so neither = 0, INF or NaN
shift = len(other._int) - len(self._int) + context.prec + 1
exp = self._exp - other._exp - shift
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if shift >= 0:
coeff, remainder = divmod(op1.int * 10**shift, op2.int)
else:
coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
if remainder:
# result is not exact; adjust to ensure correct rounding
if coeff % 5 == 0:
coeff += 1
else:
# result is exact; get as close to ideal exponent as possible
ideal_exp = self._exp - other._exp
while exp < ideal_exp and coeff % 10 == 0:
coeff //= 10
exp += 1
ans = _dec_from_triple(sign, str(coeff), exp)
return ans._fix(context)
def _divide(self, other, context):
"""Return (self // other, self % other), to context.prec precision.
Assumes that neither self nor other is a NaN, that self is not
infinite and that other is nonzero.
"""
sign = self._sign ^ other._sign
if other._isinfinity():
ideal_exp = self._exp
else:
ideal_exp = min(self._exp, other._exp)
expdiff = self.adjusted() - other.adjusted()
if not self or other._isinfinity() or expdiff <= -2:
return (_dec_from_triple(sign, '0', 0),
self._rescale(ideal_exp, context.rounding))
if expdiff <= context.prec:
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
if q < 10**context.prec:
return (_dec_from_triple(sign, str(q), 0),
_dec_from_triple(self._sign, str(r), ideal_exp))
# Here the quotient is too large to be representable
ans = context._raise_error(DivisionImpossible,
'quotient too large in //, % or divmod')
return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
def __divmod__(self, other, context=None):
"""
Return (self // other, self % other)
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return (ans, ans)
sign = self._sign ^ other._sign
if self._isinfinity():
if other._isinfinity():
ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
return ans, ans
else:
return (_SignedInfinity[sign],
context._raise_error(InvalidOperation, 'INF % x'))
if not other:
if not self:
ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
return ans, ans
else:
return (context._raise_error(DivisionByZero, 'x // 0', sign),
context._raise_error(InvalidOperation, 'x % 0'))
quotient, remainder = self._divide(other, context)
remainder = remainder._fix(context)
return quotient, remainder
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
        Remainder nearest to 0; satisfies abs(remainder_near) <= abs(other)/2.
"""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
# self == +/-infinity -> InvalidOperation
if self._isinfinity():
return context._raise_error(InvalidOperation,
'remainder_near(infinity, x)')
# other == 0 -> either InvalidOperation or DivisionUndefined
if not other:
if self:
return context._raise_error(InvalidOperation,
'remainder_near(x, 0)')
else:
return context._raise_error(DivisionUndefined,
'remainder_near(0, 0)')
# other = +/-infinity -> remainder = self
if other._isinfinity():
ans = Decimal(self)
return ans._fix(context)
# self = 0 -> remainder = self, with ideal exponent
ideal_exponent = min(self._exp, other._exp)
if not self:
ans = _dec_from_triple(self._sign, '0', ideal_exponent)
return ans._fix(context)
# catch most cases of large or small quotient
expdiff = self.adjusted() - other.adjusted()
if expdiff >= context.prec + 1:
# expdiff >= prec+1 => abs(self/other) > 10**prec
return context._raise_error(DivisionImpossible)
if expdiff <= -2:
# expdiff <= -2 => abs(self/other) < 0.1
ans = self._rescale(ideal_exponent, context.rounding)
return ans._fix(context)
# adjust both arguments to have the same exponent, then divide
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
# remainder is r*10**ideal_exponent; other is +/-op2.int *
# 10**ideal_exponent. Apply correction to ensure that
# abs(remainder) <= abs(other)/2
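        # Worked example (illustrative): for 7 rem 4, divmod gives q=1, r=3;
        # 2*3 + (1&1) = 7 > 4, so r becomes -1 and q becomes 2, giving the
        # remainder nearest zero (7 == 2*4 - 1).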
if 2*r + (q&1) > op2.int:
r -= op2.int
q += 1
if q >= 10**context.prec:
return context._raise_error(DivisionImpossible)
# result has same sign as self unless r is negative
sign = self._sign
if r < 0:
sign = 1-sign
r = -r
ans = _dec_from_triple(sign, str(r), ideal_exponent)
return ans._fix(context)
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
def real(self):
return self
real = property(real)
def imag(self):
return Decimal(0)
imag = property(imag)
def conjugate(self):
return self
def __complex__(self):
return complex(float(self))
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if clamp=0,
# precision-1 if clamp=1.
max_payload_len = context.prec - context.clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
if self._isnan():
# decapitate payload if necessary
return self._fix_nan(context)
else:
# self is +/-Infinity; return unaltered
return Decimal(self)
# if self is zero then exponent should be between Etiny and
# Emax if clamp==0, and between Etiny and Etop if clamp==1.
Etiny = context.Etiny()
Etop = context.Etop()
if not self:
exp_max = [context.Emax, Etop][context.clamp]
new_exp = min(max(self._exp, Etiny), exp_max)
if new_exp != self._exp:
context._raise_error(Clamped)
return _dec_from_triple(self._sign, '0', new_exp)
else:
return Decimal(self)
# exp_min is the smallest allowable exponent of the result,
# equal to max(self.adjusted()-context.prec+1, Etiny)
exp_min = len(self._int) + self._exp - context.prec
if exp_min > Etop:
# overflow: exp_min > Etop iff self.adjusted() > Emax
ans = context._raise_error(Overflow, 'above Emax', self._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
self_is_subnormal = exp_min < Etiny
if self_is_subnormal:
exp_min = Etiny
# round if self has too many digits
if self._exp < exp_min:
digits = len(self._int) + self._exp - exp_min
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp_min-1)
digits = 0
rounding_method = self._pick_rounding_function[context.rounding]
changed = rounding_method(self, digits)
coeff = self._int[:digits] or '0'
if changed > 0:
coeff = str(int(coeff)+1)
if len(coeff) > context.prec:
coeff = coeff[:-1]
exp_min += 1
# check whether the rounding pushed the exponent out of range
if exp_min > Etop:
ans = context._raise_error(Overflow, 'above Emax', self._sign)
else:
ans = _dec_from_triple(self._sign, coeff, exp_min)
# raise the appropriate signals, taking care to respect
# the precedence described in the specification
if changed and self_is_subnormal:
context._raise_error(Underflow)
if self_is_subnormal:
context._raise_error(Subnormal)
if changed:
context._raise_error(Inexact)
context._raise_error(Rounded)
if not ans:
# raise Clamped on underflow to 0
context._raise_error(Clamped)
return ans
if self_is_subnormal:
context._raise_error(Subnormal)
# fold down if clamp == 1 and self has too few digits
if context.clamp == 1 and self._exp > Etop:
context._raise_error(Clamped)
self_padded = self._int + '0'*(self._exp - Etop)
return _dec_from_triple(self._sign, self_padded, Etop)
# here self was representable to begin with; return unchanged
return Decimal(self)
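    # Illustrative sketch (not part of the original module): _fix is what
    # applies the context to an arithmetic result.  Context.plus shows the
    # effect directly, here with an assumed 3-digit context:
    #   >>> from decimal import Context
    #   >>> Context(prec=3).plus(Decimal('3.14159'))
    #   Decimal('3.14')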
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_up(self, prec):
"""Rounds away from 0."""
return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
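    # Example of two of the rounding functions above, reached via quantize
    # (illustrative, not part of the original module):
    #   >>> from decimal import ROUND_HALF_EVEN, ROUND_HALF_UP
    #   >>> Decimal('2.5').quantize(Decimal('1'), rounding=ROUND_HALF_EVEN)
    #   Decimal('2')
    #   >>> Decimal('2.5').quantize(Decimal('1'), rounding=ROUND_HALF_UP)
    #   Decimal('3')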
def __round__(self, n=None):
"""Round self to the nearest integer, or to a given precision.
If only one argument is supplied, round a finite Decimal
instance self to the nearest integer. If self is infinite or
a NaN then a Python exception is raised. If self is finite
and lies exactly halfway between two integers then it is
rounded to the integer with even last digit.
>>> round(Decimal('123.456'))
123
>>> round(Decimal('-456.789'))
-457
>>> round(Decimal('-3.0'))
-3
>>> round(Decimal('2.5'))
2
>>> round(Decimal('3.5'))
4
>>> round(Decimal('Inf'))
Traceback (most recent call last):
...
OverflowError: cannot round an infinity
>>> round(Decimal('NaN'))
Traceback (most recent call last):
...
ValueError: cannot round a NaN
If a second argument n is supplied, self is rounded to n
decimal places using the rounding mode for the current
context.
For an integer n, round(self, -n) is exactly equivalent to
self.quantize(Decimal('1En')).
>>> round(Decimal('123.456'), 0)
Decimal('123')
>>> round(Decimal('123.456'), 2)
Decimal('123.46')
>>> round(Decimal('123.456'), -2)
Decimal('1E+2')
>>> round(Decimal('-Infinity'), 37)
Decimal('NaN')
>>> round(Decimal('sNaN123'), 0)
Decimal('NaN123')
"""
if n is not None:
# two-argument form: use the equivalent quantize call
if not isinstance(n, int):
raise TypeError('Second argument to round should be integral')
exp = _dec_from_triple(0, '1', -n)
return self.quantize(exp)
# one-argument form
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_HALF_EVEN))
def __floor__(self):
"""Return the floor of self, as an integer.
For a finite Decimal instance self, return the greatest
integer n such that n <= self. If self is infinite or a NaN
then a Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_FLOOR))
def __ceil__(self):
"""Return the ceiling of self, as an integer.
For a finite Decimal instance self, return the least integer n
such that n >= self. If self is infinite or a NaN then a
Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_CEILING))
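    # Illustrative examples (not part of the original module): math.floor
    # and math.ceil dispatch to the methods above:
    #   >>> import math
    #   >>> math.floor(Decimal('3.7')), math.ceil(Decimal('-3.7'))
    #   (3, -3)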
def fma(self, other, third, context=None):
"""Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed.
"""
other = _convert_other(other, raiseit=True)
third = _convert_other(third, raiseit=True)
# compute product; raise InvalidOperation if either operand is
# a signaling NaN or if the product is zero times infinity.
if self._is_special or other._is_special:
if context is None:
context = getcontext()
if self._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', self)
if other._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', other)
if self._exp == 'n':
product = self
elif other._exp == 'n':
product = other
elif self._exp == 'F':
if not other:
return context._raise_error(InvalidOperation,
'INF * 0 in fma')
product = _SignedInfinity[self._sign ^ other._sign]
elif other._exp == 'F':
if not self:
return context._raise_error(InvalidOperation,
'0 * INF in fma')
product = _SignedInfinity[self._sign ^ other._sign]
else:
product = _dec_from_triple(self._sign ^ other._sign,
str(int(self._int) * int(other._int)),
self._exp + other._exp)
return product.__add__(third, context)
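    # Worked example (illustrative, not part of the original module): with
    # an assumed 2-digit precision, fma avoids the double rounding that a
    # separate multiply and add would incur:
    #   >>> from decimal import localcontext
    #   >>> with localcontext() as ctx:
    #   ...     ctx.prec = 2
    #   ...     Decimal('1.5') * Decimal('1.5') + Decimal('0.05')
    #   ...     Decimal('1.5').fma(Decimal('1.5'), Decimal('0.05'))
    #   Decimal('2.2')
    #   Decimal('2.3')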
def _power_modulo(self, other, modulo, context=None):
"""Three argument version of __pow__"""
other = _convert_other(other)
if other is NotImplemented:
return other
modulo = _convert_other(modulo)
if modulo is NotImplemented:
return modulo
if context is None:
context = getcontext()
# deal with NaNs: if there are any sNaNs then first one wins,
# (i.e. behaviour for NaNs is identical to that of fma)
self_is_nan = self._isnan()
other_is_nan = other._isnan()
modulo_is_nan = modulo._isnan()
if self_is_nan or other_is_nan or modulo_is_nan:
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if modulo_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
modulo)
if self_is_nan:
return self._fix_nan(context)
if other_is_nan:
return other._fix_nan(context)
return modulo._fix_nan(context)
# check inputs: we apply same restrictions as Python's pow()
if not (self._isinteger() and
other._isinteger() and
modulo._isinteger()):
return context._raise_error(InvalidOperation,
'pow() 3rd argument not allowed '
'unless all arguments are integers')
if other < 0:
return context._raise_error(InvalidOperation,
'pow() 2nd argument cannot be '
'negative when 3rd argument specified')
if not modulo:
return context._raise_error(InvalidOperation,
'pow() 3rd argument cannot be 0')
# additional restriction for decimal: the modulus must be less
# than 10**prec in absolute value
if modulo.adjusted() >= context.prec:
return context._raise_error(InvalidOperation,
'insufficient precision: pow() 3rd '
'argument must not have more than '
'precision digits')
# define 0**0 == NaN, for consistency with two-argument pow
# (even though it hurts!)
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
                                        'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
if other._iseven():
sign = 0
else:
sign = self._sign
# convert modulo to a Python integer, and self and other to
# Decimal integers (i.e. force their exponents to be >= 0)
modulo = abs(int(modulo))
base = _WorkRep(self.to_integral_value())
exponent = _WorkRep(other.to_integral_value())
# compute result using integer pow()
base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
for i in range(exponent.exp):
base = pow(base, 10, modulo)
base = pow(base, exponent.int, modulo)
return _dec_from_triple(sign, str(base), 0)
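    # Illustrative examples (not part of the original module); the three
    # argument form is reached through the built-in pow():
    #   >>> pow(Decimal(3), Decimal(4), Decimal(5))
    #   Decimal('1')
    #   >>> pow(Decimal(2), Decimal(100), Decimal(7))
    #   Decimal('2')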
def _power_exact(self, other, p):
"""Attempt to compute self**other exactly.
Given Decimals self and other and an integer p, attempt to
compute an exact result for the power self**other, with p
digits of precision. Return None if self**other is not
exactly representable in p digits.
Assumes that elimination of special cases has already been
performed: self and other must both be nonspecial; self must
be positive and not numerically equal to 1; other must be
nonzero. For efficiency, other._exp should not be too large,
so that 10**abs(other._exp) is a feasible calculation."""
# In the comments below, we write x for the value of self and y for the
# value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
# and yc positive integers not divisible by 10.
# The main purpose of this method is to identify the *failure*
# of x**y to be exactly representable with as little effort as
# possible. So we look for cheap and easy tests that
# eliminate the possibility of x**y being exact. Only if all
# these tests are passed do we go on to actually compute x**y.
# Here's the main idea. Express y as a rational number m/n, with m and
# n relatively prime and n>0. Then for x**y to be exactly
# representable (at *any* precision), xc must be the nth power of a
# positive integer and xe must be divisible by n. If y is negative
# then additionally xc must be a power of either 2 or 5, hence a power
# of 2**n or 5**n.
#
# There's a limit to how small |y| can be: if y=m/n as above
# then:
#
# (1) if xc != 1 then for the result to be representable we
# need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So
# if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
# 2**(1/|y|), hence xc**|y| < 2 and the result is not
# representable.
#
# (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if
# |y| < 1/|xe| then the result is not representable.
#
# Note that since x is not equal to 1, at least one of (1) and
# (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
        # 10**-ye iff len(str(|yc|*nbits(xc))) <= -ye.
#
# There's also a limit to how large y can be, at least if it's
# positive: the normalized result will have coefficient xc**y,
# so if it's representable then xc**y < 10**p, and y <
# p/log10(xc). Hence if y*log10(xc) >= p then the result is
# not exactly representable.
        # if len(str(abs(yc*xe))) <= -ye then abs(yc*xe) < 10**-ye,
# so |y| < 1/xe and the result is not representable.
# Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
# < 1/nbits(xc).
x = _WorkRep(self)
xc, xe = x.int, x.exp
while xc % 10 == 0:
xc //= 10
xe += 1
y = _WorkRep(other)
yc, ye = y.int, y.exp
while yc % 10 == 0:
yc //= 10
ye += 1
# case where xc == 1: result is 10**(xe*y), with xe*y
# required to be an integer
if xc == 1:
xe *= yc
# result is now 10**(xe * 10**ye); xe * 10**ye must be integral
while xe % 10 == 0:
xe //= 10
ye += 1
if ye < 0:
return None
exponent = xe * 10**ye
if y.sign == 1:
exponent = -exponent
# if other is a nonnegative integer, use ideal exponent
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(exponent-ideal_exponent, p-1)
else:
zeros = 0
return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)
# case where y is negative: xc must be either a power
# of 2 or a power of 5.
if y.sign == 1:
last_digit = xc % 10
if last_digit in (2,4,6,8):
# quick test for power of 2
if xc & -xc != xc:
return None
# now xc is a power of 2; e is its exponent
e = _nbits(xc)-1
# We now have:
#
# x = 2**e * 10**xe, e > 0, and y < 0.
#
# The exact result is:
#
# x**y = 5**(-e*y) * 10**(e*y + xe*y)
#
# provided that both e*y and xe*y are integers. Note that if
# 5**(-e*y) >= 10**p, then the result can't be expressed
# exactly with p digits of precision.
#
# Using the above, we can guard against large values of ye.
# 93/65 is an upper bound for log(10)/log(5), so if
#
# ye >= len(str(93*p//65))
#
# then
#
# -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
#
# so 5**(-e*y) >= 10**p, and the coefficient of the result
# can't be expressed in p digits.
# emax >= largest e such that 5**e < 10**p.
emax = p*93//65
if ye >= len(str(emax)):
return None
# Find -e*y and -xe*y; both must be integers
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 5**e
elif last_digit == 5:
# e >= log_5(xc) if xc is a power of 5; we have
# equality all the way up to xc=5**2658
e = _nbits(xc)*28//65
xc, remainder = divmod(5**e, xc)
if remainder:
return None
while xc % 5 == 0:
xc //= 5
e -= 1
# Guard against large values of ye, using the same logic as in
# the 'xc is a power of 2' branch. 10/3 is an upper bound for
# log(10)/log(2).
emax = p*10//3
if ye >= len(str(emax)):
return None
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 2**e
else:
return None
if xc >= 10**p:
return None
xe = -e-xe
return _dec_from_triple(0, str(xc), xe)
# now y is positive; find m and n such that y = m/n
if ye >= 0:
m, n = yc*10**ye, 1
else:
if xe != 0 and len(str(abs(yc*xe))) <= -ye:
return None
xc_bits = _nbits(xc)
if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
return None
m, n = yc, 10**(-ye)
while m % 2 == n % 2 == 0:
m //= 2
n //= 2
while m % 5 == n % 5 == 0:
m //= 5
n //= 5
# compute nth root of xc*10**xe
if n > 1:
# if 1 < xc < 2**n then xc isn't an nth power
if xc != 1 and xc_bits <= n:
return None
xe, rem = divmod(xe, n)
if rem != 0:
return None
# compute nth root of xc using Newton's method
a = 1 << -(-_nbits(xc)//n) # initial estimate
while True:
q, r = divmod(xc, a**(n-1))
if a <= q:
break
else:
a = (a*(n-1) + q)//n
if not (a == q and r == 0):
return None
xc = a
# now xc*10**xe is the nth root of the original xc*10**xe
# compute mth power of xc*10**xe
# if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
# 10**p and the result is not representable.
if xc > 1 and m > p*100//_log10_lb(xc):
return None
xc = xc**m
xe *= m
if xc > 10**p:
return None
# by this point the result *is* exactly representable
# adjust the exponent to get as close as possible to the ideal
# exponent, if necessary
str_xc = str(xc)
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(xe-ideal_exponent, p-len(str_xc))
else:
zeros = 0
return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
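    # Illustrative sketch (not part of the original module): square roots of
    # perfect squares are detected as exact, others are not:
    #   >>> Decimal(4)._power_exact(Decimal('0.5'), 5)
    #   Decimal('2')
    #   >>> Decimal(3)._power_exact(Decimal('0.5'), 5) is None
    #   True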
def __pow__(self, other, modulo=None, context=None):
"""Return self ** other [ % modulo].
With two arguments, compute self**other.
With three arguments, compute (self**other) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- other must be nonnegative
- either self or other (or both) must be nonzero
- modulo must be nonzero and must have at most p digits,
where p is the context precision.
If any of these restrictions is violated the InvalidOperation
flag is raised.
The result of pow(self, other, modulo) is identical to the
result that would be obtained by computing (self**other) %
modulo with unbounded precision, but is computed more
efficiently. It is always exact.
"""
if modulo is not None:
return self._power_modulo(other, modulo, context)
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
# either argument is a NaN => result is NaN
ans = self._check_nans(other, context)
if ans:
return ans
# 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
if not other:
if not self:
return context._raise_error(InvalidOperation, '0 ** 0')
else:
return _One
# result has sign 1 iff self._sign is 1 and other is an odd integer
result_sign = 0
if self._sign == 1:
if other._isinteger():
if not other._iseven():
result_sign = 1
else:
# -ve**noninteger = NaN
# (-0)**noninteger = 0**noninteger
if self:
return context._raise_error(InvalidOperation,
'x ** y with x negative and y not an integer')
# negate self, without doing any unwanted rounding
self = self.copy_negate()
# 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
if not self:
if other._sign == 0:
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
if self._isinfinity():
if other._sign == 0:
return _SignedInfinity[result_sign]
else:
return _dec_from_triple(result_sign, '0', 0)
# 1**other = 1, but the choice of exponent and the flags
# depend on the exponent of self, and on whether other is a
# positive integer, a negative integer, or neither
if self == _One:
if other._isinteger():
# exp = max(self._exp*max(int(other), 0),
# 1-context.prec) but evaluating int(other) directly
# is dangerous until we know other is small (other
# could be 1e999999999)
if other._sign == 1:
multiplier = 0
elif other > context.prec:
multiplier = context.prec
else:
multiplier = int(other)
exp = self._exp * multiplier
if exp < 1-context.prec:
exp = 1-context.prec
context._raise_error(Rounded)
else:
context._raise_error(Inexact)
context._raise_error(Rounded)
exp = 1-context.prec
return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)
# compute adjusted exponent of self
self_adj = self.adjusted()
# self ** infinity is infinity if self > 1, 0 if self < 1
# self ** -infinity is infinity if self < 1, 0 if self > 1
if other._isinfinity():
if (other._sign == 0) == (self_adj < 0):
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# from here on, the result always goes through the call
# to _fix at the end of this function.
ans = None
exact = False
# crude test to catch cases of extreme overflow/underflow. If
# log10(self)*other >= 10**bound and bound >= len(str(Emax))
# then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
# self**other >= 10**(Emax+1), so overflow occurs. The test
# for underflow is similar.
bound = self._log10_exp_bound() + other.adjusted()
if (self_adj >= 0) == (other._sign == 0):
# self > 1 and other +ve, or self < 1 and other -ve
# possibility of overflow
if bound >= len(str(context.Emax)):
ans = _dec_from_triple(result_sign, '1', context.Emax+1)
else:
# self > 1 and other -ve, or self < 1 and other +ve
# possibility of underflow to 0
Etiny = context.Etiny()
if bound >= len(str(-Etiny)):
ans = _dec_from_triple(result_sign, '1', Etiny-1)
# try for an exact result with precision +1
if ans is None:
ans = self._power_exact(other, context.prec + 1)
if ans is not None:
if result_sign == 1:
ans = _dec_from_triple(1, ans._int, ans._exp)
exact = True
# usual case: inexact result, x**y computed directly as exp(y*log(x))
if ans is None:
p = context.prec
x = _WorkRep(self)
xc, xe = x.int, x.exp
y = _WorkRep(other)
yc, ye = y.int, y.exp
if y.sign == 1:
yc = -yc
# compute correctly rounded result: start with precision +3,
# then increase precision until result is unambiguously roundable
extra = 3
while True:
coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(result_sign, str(coeff), exp)
# unlike exp, ln and log10, the power function respects the
# rounding mode; no need to switch to ROUND_HALF_EVEN here
# There's a difficulty here when 'other' is not an integer and
# the result is exact. In this case, the specification
# requires that the Inexact flag be raised (in spite of
# exactness), but since the result is exact _fix won't do this
# for us. (Correspondingly, the Underflow signal should also
# be raised for subnormal results.) We can't directly raise
# these signals either before or after calling _fix, since
# that would violate the precedence for signals. So we wrap
# the ._fix call in a temporary context, and reraise
# afterwards.
if exact and not other._isinteger():
# pad with zeros up to length context.prec+1 if necessary; this
# ensures that the Rounded signal will be raised.
if len(ans._int) <= context.prec:
expdiff = context.prec + 1 - len(ans._int)
ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
ans._exp-expdiff)
# create a copy of the current context, with cleared flags/traps
newcontext = context.copy()
newcontext.clear_flags()
for exception in _signals:
newcontext.traps[exception] = 0
# round in the new context
ans = ans._fix(newcontext)
# raise Inexact, and if necessary, Underflow
newcontext._raise_error(Inexact)
if newcontext.flags[Subnormal]:
newcontext._raise_error(Underflow)
# propagate signals to the original context; _fix could
# have raised any of Overflow, Underflow, Subnormal,
# Inexact, Rounded, Clamped. Overflow needs the correct
# arguments. Note that the order of the exceptions is
# important here.
if newcontext.flags[Overflow]:
context._raise_error(Overflow, 'above Emax', ans._sign)
for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
if newcontext.flags[exception]:
context._raise_error(exception)
else:
ans = ans._fix(context)
return ans
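    # Illustrative examples (not part of the original module); with the
    # default 28-digit context:
    #   >>> Decimal(2) ** Decimal(3)
    #   Decimal('8')
    #   >>> Decimal(2) ** Decimal('0.5')
    #   Decimal('1.414213562373095048801688724')
    #   >>> Decimal(4) ** Decimal('0.5')   # exact, but padded; Inexact is raised
    #   Decimal('2.000000000000000000000000000')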
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return _dec_from_triple(dup._sign, '0', 0)
exp_max = [context.Emax, context.Etop()][context.clamp]
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == '0' and exp < exp_max:
exp += 1
end -= 1
return _dec_from_triple(dup._sign, dup._int[:end], exp)
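    # Illustrative examples (not part of the original module):
    #   >>> Decimal('120.00').normalize()
    #   Decimal('1.2E+2')
    #   >>> Decimal('-0.00').normalize()
    #   Decimal('-0')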
def quantize(self, exp, rounding=None, context=None, watchexp=True):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
exp = _convert_other(exp, raiseit=True)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return Decimal(self) # if both are inf, it is OK
return context._raise_error(InvalidOperation,
'quantize with one INF')
# if we're not watching exponents, do a simple rescale
if not watchexp:
ans = self._rescale(exp._exp, rounding)
# raise Inexact and Rounded where appropriate
if ans._exp > self._exp:
context._raise_error(Rounded)
if ans != self:
context._raise_error(Inexact)
return ans
# exp._exp should be between Etiny and Emax
if not (context.Etiny() <= exp._exp <= context.Emax):
return context._raise_error(InvalidOperation,
'target exponent out of bounds in quantize')
if not self:
ans = _dec_from_triple(self._sign, '0', exp._exp)
return ans._fix(context)
self_adjusted = self.adjusted()
if self_adjusted > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if self_adjusted - exp._exp + 1 > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
ans = self._rescale(exp._exp, rounding)
if ans.adjusted() > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if len(ans._int) > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
# raise appropriate flags
if ans and ans.adjusted() < context.Emin:
context._raise_error(Subnormal)
if ans._exp > self._exp:
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
# call to fix takes care of any necessary folddown, and
# signals Clamped if necessary
ans = ans._fix(context)
return ans
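    # Illustrative example (not part of the original module):
    #   >>> Decimal('1.41421356').quantize(Decimal('1.000'))
    #   Decimal('1.414')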
def same_quantum(self, other, context=None):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
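    # Illustrative examples (not part of the original module): only the
    # exponents are compared, not the values:
    #   >>> Decimal('1.00').same_quantum(Decimal('0.05'))
    #   True
    #   >>> Decimal('1.00').same_quantum(Decimal('0.5'))
    #   False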
def _rescale(self, exp, rounding):
"""Rescale self so that the exponent is exp, either by padding with zeros
or by truncating digits, using the given rounding mode.
Specials are returned without change. This operation is
quiet: it raises no flags, and uses no information from the
context.
exp = exp to scale to (an integer)
rounding = rounding mode
"""
if self._is_special:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', exp)
if self._exp >= exp:
# pad answer with zeros if necessary
return _dec_from_triple(self._sign,
self._int + '0'*(self._exp - exp), exp)
# too many digits; round and lose data. If self.adjusted() <
# exp-1, replace self by 10**(exp-1) before rounding
digits = len(self._int) + self._exp - exp
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp-1)
digits = 0
this_function = self._pick_rounding_function[rounding]
changed = this_function(self, digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
"""Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
"""
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale alters the adjusted exponent;
# for example when rounding 99.97 to 3 significant figures.
# When this happens we end up with an extra 0 at the end of
# the number; a second rescale fixes this.
if ans.adjusted() != self.adjusted():
ans = ans._rescale(ans.adjusted()+1-places, rounding)
return ans
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
def to_integral_value(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
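    # Illustrative examples (not part of the original module); the default
    # rounding is ROUND_HALF_EVEN, so ties go to the even neighbour:
    #   >>> Decimal('10.5').to_integral_value()
    #   Decimal('10')
    #   >>> Decimal('11.5').to_integral_value()
    #   Decimal('12')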
def sqrt(self, context=None):
"""Return the square root of self."""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
# exponent = self._exp // 2. sqrt(-0) = -0
ans = _dec_from_triple(self._sign, '0', self._exp // 2)
return ans._fix(context)
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
# At this point self represents a positive number. Let p be
# the desired precision and express self in the form c*100**e
# with c a positive real number and e an integer, c and e
# being chosen so that 100**(p-1) <= c < 100**p. Then the
# (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
# <= sqrt(c) < 10**p, so the closest representable Decimal at
# precision p is n*10**e where n = round_half_even(sqrt(c)),
# the closest integer to sqrt(c) with the even integer chosen
# in the case of a tie.
#
# To ensure correct rounding in all cases, we use the
# following trick: we compute the square root to an extra
# place (precision p+1 instead of precision p), rounding down.
# Then, if the result is inexact and its last digit is 0 or 5,
# we increase the last digit to 1 or 6 respectively; if it's
# exact we leave the last digit alone. Now the final round to
# p places (or fewer in the case of underflow) will round
# correctly and raise the appropriate flags.
# use an extra digit of precision
prec = context.prec+1
# write argument in the form c*100**e where e = self._exp//2
# is the 'ideal' exponent, to be used if the square root is
# exactly representable. l is the number of 'digits' of c in
# base 100, so that 100**(l-1) <= c < 100**l.
op = _WorkRep(self)
e = op.exp >> 1
if op.exp & 1:
c = op.int * 10
l = (len(self._int) >> 1) + 1
else:
c = op.int
l = len(self._int)+1 >> 1
# rescale so that c has exactly prec base 100 'digits'
shift = prec-l
if shift >= 0:
c *= 100**shift
exact = True
else:
c, remainder = divmod(c, 100**-shift)
exact = not remainder
e -= shift
# find n = floor(sqrt(c)) using Newton's method
n = 10**prec
while True:
q = c//n
if n <= q:
break
else:
n = n + q >> 1
exact = exact and n*n == c
if exact:
# result is exact; rescale to use ideal exponent e
if shift >= 0:
# assert n % 10**shift == 0
n //= 10**shift
else:
n *= 10**-shift
e += shift
else:
# result is not exact; fix last digit as described above
if n % 5 == 0:
n += 1
ans = _dec_from_triple(0, str(n), e)
# round, and fit to current context
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
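    # Illustrative examples (not part of the original module); with the
    # default 28-digit context:
    #   >>> Decimal(2).sqrt()
    #   Decimal('1.414213562373095048801688724')
    #   >>> Decimal(100).sqrt()   # exact case, uses the ideal exponent
    #   Decimal('10')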
def max(self, other, context=None):
"""Returns the larger value.
        Like max(self, other) except that a quiet NaN in just one operand
        is ignored (the other operand is returned), sNaNs signal, and the
        result is rounded.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
# If both operands are finite and equal in numerical value
# then an ordering is applied:
#
# If the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# If the signs are the same then the exponent is used to select
# the result. This is exactly the ordering used in compare_total.
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min(self, other, context=None):
"""Returns the smaller value.
        Like min(self, other) except that a quiet NaN in just one operand
        is ignored (the other operand is returned), sNaNs signal, and the
        result is rounded.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
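    # Illustrative examples (not part of the original module): a single
    # quiet NaN operand is ignored by max and min:
    #   >>> Decimal('NaN').max(Decimal(2))
    #   Decimal('2')
    #   >>> Decimal('NaN').min(Decimal(2))
    #   Decimal('2')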
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If NaN or Infinity, self._exp is string
except TypeError:
return 0
def canonical(self):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
other = _convert_other(other, raiseit = True)
ans = self._compare_check_nans(other, context)
if ans:
return ans
return self.compare(other, context=context)
def compare_total(self, other, context=None):
"""Compares self to other using the abstract representations.
        This is not like the standard compare, which uses the numerical
        values. Note that a total ordering is defined for all possible abstract
representations.
"""
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
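    # Illustrative example (not part of the original module): numerically
    # equal values are ordered by exponent under the total ordering:
    #   >>> Decimal('12.0').compare_total(Decimal('12'))
    #   Decimal('-1')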
def compare_total_mag(self, other, context=None):
"""Compares self to other using abstract repr., ignoring sign.
        Like compare_total, but with the operands' signs ignored and assumed to be 0.
"""
other = _convert_other(other, raiseit=True)
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other, context=None):
"""Returns self with the sign of other."""
other = _convert_other(other, raiseit=True)
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
def exp(self, context=None):
"""Returns e ** self."""
if context is None:
context = getcontext()
# exp(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# exp(-Infinity) = 0
if self._isinfinity() == -1:
return _Zero
# exp(0) = 1
if not self:
return _One
# exp(Infinity) = Infinity
if self._isinfinity() == 1:
return Decimal(self)
# the result is now guaranteed to be inexact (the true
# mathematical result is transcendental). There's no need to
# raise Rounded and Inexact here---they'll always be raised as
# a result of the call to _fix.
p = context.prec
adj = self.adjusted()
# we only need to do any computation for quite a small range
# of adjusted exponents---for example, -29 <= adj <= 10 for
# the default context. For smaller exponent the result is
# indistinguishable from 1 at the given precision, while for
# larger exponent the result either overflows or underflows.
if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
# overflow
ans = _dec_from_triple(0, '1', context.Emax+1)
elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
# underflow to 0
ans = _dec_from_triple(0, '1', context.Etiny()-1)
elif self._sign == 0 and adj < -p:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
elif self._sign == 1 and adj < -p-1:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '9'*(p+1), -p-1)
# general case
else:
op = _WorkRep(self)
c, e = op.int, op.exp
if op.sign == 1:
c = -c
# compute correctly rounded result: increase precision by
# 3 digits at a time until we get an unambiguously
# roundable result
extra = 3
while True:
coeff, exp = _dexp(c, e, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(0, str(coeff), exp)
# at this stage, ans should round correctly with *any*
# rounding mode, not just with ROUND_HALF_EVEN
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
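    # Illustrative example (not part of the original module); with the
    # default 28-digit context:
    #   >>> Decimal(1).exp()
    #   Decimal('2.718281828459045235360287471')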
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
def _ln_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.ln().
In other words, compute r such that self.ln() >= 10**r. Assumes
that self is finite and positive and that self != 1.
"""
# for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
adj = self._exp + len(self._int) - 1
if adj >= 1:
# argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
return len(str(adj*23//10)) - 1
if adj <= -2:
# argument <= 0.1
return len(str((-1-adj)*23//10)) - 1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(c)
return len(num) - len(den) - (num < den)
# adj == -1, 0.1 <= self < 1
return e + len(str(10**-e - c)) - 1
def ln(self, context=None):
"""Returns the natural (base e) logarithm of self."""
if context is None:
context = getcontext()
# ln(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# ln(0.0) == -Infinity
if not self:
return _NegativeInfinity
# ln(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# ln(1.0) == 0.0
if self == _One:
return _Zero
# ln(negative) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'ln of a negative value')
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision by 3
# until we get an unambiguously roundable result
places = p - self._ln_exp_bound() + 2 # at least p+3 places
while True:
coeff = _dlog(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
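    # Illustrative example (not part of the original module); with the
    # default 28-digit context:
    #   >>> Decimal(10).ln()
    #   Decimal('2.302585092994045684017991455')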
def _log10_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.log10().
In other words, find r such that self.log10() >= 10**r.
Assumes that self is finite and positive and that self != 1.
"""
# For x >= 10 or x < 0.1 we only need a bound on the integer
# part of log10(self), and this comes directly from the
# exponent of x. For 0.1 <= x <= 10 we use the inequalities
# 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
# (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0
adj = self._exp + len(self._int) - 1
if adj >= 1:
# self >= 10
return len(str(adj))-1
if adj <= -2:
# self < 0.1
return len(str(-1-adj))-1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(231*c)
return len(num) - len(den) - (num < den) + 2
# adj == -1, 0.1 <= self < 1
num = str(10**-e-c)
return len(num) + e - (num < "231") - 1
def log10(self, context=None):
"""Returns the base 10 logarithm of self."""
if context is None:
context = getcontext()
# log10(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# log10(0.0) == -Infinity
if not self:
return _NegativeInfinity
# log10(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# log10(negative or -Infinity) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'log10 of a negative value')
# log10(10**n) = n
if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
# answer may need rounding
ans = Decimal(self._exp + len(self._int) - 1)
else:
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision
# until result is unambiguously roundable
places = p-self._log10_exp_bound()+2
while True:
coeff = _dlog10(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
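    # Illustrative examples (not part of the original module): powers of
    # ten take the exact shortcut above:
    #   >>> Decimal('100').log10()
    #   Decimal('2')
    #   >>> Decimal('0.001').log10()
    #   Decimal('-3')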
def logb(self, context=None):
""" Returns the exponent of the magnitude of self's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of self (as though it were truncated
to a single digit while maintaining the value of that digit and
without limiting the resulting exponent).
"""
# logb(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
# logb(+/-Inf) = +Inf
if self._isinfinity():
return _Infinity
# logb(0) = -Inf, DivisionByZero
if not self:
return context._raise_error(DivisionByZero, 'logb(0)', 1)
# otherwise, simply return the adjusted exponent of self, as a
# Decimal. Note that no attempt is made to fit the result
# into the current context.
ans = Decimal(self.adjusted())
return ans._fix(context)
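    # Illustrative examples (not part of the original module): logb returns
    # the adjusted exponent as a Decimal:
    #   >>> Decimal('250').logb()
    #   Decimal('2')
    #   >>> Decimal('0.03').logb()
    #   Decimal('-2')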
def _islogical(self):
"""Return True if self is a logical operand.
For being logical, it must be a finite number with a sign of 0,
an exponent of 0, and a coefficient whose digits must all be
either 0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
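    # Illustrative examples (not part of the original module); the logical
    # operations work digit-by-digit on operands made of 0s and 1s:
    #   >>> Decimal('1100').logical_and(Decimal('1010'))
    #   Decimal('1000')
    #   >>> Decimal('1100').logical_xor(Decimal('1010'))
    #   Decimal('110')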
def max_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def next_minus(self, context=None):
"""Returns the largest representable number smaller than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == -1:
return _NegativeInfinity
if self._isinfinity() == 1:
return _dec_from_triple(0, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_FLOOR)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_plus(self, context=None):
"""Returns the smallest representable number larger than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == 1:
return _Infinity
if self._isinfinity() == -1:
return _dec_from_triple(1, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_CEILING)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_toward(self, other, context=None):
"""Returns the number closest to self, in the direction towards other.
The result is the closest representable number to self
(excluding self) that is in the direction towards other,
unless both have the same value. If the two operands are
numerically equal, then the result is a copy of self with the
sign set to be the same as the sign of other.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
comparison = self._cmp(other)
if comparison == 0:
return self.copy_sign(other)
if comparison == -1:
ans = self.next_plus(context)
else: # comparison == 1
ans = self.next_minus(context)
# decide which flags to raise using value of ans
if ans._isinfinity():
context._raise_error(Overflow,
'Infinite result from next_toward',
ans._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
elif ans.adjusted() < context.Emin:
context._raise_error(Underflow)
context._raise_error(Subnormal)
context._raise_error(Inexact)
context._raise_error(Rounded)
# if precision == 1 then we don't raise Clamped for a
# result 0E-Etiny.
if not ans:
context._raise_error(Clamped)
return ans
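    # Illustrative examples (not part of the original module); with the
    # default 28-digit context:
    #   >>> Decimal(1).next_plus()
    #   Decimal('1.000000000000000000000000001')
    #   >>> Decimal(1).next_minus()
    #   Decimal('0.9999999999999999999999999999')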
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
def radix(self):
"""Just returns 10, as this is Decimal, :)"""
return Decimal(10)
def rotate(self, other, context=None):
"""Returns a rotated copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's rotate!
rotated = rotdig[torot:] + rotdig[:torot]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
def scaleb(self, other, context=None):
"""Returns self operand after adding the second value to its exp."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
liminf = -2 * (context.Emax + context.prec)
limsup = 2 * (context.Emax + context.prec)
if not (liminf <= int(other) <= limsup):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
d = d._fix(context)
return d
def shift(self, other, context=None):
"""Returns a shifted copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's shift!
if torot < 0:
shifted = rotdig[:torot]
else:
shifted = rotdig + '0'*torot
shifted = shifted[-context.prec:]
return _dec_from_triple(self._sign,
shifted.lstrip('0') or '0', self._exp)
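    # Illustrative examples (not part of the original module); with the
    # default context:
    #   >>> Decimal('1234567890').rotate(3)
    #   Decimal('1234567890000')
    #   >>> Decimal('12').shift(2)
    #   Decimal('1200')
    #   >>> Decimal('12').shift(-1)
    #   Decimal('1')
    #   >>> Decimal('7.50').scaleb(3)
    #   Decimal('7.50E+3')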
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
# PEP 3101 support. the _localeconv keyword argument should be
# considered private: it's provided for ease of testing only.
def __format__(self, specifier, context=None, _localeconv=None):
"""Format a Decimal instance according to the given specifier.
The specifier should be a standard format specifier, with the
form described in PEP 3101. Formatting types 'e', 'E', 'f',
'F', 'g', 'G', 'n' and '%' are supported. If the formatting
type is omitted it defaults to 'g' or 'G', depending on the
value of context.capitals.
"""
# Note: PEP 3101 says that if the type is not present then
# there should be at least one digit after the decimal point.
# We take the liberty of ignoring this requirement for
# Decimal---it's presumably there to make sure that
# format(float, '') behaves similarly to str(float).
if context is None:
context = getcontext()
spec = _parse_format_specifier(specifier, _localeconv=_localeconv)
# special values don't care about the type or precision
if self._is_special:
sign = _format_sign(self._sign, spec)
body = str(self.copy_abs())
return _format_align(sign, body, spec)
# a type of None defaults to 'g' or 'G', depending on context
if spec['type'] is None:
spec['type'] = ['g', 'G'][context.capitals]
# if type is '%', adjust exponent of self accordingly
if spec['type'] == '%':
self = _dec_from_triple(self._sign, self._int, self._exp+2)
# round if necessary, taking rounding mode from the context
rounding = context.rounding
precision = spec['precision']
if precision is not None:
if spec['type'] in 'eE':
self = self._round(precision+1, rounding)
elif spec['type'] in 'fF%':
self = self._rescale(-precision, rounding)
elif spec['type'] in 'gG' and len(self._int) > precision:
self = self._round(precision, rounding)
# special case: zeros with a positive exponent can't be
# represented in fixed point; rescale them to 0e0.
if not self and self._exp > 0 and spec['type'] in 'fF%':
self = self._rescale(0, rounding)
# figure out placement of the decimal point
leftdigits = self._exp + len(self._int)
if spec['type'] in 'eE':
if not self and precision is not None:
dotplace = 1 - precision
else:
dotplace = 1
elif spec['type'] in 'fF%':
dotplace = leftdigits
elif spec['type'] in 'gG':
if self._exp <= 0 and leftdigits > -6:
dotplace = leftdigits
else:
dotplace = 1
# find digits before and after decimal point, and get exponent
if dotplace < 0:
intpart = '0'
fracpart = '0'*(-dotplace) + self._int
elif dotplace > len(self._int):
intpart = self._int + '0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace] or '0'
fracpart = self._int[dotplace:]
exp = leftdigits-dotplace
# done with the decimal-specific stuff; hand over the rest
# of the formatting to the _format_number function
return _format_number(self._sign, intpart, fracpart, exp, spec)
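    # A few illustrative results of the algorithm above (a sketch, using the
    # default context and the PEP 3101 semantics described in __format__):
    #
    #     format(Decimal('1234.5678'), '.2f')  ->  '1234.57'
    #     format(Decimal('1234.5678'), '.3e')  ->  '1.235e+3'
    #     format(Decimal('0.25'), '.1%')       ->  '25.0%'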
def _dec_from_triple(sign, coefficient, exponent, special=False):
"""Create a decimal instance directly, without any validation,
normalization (e.g. removal of leading zeros) or argument
conversion.
This function is for *internal use only*.
"""
self = object.__new__(Decimal)
self._sign = sign
self._int = coefficient
self._exp = exponent
self._is_special = special
return self
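# For example, _dec_from_triple(1, '123', -2) builds Decimal('-1.23')
# directly from its (sign, coefficient, exponent) triple, with none of the
# validation that the Decimal constructor performs.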
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
_numbers.Number.register(Decimal)
##### Context class #######################################################
class _ContextManager(object):
"""Context manager class to support localcontext().
Sets a copy of the supplied context in __enter__() and restores
the previous decimal context in __exit__()
"""
def __init__(self, new_context):
self.new_context = new_context.copy()
def __enter__(self):
self.saved_context = getcontext()
setcontext(self.new_context)
return self.new_context
def __exit__(self, t, v, tb):
setcontext(self.saved_context)
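# Minimal usage sketch: the module-level localcontext() helper (defined
# elsewhere in this file) wraps _ContextManager, so any changes made to the
# context inside the block are scoped to it:
#
#     with localcontext() as ctx:
#         ctx.prec = 50      # applies only inside this block
#     # the previous context is restored on exit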
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is set.
(Whether or not the trap_enabler is set)
             Should be reset by the user of the Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None, traps=None,
_ignored_flags=None):
# Set defaults; for everything except flags and _ignored_flags,
# inherit from DefaultContext.
try:
dc = DefaultContext
except NameError:
pass
self.prec = prec if prec is not None else dc.prec
self.rounding = rounding if rounding is not None else dc.rounding
self.Emin = Emin if Emin is not None else dc.Emin
self.Emax = Emax if Emax is not None else dc.Emax
self.capitals = capitals if capitals is not None else dc.capitals
self.clamp = clamp if clamp is not None else dc.clamp
if _ignored_flags is None:
self._ignored_flags = []
else:
self._ignored_flags = _ignored_flags
if traps is None:
self.traps = dc.traps.copy()
elif not isinstance(traps, dict):
self.traps = dict((s, int(s in traps)) for s in _signals + traps)
else:
self.traps = traps
if flags is None:
self.flags = dict.fromkeys(_signals, 0)
elif not isinstance(flags, dict):
self.flags = dict((s, int(s in flags)) for s in _signals + flags)
else:
self.flags = flags
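    # For example (a sketch): Context(prec=6, traps=[InvalidOperation])
    # takes the non-dict branch above, building self.traps as a dict that
    # maps every signal to 0 except InvalidOperation, which maps to 1.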
def _set_integer_check(self, name, value, vmin, vmax):
if not isinstance(value, int):
raise TypeError("%s must be an integer" % name)
if vmin == '-inf':
if value > vmax:
raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
elif vmax == 'inf':
if value < vmin:
raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
else:
if value < vmin or value > vmax:
raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value))
return object.__setattr__(self, name, value)
def _set_signal_dict(self, name, d):
if not isinstance(d, dict):
            raise TypeError("%s must be a signal dict" % name)
for key in d:
if not key in _signals:
raise KeyError("%s is not a valid signal dict" % d)
for key in _signals:
if not key in d:
raise KeyError("%s is not a valid signal dict" % d)
return object.__setattr__(self, name, d)
def __setattr__(self, name, value):
if name == 'prec':
return self._set_integer_check(name, value, 1, 'inf')
elif name == 'Emin':
return self._set_integer_check(name, value, '-inf', 0)
elif name == 'Emax':
return self._set_integer_check(name, value, 0, 'inf')
elif name == 'capitals':
return self._set_integer_check(name, value, 0, 1)
elif name == 'clamp':
return self._set_integer_check(name, value, 0, 1)
elif name == 'rounding':
if not value in _rounding_modes:
# raise TypeError even for strings to have consistency
# among various implementations.
raise TypeError("%s: invalid rounding mode" % value)
return object.__setattr__(self, name, value)
elif name == 'flags' or name == 'traps':
return self._set_signal_dict(name, value)
elif name == '_ignored_flags':
return object.__setattr__(self, name, value)
else:
raise AttributeError(
"'decimal.Context' object has no attribute '%s'" % name)
def __delattr__(self, name):
raise AttributeError("%s cannot be deleted" % name)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
flags = [sig for sig, v in self.flags.items() if v]
traps = [sig for sig, v in self.traps.items() if v]
return (self.__class__,
(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, flags, traps))
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
'clamp=%(clamp)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def clear_traps(self):
"""Reset all traps to zero"""
for flag in self.traps:
self.traps[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, self.flags, self.traps,
self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp,
self.flags.copy(), self.traps.copy(),
self._ignored_flags)
return nc
__copy__ = copy
def _raise_error(self, condition, explanation = None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
Otherwise, it sets the flag, then, if the corresponding
trap_enabler is set, it reraises the exception. Otherwise, it returns
the default value after setting the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
# Don't touch the flag
return error().handle(self, *args)
self.flags[error] = 1
if not self.traps[error]:
# The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
# self._ignored_flags = []
raise error(explanation)
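    # For example (a sketch): with traps[DivisionByZero] == 0, dividing a
    # finite nonzero Decimal by zero returns a signed Infinity and sets
    # flags[DivisionByZero]; with the trap enabled, the same operation
    # raises DivisionByZero instead.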
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
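    # For instance, with prec=28, Emin=-999999 and Emax=999999 (the
    # DefaultContext settings), Etiny() == -1000026 and Etop() == 999972.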
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
        self.rounding = type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context.
This method implements the to-number operation of the
IBM Decimal specification."""
if isinstance(num, str) and num != num.strip():
return self._raise_error(ConversionSyntax,
"no trailing or leading whitespace is "
"permitted.")
d = Decimal(num, context=self)
if d._isnan() and len(d._int) > self.prec - self.clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
def create_decimal_from_float(self, f):
"""Creates a new Decimal instance from a float but rounding using self
as the context.
>>> context = Context(prec=5, rounding=ROUND_DOWN)
>>> context.create_decimal_from_float(3.1415926535897932)
Decimal('3.1415')
>>> context = Context(prec=5, traps=[Inexact])
>>> context.create_decimal_from_float(3.1415926535897932)
Traceback (most recent call last):
...
decimal.Inexact: None
"""
d = Decimal.from_float(f) # An exact conversion
return d._fix(self) # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
        As we do not have different encodings for the same number, the
        received object is already in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
if not isinstance(a, Decimal):
raise TypeError("canonical requires a Decimal as an argument.")
return a.canonical()
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
        This is not like the standard compare, which uses the numerical
        values.  Note that a total ordering is defined for all possible
        abstract representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
        Like compare_total, but with the operands' signs ignored and assumed
        to be 0.
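        Illustrative examples (following the compare_total ordering applied
        to the operands' absolute values):
        >>> ExtendedContext.compare_total_mag(Decimal('-12.73'), Decimal('127.9'))
        Decimal('-1')
        >>> ExtendedContext.compare_total_mag(Decimal('-2'), Decimal('2'))
        Decimal('0')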
"""
a = _convert_other(a, raiseit=True)
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.copy_abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal object.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal('-1.00')
>>> ExtendedContext.copy_decimal(1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal('-101.5')
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.copy_negate(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(1, -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(Decimal(1), -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(1, Decimal(-2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__truediv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal('0')
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal('3')
>>> ExtendedContext.divide_int(10, 3)
Decimal('3')
>>> ExtendedContext.divide_int(Decimal(10), 3)
Decimal('3')
>>> ExtendedContext.divide_int(10, Decimal(3))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__floordiv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal('0')
>>> c.exp(Decimal('-1'))
Decimal('0.367879441')
>>> c.exp(Decimal('0'))
Decimal('1')
>>> c.exp(Decimal('1'))
Decimal('2.71828183')
>>> c.exp(Decimal('0.693147181'))
Decimal('2.00000000')
>>> c.exp(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.exp(10)
Decimal('22026.4658')
"""
        a = _convert_other(a, raiseit=True)
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
        The first two operands are multiplied together, using multiply; the
        third operand is then added to the result of that multiplication,
        using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal('22')
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal('-8')
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal('1.38435736E+12')
>>> ExtendedContext.fma(1, 3, 4)
Decimal('7')
>>> ExtendedContext.fma(1, Decimal(3), 4)
Decimal('7')
>>> ExtendedContext.fma(1, 3, Decimal(4))
Decimal('7')
"""
a = _convert_other(a, raiseit=True)
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
if not isinstance(a, Decimal):
raise TypeError("is_canonical requires a Decimal as an argument.")
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
>>> ExtendedContext.is_finite(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
>>> ExtendedContext.is_infinite(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
>>> ExtendedContext.is_nan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
>>> c.is_normal(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
>>> ExtendedContext.is_qnan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
>>> ExtendedContext.is_signed(8)
False
>>> ExtendedContext.is_signed(-8)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
>>> ExtendedContext.is_snan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
>>> c.is_subnormal(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
>>> ExtendedContext.is_zero(1)
False
>>> ExtendedContext.is_zero(0)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal('-Infinity')
>>> c.ln(Decimal('1.000'))
Decimal('0')
>>> c.ln(Decimal('2.71828183'))
Decimal('1.00000000')
>>> c.ln(Decimal('10'))
Decimal('2.30258509')
>>> c.ln(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.ln(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal('-Infinity')
>>> c.log10(Decimal('0.001'))
Decimal('-3')
>>> c.log10(Decimal('1.000'))
Decimal('0')
>>> c.log10(Decimal('2'))
Decimal('0.301029996')
>>> c.log10(Decimal('10'))
Decimal('1')
>>> c.log10(Decimal('70'))
Decimal('1.84509804')
>>> c.log10(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.log10(0)
Decimal('-Infinity')
>>> c.log10(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal('2')
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal('0')
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal('-2')
>>> ExtendedContext.logb(Decimal('0'))
Decimal('-Infinity')
>>> ExtendedContext.logb(1)
Decimal('0')
>>> ExtendedContext.logb(10)
Decimal('1')
>>> ExtendedContext.logb(100)
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal('1000')
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal('10')
>>> ExtendedContext.logical_and(110, 1101)
Decimal('100')
>>> ExtendedContext.logical_and(Decimal(110), 1101)
Decimal('100')
>>> ExtendedContext.logical_and(110, Decimal(1101))
Decimal('100')
"""
a = _convert_other(a, raiseit=True)
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal('111111111')
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal('111111110')
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal('0')
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal('10101010')
>>> ExtendedContext.logical_invert(1101)
Decimal('111110010')
"""
a = _convert_other(a, raiseit=True)
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal('1110')
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal('1110')
>>> ExtendedContext.logical_or(110, 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(Decimal(110), 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(110, Decimal(1101))
Decimal('1111')
"""
a = _convert_other(a, raiseit=True)
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal('110')
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal('1101')
>>> ExtendedContext.logical_xor(110, 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(Decimal(110), 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(110, Decimal(1101))
Decimal('1011')
"""
a = _convert_other(a, raiseit=True)
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal('3')
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max(1, 2)
Decimal('2')
>>> ExtendedContext.max(Decimal(1), 2)
Decimal('2')
>>> ExtendedContext.max(1, Decimal(2))
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.max(b, context=self)
def max_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
Decimal('-10')
>>> ExtendedContext.max_mag(1, -2)
Decimal('-2')
>>> ExtendedContext.max_mag(Decimal(1), -2)
Decimal('-2')
>>> ExtendedContext.max_mag(1, Decimal(-2))
Decimal('-2')
"""
a = _convert_other(a, raiseit=True)
return a.max_mag(b, context=self)
def min(self, a, b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal('2')
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal('-10')
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal('1.0')
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.min(1, 2)
Decimal('1')
>>> ExtendedContext.min(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.min(1, Decimal(29))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min(b, context=self)
def min_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
Decimal('-2')
>>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
Decimal('-3')
>>> ExtendedContext.min_mag(1, -2)
Decimal('1')
>>> ExtendedContext.min_mag(Decimal(1), -2)
Decimal('1')
>>> ExtendedContext.min_mag(1, Decimal(-2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min_mag(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal('-1.3')
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal('1.3')
>>> ExtendedContext.minus(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together
('long multiplication'), resulting in a number which may be as long as
the sum of the lengths of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal('3.60')
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal('21')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal('0.72')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal('-0.0')
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal('4.28135971E+11')
>>> ExtendedContext.multiply(7, 7)
Decimal('49')
>>> ExtendedContext.multiply(Decimal(7), 7)
Decimal('49')
>>> ExtendedContext.multiply(7, Decimal(7))
Decimal('49')
"""
a = _convert_other(a, raiseit=True)
r = a.__mul__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def next_minus(self, a):
"""Returns the largest representable number smaller than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_minus(Decimal('1'))
Decimal('0.999999999')
>>> c.next_minus(Decimal('1E-1007'))
Decimal('0E-1007')
>>> ExtendedContext.next_minus(Decimal('-1.00000003'))
Decimal('-1.00000004')
>>> c.next_minus(Decimal('Infinity'))
Decimal('9.99999999E+999')
>>> c.next_minus(1)
Decimal('0.999999999')
"""
a = _convert_other(a, raiseit=True)
return a.next_minus(context=self)
def next_plus(self, a):
"""Returns the smallest representable number larger than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_plus(Decimal('1'))
Decimal('1.00000001')
>>> c.next_plus(Decimal('-1E-1007'))
Decimal('-0E-1007')
>>> ExtendedContext.next_plus(Decimal('-1.00000003'))
Decimal('-1.00000002')
>>> c.next_plus(Decimal('-Infinity'))
Decimal('-9.99999999E+999')
>>> c.next_plus(1)
Decimal('1.00000001')
"""
a = _convert_other(a, raiseit=True)
return a.next_plus(context=self)
def next_toward(self, a, b):
"""Returns the number closest to a, in direction towards b.
The result is the closest representable number from the first
operand (but not the first operand) that is in the direction
towards the second operand, unless the operands have the same
value.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.next_toward(Decimal('1'), Decimal('2'))
Decimal('1.00000001')
>>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
Decimal('-0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
Decimal('-1.00000002')
>>> c.next_toward(Decimal('1'), Decimal('0'))
Decimal('0.999999999')
>>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
Decimal('0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
Decimal('-1.00000004')
>>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
Decimal('-0.00')
>>> c.next_toward(0, 1)
Decimal('1E-1007')
>>> c.next_toward(Decimal(0), 1)
Decimal('1E-1007')
>>> c.next_toward(0, Decimal(1))
Decimal('1E-1007')
"""
a = _convert_other(a, raiseit=True)
return a.next_toward(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
"""
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
def number_class(self, a):
"""Returns an indication of the class of the operand.
The class is one of the following strings:
-sNaN
-NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.number_class(Decimal('Infinity'))
'+Infinity'
>>> c.number_class(Decimal('1E-10'))
'+Normal'
>>> c.number_class(Decimal('2.50'))
'+Normal'
>>> c.number_class(Decimal('0.1E-999'))
'+Subnormal'
>>> c.number_class(Decimal('0'))
'+Zero'
>>> c.number_class(Decimal('-0'))
'-Zero'
>>> c.number_class(Decimal('-0.1E-999'))
'-Subnormal'
>>> c.number_class(Decimal('-1E-10'))
'-Normal'
>>> c.number_class(Decimal('-2.50'))
'-Normal'
>>> c.number_class(Decimal('-Infinity'))
'-Infinity'
>>> c.number_class(Decimal('NaN'))
'NaN'
>>> c.number_class(Decimal('-NaN'))
'NaN'
>>> c.number_class(Decimal('sNaN'))
'sNaN'
>>> c.number_class(123)
'+Normal'
"""
a = _convert_other(a, raiseit=True)
return a.number_class(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal('1.3')
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal('-1.3')
>>> ExtendedContext.plus(-1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
With two arguments, compute a**b. If a is negative then b
must be integral. The result will be inexact unless b is
integral and the result is finite and can be expressed exactly
in 'precision' digits.
With three arguments, compute (a**b) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- b must be nonnegative
- at least one of a or b must be nonzero
- modulo must be nonzero and have at most 'precision' digits
The result of pow(a, b, modulo) is identical to the result
that would be obtained by computing (a**b) % modulo with
unbounded precision, but is computed more efficiently. It is
always exact.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.power(Decimal('2'), Decimal('3'))
Decimal('8')
>>> c.power(Decimal('-2'), Decimal('3'))
Decimal('-8')
>>> c.power(Decimal('2'), Decimal('-3'))
Decimal('0.125')
>>> c.power(Decimal('1.7'), Decimal('8'))
Decimal('69.7575744')
>>> c.power(Decimal('10'), Decimal('0.301029996'))
Decimal('2.00000000')
>>> c.power(Decimal('Infinity'), Decimal('-1'))
Decimal('0')
>>> c.power(Decimal('Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('Infinity'), Decimal('1'))
Decimal('Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('-1'))
Decimal('-0')
>>> c.power(Decimal('-Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('-Infinity'), Decimal('1'))
Decimal('-Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('2'))
Decimal('Infinity')
>>> c.power(Decimal('0'), Decimal('0'))
Decimal('NaN')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
Decimal('11')
>>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
Decimal('-11')
>>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
Decimal('1')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
Decimal('11')
>>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
Decimal('11729830')
>>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
Decimal('-0')
>>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
Decimal('1')
>>> ExtendedContext.power(7, 7)
Decimal('823543')
>>> ExtendedContext.power(Decimal(7), 7)
Decimal('823543')
>>> ExtendedContext.power(7, Decimal(7), 2)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__pow__(b, modulo, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded), having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is
an error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal('2.170')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal('2.17')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal('2.2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal('2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal('0E+1')
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal('-Infinity')
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal('-0')
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal('-0E+5')
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal('217.0')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal('217')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal('2.2E+2')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal('2E+2')
>>> ExtendedContext.quantize(1, 2)
Decimal('1')
>>> ExtendedContext.quantize(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.quantize(1, Decimal(2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.quantize(b, context=self)
def radix(self):
"""Just returns 10, as this is Decimal, :)
>>> ExtendedContext.radix()
Decimal('10')
"""
return Decimal(10)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded
to precision digits if necessary. The sign of the result, if
non-zero, is the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal('2.1')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal('1.0')
>>> ExtendedContext.remainder(22, 6)
Decimal('4')
>>> ExtendedContext.remainder(Decimal(22), 6)
Decimal('4')
>>> ExtendedContext.remainder(22, Decimal(6))
Decimal('4')
"""
a = _convert_other(a, raiseit=True)
r = a.__mod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
return a.remainder_near(b, context=self)
def rotate(self, a, b):
"""Returns a rotated copy of a, b times.
The coefficient of the result is a rotated copy of the digits in
the coefficient of the first operand. The number of places of
rotation is taken from the absolute value of the second operand,
with the rotation being to the left if the second operand is
positive or to the right otherwise.
>>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
Decimal('400000003')
>>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
Decimal('12')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
Decimal('891234567')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
Decimal('345678912')
>>> ExtendedContext.rotate(1333333, 1)
Decimal('13333330')
>>> ExtendedContext.rotate(Decimal(1333333), 1)
Decimal('13333330')
>>> ExtendedContext.rotate(1333333, Decimal(1))
Decimal('13333330')
"""
a = _convert_other(a, raiseit=True)
return a.rotate(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
>>> ExtendedContext.same_quantum(10000, -1)
True
>>> ExtendedContext.same_quantum(Decimal(10000), -1)
True
>>> ExtendedContext.same_quantum(10000, Decimal(-1))
True
"""
a = _convert_other(a, raiseit=True)
return a.same_quantum(b)
    def scaleb(self, a, b):
        """Returns the first operand after adding the second value to its exp.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal('0.0750')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal('7.50')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal('7.50E+3')
>>> ExtendedContext.scaleb(1, 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(Decimal(1), 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(1, Decimal(4))
Decimal('1E+4')
"""
a = _convert_other(a, raiseit=True)
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal('400000000')
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal('0')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal('1234567')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal('345678900')
>>> ExtendedContext.shift(88888888, 2)
Decimal('888888800')
>>> ExtendedContext.shift(Decimal(88888888), 2)
Decimal('888888800')
>>> ExtendedContext.shift(88888888, Decimal(2))
Decimal('888888800')
"""
a = _convert_other(a, raiseit=True)
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal('0')
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal('-0')
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal('0.624499800')
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal('10')
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal('1')
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal('2.64575131')
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal('3.16227766')
>>> ExtendedContext.sqrt(2)
Decimal('1.41421356')
>>> ExtendedContext.prec
9
"""
a = _convert_other(a, raiseit=True)
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal('0.23')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal('0.00')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal('-0.77')
>>> ExtendedContext.subtract(8, 5)
Decimal('3')
>>> ExtendedContext.subtract(Decimal(8), 5)
Decimal('3')
>>> ExtendedContext.subtract(8, Decimal(5))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__sub__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.__str__(context=self)
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self)
    # the method name changed, but we also provide the old one for compatibility
to_integral = to_integral_value
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
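# For example, _WorkRep(Decimal('-1.23')) is the triple (1, 123, -2):
# sign bit 1, the coefficient as a plain Python int, and the exponent.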
def _normalize(op1, op2, prec=0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
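# Worked example (a sketch): with prec=9, normalizing 12E+1 and 5E-2 --
# _WorkRep triples (0, 12, 1) and (0, 5, -2) -- multiplies the first
# coefficient by 10**3, giving (0, 12000, -2), so that both operands share
# the exponent -2 and their coefficients can be combined directly.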
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####
_nbits = int.bit_length
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
    b = 0
    while a != b:
        # Newton step: a <- (a + ceil(n/a)) >> 1, using -(-n//a) == ceil(n/a)
        b, a = a, a - (-n//a) >> 1
return a
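# For example, _sqrt_nearest(10, 3) returns 3, the closest integer to
# sqrt(10) ~= 3.162: the Newton step above maps a=3 straight back to 3.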
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1 << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
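# Round-half-even in action: _div_nearest(7, 2) == 4 while
# _div_nearest(5, 2) == 2, since exact halves round to the nearest even
# integer.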
def _ilog(x, M, L=8):
"""Integer approximation to M*log(x/M), with absolute error boundable
in terms only of x/M.
Given positive integers x and M, return an integer approximation to
M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
between the approximation and the exact result is at most 22. For
L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
both cases these are upper bounds on the error; it will usually be
much smaller."""
# The basic algorithm is the following: let log1p be the function
# log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use
# the reduction
#
# log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
#
# repeatedly until the argument to log1p is small (< 2**-L in
# absolute value). For small y we can use the Taylor series
# expansion
#
# log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
#
# truncating at T such that y**T is small enough. The whole
# computation is carried out in a form of fixed-point arithmetic,
# with a real number z being represented by an integer
# approximation to z*M. To avoid loss of precision, the y below
# is actually an integer approximation to 2**R*y*M, where R is the
# number of reductions performed so far.
y = x-M
# argument reduction; R = number of reductions performed
R = 0
while (R <= L and abs(y) << L-R >= M or
R > L and abs(y) >> R-L >= M):
y = _div_nearest((M*y) << 1,
M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
R += 1
# Taylor series with T terms
T = -int(-10*len(str(M))//(3*L))
yshift = _rshift_nearest(y, R)
w = _div_nearest(M, T)
for k in range(T-1, 0, -1):
w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
return _div_nearest(w*y, M)
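# Rough illustration (editor's addition): with M = 10**5, _ilog approximates
# M * log(x/M).  For x/M = 2, M*log(2) is about 69315, and the docstring
# bounds the error by 22 for this range.
def _demo_ilog():
    assert abs(_ilog(2 * 10**5, 10**5) - 69315) <= 23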
def _dlog10(c, e, p):
"""Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# increase precision by 2; compensate for this by dividing
# final result by 100
p += 2
# write c*10**e as d*10**f with either:
# f >= 0 and 1 <= d <= 10, or
# f <= 0 and 0.1 <= d <= 1.
# Thus for c*10**e close to 1, f = 0
l = len(str(c))
f = e+l - (e+l >= 1)
if p > 0:
M = 10**p
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k)
log_d = _ilog(c, M) # error < 5 + 22 = 27
log_10 = _log10_digits(p) # error < 1
log_d = _div_nearest(log_d*M, log_10)
log_tenpower = f*M # exact
else:
log_d = 0 # error < 2.31
log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
return _div_nearest(log_tenpower+log_d, 100)
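# Illustration (editor's addition): 10**4 * log10(2) is about 3010.3, and
# _dlog10 promises an absolute error of at most 1.
def _demo_dlog10():
    assert abs(_dlog10(2, 0, 4) - 3010) <= 1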
def _dlog(c, e, p):
"""Given integers c, e and p with c > 0, compute an integer
approximation to 10**p * log(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# Increase precision by 2. The precision increase is compensated
# for at the end with a division by 100.
p += 2
# rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
# or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)
# as 10**p * log(d) + 10**p*f * log(10).
l = len(str(c))
f = e+l - (e+l >= 1)
# compute approximation to 10**p*log(d), with error < 27
if p > 0:
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k) # error of <= 0.5 in c
# _ilog magnifies existing error in c by a factor of at most 10
log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
else:
# p <= 0: just approximate the whole thing by 0; error < 2.31
log_d = 0
# compute approximation to f*10**p*log(10), with error < 11.
if f:
extra = len(str(abs(f)))-1
if p + extra >= 0:
# error in f * _log10_digits(p+extra) < |f| * 1 = |f|
# after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
else:
f_log_ten = 0
else:
f_log_ten = 0
# error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
return _div_nearest(f_log_ten + log_d, 100)
class _Log10Memoize(object):
"""Class to compute, store, and allow retrieval of, digits of the
constant log(10) = 2.302585.... This constant is needed by
Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
def __init__(self):
self.digits = "23025850929940456840179914546843642076011014886"
def getdigits(self, p):
"""Given an integer p >= 0, return floor(10**p)*log(10).
For example, self.getdigits(3) returns 2302.
"""
# digits are stored as a string, for quick conversion to
# integer in the case that we've already computed enough
# digits; the stored digits should always be correct
# (truncated, not rounded to nearest).
if p < 0:
raise ValueError("p should be nonnegative")
if p >= len(self.digits):
# compute p+3, p+6, p+9, ... digits; continue until at
# least one of the extra digits is nonzero
extra = 3
while True:
# compute p+extra digits, correct to within 1ulp
M = 10**(p+extra+2)
digits = str(_div_nearest(_ilog(10*M, M), 100))
if digits[-extra:] != '0'*extra:
break
extra += 3
# keep all reliable digits so far; remove trailing zeros
# and next nonzero digit
self.digits = digits.rstrip('0')[:-1]
return int(self.digits[:p+1])
_log10_digits = _Log10Memoize().getdigits
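# Illustration (editor's addition): floor(10**p * log(10)) for small p,
# drawn from the memoized digits of log(10) = 2.302585...
def _demo_log10_digits():
    assert _log10_digits(0) == 2
    assert _log10_digits(3) == 2302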
def _iexp(x, M, L=8):
"""Given integers x and M, M > 0, such that x/M is small in absolute
value, compute an integer approximation to M*exp(x/M). For 0 <=
x/M <= 2.4, the absolute error in the result is bounded by 60 (and
is usually much smaller)."""
# Algorithm: to compute exp(z) for a real number z, first divide z
# by a suitable power R of 2 so that |z/2**R| < 2**-L. Then
# compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
# series
#
# expm1(x) = x + x**2/2! + x**3/3! + ...
#
# Now use the identity
#
# expm1(2x) = expm1(x)*(expm1(x)+2)
#
# R times to compute the sequence expm1(z/2**R),
# expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
# Find R such that x/2**R/M <= 2**-L
R = _nbits((x<<L)//M)
# Taylor series. (2**L)**T > M
T = -int(-10*len(str(M))//(3*L))
y = _div_nearest(x, T)
Mshift = M<<R
for i in range(T-1, 0, -1):
y = _div_nearest(x*(Mshift + y), Mshift * i)
# Expansion
for k in range(R-1, -1, -1):
Mshift = M<<(k+2)
y = _div_nearest(y*(y+Mshift), Mshift)
return M+y
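# Rough illustration (editor's addition): with M = 10**5 and x = M, _iexp
# approximates M * e, which is about 271828; the docstring bounds the
# absolute error by 60 for 0 <= x/M <= 2.4.
def _demo_iexp():
    assert abs(_iexp(10**5, 10**5) - 271828) <= 61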
def _dexp(c, e, p):
"""Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
p += 2
# compute log(10) with extra precision = adjusted exponent of c*10**e
extra = max(0, e + len(str(c)) - 1)
q = p + extra
# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
# rounding down
shift = e+q
if shift >= 0:
cshift = c*10**shift
else:
cshift = c//10**-shift
quot, rem = divmod(cshift, _log10_digits(q))
# reduce remainder back to original precision
rem = _div_nearest(rem, 10**extra)
# error in result of _iexp < 120; error after division < 0.62
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
def _dpower(xc, xe, yc, ye, p):
"""Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
10**(p-1) <= c <= 10**p, and
(c-1)*10**e < x**y < (c+1)*10**e
in other words, c*10**e is an approximation to x**y with p digits
of precision, and with an error in c of at most 1. (This is
almost, but not quite, the same as the error being < 1ulp: when c
== 10**(p-1) we can only guarantee error < 10ulp.)
We assume that: x is positive and not equal to 1, and y is nonzero.
"""
# Find b such that 10**(b-1) <= |y| <= 10**b
b = len(str(abs(yc))) + ye
# log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
lxc = _dlog(xc, xe, p+b+1)
# compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
shift = ye-b
if shift >= 0:
pc = lxc*yc*10**shift
else:
pc = _div_nearest(lxc*yc, 10**-shift)
if pc == 0:
# we prefer a result that isn't exactly 1; this makes it
# easier to compute a correctly rounded result in __pow__
if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
coeff, exp = 10**(p-1)+1, 1-p
else:
coeff, exp = 10**p-1, -p
else:
coeff, exp = _dexp(pc, -(p+1), p+1)
coeff = _div_nearest(coeff, 10)
exp += 1
return coeff, exp
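# Illustration (editor's addition): x = 2, y = 0.5 with p = 5 digits should
# give a coefficient close to sqrt(2) = 1.41421..., i.e. roughly (14142, -4).
def _demo_dpower():
    coeff, exp = _dpower(2, 0, 5, -1, 5)
    assert abs(coeff - 14142) <= 1 and exp == -4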
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
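# Illustration (editor's addition): the correction table yields a lower
# bound, e.g. 100*log10(3) = 47.7... and _log10_lb(3) == 100*1 - 53 == 47.
def _demo_log10_lb():
    assert _log10_lb(3) == 47
    assert _log10_lb(1000) == 300   # 100*4 - 100; log10(1000) == 3 exactly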
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
If allow_float is true, allow conversion from float; this
is used in the comparison methods (__eq__ and friends).
"""
if isinstance(other, Decimal):
return other
if isinstance(other, int):
return Decimal(other)
if allow_float and isinstance(other, float):
return Decimal.from_float(other)
if raiseit:
raise TypeError("Unable to convert %s to Decimal" % other)
return NotImplemented
def _convert_for_comparison(self, other, equality_op=False):
"""Given a Decimal instance self and a Python object other, return
a pair (s, o) of Decimal instances such that "s op o" is
equivalent to "self op other" for any of the 6 comparison
operators "op".
"""
if isinstance(other, Decimal):
return self, other
# Comparison with a Rational instance (also includes integers):
# self op n/d <=> self*d op n (for n and d integers, d positive).
# A NaN or infinity can be left unchanged without affecting the
# comparison result.
if isinstance(other, _numbers.Rational):
if not self._is_special:
self = _dec_from_triple(self._sign,
str(int(self._int) * other.denominator),
self._exp)
return self, Decimal(other.numerator)
# Comparisons with float and complex types. == and != comparisons
# with complex numbers should succeed, returning either True or False
# as appropriate. Other comparisons return NotImplemented.
if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0:
other = other.real
if isinstance(other, float):
context = getcontext()
if equality_op:
context.flags[FloatOperation] = 1
else:
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are enabled")
return self, Decimal.from_float(other)
return NotImplemented, NotImplemented
##### Setup Specific Contexts ############################################
# The default context prototype used by Context()
# Is mutable, so that new contexts can have different default values
DefaultContext = Context(
prec=17, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
Emax=308,
Emin=-324,
capitals=1,
clamp=0
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
#import re
#_parser = re.compile(r""" # A numeric string consists of:
# \s*
# (?P<sign>[-+])? # an optional sign, followed by either...
# (
# (?=\d|\.\d) # ...a number (with at least one digit)
# (?P<int>\d*) # having a (possibly empty) integer part
# (\.(?P<frac>\d*))? # followed by an optional fractional part
# (E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
# |
# Inf(inity)? # ...an infinity, or...
# |
# (?P<signal>s)? # ...an (optionally signaling)
# NaN # NaN
# (?P<diag>\d*) # with (possibly empty) diagnostic info.
# )
# \s*
# \Z
#""", re.VERBOSE | re.IGNORECASE).match
import _jsre as re
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]
#_parse_format_specifier_regex = re.compile(r"""\A
#(?:
# (?P<fill>.)?
# (?P<align>[<>=^])
#)?
#(?P<sign>[-+ ])?
#(?P<alt>\#)?
#(?P<zeropad>0)?
#(?P<minimumwidth>(?!0)\d+)?
#(?P<thousands_sep>,)?
#(?:\.(?P<precision>0|(?!0)\d+))?
#(?P<type>[eEfFgGn%])?
#\Z
#""", re.VERBOSE|re.DOTALL)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
import locale as _locale
except ImportError:
pass
def _parse_format_specifier(format_spec, _localeconv=None):
"""Parse and validate a format specifier.
Turns a standard numeric format specifier into a dict, with the
following entries:
fill: fill character to pad field to minimum width
align: alignment type, either '<', '>', '=' or '^'
sign: either '+', '-' or ' '
minimumwidth: nonnegative integer giving minimum width
zeropad: boolean, indicating whether to pad with zeros
thousands_sep: string to use as thousands separator, or ''
grouping: grouping for thousands separators, in format
used by localeconv
decimal_point: string to use for decimal point
precision: nonnegative integer giving precision, or None
type: one of the characters 'eEfFgG%', or None
"""
m = _parse_format_specifier_regex.match(format_spec)
if m is None:
raise ValueError("Invalid format specifier: " + format_spec)
# get the dictionary
format_dict = m.groupdict()
# zeropad; defaults for fill and alignment. If zero padding
# is requested, the fill and align fields should be absent.
fill = format_dict['fill']
align = format_dict['align']
format_dict['zeropad'] = (format_dict['zeropad'] is not None)
if format_dict['zeropad']:
if fill is not None:
raise ValueError("Fill character conflicts with '0'"
" in format specifier: " + format_spec)
if align is not None:
raise ValueError("Alignment conflicts with '0' in "
"format specifier: " + format_spec)
format_dict['fill'] = fill or ' '
# PEP 3101 originally specified that the default alignment should
# be left; it was later agreed that right-aligned makes more sense
# for numeric types. See http://bugs.python.org/issue6857.
format_dict['align'] = align or '>'
# default sign handling: '-' for negative, '' for positive
if format_dict['sign'] is None:
format_dict['sign'] = '-'
# minimumwidth defaults to 0; precision remains None if not given
format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
if format_dict['precision'] is not None:
format_dict['precision'] = int(format_dict['precision'])
# if format type is 'g' or 'G' then a precision of 0 makes little
# sense; convert it to 1. Same if format type is unspecified.
if format_dict['precision'] == 0:
if format_dict['type'] is None or format_dict['type'] in 'gGn':
format_dict['precision'] = 1
# determine thousands separator, grouping, and decimal separator, and
# add appropriate entries to format_dict
if format_dict['type'] == 'n':
# apart from separators, 'n' behaves just like 'g'
format_dict['type'] = 'g'
if _localeconv is None:
_localeconv = _locale.localeconv()
if format_dict['thousands_sep'] is not None:
raise ValueError("Explicit thousands separator conflicts with "
"'n' type in format specifier: " + format_spec)
format_dict['thousands_sep'] = _localeconv['thousands_sep']
format_dict['grouping'] = _localeconv['grouping']
format_dict['decimal_point'] = _localeconv['decimal_point']
else:
if format_dict['thousands_sep'] is None:
format_dict['thousands_sep'] = ''
format_dict['grouping'] = [3, 0]
format_dict['decimal_point'] = '.'
return format_dict
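# Illustration (editor's addition, assuming the commented-out regex above is
# enabled in a full build): _parse_format_specifier('>10,.2f') would yield
# align='>', sign='-', minimumwidth=10, thousands_sep=',', precision=2,
# type='f', with fill defaulting to ' ' and grouping to [3, 0].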
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
return result
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
    # (1) an empty list, or
    # (2) a nonempty list of positive integers + [0], or
    # (3) a nonempty list of positive integers + [locale.CHAR_MAX].
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
def _insert_thousands_sep(digits, spec, min_width=1):
"""Insert thousands separators into a digit string.
spec is a dictionary whose keys should include 'thousands_sep' and
'grouping'; typically it's the result of parsing the format
specifier using _parse_format_specifier.
The min_width keyword argument gives the minimum length of the
result, which will be padded on the left with zeros if necessary.
If necessary, the zero padding adds an extra '0' on the left to
avoid a leading thousands separator. For example, inserting
commas every three digits in '123456', with min_width=8, gives
'0,123,456', even though that has length 9.
"""
sep = spec['thousands_sep']
grouping = spec['grouping']
groups = []
for l in _group_lengths(grouping):
if l <= 0:
raise ValueError("group length should be positive")
# max(..., 1) forces at least 1 digit to the left of a separator
l = min(max(len(digits), min_width, 1), l)
groups.append('0'*(l - len(digits)) + digits[-l:])
digits = digits[:-l]
min_width -= l
if not digits and min_width <= 0:
break
min_width -= len(sep)
else:
l = max(len(digits), min_width, 1)
groups.append('0'*(l - len(digits)) + digits[-l:])
return sep.join(reversed(groups))
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
"""Format a number, given the following data:
is_negative: true if the number is negative, else false
intpart: string of digits that must appear before the decimal point
fracpart: string of digits that must come after the point
exp: exponent, as an integer
spec: dictionary resulting from parsing the format specifier
This function uses the information in spec to:
insert separators (decimal separator and thousands separators)
format the sign
format the exponent
add trailing '%' for the '%' type
zero-pad if necessary
fill and align if necessary
"""
sign = _format_sign(is_negative, spec)
if fracpart or spec['alt']:
fracpart = spec['decimal_point'] + fracpart
if exp != 0 or spec['type'] in 'eE':
echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
fracpart += "{0}{1:+}".format(echar, exp)
if spec['type'] == '%':
fracpart += '%'
if spec['zeropad']:
min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
else:
min_width = 0
intpart = _insert_thousands_sep(intpart, spec, min_width)
return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################
# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)
# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo _PyHASH_MODULUS
_PyHASH_MODULUS = sys.hash_info.modulus
# hash values to use for positive and negative infinities, and nans
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS
_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
del sys
try:
import _decimal
except ImportError:
pass
else:
s1 = set(dir())
s2 = set(dir(_decimal))
for name in s1 - s2:
del globals()[name]
del s1, s2, name
from _decimal import *
if __name__ == '__main__':
import doctest, decimal
doctest.testmod(decimal)
|
onurhunce/AutoAlbumCreator
|
refs/heads/master
|
Assignment/AlbumCreator/urls.py
|
1
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.get_all_images_from_database, name='all_photos'),
url(r'^popular/$', views.show_popular_photos, name='popular'),
    url(r'^export/$', views.post_photos_to_facebook, name='export'),
url(r'^fetch/$', views.fetch_photos_manually_from_browser, name='fetch'),
]
|
andrewmoses/ssquiz
|
refs/heads/master
|
flask/lib/python2.7/site-packages/whoosh/analysis/morph.py
|
93
|
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.analysis.filters import Filter
from whoosh.compat import integer_types
from whoosh.lang.dmetaphone import double_metaphone
from whoosh.lang.porter import stem
from whoosh.util.cache import lfu_cache, unbound_cache
class StemFilter(Filter):
"""Stems (removes suffixes from) the text of tokens using the Porter
stemming algorithm. Stemming attempts to reduce multiple forms of the same
root word (for example, "rendering", "renders", "rendered", etc.) to a
single word in the index.
>>> stemmer = RegexTokenizer() | StemFilter()
>>> [token.text for token in stemmer("fundamentally willows")]
["fundament", "willow"]
You can pass your own stemming function to the StemFilter. The default
is the Porter stemming algorithm for English.
>>> stemfilter = StemFilter(stem_function)
You can also use one of the Snowball stemming functions by passing the
`lang` keyword argument.
>>> stemfilter = StemFilter(lang="ru")
The list of available languages is in `whoosh.lang.languages`.
You can use :func:`whoosh.lang.has_stemmer` to check if a given language has
a stemming function available.
    By default, this class wraps an LFU cache around the stemming function. The
``cachesize`` keyword argument sets the size of the cache. To make the
cache unbounded (the class caches every input), use ``cachesize=-1``. To
disable caching, use ``cachesize=None``.
If you compile and install the py-stemmer library, the
:class:`PyStemmerFilter` provides slightly easier access to the language
stemmers in that library.
"""
__inittypes__ = dict(stemfn=object, ignore=list)
is_morph = True
def __init__(self, stemfn=stem, lang=None, ignore=None, cachesize=50000):
"""
:param stemfn: the function to use for stemming.
:param lang: if not None, overrides the stemfn with a language stemmer
from the ``whoosh.lang.snowball`` package.
:param ignore: a set/list of words that should not be stemmed. This is
converted into a frozenset. If you omit this argument, all tokens
are stemmed.
:param cachesize: the maximum number of words to cache. Use ``-1`` for
an unbounded cache, or ``None`` for no caching.
"""
self.stemfn = stemfn
self.lang = lang
self.ignore = frozenset() if ignore is None else frozenset(ignore)
self.cachesize = cachesize
# clear() sets the _stem attr to a cached wrapper around self.stemfn
self.clear()
def __getstate__(self):
# Can't pickle a dynamic function, so we have to remove the _stem
# attribute from the state
return dict([(k, self.__dict__[k]) for k in self.__dict__
if k != "_stem"])
def __setstate__(self, state):
# Check for old instances of StemFilter class, which didn't have a
# cachesize attribute and pickled the cache attribute
if "cachesize" not in state:
self.cachesize = 50000
if "ignores" in state:
self.ignore = state["ignores"]
elif "ignore" not in state:
self.ignore = frozenset()
if "lang" not in state:
self.lang = None
if "cache" in state:
del state["cache"]
self.__dict__.update(state)
# Set the _stem attribute
self.clear()
def clear(self):
if self.lang:
from whoosh.lang import stemmer_for_language
stemfn = stemmer_for_language(self.lang)
else:
stemfn = self.stemfn
        if isinstance(self.cachesize, integer_types) and self.cachesize != 0:
            if self.cachesize < 0:
                self._stem = unbound_cache(stemfn)
            elif self.cachesize > 1:
                self._stem = lfu_cache(self.cachesize)(stemfn)
            else:
                # cachesize == 1: a one-entry cache isn't worthwhile; the
                # original code left _stem unset in this case
                self._stem = stemfn
        else:
            self._stem = stemfn
def cache_info(self):
if self.cachesize <= 1:
return None
return self._stem.cache_info()
def __eq__(self, other):
return (other and self.__class__ is other.__class__
and self.stemfn == other.stemfn)
def __call__(self, tokens):
stemfn = self._stem
ignore = self.ignore
for t in tokens:
if not t.stopped:
text = t.text
if text not in ignore:
t.text = stemfn(text)
yield t
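def _demo_stem_filter():
    # Usage sketch (editor's addition): composing StemFilter into an
    # analyzer chain; RegexTokenizer lives in whoosh.analysis.tokenizers.
    from whoosh.analysis.tokenizers import RegexTokenizer
    analyzer = RegexTokenizer() | StemFilter()
    return [t.text for t in analyzer("rendering renders")]  # ["render", "render"]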
class PyStemmerFilter(StemFilter):
"""This is a simple subclass of StemFilter that works with the py-stemmer
third-party library. You must have the py-stemmer library installed to use
this filter.
>>> PyStemmerFilter("spanish")
"""
def __init__(self, lang="english", ignore=None, cachesize=10000):
"""
:param lang: a string identifying the stemming algorithm to use. You
            can get a list of available algorithms with the
:meth:`PyStemmerFilter.algorithms` method. The identification
strings are directly from the py-stemmer library.
:param ignore: a set/list of words that should not be stemmed. This is
converted into a frozenset. If you omit this argument, all tokens
are stemmed.
:param cachesize: the maximum number of words to cache.
"""
self.lang = lang
self.ignore = frozenset() if ignore is None else frozenset(ignore)
self.cachesize = cachesize
self._stem = self._get_stemmer_fn()
def algorithms(self):
"""Returns a list of stemming algorithms provided by the py-stemmer
library.
"""
import Stemmer # @UnresolvedImport
return Stemmer.algorithms()
def cache_info(self):
return None
def _get_stemmer_fn(self):
import Stemmer # @UnresolvedImport
stemmer = Stemmer.Stemmer(self.lang)
stemmer.maxCacheSize = self.cachesize
return stemmer.stemWord
def __getstate__(self):
# Can't pickle a dynamic function, so we have to remove the _stem
# attribute from the state
return dict([(k, self.__dict__[k]) for k in self.__dict__
if k != "_stem"])
def __setstate__(self, state):
# Check for old instances of StemFilter class, which didn't have a
# cachesize attribute and pickled the cache attribute
if "cachesize" not in state:
self.cachesize = 10000
if "ignores" in state:
self.ignore = state["ignores"]
elif "ignore" not in state:
self.ignore = frozenset()
if "cache" in state:
del state["cache"]
self.__dict__.update(state)
# Set the _stem attribute
self._stem = self._get_stemmer_fn()
class DoubleMetaphoneFilter(Filter):
"""Transforms the text of the tokens using Lawrence Philips's Double
Metaphone algorithm. This algorithm attempts to encode words in such a way
that similar-sounding words reduce to the same code. This may be useful for
fields containing the names of people and places, and other uses where
    tolerance of spelling differences is desirable.
"""
is_morph = True
def __init__(self, primary_boost=1.0, secondary_boost=0.5, combine=False):
"""
:param primary_boost: the boost to apply to the token containing the
primary code.
:param secondary_boost: the boost to apply to the token containing the
secondary code, if any.
:param combine: if True, the original unencoded tokens are kept in the
stream, preceding the encoded tokens.
"""
self.primary_boost = primary_boost
self.secondary_boost = secondary_boost
self.combine = combine
def __eq__(self, other):
return (other
and self.__class__ is other.__class__
and self.primary_boost == other.primary_boost)
def __call__(self, tokens):
primary_boost = self.primary_boost
secondary_boost = self.secondary_boost
combine = self.combine
for t in tokens:
if combine:
yield t
primary, secondary = double_metaphone(t.text)
b = t.boost
# Overwrite the token's text and boost and yield it
if primary:
t.text = primary
t.boost = b * primary_boost
yield t
if secondary:
t.text = secondary
t.boost = b * secondary_boost
yield t
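def _demo_double_metaphone_filter():
    # Usage sketch (editor's addition): encode a token stream phonetically,
    # keeping the original tokens alongside the codes via combine=True.
    from whoosh.analysis.tokenizers import RegexTokenizer
    analyzer = RegexTokenizer() | DoubleMetaphoneFilter(combine=True)
    return [t.text for t in analyzer("smith")]   # e.g. ["smith", "SM0", "XMT"]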
|
Mercador/python-keystoneclient
|
refs/heads/master
|
keystoneclient/v3/services.py
|
6
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import base
from keystoneclient import utils
class Service(base.Resource):
"""Represents an Identity service.
Attributes:
* id: a uuid that identifies the service
* name: user-facing name of the service (e.g. Keystone)
* type: 'compute', 'identity', etc
* enabled: determines whether the service appears in the catalog
"""
pass
class ServiceManager(base.CrudManager):
"""Manager class for manipulating Identity services."""
resource_class = Service
collection_key = 'services'
key = 'service'
@utils.positional(1, enforcement=utils.positional.WARN)
def create(self, name, type, enabled=True, description=None, **kwargs):
return super(ServiceManager, self).create(
name=name,
type=type,
description=description,
enabled=enabled,
**kwargs)
def get(self, service):
return super(ServiceManager, self).get(
service_id=base.getid(service))
@utils.positional(enforcement=utils.positional.WARN)
def list(self, name=None, type=None, **kwargs):
return super(ServiceManager, self).list(
name=name,
type=type,
**kwargs)
@utils.positional(enforcement=utils.positional.WARN)
def update(self, service, name=None, type=None, enabled=None,
description=None, **kwargs):
return super(ServiceManager, self).update(
service_id=base.getid(service),
name=name,
type=type,
description=description,
enabled=enabled,
**kwargs)
def delete(self, service):
return super(ServiceManager, self).delete(
service_id=base.getid(service))
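# Usage sketch (editor's addition): the manager is normally reached through
# an authenticated client, e.g.:
#
#     from keystoneclient.v3 import client
#     keystone = client.Client(token='...', endpoint='http://host:5000/v3')
#     svc = keystone.services.create(name='nova', type='compute')
#     keystone.services.update(svc, enabled=False)
#     keystone.services.delete(svc)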
|
ThiagoGarciaAlves/intellij-community
|
refs/heads/master
|
python/testData/intentions/replaceListComprehension2_after.py
|
83
|
t = []
for j in range(7):
t.append([3 for i in range(5)])
|
raoulbq/scipy
|
refs/heads/master
|
scipy/io/mmio.py
|
15
|
"""
Matrix Market I/O in Python.
"""
#
# Author: Pearu Peterson <pearu@cens.ioc.ee>
# Created: October, 2004
#
# References:
# http://math.nist.gov/MatrixMarket/
#
from __future__ import division, print_function, absolute_import
import os
import sys
from numpy import asarray, real, imag, conj, zeros, ndarray, concatenate, \
ones, ascontiguousarray, vstack, savetxt, fromfile, fromstring
from numpy.compat import asbytes, asstr
from scipy._lib.six import string_types
__all__ = ['mminfo','mmread','mmwrite', 'MMFile']
#-------------------------------------------------------------------------------
def mminfo(source):
"""
Queries the contents of the Matrix Market file 'filename' to
extract size and storage information.
Parameters
----------
source : file
Matrix Market filename (extension .mtx) or open file object
Returns
-------
rows,cols : int
Number of matrix rows and columns
entries : int
Number of non-zero entries of a sparse matrix
or rows*cols for a dense matrix
format : str
Either 'coordinate' or 'array'.
field : str
Either 'real', 'complex', 'pattern', or 'integer'.
symm : str
Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
"""
return MMFile.info(source)
#-------------------------------------------------------------------------------
def mmread(source):
"""
Reads the contents of a Matrix Market file 'filename' into a matrix.
Parameters
----------
source : file
Matrix Market filename (extensions .mtx, .mtz.gz)
or open file object.
Returns
-------
    a : ndarray or coo_matrix
        Dense array or sparse COO matrix, depending on the Matrix Market
        format ('array' or 'coordinate').
"""
return MMFile().read(source)
#-------------------------------------------------------------------------------
def mmwrite(target, a, comment='', field=None, precision=None):
"""
Writes the sparse or dense array `a` to a Matrix Market formatted file.
Parameters
----------
target : file
Matrix Market filename (extension .mtx) or open file object
a : array like
Sparse or dense 2D array
comment : str, optional
comments to be prepended to the Matrix Market file
field : None or str, optional
Either 'real', 'complex', 'pattern', or 'integer'.
precision : None or int, optional
Number of digits to display for real or complex values.
"""
MMFile().write(target, a, comment, field, precision)
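# Round-trip sketch (editor's addition): write a small sparse matrix and
# read it back; the file path here is illustrative.
def _demo_roundtrip():
    from scipy.sparse import coo_matrix
    a = coo_matrix(([1.0, 2.5], ([0, 1], [1, 0])), shape=(2, 2))
    mmwrite('/tmp/demo', a, comment='2x2 example')  # writes /tmp/demo.mtx
    return mmread('/tmp/demo.mtx')                  # back as a COO matrix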
################################################################################
class MMFile (object):
__slots__ = (
'_rows',
'_cols',
'_entries',
'_format',
'_field',
'_symmetry')
@property
def rows(self):
return self._rows
@property
def cols(self):
return self._cols
@property
def entries(self):
return self._entries
@property
def format(self):
return self._format
@property
def field(self):
return self._field
@property
def symmetry(self):
return self._symmetry
@property
def has_symmetry(self):
return self._symmetry in (self.SYMMETRY_SYMMETRIC,
self.SYMMETRY_SKEW_SYMMETRIC, self.SYMMETRY_HERMITIAN)
# format values
FORMAT_COORDINATE = 'coordinate'
FORMAT_ARRAY = 'array'
FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY)
@classmethod
def _validate_format(self, format):
if format not in self.FORMAT_VALUES:
raise ValueError('unknown format type %s, must be one of %s' %
(format, self.FORMAT_VALUES))
# field values
FIELD_INTEGER = 'integer'
FIELD_REAL = 'real'
FIELD_COMPLEX = 'complex'
FIELD_PATTERN = 'pattern'
FIELD_VALUES = (FIELD_INTEGER, FIELD_REAL, FIELD_COMPLEX, FIELD_PATTERN)
@classmethod
def _validate_field(self, field):
if field not in self.FIELD_VALUES:
raise ValueError('unknown field type %s, must be one of %s' %
(field, self.FIELD_VALUES))
# symmetry values
SYMMETRY_GENERAL = 'general'
SYMMETRY_SYMMETRIC = 'symmetric'
SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric'
SYMMETRY_HERMITIAN = 'hermitian'
SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC,
SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN)
@classmethod
def _validate_symmetry(self, symmetry):
if symmetry not in self.SYMMETRY_VALUES:
raise ValueError('unknown symmetry type %s, must be one of %s' %
(symmetry, self.SYMMETRY_VALUES))
DTYPES_BY_FIELD = {
FIELD_INTEGER: 'i',
FIELD_REAL: 'd',
FIELD_COMPLEX: 'D',
FIELD_PATTERN: 'd'}
#---------------------------------------------------------------------------
@staticmethod
def reader():
pass
#---------------------------------------------------------------------------
@staticmethod
def writer():
pass
#---------------------------------------------------------------------------
@classmethod
def info(self, source):
source, close_it = self._open(source)
try:
# read and validate header line
line = source.readline()
mmid, matrix, format, field, symmetry = \
[asstr(part.strip()) for part in line.split()]
if not mmid.startswith('%%MatrixMarket'):
raise ValueError('source is not in Matrix Market format')
if not matrix.lower() == 'matrix':
raise ValueError("Problem reading file header: " + line)
# http://math.nist.gov/MatrixMarket/formats.html
if format.lower() == 'array':
format = self.FORMAT_ARRAY
elif format.lower() == 'coordinate':
format = self.FORMAT_COORDINATE
# skip comments
while line.startswith(b'%'):
line = source.readline()
line = line.split()
if format == self.FORMAT_ARRAY:
if not len(line) == 2:
raise ValueError("Header line not of length 2: " + line)
rows, cols = map(int, line)
entries = rows * cols
else:
if not len(line) == 3:
raise ValueError("Header line not of length 3: " + line)
rows, cols, entries = map(int, line)
return (rows, cols, entries, format, field.lower(), symmetry.lower())
finally:
if close_it:
source.close()
#---------------------------------------------------------------------------
@staticmethod
def _open(filespec, mode='rb'):
"""
Return an open file stream for reading based on source. If source is
a file name, open it (after trying to find it with mtx and gzipped mtx
extensions). Otherwise, just return source.
"""
close_it = False
if isinstance(filespec, string_types):
close_it = True
# open for reading
if mode[0] == 'r':
# determine filename plus extension
if not os.path.isfile(filespec):
if os.path.isfile(filespec+'.mtx'):
filespec = filespec + '.mtx'
elif os.path.isfile(filespec+'.mtx.gz'):
filespec = filespec + '.mtx.gz'
elif os.path.isfile(filespec+'.mtx.bz2'):
filespec = filespec + '.mtx.bz2'
# open filename
if filespec.endswith('.gz'):
import gzip
stream = gzip.open(filespec, mode)
elif filespec.endswith('.bz2'):
import bz2
stream = bz2.BZ2File(filespec, 'rb')
else:
stream = open(filespec, mode)
# open for writing
else:
if filespec[-4:] != '.mtx':
filespec = filespec + '.mtx'
stream = open(filespec, mode)
else:
stream = filespec
return stream, close_it
#---------------------------------------------------------------------------
@staticmethod
def _get_symmetry(a):
m,n = a.shape
if m != n:
return MMFile.SYMMETRY_GENERAL
issymm = 1
isskew = 1
isherm = a.dtype.char in 'FD'
for j in range(n):
for i in range(j+1,n):
aij,aji = a[i][j],a[j][i]
if issymm and aij != aji:
issymm = 0
if isskew and aij != -aji:
isskew = 0
if isherm and aij != conj(aji):
isherm = 0
if not (issymm or isskew or isherm):
break
if issymm:
return MMFile.SYMMETRY_SYMMETRIC
if isskew:
return MMFile.SYMMETRY_SKEW_SYMMETRIC
if isherm:
return MMFile.SYMMETRY_HERMITIAN
return MMFile.SYMMETRY_GENERAL
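    # Illustration (editor's addition): a skew-symmetric matrix is detected
    # as such, e.g.
    #
    #     >>> from numpy import array
    #     >>> MMFile._get_symmetry(array([[0, 2], [-2, 0]]))
    #     'skew-symmetric'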
#---------------------------------------------------------------------------
@staticmethod
def _field_template(field, precision):
return {
MMFile.FIELD_REAL: '%%.%ie\n' % precision,
MMFile.FIELD_INTEGER: '%i\n',
MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' % (precision,precision)
}.get(field, None)
#---------------------------------------------------------------------------
def __init__(self, **kwargs):
self._init_attrs(**kwargs)
#---------------------------------------------------------------------------
def read(self, source):
stream, close_it = self._open(source)
try:
self._parse_header(stream)
return self._parse_body(stream)
finally:
if close_it:
stream.close()
#---------------------------------------------------------------------------
def write(self, target, a, comment='', field=None, precision=None):
stream, close_it = self._open(target, 'wb')
try:
self._write(stream, a, comment, field, precision)
finally:
if close_it:
stream.close()
else:
stream.flush()
#---------------------------------------------------------------------------
def _init_attrs(self, **kwargs):
"""
        Initialize each attribute with the corresponding keyword argument
        value, or a default of None
"""
attrs = self.__class__.__slots__
public_attrs = [attr[1:] for attr in attrs]
invalid_keys = set(kwargs.keys()) - set(public_attrs)
if invalid_keys:
raise ValueError('found %s invalid keyword arguments, please only use %s' %
(tuple(invalid_keys), public_attrs))
for attr in attrs:
setattr(self, attr, kwargs.get(attr[1:], None))
#---------------------------------------------------------------------------
def _parse_header(self, stream):
rows, cols, entries, format, field, symmetry = \
self.__class__.info(stream)
self._init_attrs(rows=rows, cols=cols, entries=entries, format=format,
field=field, symmetry=symmetry)
#---------------------------------------------------------------------------
def _parse_body(self, stream):
rows, cols, entries, format, field, symm = (self.rows, self.cols,
self.entries, self.format, self.field, self.symmetry)
try:
from scipy.sparse import coo_matrix
except ImportError:
coo_matrix = None
dtype = self.DTYPES_BY_FIELD.get(field, None)
has_symmetry = self.has_symmetry
is_complex = field == self.FIELD_COMPLEX
is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC
is_herm = symm == self.SYMMETRY_HERMITIAN
is_pattern = field == self.FIELD_PATTERN
if format == self.FORMAT_ARRAY:
a = zeros((rows,cols), dtype=dtype)
line = 1
i,j = 0,0
while line:
line = stream.readline()
if not line or line.startswith(b'%'):
continue
if is_complex:
aij = complex(*map(float,line.split()))
else:
aij = float(line)
a[i,j] = aij
if has_symmetry and i != j:
if is_skew:
a[j,i] = -aij
elif is_herm:
a[j,i] = conj(aij)
else:
a[j,i] = aij
if i < rows-1:
i = i + 1
else:
j = j + 1
if not has_symmetry:
i = 0
else:
i = j
if not (i in [0,j] and j == cols):
raise ValueError("Parse error, did not read all lines.")
elif format == self.FORMAT_COORDINATE and coo_matrix is None:
# Read sparse matrix to dense when coo_matrix is not available.
a = zeros((rows,cols), dtype=dtype)
line = 1
k = 0
while line:
line = stream.readline()
if not line or line.startswith(b'%'):
continue
l = line.split()
i,j = map(int,l[:2])
i,j = i-1,j-1
if is_complex:
aij = complex(*map(float,l[2:]))
else:
aij = float(l[2])
a[i,j] = aij
if has_symmetry and i != j:
if is_skew:
a[j,i] = -aij
elif is_herm:
a[j,i] = conj(aij)
else:
a[j,i] = aij
k = k + 1
            if not k == entries:
                raise ValueError("Did not read all entries")
elif format == self.FORMAT_COORDINATE:
# Read sparse COOrdinate format
if entries == 0:
# empty matrix
return coo_matrix((rows, cols), dtype=dtype)
try:
if not _is_fromfile_compatible(stream):
flat_data = fromstring(stream.read(), sep=' ')
else:
# fromfile works for normal files
flat_data = fromfile(stream, sep=' ')
except Exception:
# fallback - fromfile fails for some file-like objects
flat_data = fromstring(stream.read(), sep=' ')
# TODO use iterator (e.g. xreadlines) to avoid reading
# the whole file into memory
if is_pattern:
flat_data = flat_data.reshape(-1,2)
I = ascontiguousarray(flat_data[:,0], dtype='intc')
J = ascontiguousarray(flat_data[:,1], dtype='intc')
V = ones(len(I), dtype='int8') # filler
elif is_complex:
flat_data = flat_data.reshape(-1,4)
I = ascontiguousarray(flat_data[:,0], dtype='intc')
J = ascontiguousarray(flat_data[:,1], dtype='intc')
V = ascontiguousarray(flat_data[:,2], dtype='complex')
V.imag = flat_data[:,3]
else:
flat_data = flat_data.reshape(-1,3)
I = ascontiguousarray(flat_data[:,0], dtype='intc')
J = ascontiguousarray(flat_data[:,1], dtype='intc')
V = ascontiguousarray(flat_data[:,2], dtype='float')
I -= 1 # adjust indices (base 1 -> base 0)
J -= 1
if has_symmetry:
mask = (I != J) # off diagonal mask
od_I = I[mask]
od_J = J[mask]
od_V = V[mask]
I = concatenate((I,od_J))
J = concatenate((J,od_I))
if is_skew:
od_V *= -1
elif is_herm:
od_V = od_V.conjugate()
V = concatenate((V,od_V))
a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype)
else:
raise NotImplementedError(format)
return a
#---------------------------------------------------------------------------
def _write(self, stream, a, comment='', field=None, precision=None):
        if isinstance(a, (list, ndarray, tuple)) or hasattr(a, '__array__'):
rep = self.FORMAT_ARRAY
a = asarray(a)
if len(a.shape) != 2:
raise ValueError('Expected 2 dimensional array')
rows,cols = a.shape
if field is not None:
if field == self.FIELD_INTEGER:
a = a.astype('i')
elif field == self.FIELD_REAL:
if a.dtype.char not in 'fd':
a = a.astype('d')
elif field == self.FIELD_COMPLEX:
if a.dtype.char not in 'FD':
a = a.astype('D')
else:
from scipy.sparse import spmatrix
if not isinstance(a,spmatrix):
raise ValueError('unknown matrix type: %s' % type(a))
rep = 'coordinate'
rows, cols = a.shape
typecode = a.dtype.char
if precision is None:
if typecode in 'fF':
precision = 8
else:
precision = 16
if field is None:
kind = a.dtype.kind
if kind == 'i':
field = 'integer'
elif kind == 'f':
field = 'real'
elif kind == 'c':
field = 'complex'
else:
raise TypeError('unexpected dtype kind ' + kind)
if rep == self.FORMAT_ARRAY:
symm = self._get_symmetry(a)
else:
symm = self.SYMMETRY_GENERAL
# validate rep, field, and symmetry
self.__class__._validate_format(rep)
self.__class__._validate_field(field)
self.__class__._validate_symmetry(symm)
# write initial header line
stream.write(asbytes('%%%%MatrixMarket matrix %s %s %s\n' % (rep,field,symm)))
# write comments
for line in comment.split('\n'):
stream.write(asbytes('%%%s\n' % (line)))
template = self._field_template(field, precision)
# write dense format
if rep == self.FORMAT_ARRAY:
# write shape spec
stream.write(asbytes('%i %i\n' % (rows,cols)))
if field in (self.FIELD_INTEGER, self.FIELD_REAL):
if symm == self.SYMMETRY_GENERAL:
for j in range(cols):
for i in range(rows):
stream.write(asbytes(template % a[i,j]))
else:
for j in range(cols):
for i in range(j,rows):
stream.write(asbytes(template % a[i,j]))
elif field == self.FIELD_COMPLEX:
if symm == self.SYMMETRY_GENERAL:
for j in range(cols):
for i in range(rows):
aij = a[i,j]
stream.write(asbytes(template % (real(aij),imag(aij))))
else:
for j in range(cols):
for i in range(j,rows):
aij = a[i,j]
stream.write(asbytes(template % (real(aij),imag(aij))))
elif field == self.FIELD_PATTERN:
                raise ValueError('pattern type inconsistent with dense format')
else:
raise TypeError('Unknown field type %s' % field)
# write sparse format
else:
if symm != self.SYMMETRY_GENERAL:
raise NotImplementedError('symmetric matrices not yet supported')
coo = a.tocoo() # convert to COOrdinate format
# write shape spec
stream.write(asbytes('%i %i %i\n' % (rows, cols, coo.nnz)))
fmt = '%%.%dg' % precision
if field == self.FIELD_PATTERN:
IJV = vstack((coo.row, coo.col)).T
elif field in [self.FIELD_INTEGER, self.FIELD_REAL]:
IJV = vstack((coo.row, coo.col, coo.data)).T
elif field == self.FIELD_COMPLEX:
IJV = vstack((coo.row, coo.col, coo.data.real, coo.data.imag)).T
else:
raise TypeError('Unknown field type %s' % field)
IJV[:,:2] += 1 # change base 0 -> base 1
savetxt(stream, IJV, fmt=fmt)
def _is_fromfile_compatible(stream):
"""
Check whether stream is compatible with numpy.fromfile.
Passing a gzipped file to fromfile/fromstring doesn't work
with Python3
"""
if sys.version_info[0] < 3:
return True
bad_cls = []
try:
import gzip
bad_cls.append(gzip.GzipFile)
except ImportError:
pass
try:
import bz2
bad_cls.append(bz2.BZ2File)
except ImportError:
pass
bad_cls = tuple(bad_cls)
return not isinstance(stream, bad_cls)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
import sys
import time
for filename in sys.argv[1:]:
print('Reading',filename,'...', end=' ')
sys.stdout.flush()
t = time.time()
mmread(filename)
print('took %s seconds' % (time.time() - t))
|
eonpatapon/contrail-controller
|
refs/heads/master
|
src/config/fabric-ansible/job_manager/job_messages.py
|
1
|
class MsgBundle(object):
JOB_TEMPLATE_MISSING = 1,
JOB_EXECUTION_ID_MISSING = 2,
JOB_SUMMARY_MESSAGE_HDR = 3,
JOB_RESULT_STATUS_NONE = 4,
JOB_MULTI_DEVICE_FAILED_MESSAGE_HDR = 5,
JOB_SINGLE_DEVICE_FAILED_MESSAGE_HDR = 6,
PLAYBOOK_RESULTS_MESSAGE = 7,
PLAYBOOK_EXIT_WITH_ERROR = 8,
PLAYBOOK_RETURN_WITH_ERROR = 9,
NO_PLAYBOOK_INPUT_DATA = 10,
SANDESH_INITIALIZATION_TIMEOUT_ERROR = 11,
INPUT_SCHEMA_INPUT_NOT_FOUND = 12,
DEVICE_JSON_NOT_FOUND = 13,
NO_DEVICE_DATA_FOUND = 14,
NO_CREDENTIALS_FOUND = 15,
INVALID_SCHEMA = 16,
SEND_JOB_LOG_ERROR = 17,
SEND_JOB_EXC_UVE_ERROR = 18,
PLAYBOOK_INPUT_PARSING_ERROR = 19,
PLAYBOOK_EXECUTE_ERROR = 20,
CREATE_JOB_SUMMARY_ERROR = 21,
DEVICE_VENDOR_FAMILY_MISSING = 22,
READ_JOB_TEMPLATE_ERROR = 23,
GET_PLAYBOOK_INFO_ERROR = 24,
PLAYBOOK_NOT_FOUND = 25,
PLAYBOOK_INFO_DEVICE_MISMATCH = 26,
RUN_PLAYBOOK_PROCESS_ERROR = 27,
RUN_PLAYBOOK_ERROR = 28,
SEND_PROUTER_OBJECT_LOG_ERROR = 29,
CLOSE_SANDESH_EXCEPTION = 30,
RUN_PLAYBOOK_PROCESS_TIMEOUT = 31,
PLAYBOOK_EXECUTION_COMPLETE = 32,
START_JOB_MESSAGE = 33,
VNC_INITIALIZATION_ERROR = 34,
JOB_ERROR = 35,
JOB_EXECUTION_COMPLETE = 36,
START_EXE_PB_MSG = 37,
STOP_EXE_PB_MSG = 38,
JOB_EXC_REC_HDR = 39,
EXC_JOB_ERR_HDR = 40,
PLAYBOOK_STATUS_FAILED = 41,
PLAYBOOK_OUTPUT_MISSING = 42,
EMPTY_DEVICE_LIST = 43,
PRODUCT_NAME_MISSING = 44
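    # NOTE (editor's observation): the trailing commas above turn every
    # constant except PRODUCT_NAME_MISSING into a 1-tuple, e.g.
    # JOB_TEMPLATE_MISSING == (1,).  Lookups still work because the same
    # tuple objects are used as the _msgs keys below.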
_msgs = {
'en': {
JOB_TEMPLATE_MISSING: 'job_template_id is missing '
'in the job input',
JOB_EXECUTION_ID_MISSING: 'job_execution_id is missing'
' in the job input',
JOB_SUMMARY_MESSAGE_HDR: 'Job summary: ',
JOB_RESULT_STATUS_NONE: 'Error in getting the '
'job completion '
'status after job execution. \n',
            JOB_MULTI_DEVICE_FAILED_MESSAGE_HDR: 'Job failed '
                                                 'for devices: ',
JOB_SINGLE_DEVICE_FAILED_MESSAGE_HDR: 'Job failed. \n',
PLAYBOOK_RESULTS_MESSAGE: 'Detailed job results: \n',
PLAYBOOK_EXIT_WITH_ERROR: 'Playbook "{playbook_uri}" exited'
' with error.',
PLAYBOOK_RETURN_WITH_ERROR: 'Playbook returned '
'with error',
PLAYBOOK_STATUS_FAILED: 'Playbook completed with status Failure.',
PLAYBOOK_OUTPUT_MISSING: 'Playbook completed without sending the'
                                     ' output with status details.',
NO_PLAYBOOK_INPUT_DATA: 'Playbook input data'
' is not passed. '
'Aborting execution.',
SANDESH_INITIALIZATION_TIMEOUT_ERROR: 'Sandesh '
'initialization '
'timeout after 15s',
            INPUT_SCHEMA_INPUT_NOT_FOUND: 'Required: input parameter'
' in execute-job',
DEVICE_JSON_NOT_FOUND: 'No Device details found for'
' any device',
NO_DEVICE_DATA_FOUND: 'Device details for the device '
'"{device_id}" not found',
NO_CREDENTIALS_FOUND: 'Discovered device "{device_id}" '
'does not have credentials',
INVALID_SCHEMA: 'Error while validating input schema'
' for job template "{job_template_id}" '
': {exc_obj.message}',
SEND_JOB_LOG_ERROR: 'Error while creating the job'
' log for job template '
'"{job_template_fqname}" '
'and execution id "{job_execution_id}"'
' : {exc_msg}',
SEND_JOB_EXC_UVE_ERROR: 'Error while sending the job'
' execution UVE for job '
'template "{job_template_fqname}"'
' and execution id '
'"{job_execution_id}" : {exc_msg}',
            PLAYBOOK_INPUT_PARSING_ERROR: 'Exiting due to playbook'
' input parsing error:'
' {exc_msg}',
PLAYBOOK_EXECUTE_ERROR: 'Exception in playbook process'
' for playbook "{playbook_uri}" '
'(exec_id: {execution_id}): {exc_msg} ',
CREATE_JOB_SUMMARY_ERROR: 'Error while generating the'
' job summary message'
' : {exc_msg}',
DEVICE_VENDOR_FAMILY_MISSING: 'device_vendor or '
'device_family not found'
' for "{device_id}"',
            PRODUCT_NAME_MISSING: 'device_product name not found'
' for "{device_id}"',
READ_JOB_TEMPLATE_ERROR: 'Error while reading the '
'job template "{job_template_id}"'
' from database',
GET_PLAYBOOK_INFO_ERROR: 'Error while getting the playbook'
' information from the job'
' template "{job_template_id}"'
' : {exc_msg}',
PLAYBOOK_NOT_FOUND: 'Playbook "{playbook_uri}" '
'does not exist',
PLAYBOOK_INFO_DEVICE_MISMATCH: 'Playbook info not found'
' in the job template'
' for "{device_vendor}"'
' and "{device_family}"',
RUN_PLAYBOOK_PROCESS_ERROR: 'Exception in executing '
'the playbook '
'for "{playbook_uri}"'
' : {exc_msg}',
RUN_PLAYBOOK_ERROR: 'Error while executing the playbook'
' "{playbook_uri}" : {exc_msg}',
SEND_PROUTER_OBJECT_LOG_ERROR: 'Error while creating '
'prouter object log'
' for router '
'"{prouter_fqname}" '
'and execution id '
'"{job_execution_id}"'
' : {exc_msg}',
CLOSE_SANDESH_EXCEPTION: 'Error in confirming the'
' SANDESH message send operation.'
' The Job Logs might '
'not be complete.',
RUN_PLAYBOOK_PROCESS_TIMEOUT: 'Timeout while executing'
' the playbook '
'for "{playbook_uri}" : '
'{exc_msg}. Playbook'
' process is aborted.',
PLAYBOOK_EXECUTION_COMPLETE: 'Completed playbook execution'
' for job template '
'"{job_template_name}" with '
'execution'
' id "{job_execution_id}"',
START_JOB_MESSAGE: 'Starting execution for job '
'template "{job_template_name}"'
' and execution id "{job_execution_id}"',
VNC_INITIALIZATION_ERROR: 'Exiting due to vnc api '
'initialization error: {exc_msg}',
JOB_ERROR: 'Exiting job due to error: {exc_msg} ',
JOB_EXECUTION_COMPLETE: 'Job execution completed '
'successfully.',
START_EXE_PB_MSG: 'Starting to execute the '
'playbook "{playbook_name}"',
STOP_EXE_PB_MSG: 'Finished executing the '
'playbook "{playbook_name}"',
            JOB_EXC_REC_HDR: 'Job Exception received: ',
EXC_JOB_ERR_HDR: 'Error while executing job ',
EMPTY_DEVICE_LIST: 'Need to pass a valid device list '
}
}
@classmethod
def getMessage(cls, msg_id, locale='en', *args, **kwargs):
if locale not in MsgBundle._msgs:
return 'Failed to construct job message due to invalid '\
'locale: %s' % locale
if msg_id not in MsgBundle._msgs[locale]:
return 'Failed to construct job message due to invalid '\
'message id: %s' % msg_id
try:
return MsgBundle._msgs[locale][msg_id].format(*args, **kwargs)
except KeyError as ex:
return 'Failed to construct job message due to missing message '\
'arguments: %s' % ex.message
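# Usage sketch (editor's addition): format kwargs fill the placeholders in
# the selected message template.
def _demo_msg_bundle():
    # -> 'Playbook "/opt/pb/example.yml" does not exist'
    return MsgBundle.getMessage(MsgBundle.PLAYBOOK_NOT_FOUND,
                                playbook_uri='/opt/pb/example.yml')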
|
edx/ansible
|
refs/heads/stable-1.9-plus-edx
|
v2/ansible/parsing/utils/__init__.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
enthought/etsproxy
|
refs/heads/master
|
enthought/tvtk/pyface/ui/wx/init.py
|
1
|
# proxy module
from __future__ import absolute_import
from tvtk.pyface.ui.wx.init import *
|
kvar/ansible
|
refs/heads/seas_master_2.9.5
|
lib/ansible/modules/cloud/vmware/vmware_host_datastore.py
|
23
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_datastore
short_description: Manage a datastore on ESXi host
description:
- This module can be used to mount/umount datastore on ESXi host.
- This module only supports NFS (NFS v3 or NFS v4.1) and VMFS datastores.
- For VMFS datastore, available device must already be connected on ESXi host.
- All parameters and VMware object names are case sensitive.
version_added: '2.5'
author:
- Ludovic Rivallain (@lrivallain) <ludovic.rivallain@gmail.com>
- Christian Kotte (@ckotte) <christian.kotte@gmx.de>
notes:
- Tested on vSphere 6.0, 6.5 and ESXi 6.7
- NFS v4.1 tested on vSphere 6.5
- Kerberos authentication with NFS v4.1 isn't implemented
requirements:
- python >= 2.6
- PyVmomi
options:
datacenter_name:
description:
- Name of the datacenter to add the datastore.
- The datacenter isn't used by the API to create a datastore.
- Will be removed in 2.11.
required: false
type: str
datastore_name:
description:
- Name of the datastore to add/remove.
required: true
type: str
datastore_type:
description:
- Type of the datastore to configure (nfs/nfs41/vmfs).
required: true
choices: [ 'nfs', 'nfs41', 'vmfs' ]
type: str
nfs_server:
description:
- NFS host serving nfs datastore.
- Required if datastore type is set to C(nfs)/C(nfs41) and state is set to C(present), else unused.
- Two or more servers can be defined if datastore type is set to C(nfs41).
type: str
nfs_path:
description:
- Resource path on NFS host.
- Required if datastore type is set to C(nfs)/C(nfs41) and state is set to C(present), else unused.
type: str
nfs_ro:
description:
- ReadOnly or ReadWrite mount.
- Unused unless datastore type is set to C(nfs)/C(nfs41) and state is set to C(present).
default: False
type: bool
vmfs_device_name:
description:
- Name of the device to be used as VMFS datastore.
- Required if datastore type is set to C(vmfs) and state is set to C(present), else unused.
type: str
vmfs_version:
description:
- VMFS version to use for datastore creation.
- Unused unless datastore type is set to C(vmfs) and state is set to C(present).
type: int
esxi_hostname:
description:
- ESXi hostname to manage the datastore.
- Required when used with a vCenter.
type: str
required: false
state:
description:
- "present: Mount datastore on host if datastore is absent else do nothing."
- "absent: Umount datastore if datastore is present else do nothing."
default: present
choices: [ present, absent ]
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Mount VMFS datastores to ESXi
vmware_host_datastore:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datastore_name: '{{ item.name }}'
datastore_type: '{{ item.type }}'
vmfs_device_name: 'naa.XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
vmfs_version: 6
esxi_hostname: '{{ inventory_hostname }}'
state: present
delegate_to: localhost
- name: Mount NFS datastores to ESXi
vmware_host_datastore:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datastore_name: '{{ item.name }}'
datastore_type: '{{ item.type }}'
nfs_server: '{{ item.server }}'
nfs_path: '{{ item.path }}'
nfs_ro: no
esxi_hostname: '{{ inventory_hostname }}'
state: present
delegate_to: localhost
loop:
- { 'name': 'NasDS_vol01', 'server': 'nas01', 'path': '/mnt/vol01', 'type': 'nfs'}
- { 'name': 'NasDS_vol02', 'server': 'nas01', 'path': '/mnt/vol02', 'type': 'nfs'}
- name: Mount NFS v4.1 datastores to ESXi
vmware_host_datastore:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datastore_name: '{{ item.name }}'
datastore_type: '{{ item.type }}'
nfs_server: '{{ item.server }}'
nfs_path: '{{ item.path }}'
nfs_ro: no
esxi_hostname: '{{ inventory_hostname }}'
state: present
delegate_to: localhost
loop:
- { 'name': 'NasDS_vol03', 'server': 'nas01,nas02', 'path': '/mnt/vol01', 'type': 'nfs41'}
- { 'name': 'NasDS_vol04', 'server': 'nas01,nas02', 'path': '/mnt/vol02', 'type': 'nfs41'}
- name: Remove/Umount Datastores from an ESXi
vmware_host_datastore:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
datastore_name: NasDS_vol01
state: absent
delegate_to: localhost
'''
RETURN = r'''
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datastore_by_name, find_obj
from ansible.module_utils._text import to_native
class VMwareHostDatastore(PyVmomi):
def __init__(self, module):
super(VMwareHostDatastore, self).__init__(module)
# NOTE: The below parameter is deprecated starting from Ansible v2.11
self.datacenter_name = module.params['datacenter_name']
self.datastore_name = module.params['datastore_name']
self.datastore_type = module.params['datastore_type']
self.nfs_server = module.params['nfs_server']
self.nfs_path = module.params['nfs_path']
self.nfs_ro = module.params['nfs_ro']
self.vmfs_device_name = module.params['vmfs_device_name']
self.vmfs_version = module.params['vmfs_version']
self.esxi_hostname = module.params['esxi_hostname']
self.state = module.params['state']
if self.is_vcenter():
if not self.esxi_hostname:
self.module.fail_json(msg="esxi_hostname is mandatory with a vcenter")
self.esxi = self.find_hostsystem_by_name(self.esxi_hostname)
if self.esxi is None:
self.module.fail_json(msg="Failed to find ESXi hostname %s" % self.esxi_hostname)
else:
self.esxi = find_obj(self.content, [vim.HostSystem], None)
def process_state(self):
ds_states = {
'absent': {
'present': self.umount_datastore_host,
'absent': self.state_exit_unchanged,
},
'present': {
'present': self.state_exit_unchanged,
'absent': self.mount_datastore_host,
}
}
try:
ds_states[self.state][self.check_datastore_host_state()]()
except (vmodl.RuntimeFault, vmodl.MethodFault) as vmodl_fault:
self.module.fail_json(msg=to_native(vmodl_fault.msg))
except Exception as e:
self.module.fail_json(msg=to_native(e))
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def check_datastore_host_state(self):
storage_system = self.esxi.configManager.storageSystem
host_file_sys_vol_mount_info = storage_system.fileSystemVolumeInfo.mountInfo
for host_mount_info in host_file_sys_vol_mount_info:
if host_mount_info.volume.name == self.datastore_name:
return 'present'
return 'absent'
def get_used_disks_names(self):
used_disks = []
storage_system = self.esxi.configManager.storageSystem
for each_vol_mount_info in storage_system.fileSystemVolumeInfo.mountInfo:
if hasattr(each_vol_mount_info.volume, 'extent'):
for each_partition in each_vol_mount_info.volume.extent:
used_disks.append(each_partition.diskName)
return used_disks
def umount_datastore_host(self):
ds = find_datastore_by_name(self.content, self.datastore_name)
if not ds:
self.module.fail_json(msg="No datastore found with name %s" % self.datastore_name)
if self.module.check_mode is False:
error_message_umount = "Cannot umount datastore %s from host %s" % (self.datastore_name, self.esxi.name)
try:
self.esxi.configManager.datastoreSystem.RemoveDatastore(ds)
except (vim.fault.NotFound, vim.fault.HostConfigFault, vim.fault.ResourceInUse) as fault:
self.module.fail_json(msg="%s: %s" % (error_message_umount, to_native(fault.msg)))
except Exception as e:
self.module.fail_json(msg="%s: %s" % (error_message_umount, to_native(e)))
self.module.exit_json(changed=True, result="Datastore %s on host %s" % (self.datastore_name, self.esxi.name))
def mount_datastore_host(self):
if self.datastore_type == 'nfs' or self.datastore_type == 'nfs41':
self.mount_nfs_datastore_host()
if self.datastore_type == 'vmfs':
self.mount_vmfs_datastore_host()
def mount_nfs_datastore_host(self):
if self.module.check_mode is False:
mnt_specs = vim.host.NasVolume.Specification()
# NFS v3
if self.datastore_type == 'nfs':
mnt_specs.type = "NFS"
mnt_specs.remoteHost = self.nfs_server
# NFS v4.1
if self.datastore_type == 'nfs41':
mnt_specs.type = "NFS41"
# remoteHost needs to be set to a non-empty string, but the value is not used
mnt_specs.remoteHost = "something"
mnt_specs.remoteHostNames = [self.nfs_server]
mnt_specs.remotePath = self.nfs_path
mnt_specs.localPath = self.datastore_name
if self.nfs_ro:
mnt_specs.accessMode = "readOnly"
else:
mnt_specs.accessMode = "readWrite"
error_message_mount = "Cannot mount datastore %s on host %s" % (self.datastore_name, self.esxi.name)
try:
ds = self.esxi.configManager.datastoreSystem.CreateNasDatastore(mnt_specs)
if not ds:
self.module.fail_json(msg=error_message_mount)
except (vim.fault.NotFound, vim.fault.DuplicateName,
vim.fault.AlreadyExists, vim.fault.HostConfigFault,
vmodl.fault.InvalidArgument, vim.fault.NoVirtualNic,
vim.fault.NoGateway) as fault:
self.module.fail_json(msg="%s: %s" % (error_message_mount, to_native(fault.msg)))
except Exception as e:
self.module.fail_json(msg="%s : %s" % (error_message_mount, to_native(e)))
self.module.exit_json(changed=True, result="Datastore %s on host %s" % (self.datastore_name, self.esxi.name))
def mount_vmfs_datastore_host(self):
if self.module.check_mode is False:
ds_path = "/vmfs/devices/disks/" + str(self.vmfs_device_name)
host_ds_system = self.esxi.configManager.datastoreSystem
ds_system = vim.host.DatastoreSystem
if self.vmfs_device_name in self.get_used_disks_names():
error_message_used_disk = "VMFS disk %s already in use" % self.vmfs_device_name
self.module.fail_json(msg="%s" % error_message_used_disk)
error_message_mount = "Cannot mount datastore %s on host %s" % (self.datastore_name, self.esxi.name)
try:
vmfs_ds_options = ds_system.QueryVmfsDatastoreCreateOptions(host_ds_system,
ds_path,
self.vmfs_version)
vmfs_ds_options[0].spec.vmfs.volumeName = self.datastore_name
ds = ds_system.CreateVmfsDatastore(host_ds_system,
vmfs_ds_options[0].spec)
except (vim.fault.NotFound, vim.fault.DuplicateName,
vim.fault.HostConfigFault, vmodl.fault.InvalidArgument) as fault:
self.module.fail_json(msg="%s : %s" % (error_message_mount, to_native(fault.msg)))
except Exception as e:
self.module.fail_json(msg="%s : %s" % (error_message_mount, to_native(e)))
self.module.exit_json(changed=True, result="Datastore %s on host %s" % (self.datastore_name, self.esxi.name))
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
datacenter_name=dict(type='str', required=False, removed_in_version=2.11),
datastore_name=dict(type='str', required=True),
datastore_type=dict(type='str', choices=['nfs', 'nfs41', 'vmfs']),
nfs_server=dict(type='str'),
nfs_path=dict(type='str'),
nfs_ro=dict(type='bool', default=False),
vmfs_device_name=dict(type='str'),
vmfs_version=dict(type='int'),
esxi_hostname=dict(type='str', required=False),
state=dict(type='str', default='present', choices=['absent', 'present'])
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_together=[
['nfs_server', 'nfs_path']
],
)
# more complex required_if
if module.params['state'] == 'present':
if module.params['datastore_type'] == 'nfs' and not module.params['nfs_server']:
msg = "Missing nfs_server with datastore_type = nfs"
module.fail_json(msg=msg)
if module.params['datastore_type'] == 'nfs41' and not module.params['nfs_server']:
msg = "Missing nfs_server with datastore_type = nfs41"
module.fail_json(msg=msg)
if module.params['datastore_type'] == 'vmfs' and not module.params['vmfs_device_name']:
msg = "Missing vmfs_device_name with datastore_type = vmfs"
module.fail_json(msg=msg)
vmware_host_datastore = VMwareHostDatastore(module)
vmware_host_datastore.process_state()
if __name__ == '__main__':
main()
|
PsYear/scrapy_example
|
refs/heads/master
|
xiaobaihe/xiaobaihe/settings.py
|
5
|
# -*- coding: utf-8 -*-
# Scrapy settings for xiaobaihe project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'xiaobaihe'
SPIDER_MODULES = ['xiaobaihe.spiders']
NEWSPIDER_MODULE = 'xiaobaihe.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'xiaobaihe (+http://www.yourdomain.com)'
|
mcanthony/supercollider
|
refs/heads/master
|
editors/sced/sced/util.py
|
44
|
# sced (SuperCollider mode for gedit)
# Copyright 2009 Artem Popov and other contributors (see AUTHORS)
#
# sced is free software:
# you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def is_block_beginning(s):
s = "".join(s.split())
# FIXME: clarify this later
if s == "(" or s.startswith("(//") or s.startswith("(/*"):
return True
else:
return False
def find_block(doc, where=None):
if where is None:
i1 = doc.get_iter_at_mark(doc.get_insert())
else:
i1 = where.copy()
# move backward until a block beginning is found
while True:
i1.set_line_offset(0)
i2 = i1.copy()
i2.forward_to_line_end()
if is_block_beginning(doc.get_text(i1, i2)):
break
if not i1.backward_line():
raise RuntimeError("Couldn't find where code block starts!")
i2 = i1.copy()
count = 1
line_comment = False
block_comment = 0
# move forward to the end of the block
while True:
if not i2.forward_char():
raise RuntimeError("Couldn't find where code block ends!")
char = i2.get_char()
i3 = i2.copy()
i3.forward_chars(2)
ct = i2.get_text(i3)
if ct == "*/":
block_comment -= 1
elif ct == "/*":
block_comment += 1
elif ct == "//":
line_comment = True
elif char == "\n" and line_comment:
line_comment = False
if not block_comment and not line_comment:
if char == "(":
count += 1
elif char == ")":
count -= 1
if count == 0:
break
# XXX: include 2 more characters just in case "where" is near the end
i2.forward_chars(2)
if where.in_range(i1, i2):
return i1, i2
else:
raise RuntimeError("Couldn't find code block!")
import re
def scpred(c, *args):
if re.match("[A-Za-z0-9_]", c):
return False
return True
def find_word(doc, where=None):
if where is None:
i1 = doc.get_iter_at_mark(doc.get_insert())
else:
i1 = where.copy()
i1.backward_find_char(scpred)
i1.forward_char() # <-- FIXME: forward should not normally be required
i2 = i1.copy()
i2.forward_find_char(scpred)
return i1, i2
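# A small, pure-string sketch of is_block_beginning (the iterator
# helpers above need a live gedit/gtk text buffer, so only the string
# logic is shown here):
#   is_block_beginning("(")               # -> True
#   is_block_beginning("( // comment")    # -> True
#   is_block_beginning("x = (")           # -> False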
|
alimony/django
|
refs/heads/master
|
django/contrib/sessions/backends/db.py
|
27
|
import logging
from django.contrib.sessions.backends.base import (
CreateError, SessionBase, UpdateError,
)
from django.core.exceptions import SuspiciousOperation
from django.db import DatabaseError, IntegrityError, router, transaction
from django.utils import timezone
from django.utils.functional import cached_property
class SessionStore(SessionBase):
"""
Implement database session store.
"""
def __init__(self, session_key=None):
super().__init__(session_key)
@classmethod
def get_model_class(cls):
# Avoids a circular import and allows importing SessionStore when
# django.contrib.sessions is not in INSTALLED_APPS.
from django.contrib.sessions.models import Session
return Session
@cached_property
def model(self):
return self.get_model_class()
def load(self):
try:
s = self.model.objects.get(
session_key=self.session_key,
expire_date__gt=timezone.now()
)
return self.decode(s.session_data)
except (self.model.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
logger.warning(str(e))
self._session_key = None
return {}
def exists(self, session_key):
return self.model.objects.filter(session_key=session_key).exists()
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
# Save immediately to ensure we have a unique entry in the
# database.
self.save(must_create=True)
except CreateError:
# Key wasn't unique. Try again.
continue
self.modified = True
return
def create_model_instance(self, data):
"""
Return a new instance of the session model object, which represents the
current session state. Intended to be used for saving the session data
to the database.
"""
return self.model(
session_key=self._get_or_create_session_key(),
session_data=self.encode(data),
expire_date=self.get_expiry_date(),
)
def save(self, must_create=False):
"""
Save the current session data to the database. If 'must_create' is
True, raise a database error if the saving operation doesn't create a
new entry (as opposed to possibly updating an existing entry).
"""
if self.session_key is None:
return self.create()
data = self._get_session(no_load=must_create)
obj = self.create_model_instance(data)
using = router.db_for_write(self.model, instance=obj)
try:
with transaction.atomic(using=using):
obj.save(force_insert=must_create, force_update=not must_create, using=using)
except IntegrityError:
if must_create:
raise CreateError
raise
except DatabaseError:
if not must_create:
raise UpdateError
raise
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
self.model.objects.get(session_key=session_key).delete()
except self.model.DoesNotExist:
pass
@classmethod
def clear_expired(cls):
cls.get_model_class().objects.filter(expire_date__lt=timezone.now()).delete()
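# Illustrative usage, assuming a configured Django project with
# django.contrib.sessions installed and migrated:
#   store = SessionStore()
#   store['cart'] = [1, 2, 3]
#   store.save()                             # inserts a django_session row
#   SessionStore(store.session_key).load()   # -> {'cart': [1, 2, 3]}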
|
Panda3D-google-code-repositories/naith
|
refs/heads/master
|
game/plugins/particlemanager/particlemanager.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright Tom SF Haines
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import posixpath
from panda3d.core import *
from direct.particles.ParticleEffect import ParticleEffect
from direct.interval.MetaInterval import Sequence
from direct.interval.FunctionInterval import *
class ParticleManager:
"""Simple particle system wrapper - put here in the hope that latter on a better way of doing things will exist and this will make the swap out easy."""
def __init__(self,manager,xml):
# Get the particle system path...
self.basePath = manager.get('paths').getConfig().find('particles').get('path')
# Enable the particle system...
base.enableParticles()
self.reload(manager,xml)
def reload(self,manager,xml):
# Generate a database of particle effects from the xml - this consists of a name to request the effect by, a configuration file to load and how long to let it live before destroying it. (If omitted it will live until a stop.)
self.pdb = dict()
for effect in xml.findall('effect'):
new = dict()
new['name'] = effect.get('name')
self.pdb[new['name']] = new
new['file'] = posixpath.join(self.basePath,effect.get('file'))
new['life'] = effect.get('lifetime',None)
# This stores all current particle effects, indexed by their id - used by stop to end it all...
self.effects = dict()
def stop(self):
# Kill all running effects - as these are meant to be 'fire once' particle effects this makes sense...
for key,pn in self.effects.iteritems():
pn[0].cleanup()
pn[0].remove_node()
pn[1].removeNode()
def doEffect(self,name,source,track = False,pos = None,quat = None):
"""Creates the particle effect with the given name. source is the node to appear at - if track is false its initialised here and then remains fixed in world space, otherwise it moves with the source node. pos and quat are offset's in source's local coordinate sytem for the origin."""
# Create node to store the particle effect in...
if track:
n = source.attachNewNode('')
self.prepNode(n)
else:
n = render.attachNewNode('')
self.prepNode(n)
n.setPos(render,source.getPos(render))
n.setQuat(render,source.getQuat(render))
# Handle local offset/rotation...
if pos!=None:
n.setPos(n,pos)
if quat!=None:
n.setQuat(n,quat)
# Create the particle effect...
p = ParticleEffect()
p.loadConfig(Filename(self.pdb[name]['file']))
p.start(n)
# Store effect in the effect database...
self.effects[id(p)] = (p,n)
# If the effect has a finite life arrange for it to be terminated...
if self.pdb[name]['life']!=None:
def kill(p):
if self.effects.has_key(id(p)):
pn = self.effects[id(p)]
pn[0].cleanup()
pn[0].remove_node()
pn[1].removeNode()
del self.effects[id(p)]
s = Sequence(Wait(float(self.pdb[name]['life'])),Func(kill,p))
s.start()
def prepNode(self,node):
"""Internal use only really - given a node this prepares it to be the parent of a paricle system."""
node.setBin('fixed',0)
node.setDepthWrite(False)
node.setLightOff()
node.setShaderOff()
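# Illustrative usage, assuming a running Panda3D app and an xml config
# that defines a (hypothetical) effect named 'explosion':
#   pm = ParticleManager(manager, xml)
#   pm.doEffect('explosion', avatar, track=False)   # fixed in world space
#   pm.stop()                                       # cleanup on shutdown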
|
RCAD/ringling-render-tools
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup, find_packages
from pkg_resources import Requirement, resource_filename
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),'src')))
import rrt
setup(
name = "ringling_render_tools",
version = rrt.__version__,
author = "Owen Nelson, Stephen Garcia",
author_email = "onelson@ringling.edu, sgmorale@ringling.edu",
license = "MIT",
description = "Pipeline tools for Maya, 3DSmax, and C4D rendering with Windows HPC",
keywords = "Maya 3DSmax C4D WindowsHPC",
platforms = ["windows"],
package_dir = {'':'src'},
packages = find_packages('src'),
include_package_data = True,
install_requires = ['setuptools'],
extras_require = {'pymel': ['pymel>=1']},
dependency_links = ['http://pymel.googlecode.com/files/pymel-1.0.2.zip'],
test_suite = 'nose.collector',
tests_require = ['nose','ludibrio','pymel'],
entry_points = {
'console_scripts': [
'hpc-node-prep = rrt.hpc.scripts:prep_delegator',
'hpc-node-release = rrt.hpc.scripts:release_delegator',
'hpc-deploy-extras = rrt.hpc.scripts:deploy_extras',
],
'gui_scripts': [
'hpc-submit-max = rrt.max.gui:submit_gui',
'hpc-submit-md = rrt.md.gui:submit_gui',
'hpc-submit-maya = rrt.maya.outsideGui:submit_gui',
]
}
)
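# Typical developer install, pulling in the optional pymel extra
# declared above (shown for illustration):
#   pip install -e .[pymel]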
|
TimYi/django
|
refs/heads/master
|
django/contrib/postgres/forms/ranges.py
|
393
|
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django import forms
from django.core import exceptions
from django.forms.widgets import MultiWidget
from django.utils.translation import ugettext_lazy as _
__all__ = ['IntegerRangeField', 'FloatRangeField', 'DateTimeRangeField', 'DateRangeField']
class BaseRangeField(forms.MultiValueField):
default_error_messages = {
'invalid': _('Enter two valid values.'),
'bound_ordering': _('The start of the range must not exceed the end of the range.'),
}
def __init__(self, **kwargs):
kwargs.setdefault('widget', RangeWidget(self.base_field.widget))
kwargs.setdefault('fields', [self.base_field(required=False), self.base_field(required=False)])
kwargs.setdefault('required', False)
kwargs.setdefault('require_all_fields', False)
super(BaseRangeField, self).__init__(**kwargs)
def prepare_value(self, value):
lower_base, upper_base = self.fields
if isinstance(value, self.range_type):
return [
lower_base.prepare_value(value.lower),
upper_base.prepare_value(value.upper),
]
if value is None:
return [
lower_base.prepare_value(None),
upper_base.prepare_value(None),
]
return value
def compress(self, values):
if not values:
return None
lower, upper = values
if lower is not None and upper is not None and lower > upper:
raise exceptions.ValidationError(
self.error_messages['bound_ordering'],
code='bound_ordering',
)
try:
range_value = self.range_type(lower, upper)
except TypeError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
)
else:
return range_value
class IntegerRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two whole numbers.')}
base_field = forms.IntegerField
range_type = NumericRange
class FloatRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two numbers.')}
base_field = forms.FloatField
range_type = NumericRange
class DateTimeRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two valid date/times.')}
base_field = forms.DateTimeField
range_type = DateTimeTZRange
class DateRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two valid dates.')}
base_field = forms.DateField
range_type = DateRange
class RangeWidget(MultiWidget):
def __init__(self, base_widget, attrs=None):
widgets = (base_widget, base_widget)
super(RangeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
return (value.lower, value.upper)
return (None, None)
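# Illustrative usage, assuming a configured Django project with
# psycopg2 installed:
#   field = IntegerRangeField()
#   field.clean(['1', '10'])   # -> NumericRange(1, 10)
#   field.clean(['10', '1'])   # raises ValidationError (bound_ordering)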
|
mbauskar/Das_frappe
|
refs/heads/develop
|
frappe/email/receive.py
|
10
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import time
import _socket, poplib
import frappe
from frappe import _
from frappe.utils import extract_email_id, convert_utc_to_user_timezone, now, cint, cstr, strip
from frappe.utils.scheduler import log
from email_reply_parser import EmailReplyParser
from email.header import decode_header
from frappe.utils.file_manager import get_random_filename
class EmailSizeExceededError(frappe.ValidationError): pass
class EmailTimeoutError(frappe.ValidationError): pass
class TotalSizeExceededError(frappe.ValidationError): pass
class LoginLimitExceeded(frappe.ValidationError): pass
class POP3Server:
"""Wrapper for POP server to pull emails."""
def __init__(self, args=None):
self.setup(args)
def setup(self, args=None):
# override
self.settings = args or frappe._dict()
def check_mails(self):
# override
return True
def process_message(self, mail):
# override
pass
def connect(self):
"""Connect to **Email Account**."""
try:
if cint(self.settings.use_ssl):
self.pop = Timed_POP3_SSL(self.settings.host, timeout=frappe.conf.get("pop_timeout"))
else:
self.pop = Timed_POP3(self.settings.host, timeout=frappe.conf.get("pop_timeout"))
self.pop.user(self.settings.username)
self.pop.pass_(self.settings.password)
# connection established!
return True
except _socket.error:
# Invalid mail server -- due to refusing connection
frappe.msgprint(_('Invalid Mail Server. Please rectify and try again.'))
raise
except poplib.error_proto, e:
if self.is_temporary_system_problem(e):
return False
else:
frappe.msgprint(_('Invalid User Name or Support Password. Please rectify and try again.'))
raise
def get_messages(self):
"""Returns new email messages in a list."""
if not self.check_mails():
return # nothing to do
frappe.db.commit()
if not self.connect():
return []
try:
# track whether errors arose
self.errors = False
self.latest_messages = []
pop_list = self.pop.list()[1]
num = num_copy = len(pop_list)
# WARNING: Hard coded max no. of messages to be popped
if num > 20: num = 20
# size limits
self.total_size = 0
self.max_email_size = cint(frappe.local.conf.get("max_email_size"))
self.max_total_size = 5 * self.max_email_size
for i, pop_meta in enumerate(pop_list):
# do not pull more than NUM emails
if (i+1) > num:
break
try:
self.retrieve_message(pop_meta, i+1)
except (TotalSizeExceededError, EmailTimeoutError, LoginLimitExceeded):
break
# WARNING: Mark as read - message number 101 onwards from the pop list
# This is to avoid having too many messages entering the system
num = num_copy
if num > 100 and not self.errors:
for m in xrange(101, num+1):
self.pop.dele(m)
except Exception, e:
if self.has_login_limit_exceeded(e):
pass
else:
raise
finally:
# no matter the exception, pop should quit if connected
self.pop.quit()
return self.latest_messages
def retrieve_message(self, pop_meta, msg_num):
incoming_mail = None
try:
self.validate_pop(pop_meta)
msg = self.pop.retr(msg_num)
self.latest_messages.append(b'\n'.join(msg[1]))
except (TotalSizeExceededError, EmailTimeoutError):
# propagate this error to break the loop
self.errors = True
raise
except Exception, e:
if self.has_login_limit_exceeded(e):
self.errors = True
raise LoginLimitExceeded, e
else:
# log performs rollback and logs error in scheduler log
log("receive.get_messages", self.make_error_msg(msg_num, incoming_mail))
self.errors = True
frappe.db.rollback()
self.pop.dele(msg_num)
else:
self.pop.dele(msg_num)
def has_login_limit_exceeded(self, e):
return "-ERR Exceeded the login limit" in strip(cstr(e.message))
def is_temporary_system_problem(self, e):
messages = (
"-ERR [SYS/TEMP] Temporary system problem. Please try again later.",
"Connection timed out",
)
for message in messages:
if message in strip(cstr(e.message)):
return True
return False
def validate_pop(self, pop_meta):
# throttle based on email size
if not self.max_email_size:
return
m, size = pop_meta.split()
size = cint(size)
if size < self.max_email_size:
self.total_size += size
if self.total_size > self.max_total_size:
raise TotalSizeExceededError
else:
raise EmailSizeExceededError
def make_error_msg(self, msg_num, incoming_mail):
error_msg = "Error in retrieving email."
if not incoming_mail:
try:
# retrieve headers
incoming_mail = Email(b'\n'.join(self.pop.top(msg_num, 5)[1]))
except:
pass
if incoming_mail:
error_msg += "\nDate: {date}\nFrom: {from_email}\nSubject: {subject}\n".format(
date=incoming_mail.date, from_email=incoming_mail.from_email, subject=incoming_mail.subject)
return error_msg
class Email:
"""Wrapper for an email."""
def __init__(self, content):
"""Parses headers, content, attachments from given raw message.
:param content: Raw message."""
import email, email.utils
import datetime
self.raw = content
self.mail = email.message_from_string(self.raw)
self.text_content = ''
self.html_content = ''
self.attachments = []
self.parse()
self.set_content_and_type()
self.set_subject()
self.from_email = extract_email_id(self.mail["From"])
self.from_real_name = email.utils.parseaddr(self.mail["From"])[0]
if self.mail["Date"]:
utc = email.utils.mktime_tz(email.utils.parsedate_tz(self.mail["Date"]))
utc_dt = datetime.datetime.utcfromtimestamp(utc)
self.date = convert_utc_to_user_timezone(utc_dt).strftime('%Y-%m-%d %H:%M:%S')
else:
self.date = now()
def parse(self):
"""Walk and process multi-part email."""
for part in self.mail.walk():
self.process_part(part)
def set_subject(self):
"""Parse and decode `Subject` header."""
import email.header
_subject = email.header.decode_header(self.mail.get("Subject", "No Subject"))
self.subject = _subject[0][0] or ""
if _subject[0][1]:
self.subject = self.subject.decode(_subject[0][1])
else:
# assume that the encoding is utf-8
self.subject = self.subject.decode("utf-8")
if not self.subject:
self.subject = "No Subject"
def set_content_and_type(self):
self.content, self.content_type = '[Blank Email]', 'text/plain'
if self.html_content:
self.content, self.content_type = self.html_content, 'text/html'
else:
self.content, self.content_type = EmailReplyParser.parse_reply(self.text_content), 'text/plain'
def process_part(self, part):
"""Parse email `part` and set it to `text_content`, `html_content` or `attachments`."""
content_type = part.get_content_type()
charset = part.get_content_charset()
if not charset: charset = self.get_charset(part)
if content_type == 'text/plain':
self.text_content += self.get_payload(part, charset)
if content_type == 'text/html':
self.html_content += self.get_payload(part, charset)
if part.get_filename():
self.get_attachment(part, charset)
def get_charset(self, part):
"""Detect chartset."""
charset = part.get_content_charset()
if not charset:
import chardet
charset = chardet.detect(str(part))['encoding']
return charset
def get_payload(self, part, charset):
try:
return unicode(part.get_payload(decode=True),str(charset),"ignore")
except LookupError:
return part.get_payload()
def get_attachment(self, part, charset):
fcontent = part.get_payload(decode=True)
if fcontent:
content_type = part.get_content_type()
fname = part.get_filename()
if fname:
try:
fname = cstr(decode_header(fname)[0][0])
except:
fname = get_random_filename(content_type=content_type)
else:
fname = get_random_filename(content_type=content_type)
self.attachments.append({
'content_type': content_type,
'fname': fname,
'fcontent': fcontent,
})
def save_attachments_in_doc(self, doc):
"""Save email attachments in given document."""
from frappe.utils.file_manager import save_file, MaxFileSizeReachedError
for attachment in self.attachments:
try:
save_file(attachment['fname'], attachment['fcontent'],
doc.doctype, doc.name)
except MaxFileSizeReachedError:
# WARNING: bypass max file size exception
pass
except frappe.DuplicateEntryError:
# same file attached twice??
pass
def get_thread_id(self):
"""Extract thread ID from `[]`"""
import re
l = re.findall('(?<=\[)[\w/-]+', self.subject)
return l and l[0] or None
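# Sketch of the extraction above on a plain subject string:
#   re.findall('(?<=\[)[\w/-]+', 'Re: [ERP/1234] follow up')
#   -> ['ERP/1234'], so get_thread_id() returns 'ERP/1234'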
class TimerMixin(object):
def __init__(self, *args, **kwargs):
self.timeout = kwargs.pop('timeout', 0.0)
self.elapsed_time = 0.0
self._super.__init__(self, *args, **kwargs)
if self.timeout:
# set per operation timeout to one-fifth of total pop timeout
self.sock.settimeout(self.timeout / 5.0)
def _getline(self, *args, **kwargs):
start_time = time.time()
ret = self._super._getline(self, *args, **kwargs)
self.elapsed_time += time.time() - start_time
if self.timeout and self.elapsed_time > self.timeout:
raise EmailTimeoutError
return ret
def quit(self, *args, **kwargs):
self.elapsed_time = 0.0
return self._super.quit(self, *args, **kwargs)
class Timed_POP3(TimerMixin, poplib.POP3):
_super = poplib.POP3
class Timed_POP3_SSL(TimerMixin, poplib.POP3_SSL):
_super = poplib.POP3_SSL
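# Illustrative usage: the mixin budgets total wall-clock time across the
# whole POP session on top of the per-read socket timeout (host name is
# hypothetical):
#   pop = Timed_POP3('mail.example.com', timeout=30.0)
#   pop.user('jane'); pop.pass_('secret')
#   pop.list()   # raises EmailTimeoutError once 30s of reads accumulate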
|
grandquista/rethinkdb
|
refs/heads/next
|
drivers/python/rethinkdb/handshake.py
|
10
|
import base64
import binascii
import hashlib
import hmac
import random
import struct
import sys
import threading
from . import ql2_pb2
from .errors import *
try:
xrange
except NameError:
xrange = range
class HandshakeV0_4(object):
VERSION = ql2_pb2.VersionDummy.Version.V0_4
PROTOCOL = ql2_pb2.VersionDummy.Protocol.JSON
def __init__(self, host, port, auth_key):
self._host = host
self._port = port
self._auth_key = auth_key.encode("ascii")
self._state = 0
def reset(self):
self._state = 0
def next_message(self, response):
if self._state == 0:
if response is not None:
raise ReqlDriverError("Unexpected response")
self._state = 1
return \
struct.pack("<2L", self.VERSION, len(self._auth_key)) + \
self._auth_key + \
struct.pack("<L", self.PROTOCOL)
elif self._state == 1:
if response is None:
raise ReqlDriverError("Expected response")
self._state = 2
if response != b"SUCCESS":
# This is an error case; we attempt to decode `response` as UTF-8
# with fallbacks to get something useful
message = None
try:
message = response.decode("utf-8", errors="ignore").strip()
except TypeError:
try:
message = response.decode("utf-8").strip()
except UnicodeError:
message = repr(response).strip()
if message == "ERROR: Incorrect authorization key.":
raise ReqlAuthError("Incorrect authentication key.", self._host, self._port)
else:
raise ReqlDriverError(
"Server dropped connection with message: \"%s\"" % (message, ))
return None
else:
raise ReqlDriverError("Unexpected handshake state")
class HandshakeV1_0(object):
VERSION = ql2_pb2.VersionDummy.Version.V1_0
PROTOCOL = ql2_pb2.VersionDummy.Protocol.JSON
def __init__(self, json_decoder, json_encoder, host, port, username, password):
self._json_decoder = json_decoder
self._json_encoder = json_encoder
self._host = host
self._port = port
self._username = \
username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C")
try:
self._password = bytes(password, "utf-8")
except TypeError:
self._password = bytes(password)
# Python 2.7.7+ and Python 3.3+
self._compare_digest = getattr(hmac, "compare_digest", None)
if not self._compare_digest:
self._compare_digest = self.__compare_digest
# Python 2.7.8+ and Python 3.4+.
self._pbkdf2_hmac = getattr(hashlib, "pbkdf2_hmac", None)
if not self._pbkdf2_hmac:
self._pbkdf2_hmac = HandshakeV1_0.__pbkdf2_hmac
self._protocol_version = 0
self._random = random.SystemRandom()
self._state = 0
def reset(self):
self._r = None
self._client_first_message_bare = None
self._server_signature = None
self._state = 0
def next_message(self, response):
if self._state == 0:
if response is not None:
raise ReqlDriverError("Unexpected response")
# Using base64 encoding for printable characters
self._r = base64.standard_b64encode(
bytes(bytearray(self._random.getrandbits(8) for i in range(18))))
self._client_first_message_bare = b"n=" + self._username + b",r=" + self._r
# Here we send the version as well as the initial JSON as an optimization
self._state = 1
return struct.pack("<L", self.VERSION) + \
self._json_encoder.encode({
"protocol_version": self._protocol_version,
"authentication_method": "SCRAM-SHA-256",
"authentication":
(b"n,," + self._client_first_message_bare).decode("ascii")
}).encode("utf-8") + \
b'\0'
elif self._state == 1:
response = response.decode("utf-8")
if response.startswith("ERROR"):
raise ReqlDriverError("Received an unexpected reply. You may be attempting to connect to a RethinkDB server that is too old for this driver. The minimum supported server version is 2.3.0.")
json = self._json_decoder.decode(response)
try:
if json["success"] == False:
if 10 <= json["error_code"] <= 20:
raise ReqlAuthError(json["error"], self._host, self._port)
else:
raise ReqlDriverError(json["error"])
min = json["min_protocol_version"]
max = json["max_protocol_version"]
if not min <= self._protocol_version <= max:
raise ReqlDriverError(
"Unsupported protocol version %d, expected between %d and %d" % (
self._protocol_version, min, max))
except KeyError as key_error:
raise ReqlDriverError("Missing key: %s" % (key_error, ))
# We've already sent the initial JSON above, and only support a single
# protocol version at the moment thus we simply read the next response
self._state = 2
return ""
elif self._state == 2:
json = self._json_decoder.decode(response.decode("utf-8"))
server_first_message = r = salt = i = None
try:
if json["success"] == False:
if 10 <= json["error_code"] <= 20:
raise ReqlAuthError(json["error"], self._host, self._port)
else:
raise ReqlDriverError(json["error"])
server_first_message = json["authentication"].encode("ascii")
authentication = dict(
x.split(b"=", 1) for x in server_first_message.split(b","))
r = authentication[b"r"]
if not r.startswith(self._r):
raise ReqlAuthError("Invalid nonce from server", self._host, self._port)
salt = base64.standard_b64decode(authentication[b"s"])
i = int(authentication[b"i"])
except KeyError as key_error:
raise ReqlDriverError("Missing key: %s" % (key_error, ))
client_final_message_without_proof = b"c=biws,r=" + r
# SaltedPassword := Hi(Normalize(password), salt, i)
salted_password = self._pbkdf2_hmac("sha256", self._password, salt, i)
# ClientKey := HMAC(SaltedPassword, "Client Key")
client_key = hmac.new(
salted_password, b"Client Key", hashlib.sha256).digest()
# StoredKey := H(ClientKey)
stored_key = hashlib.sha256(client_key).digest()
# AuthMessage := client-first-message-bare + "," +
# server-first-message + "," +
# client-final-message-without-proof
auth_message = b",".join((
self._client_first_message_bare,
server_first_message,
client_final_message_without_proof))
# ClientSignature := HMAC(StoredKey, AuthMessage)
client_signature = hmac.new(
stored_key, auth_message, hashlib.sha256).digest()
# ClientProof := ClientKey XOR ClientSignature
client_proof = struct.pack(
"32B",
*(l ^ r for l, r in zip(
struct.unpack("32B", client_key),
struct.unpack("32B", client_signature))))
# ServerKey := HMAC(SaltedPassword, "Server Key")
server_key = hmac.new(
salted_password, b"Server Key", hashlib.sha256).digest()
# ServerSignature := HMAC(ServerKey, AuthMessage)
self._server_signature = hmac.new(
server_key, auth_message, hashlib.sha256).digest()
self._state = 3
return self._json_encoder.encode({
"authentication": (
client_final_message_without_proof +
b",p=" + base64.standard_b64encode(client_proof)
).decode("ascii")
}).encode("utf-8") + \
b'\0'
elif self._state == 3:
json = self._json_decoder.decode(response.decode("utf-8"))
v = None
try:
if json["success"] == False:
if 10 <= json["error_code"] <= 20:
raise ReqlAuthError(json["error"], self._host, self._port)
else:
raise ReqlDriverError(json["error"])
authentication = dict(
x.split(b"=", 1)
for x in json["authentication"].encode("ascii").split(b","))
v = base64.standard_b64decode(authentication[b"v"])
except KeyError as key_error:
raise ReqlDriverError("Missing key: %s" % (key_error, ))
if not self._compare_digest(v, self._server_signature):
raise ReqlAuthError("Invalid server signature", self._host, self._port)
self._state = 4
return None
else:
raise ReqlDriverError("Unexpected handshake state");
@staticmethod
def __compare_digest(a, b):
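# Constant-time fallback for hmac.compare_digest (absent before
# Python 2.7.7/3.3): always XOR across a full-length pair of buffers
# so the run time does not reveal how many leading bytes matched.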
if sys.version_info[0] == 3:
def xor_bytes(a, b):
return a ^ b
else:
def xor_bytes(a, b, _ord=ord):
return _ord(a) ^ _ord(b)
left = None
right = b
if len(a) == len(b):
left = a
result = 0
if len(a) != len(b):
left = b
result = 1
for l, r in zip(left, right):
result |= xor_bytes(l, r)
return result == 0
class thread_local_cache(threading.local):
def __init__(self):
self.cache = {}
def set(self, key, val):
self.cache[key] = val
def get(self, key):
return self.cache.get(key)
pbkdf2_cache = thread_local_cache()
@staticmethod
def __pbkdf2_hmac(hash_name, password, salt, iterations):
assert hash_name == "sha256", hash_name
def from_bytes(value, hexlify=binascii.hexlify, int=int):
return int(hexlify(value), 16)
def to_bytes(value, unhexlify=binascii.unhexlify):
try:
return unhexlify(bytes("%064x" % value, "ascii"))
except TypeError:
return unhexlify(bytes("%064x" % value))
cache_key = (password, salt, iterations)
cache_result = HandshakeV1_0.pbkdf2_cache.get(cache_key)
if cache_result is not None:
return cache_result
mac = hmac.new(password, None, hashlib.sha256)
def digest(msg, mac=mac):
mac_copy = mac.copy()
mac_copy.update(msg)
return mac_copy.digest()
t = digest(salt + b"\x00\x00\x00\x01")
u = from_bytes(t)
for c in xrange(iterations - 1):
t = digest(t)
u ^= from_bytes(t)
u = to_bytes(u)
HandshakeV1_0.pbkdf2_cache.set(cache_key, u)
return u
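# Standalone sketch of the SCRAM proof arithmetic performed in state 2
# above, using the hashlib/hmac/struct imports at the top of this module
# (all inputs are dummy values, not a real exchange):
#   salted = hashlib.pbkdf2_hmac("sha256", b"pw", b"salt", 4096)
#   ckey = hmac.new(salted, b"Client Key", hashlib.sha256).digest()
#   sig = hmac.new(hashlib.sha256(ckey).digest(), b"auth-message",
#                  hashlib.sha256).digest()
#   proof = struct.pack("32B", *(l ^ r for l, r in zip(
#       struct.unpack("32B", ckey), struct.unpack("32B", sig))))
# i.e. ClientProof = ClientKey XOR HMAC(H(ClientKey), AuthMessage).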
|
damiansoriano/odoo
|
refs/heads/master
|
openerp/conf/__init__.py
|
442
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Library-wide configuration variables.
For now, configuration code is in openerp.tools.config. It is mainly in
unprocessed form, e.g. addons_path is a string of comma-separated
paths. The aim is to have code related to configuration (command line
parsing, configuration file loading and saving, ...) in this module
and provide real Python variables, e.g. addons_paths is really a list
of paths.
To initialize this module properly, openerp.tools.config.parse_config()
must be used.
"""
import deprecation
# Paths to search for OpenERP addons.
addons_paths = []
# List of server-wide modules to load. Those modules are supposed to provide
# features not necessarily tied to a particular database. This is in contrast
# to modules that are always bound to a specific database when they are
# installed (i.e. the majority of OpenERP addons). This is set with the --load
# command-line option.
server_wide_modules = []
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
katiewsimon/JP-Morgan-Hackathon-Project
|
refs/heads/master
|
jp_server/lib/python2.7/site-packages/werkzeug/contrib/lint.py
|
295
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.lint
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module provides a middleware that performs sanity checks of the WSGI
application. It checks that :pep:`333` is properly implemented and warns
on some common HTTP errors such as non-empty responses for 304 status
codes.
This module provides a middleware, the :class:`LintMiddleware`. Wrap your
application with it and it will warn about common problems with WSGI and
HTTP while your application is running.
It's strongly recommended to use it during development.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn
from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
from werkzeug._compat import string_types
class WSGIWarning(Warning):
"""Warning class for WSGI warnings."""
class HTTPWarning(Warning):
"""Warning class for HTTP warnings."""
def check_string(context, obj, stacklevel=3):
if type(obj) is not str:
warn(WSGIWarning('%s requires bytestrings, got %s' %
(context, obj.__class__.__name__)))
class InputStream(object):
def __init__(self, stream):
self._stream = stream
def read(self, *args):
if len(args) == 0:
warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
'input stream, thus making calls to '
'wsgi.input.read() unsafe. Conforming servers '
'may never return from this call.'),
stacklevel=2)
elif len(args) != 1:
warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
stacklevel=2)
return self._stream.read(*args)
def readline(self, *args):
if len(args) == 0:
warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
' are unsafe. Use wsgi.input.read() instead.'),
stacklevel=2)
elif len(args) == 1:
warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
'WSGI does not support this, although it\'s available '
'on all major servers.'),
stacklevel=2)
else:
raise TypeError('too many arguments passed to wsgi.input.readline()')
return self._stream.readline(*args)
def __iter__(self):
try:
return iter(self._stream)
except TypeError:
warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
return iter(())
def close(self):
warn(WSGIWarning('application closed the input stream!'),
stacklevel=2)
self._stream.close()
class ErrorStream(object):
def __init__(self, stream):
self._stream = stream
def write(self, s):
check_string('wsgi.error.write()', s)
self._stream.write(s)
def flush(self):
self._stream.flush()
def writelines(self, seq):
for line in seq:
self.write(line)
def close(self):
warn(WSGIWarning('application closed the error stream!'),
stacklevel=2)
self._stream.close()
class GuardedWrite(object):
def __init__(self, write, chunks):
self._write = write
self._chunks = chunks
def __call__(self, s):
check_string('write()', s)
self._write.write(s)
self._chunks.append(len(s))
class GuardedIterator(object):
def __init__(self, iterator, headers_set, chunks):
self._iterator = iterator
self._next = iter(iterator).next
self.closed = False
self.headers_set = headers_set
self.chunks = chunks
def __iter__(self):
return self
def next(self):
if self.closed:
warn(WSGIWarning('iterated over closed app_iter'),
stacklevel=2)
rv = self._next()
if not self.headers_set:
warn(WSGIWarning('Application returned before it '
'started the response'), stacklevel=2)
check_string('application iterator items', rv)
self.chunks.append(len(rv))
return rv
def close(self):
self.closed = True
if hasattr(self._iterator, 'close'):
self._iterator.close()
if self.headers_set:
status_code, headers = self.headers_set
bytes_sent = sum(self.chunks)
content_length = headers.get('content-length', type=int)
if status_code == 304:
for key, value in headers:
key = key.lower()
if key not in ('expires', 'content-location') and \
is_entity_header(key):
warn(HTTPWarning('entity header %r found in 304 '
'response' % key))
if bytes_sent:
warn(HTTPWarning('304 responses must not have a body'))
elif 100 <= status_code < 200 or status_code == 204:
if content_length != 0:
warn(HTTPWarning('%r responses must have an empty '
'content length' % status_code))
if bytes_sent:
warn(HTTPWarning('%r responses must not have a body' %
status_code))
elif content_length is not None and content_length != bytes_sent:
warn(WSGIWarning('Content-Length and the number of bytes '
'sent to the client do not match.'))
def __del__(self):
if not self.closed:
try:
warn(WSGIWarning('Iterator was garbage collected before '
'it was closed.'))
except Exception:
pass
class LintMiddleware(object):
"""This middleware wraps an application and warns on common errors.
Among other things, it currently checks for the following problems:
- invalid status codes
- non-bytestrings sent to the WSGI server
- strings returned from the WSGI application
- non-empty conditional responses
- unquoted etags
- relative URLs in the Location header
- unsafe calls to wsgi.input
- unclosed iterators
Detected errors are emitted using the standard Python :mod:`warnings`
system and usually end up on :data:`stderr`.
::
from werkzeug.contrib.lint import LintMiddleware
app = LintMiddleware(app)
:param app: the application to wrap
"""
def __init__(self, app):
self.app = app
def check_environ(self, environ):
if type(environ) is not dict:
warn(WSGIWarning('WSGI environment is not a standard python dict.'),
stacklevel=4)
for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
'wsgi.version', 'wsgi.input', 'wsgi.errors',
'wsgi.multithread', 'wsgi.multiprocess',
'wsgi.run_once'):
if key not in environ:
warn(WSGIWarning('required environment key %r not found'
% key), stacklevel=3)
if environ['wsgi.version'] != (1, 0):
warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
stacklevel=3)
script_name = environ.get('SCRIPT_NAME', '')
if script_name and script_name[:1] != '/':
warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
% script_name), stacklevel=3)
path_info = environ.get('PATH_INFO', '')
if path_info[:1] != '/':
warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
% path_info), stacklevel=3)
def check_start_response(self, status, headers, exc_info):
check_string('status', status)
status_code = status.split(None, 1)[0]
if len(status_code) != 3 or not status_code.isdigit():
warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
if len(status) < 4 or status[3] != ' ':
warn(WSGIWarning('Invalid value for status %r. Valid '
'status strings are three digits, a space '
'and a status explanation'), stacklevel=3)
status_code = int(status_code)
if status_code < 100:
warn(WSGIWarning('status code < 100 detected'), stacklevel=3)
if type(headers) is not list:
warn(WSGIWarning('header list is not a list'), stacklevel=3)
for item in headers:
if type(item) is not tuple or len(item) != 2:
warn(WSGIWarning('Header items must be 2-item tuples'),
stacklevel=3)
name, value = item
if type(name) is not str or type(value) is not str:
warn(WSGIWarning('header items must be strings'),
stacklevel=3)
if name.lower() == 'status':
warn(WSGIWarning('The status header is not supported due to '
'conflicts with the CGI spec.'),
stacklevel=3)
if exc_info is not None and not isinstance(exc_info, tuple):
warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)
headers = Headers(headers)
self.check_headers(headers)
return status_code, headers
def check_headers(self, headers):
etag = headers.get('etag')
if etag is not None:
if etag.startswith('w/'):
etag = etag[2:]
if not (etag[:1] == etag[-1:] == '"'):
warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
location = headers.get('location')
if location is not None:
if not urlparse(location).netloc:
warn(HTTPWarning('absolute URLs required for location header'),
stacklevel=4)
def check_iterator(self, app_iter):
if isinstance(app_iter, string_types):
warn(WSGIWarning('application returned string. Response will '
'be sent character by character to the client, '
'which will kill performance. Return a '
'list or iterable instead.'), stacklevel=3)
def __call__(self, *args, **kwargs):
if len(args) != 2:
warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
if kwargs:
warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
stacklevel=2)
environ, start_response = args
self.check_environ(environ)
environ['wsgi.input'] = InputStream(environ['wsgi.input'])
environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])
# hook our own file wrapper in so that applications will always
# iterate to the end and we can check the content length
environ['wsgi.file_wrapper'] = FileWrapper
headers_set = []
chunks = []
def checking_start_response(*args, **kwargs):
if len(args) not in (2, 3):
warn(WSGIWarning('Invalid number of arguments: %s, expected '
'2 or 3' % len(args)), stacklevel=2)
if kwargs:
warn(WSGIWarning('no keyword arguments allowed.'))
status, headers = args[:2]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
headers_set[:] = self.check_start_response(status, headers,
exc_info)
return GuardedWrite(start_response(status, headers, exc_info),
chunks)
app_iter = self.app(environ, checking_start_response)
self.check_iterator(app_iter)
return GuardedIterator(app_iter, headers_set, chunks)
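# Illustrative usage with the standard library's wsgiref server
# (hypothetical app; lint warnings land on stderr via warnings):
#   from wsgiref.simple_server import make_server
#   def app(environ, start_response):
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return ['hello\n']
#   make_server('', 8000, LintMiddleware(app)).serve_forever()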
|
megaumi/django
|
refs/heads/master
|
tests/inspectdb/tests.py
|
108
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import re
from unittest import skipUnless
from django.core.management import call_command
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils.six import PY3, StringIO
from .models import ColumnTypes
class InspectDBTestCase(TestCase):
def test_stealth_table_name_filter_option(self):
out = StringIO()
# Let's limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
error_message = "inspectdb has examined a table that should have been filtered out."
# contrib.contenttypes is one of the apps always installed when running
# the Django test suite, check that one of its tables hasn't been
# inspected
self.assertNotIn("class DjangoContentType(models.Model):", out.getvalue(), msg=error_message)
def make_field_type_asserter(self):
"""Call inspectdb and return a function to validate a field type in its output"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
def assertFieldType(name, definition):
out_def = re.search(r'^\s*%s = (models.*)$' % name, output, re.MULTILINE).groups()[0]
self.assertEqual(definition, out_def)
return assertFieldType
def test_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
# Inspecting Oracle DB doesn't produce correct results (#19884):
# - it gets max_length wrong: it returns a number of bytes.
# - it reports fields as blank=True when they aren't.
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('char_field', "models.CharField(max_length=10)")
assertFieldType('null_char_field', "models.CharField(max_length=10, blank=True, null=True)")
assertFieldType('comma_separated_int_field', "models.CharField(max_length=99)")
assertFieldType('date_field', "models.DateField()")
assertFieldType('date_time_field', "models.DateTimeField()")
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('email_field', "models.CharField(max_length=254)")
assertFieldType('file_field', "models.CharField(max_length=100)")
assertFieldType('file_path_field', "models.CharField(max_length=100)")
if connection.features.can_introspect_ip_address_field:
assertFieldType('gen_ip_adress_field', "models.GenericIPAddressField()")
elif (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('gen_ip_adress_field', "models.CharField(max_length=39)")
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('slug_field', "models.CharField(max_length=50)")
if not connection.features.interprets_empty_strings_as_nulls:
assertFieldType('text_field', "models.TextField()")
if connection.features.can_introspect_time_field:
assertFieldType('time_field', "models.TimeField()")
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('url_field', "models.CharField(max_length=200)")
def test_number_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
if not connection.features.can_introspect_autofield:
assertFieldType('id', "models.IntegerField(primary_key=True) # AutoField?")
if connection.features.can_introspect_big_integer_field:
assertFieldType('big_int_field', "models.BigIntegerField()")
else:
assertFieldType('big_int_field', "models.IntegerField()")
bool_field = ColumnTypes._meta.get_field('bool_field')
bool_field_type = connection.features.introspected_boolean_field_type(bool_field)
assertFieldType('bool_field', "models.{}()".format(bool_field_type))
null_bool_field = ColumnTypes._meta.get_field('null_bool_field')
null_bool_field_type = connection.features.introspected_boolean_field_type(null_bool_field)
if 'BooleanField' in null_bool_field_type:
assertFieldType('null_bool_field', "models.{}()".format(null_bool_field_type))
else:
if connection.features.can_introspect_null:
assertFieldType('null_bool_field', "models.{}(blank=True, null=True)".format(null_bool_field_type))
else:
assertFieldType('null_bool_field', "models.{}()".format(null_bool_field_type))
if connection.features.can_introspect_decimal_field:
assertFieldType('decimal_field', "models.DecimalField(max_digits=6, decimal_places=1)")
else: # Guessed arguments on SQLite, see #5014
assertFieldType('decimal_field', "models.DecimalField(max_digits=10, decimal_places=5) "
"# max_digits and decimal_places have been guessed, "
"as this database handles decimal fields as float")
assertFieldType('float_field', "models.FloatField()")
assertFieldType('int_field', "models.IntegerField()")
if connection.features.can_introspect_positive_integer_field:
assertFieldType('pos_int_field', "models.PositiveIntegerField()")
else:
assertFieldType('pos_int_field', "models.IntegerField()")
if connection.features.can_introspect_positive_integer_field:
if connection.features.can_introspect_small_integer_field:
assertFieldType('pos_small_int_field', "models.PositiveSmallIntegerField()")
else:
assertFieldType('pos_small_int_field', "models.PositiveIntegerField()")
else:
if connection.features.can_introspect_small_integer_field:
assertFieldType('pos_small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('pos_small_int_field', "models.IntegerField()")
if connection.features.can_introspect_small_integer_field:
assertFieldType('small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('small_int_field', "models.IntegerField()")
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_attribute_name_not_python_keyword(self):
out = StringIO()
        # Let's limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
error_message = "inspectdb generated an attribute name which is a python keyword"
# Recursive foreign keys should be set to 'self'
self.assertIn("parent = models.ForeignKey('self', models.DO_NOTHING)", output)
self.assertNotIn(
"from = models.ForeignKey(InspectdbPeople, models.DO_NOTHING)",
output,
msg=error_message,
)
# As InspectdbPeople model is defined after InspectdbMessage, it should be quoted
self.assertIn(
"from_field = models.ForeignKey('InspectdbPeople', models.DO_NOTHING, db_column='from_id')",
output,
)
self.assertIn(
"people_pk = models.ForeignKey(InspectdbPeople, models.DO_NOTHING, primary_key=True)",
output,
)
self.assertIn(
"people_unique = models.ForeignKey(InspectdbPeople, models.DO_NOTHING, unique=True)",
output,
)
def test_digits_column_name_introspection(self):
"""Introspection of column names consist/start with digits (#16536/#17676)"""
out = StringIO()
        # Let's limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
error_message = "inspectdb generated a model field name which is a number"
self.assertNotIn(" 123 = models.CharField", output, msg=error_message)
self.assertIn("number_123 = models.CharField", output)
error_message = "inspectdb generated a model field name which starts with a digit"
self.assertNotIn(" 4extra = models.CharField", output, msg=error_message)
self.assertIn("number_4extra = models.CharField", output)
self.assertNotIn(" 45extra = models.CharField", output, msg=error_message)
self.assertIn("number_45extra = models.CharField", output)
def test_special_column_name_introspection(self):
"""
Introspection of column names containing special characters,
unsuitable for Python identifiers
"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
base_name = 'Field' if not connection.features.uppercases_column_names else 'field'
self.assertIn("field = models.IntegerField()", output)
self.assertIn("field_field = models.IntegerField(db_column='%s_')" % base_name, output)
self.assertIn("field_field_0 = models.IntegerField(db_column='%s__')" % base_name, output)
self.assertIn("field_field_1 = models.IntegerField(db_column='__field')", output)
self.assertIn("prc_x = models.IntegerField(db_column='prc(%) x')", output)
if PY3:
# Python 3 allows non-ASCII identifiers
self.assertIn("tamaño = models.IntegerField()", output)
else:
self.assertIn("tama_o = models.IntegerField(db_column='tama\\xf1o')", output)
def test_table_name_introspection(self):
"""
Introspection of table names containing special characters,
unsuitable for Python identifiers
"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
self.assertIn("class InspectdbSpecialTableName(models.Model):", output)
def test_managed_models(self):
"""Test that by default the command generates models with `Meta.managed = False` (#14305)"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
self.longMessage = False
self.assertIn(" managed = False", output, msg='inspectdb should generate unmanaged models.')
def test_unique_together_meta(self):
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_uniquetogether'),
stdout=out)
output = out.getvalue()
self.assertIn(" unique_together = (('field1', 'field2'),)", output, msg='inspectdb should generate unique_together.')
@skipUnless(connection.vendor == 'sqlite',
"Only patched sqlite's DatabaseIntrospection.data_types_reverse for this test")
def test_custom_fields(self):
"""
Introspection of columns with a custom field (#21090)
"""
out = StringIO()
orig_data_types_reverse = connection.introspection.data_types_reverse
try:
connection.introspection.data_types_reverse = {
'text': 'myfields.TextField',
'bigint': 'BigIntegerField',
}
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
self.assertIn("text_field = myfields.TextField()", output)
self.assertIn("big_int_field = models.BigIntegerField()", output)
finally:
connection.introspection.data_types_reverse = orig_data_types_reverse
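# Illustrative note (assumption, not part of the original tests): outside the
# test suite the same introspection can be driven from the command line, e.g.
#
#     python manage.py inspectdb > models.py
#
# which emits unmanaged model stubs of the kind asserted above.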
|
Jgarcia-IAS/SAT
|
refs/heads/master
|
openerp/addons-extra/odoo-pruebas/odoo-server/addons/account/wizard/account_period_close.py
|
341
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_period_close(osv.osv_memory):
"""
close period
"""
_name = "account.period.close"
_description = "period close"
_columns = {
'sure': fields.boolean('Check this box'),
}
def data_save(self, cr, uid, ids, context=None):
"""
        This function closes the selected periods
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: account period close’s ID or list of IDs
"""
journal_period_pool = self.pool.get('account.journal.period')
period_pool = self.pool.get('account.period')
account_move_obj = self.pool.get('account.move')
mode = 'done'
for form in self.read(cr, uid, ids, context=context):
if form['sure']:
for id in context['active_ids']:
account_move_ids = account_move_obj.search(cr, uid, [('period_id', '=', id), ('state', '=', "draft")], context=context)
if account_move_ids:
raise osv.except_osv(_('Invalid Action!'), _('In order to close a period, you must first post related journal entries.'))
cr.execute('update account_journal_period set state=%s where period_id=%s', (mode, id))
cr.execute('update account_period set state=%s where id=%s', (mode, id))
self.invalidate_cache(cr, uid, context=context)
return {'type': 'ir.actions.act_window_close'}
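# Illustrative flow (not part of the original wizard): the wizard is opened on
# one or more periods ('active_ids' in the context), the user ticks 'sure', and
# data_save() then moves every related account.journal.period and account.period
# to state 'done', refusing to close any period that still has draft journal
# entries.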
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
edry/edx-platform
|
refs/heads/master
|
common/djangoapps/course_modes/models.py
|
10
|
"""
Add and create new modes for running courses on this particular LMS
"""
import pytz
from datetime import datetime
from django.core.exceptions import ValidationError
from django.db import models
from collections import namedtuple, defaultdict
from django.utils.translation import ugettext_lazy as _
from django.db.models import Q
from xmodule_django.models import CourseKeyField
Mode = namedtuple('Mode',
[
'slug',
'name',
'min_price',
'suggested_prices',
'currency',
'expiration_datetime',
'description',
'sku',
])
class CourseMode(models.Model):
"""
We would like to offer a course in a variety of modes.
"""
# the course that this mode is attached to
course_id = CourseKeyField(max_length=255, db_index=True, verbose_name=_("Course"))
# the reference to this mode that can be used by Enrollments to generate
# similar behavior for the same slug across courses
mode_slug = models.CharField(max_length=100, verbose_name=_("Mode"))
# The 'pretty' name that can be translated and displayed
mode_display_name = models.CharField(max_length=255, verbose_name=_("Display Name"))
# The price in USD that we would like to charge for this mode of the course
# Historical note: We used to allow users to choose from several prices, but later
# switched to using a single price. Although this field is called `min_price`, it is
# really just the price of the course.
min_price = models.IntegerField(default=0, verbose_name=_("Price"))
# the currency these prices are in, using lower case ISO currency codes
currency = models.CharField(default="usd", max_length=8)
# The datetime at which the course mode will expire.
# This is used to implement "upgrade" deadlines.
# For example, if there is a verified mode that expires on 1/1/2015,
# then users will be able to upgrade into the verified mode before that date.
# Once the date passes, users will no longer be able to enroll as verified.
expiration_datetime = models.DateTimeField(
default=None, null=True, blank=True,
verbose_name=_(u"Upgrade Deadline"),
help_text=_(
u"OPTIONAL: After this date/time, users will no longer be able to enroll in this mode. "
u"Leave this blank if users can enroll in this mode until enrollment closes for the course."
),
)
# DEPRECATED: the `expiration_date` field has been replaced by `expiration_datetime`
expiration_date = models.DateField(default=None, null=True, blank=True)
# DEPRECATED: the suggested prices for this mode
# We used to allow users to choose from a set of prices, but we now allow only
# a single price. This field has been deprecated by `min_price`
suggested_prices = models.CommaSeparatedIntegerField(max_length=255, blank=True, default='')
# optional description override
# WARNING: will not be localized
description = models.TextField(null=True, blank=True)
# Optional SKU for integration with the ecommerce service
sku = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name="SKU",
help_text=_(
u"OPTIONAL: This is the SKU (stock keeping unit) of this mode in the external ecommerce service. "
u"Leave this blank if the course has not yet been migrated to the ecommerce service."
)
)
HONOR = 'honor'
PROFESSIONAL = 'professional'
VERIFIED = "verified"
AUDIT = "audit"
NO_ID_PROFESSIONAL_MODE = "no-id-professional"
CREDIT_MODE = "credit"
DEFAULT_MODE = Mode(HONOR, _('Honor Code Certificate'), 0, '', 'usd', None, None, None)
DEFAULT_MODE_SLUG = HONOR
# Modes that allow a student to pursue a verified certificate
VERIFIED_MODES = [VERIFIED, PROFESSIONAL]
# Modes that allow a student to pursue a non-verified certificate
NON_VERIFIED_MODES = [HONOR, AUDIT, NO_ID_PROFESSIONAL_MODE]
# Modes that allow a student to earn credit with a university partner
CREDIT_MODES = [CREDIT_MODE]
class Meta(object):
""" meta attributes of this model """
unique_together = ('course_id', 'mode_slug', 'currency')
def clean(self):
"""
Object-level validation - implemented in this method so DRF serializers
catch errors in advance of a save() attempt.
"""
if self.is_professional_slug(self.mode_slug) and self.expiration_datetime is not None:
raise ValidationError(
_(u"Professional education modes are not allowed to have expiration_datetime set.")
)
def save(self, force_insert=False, force_update=False, using=None):
# Ensure currency is always lowercase.
self.clean() # ensure object-level validation is performed before we save.
self.currency = self.currency.lower()
super(CourseMode, self).save(force_insert, force_update, using)
@property
def slug(self):
"""
Returns mode_slug
NOTE (CCB): This is a silly hack needed because all of the class methods use tuples
with a property named slug instead of mode_slug.
"""
return self.mode_slug
@classmethod
def all_modes_for_courses(cls, course_id_list):
"""Find all modes for a list of course IDs, including expired modes.
Courses that do not have a course mode will be given a default mode.
Arguments:
course_id_list (list): List of `CourseKey`s
Returns:
dict mapping `CourseKey` to lists of `Mode`
"""
modes_by_course = defaultdict(list)
for mode in cls.objects.filter(course_id__in=course_id_list):
modes_by_course[mode.course_id].append(mode.to_tuple())
# Assign default modes if nothing available in the database
missing_courses = set(course_id_list) - set(modes_by_course.keys())
for course_id in missing_courses:
modes_by_course[course_id] = [cls.DEFAULT_MODE]
return modes_by_course
@classmethod
def all_and_unexpired_modes_for_courses(cls, course_id_list):
"""Retrieve course modes for a list of courses.
To reduce the number of database queries, this function
loads *all* course modes, then creates a second list
of unexpired course modes.
Arguments:
course_id_list (list of `CourseKey`): List of courses for which
to retrieve course modes.
Returns:
Tuple of `(all_course_modes, unexpired_course_modes)`, where
the first is a list of *all* `Mode`s (including expired ones),
and the second is a list of only unexpired `Mode`s.
"""
now = datetime.now(pytz.UTC)
all_modes = cls.all_modes_for_courses(course_id_list)
unexpired_modes = {
course_id: [
mode for mode in modes
if mode.expiration_datetime is None or mode.expiration_datetime >= now
]
for course_id, modes in all_modes.iteritems()
}
return (all_modes, unexpired_modes)
@classmethod
def paid_modes_for_course(cls, course_id):
"""
Returns a list of non-expired modes for a course ID that have a set minimum price.
If no modes have been set, returns an empty list.
Args:
course_id (CourseKey): The course to find paid modes for.
Returns:
A list of CourseModes with a minimum price.
"""
now = datetime.now(pytz.UTC)
found_course_modes = cls.objects.filter(
Q(course_id=course_id) &
Q(min_price__gt=0) &
(
Q(expiration_datetime__isnull=True) |
Q(expiration_datetime__gte=now)
)
)
return [mode.to_tuple() for mode in found_course_modes]
@classmethod
def modes_for_course(cls, course_id, include_expired=False, only_selectable=True):
"""
Returns a list of the non-expired modes for a given course id
If no modes have been set in the table, returns the default mode
Arguments:
course_id (CourseKey): Search for course modes for this course.
Keyword Arguments:
include_expired (bool): If True, expired course modes will be included
in the returned JSON data. If False, these modes will be omitted.
only_selectable (bool): If True, include only modes that are shown
to users on the track selection page. (Currently, "credit" modes
aren't available to users until they complete the course, so
they are hidden in track selection.)
Returns:
list of `Mode` tuples
"""
now = datetime.now(pytz.UTC)
found_course_modes = cls.objects.filter(course_id=course_id)
# Filter out expired course modes if include_expired is not set
if not include_expired:
found_course_modes = found_course_modes.filter(
Q(expiration_datetime__isnull=True) | Q(expiration_datetime__gte=now)
)
# Credit course modes are currently not shown on the track selection page;
# they're available only when students complete a course. For this reason,
# we exclude them from the list if we're only looking for selectable modes
# (e.g. on the track selection page or in the payment/verification flows).
if only_selectable:
found_course_modes = found_course_modes.exclude(mode_slug__in=cls.CREDIT_MODES)
modes = ([mode.to_tuple() for mode in found_course_modes])
if not modes:
modes = [cls.DEFAULT_MODE]
return modes
@classmethod
def modes_for_course_dict(cls, course_id, modes=None, **kwargs):
"""Returns the non-expired modes for a particular course.
Arguments:
course_id (CourseKey): Search for course modes for this course.
Keyword Arguments:
modes (list of `Mode`): If provided, search through this list
of course modes. This can be used to avoid an additional
database query if you have already loaded the modes list.
include_expired (bool): If True, expired course modes will be included
in the returned values. If False, these modes will be omitted.
only_selectable (bool): If True, include only modes that are shown
to users on the track selection page. (Currently, "credit" modes
aren't available to users until they complete the course, so
they are hidden in track selection.)
Returns:
dict: Keys are mode slugs, values are lists of `Mode` namedtuples.
"""
if modes is None:
modes = cls.modes_for_course(course_id, **kwargs)
return {mode.slug: mode for mode in modes}
@classmethod
def mode_for_course(cls, course_id, mode_slug, modes=None):
"""Returns the mode for the course corresponding to mode_slug.
Returns only non-expired modes.
If this particular mode is not set for the course, returns None
Arguments:
course_id (CourseKey): Search for course modes for this course.
mode_slug (str): Search for modes with this slug.
Keyword Arguments:
modes (list of `Mode`): If provided, search through this list
of course modes. This can be used to avoid an additional
database query if you have already loaded the modes list.
Returns:
Mode
"""
if modes is None:
modes = cls.modes_for_course(course_id)
matched = [m for m in modes if m.slug == mode_slug]
if matched:
return matched[0]
else:
return None
@classmethod
def verified_mode_for_course(cls, course_id, modes=None):
"""Find a verified mode for a particular course.
Since we have multiple modes that can go through the verify flow,
we want to be able to select the 'correct' verified mode for a given course.
Currently, we prefer to return the professional mode over the verified one
if both exist for the given course.
Arguments:
course_id (CourseKey): Search for course modes for this course.
Keyword Arguments:
modes (list of `Mode`): If provided, search through this list
of course modes. This can be used to avoid an additional
database query if you have already loaded the modes list.
Returns:
Mode or None
"""
modes_dict = cls.modes_for_course_dict(course_id, modes=modes)
verified_mode = modes_dict.get('verified', None)
professional_mode = modes_dict.get('professional', None)
# we prefer professional over verify
return professional_mode if professional_mode else verified_mode
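    # Illustrative example (not part of the original module): for a course that
    # defines both 'verified' and 'professional' modes, verified_mode_for_course()
    # returns the professional Mode tuple; with only 'verified' defined it returns
    # that mode; with neither, it returns None.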
@classmethod
def min_course_price_for_verified_for_currency(cls, course_id, currency): # pylint: disable=invalid-name
"""
        Returns the minimum price of the course in the appropriate currency over all the
course's *verified*, non-expired modes.
Assuming all verified courses have a minimum price of >0, this value should always
be >0.
If no verified mode is found, 0 is returned.
"""
modes = cls.modes_for_course(course_id)
for mode in modes:
if (mode.currency.lower() == currency.lower()) and (mode.slug == 'verified'):
return mode.min_price
return 0
@classmethod
def has_verified_mode(cls, course_mode_dict):
"""Check whether the modes for a course allow a student to pursue a verfied certificate.
Args:
course_mode_dict (dictionary mapping course mode slugs to Modes)
Returns:
bool: True iff the course modes contain a verified track.
"""
for mode in cls.VERIFIED_MODES:
if mode in course_mode_dict:
return True
return False
@classmethod
def has_professional_mode(cls, modes_dict):
"""
        Check whether the course modes include professional or no-id-professional.
Args:
modes_dict (dict): course modes.
Returns:
bool
"""
return cls.PROFESSIONAL in modes_dict or cls.NO_ID_PROFESSIONAL_MODE in modes_dict
@classmethod
def is_professional_mode(cls, course_mode_tuple):
"""
        Check whether the given course mode tuple is a professional mode.
Args:
course_mode_tuple (tuple) : course mode tuple
Returns:
bool
"""
return course_mode_tuple.slug in [cls.PROFESSIONAL, cls.NO_ID_PROFESSIONAL_MODE] if course_mode_tuple else False
@classmethod
def is_professional_slug(cls, slug):
"""checking slug is professional
Args:
slug (str) : course mode string
Return:
bool
"""
return slug in [cls.PROFESSIONAL, cls.NO_ID_PROFESSIONAL_MODE]
@classmethod
def is_verified_mode(cls, course_mode_tuple):
"""Check whether the given modes is_verified or not.
Args:
course_mode_tuple(Mode): Mode tuple
Returns:
            bool: True iff the course mode is verified, else False.
"""
return course_mode_tuple.slug in cls.VERIFIED_MODES
@classmethod
def is_verified_slug(cls, mode_slug):
"""Check whether the given mode_slug is_verified or not.
Args:
mode_slug(str): Mode Slug
Returns:
            bool: True iff the course mode slug is verified, else False.
"""
return mode_slug in cls.VERIFIED_MODES
@classmethod
def is_credit_mode(cls, course_mode_tuple):
"""Check whether this is a credit mode.
Students enrolled in a credit mode are eligible to
receive university credit upon completion of a course.
"""
return course_mode_tuple.slug in cls.CREDIT_MODES
@classmethod
def has_payment_options(cls, course_id):
"""Determines if there is any mode that has payment options
        Check the modes for the course and see if any of them have a minimum price or
        suggested prices. Returns True if any course mode has a payment option.
        Args:
            course_id (CourseKey): The course to check.
Returns:
True if any course mode has a payment option.
"""
for mode in cls.modes_for_course(course_id):
if mode.min_price > 0 or mode.suggested_prices != '':
return True
return False
@classmethod
def can_auto_enroll(cls, course_id, modes_dict=None):
"""Check whether students should be auto-enrolled in the course.
If a course is behind a paywall (e.g. professional ed or white-label),
then users should NOT be auto-enrolled. Instead, the user will
be enrolled when he/she completes the payment flow.
Otherwise, users can be enrolled in the default mode "honor"
with the option to upgrade later.
Args:
course_id (CourseKey): The course to check.
Keyword Args:
modes_dict (dict): If provided, use these course modes.
Useful for avoiding unnecessary database queries.
Returns:
bool
"""
if modes_dict is None:
modes_dict = cls.modes_for_course_dict(course_id)
# Professional and no-id-professional mode courses are always behind a paywall
if cls.has_professional_mode(modes_dict):
return False
# White-label uses course mode honor with a price
# to indicate that the course is behind a paywall.
if cls.is_white_label(course_id, modes_dict=modes_dict):
return False
# Check that the default mode is available.
return cls.HONOR in modes_dict
@classmethod
def is_white_label(cls, course_id, modes_dict=None):
"""Check whether a course is a "white label" (paid) course.
By convention, white label courses have a course mode slug "honor"
and a price.
Args:
course_id (CourseKey): The course to check.
Keyword Args:
modes_dict (dict): If provided, use these course modes.
Useful for avoiding unnecessary database queries.
Returns:
bool
"""
if modes_dict is None:
modes_dict = cls.modes_for_course_dict(course_id)
# White-label uses course mode honor with a price
# to indicate that the course is behind a paywall.
if cls.HONOR in modes_dict and len(modes_dict) == 1:
if modes_dict["honor"].min_price > 0 or modes_dict["honor"].suggested_prices != '':
return True
return False
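    # Example (illustrative): a course whose only mode is 'honor' with
    # min_price=49 counts as white-label here, so can_auto_enroll() returns
    # False for it and users go through the payment flow instead.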
@classmethod
def min_course_price_for_currency(cls, course_id, currency): # pylint: disable=invalid-name
"""
Returns the minimum price of the course in the appropriate currency over all the course's
non-expired modes.
If there is no mode found, will return the price of DEFAULT_MODE, which is 0
"""
modes = cls.modes_for_course(course_id)
return min(mode.min_price for mode in modes if mode.currency.lower() == currency.lower())
@classmethod
def enrollment_mode_display(cls, mode, verification_status):
""" Select appropriate display strings and CSS classes.
Uses mode and verification status to select appropriate display strings and CSS classes
for certificate display.
Args:
mode (str): enrollment mode.
verification_status (str) : verification status of student
Returns:
dictionary:
"""
# import inside the function to avoid the circular import
from student.helpers import (
VERIFY_STATUS_NEED_TO_VERIFY,
VERIFY_STATUS_SUBMITTED,
VERIFY_STATUS_APPROVED
)
show_image = False
image_alt = ''
if mode == cls.VERIFIED:
if verification_status in [VERIFY_STATUS_NEED_TO_VERIFY, VERIFY_STATUS_SUBMITTED]:
enrollment_title = _("Your verification is pending")
enrollment_value = _("Verified: Pending Verification")
show_image = True
image_alt = _("ID verification pending")
elif verification_status == VERIFY_STATUS_APPROVED:
enrollment_title = _("You're enrolled as a verified student")
enrollment_value = _("Verified")
show_image = True
image_alt = _("ID Verified Ribbon/Badge")
else:
enrollment_title = _("You're enrolled as an honor code student")
enrollment_value = _("Honor Code")
elif mode == cls.HONOR:
enrollment_title = _("You're enrolled as an honor code student")
enrollment_value = _("Honor Code")
elif mode == cls.AUDIT:
enrollment_title = _("You're auditing this course")
enrollment_value = _("Auditing")
elif mode in [cls.PROFESSIONAL, cls.NO_ID_PROFESSIONAL_MODE]:
enrollment_title = _("You're enrolled as a professional education student")
enrollment_value = _("Professional Ed")
else:
enrollment_title = ''
enrollment_value = ''
return {
'enrollment_title': unicode(enrollment_title),
'enrollment_value': unicode(enrollment_value),
'show_image': show_image,
'image_alt': unicode(image_alt),
'display_mode': cls._enrollment_mode_display(mode, verification_status)
}
@staticmethod
def _enrollment_mode_display(enrollment_mode, verification_status):
"""Checking enrollment mode and status and returns the display mode
Args:
enrollment_mode (str): enrollment mode.
verification_status (str) : verification status of student
Returns:
display_mode (str) : display mode for certs
"""
# import inside the function to avoid the circular import
from student.helpers import (
VERIFY_STATUS_NEED_TO_VERIFY,
VERIFY_STATUS_SUBMITTED,
VERIFY_STATUS_APPROVED
)
if enrollment_mode == CourseMode.VERIFIED:
if verification_status in [VERIFY_STATUS_NEED_TO_VERIFY, VERIFY_STATUS_SUBMITTED, VERIFY_STATUS_APPROVED]:
display_mode = "verified"
else:
display_mode = "honor"
elif enrollment_mode in [CourseMode.PROFESSIONAL, CourseMode.NO_ID_PROFESSIONAL_MODE]:
display_mode = "professional"
else:
display_mode = enrollment_mode
return display_mode
def to_tuple(self):
"""
        Takes a mode model and turns it into a Mode namedtuple.
        Returns:
            A 'Mode' namedtuple with all the same attributes as the model.
"""
return Mode(
self.mode_slug,
self.mode_display_name,
self.min_price,
self.suggested_prices,
self.currency,
self.expiration_datetime,
self.description,
self.sku
)
def __unicode__(self):
return u"{} : {}, min={}".format(
self.course_id, self.mode_slug, self.min_price
)
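# Minimal usage sketch (illustrative only; `course_key` is a hypothetical
# CourseKey, and this helper is not part of the original module):
def _course_mode_usage_sketch(course_key):
    modes = CourseMode.modes_for_course_dict(course_key)
    if CourseMode.has_verified_mode(modes):
        # Prefers 'professional' over 'verified' when both exist (see above).
        verified = CourseMode.verified_mode_for_course(course_key)
        return verified.min_price
    return 0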
class CourseModesArchive(models.Model):
"""
    Store the values of course_mode that a course had in the past. We decided on having
    a separate model because there is a uniqueness constraint on the (course_mode, course_id)
    field pair in CourseModes. Having a separate table allows us to have an audit trail of any
    changes, such as course price changes.
"""
# the course that this mode is attached to
course_id = CourseKeyField(max_length=255, db_index=True)
# the reference to this mode that can be used by Enrollments to generate
# similar behavior for the same slug across courses
mode_slug = models.CharField(max_length=100)
# The 'pretty' name that can be translated and displayed
mode_display_name = models.CharField(max_length=255)
# minimum price in USD that we would like to charge for this mode of the course
min_price = models.IntegerField(default=0)
# the suggested prices for this mode
suggested_prices = models.CommaSeparatedIntegerField(max_length=255, blank=True, default='')
# the currency these prices are in, using lower case ISO currency codes
currency = models.CharField(default="usd", max_length=8)
# turn this mode off after the given expiration date
expiration_date = models.DateField(default=None, null=True, blank=True)
expiration_datetime = models.DateTimeField(default=None, null=True, blank=True)
|
banditlev/meangarden
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py
|
1869
|
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies such large structures that the small copy overhead ends up
taking seconds in a project the size of Chromium."""
class Error(Exception):
pass
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
"""Deep copy operation on gyp objects such as strings, ints, dicts
and lists. More than twice as fast as copy.deepcopy but much less
generic."""
try:
return _deepcopy_dispatch[type(x)](x)
except KeyError:
    raise Error(('Unsupported type %s for deepcopy. Use copy.deepcopy '
                 'or expand simple_copy support.') % type(x))
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
return x
for x in (type(None), int, long, float,
bool, str, unicode, type):
d[x] = _deepcopy_atomic
def _deepcopy_list(x):
return [deepcopy(a) for a in x]
d[list] = _deepcopy_list
def _deepcopy_dict(x):
y = {}
for key, value in x.iteritems():
y[deepcopy(key)] = deepcopy(value)
return y
d[dict] = _deepcopy_dict
del d
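# Illustrative usage (not part of the original module): deepcopy() handles
# exactly the shapes gyp produces -- nested dicts and lists of atomic values.
def _demo_simple_copy():
  src = {'targets': [{'target_name': 'a', 'defines': ['FOO=1']}]}
  dst = deepcopy(src)
  assert dst == src
  assert dst is not src and dst['targets'] is not src['targets']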
|
be-cloud-be/horizon-addons
|
refs/heads/9.0
|
server/openerp/modules/db.py
|
49
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import openerp.modules
import logging
_logger = logging.getLogger(__name__)
def is_initialized(cr):
""" Check if a database has been initialized for the ORM.
The database can be initialized with the 'initialize' function below.
"""
cr.execute("SELECT relname FROM pg_class WHERE relkind='r' AND relname='ir_module_module'")
return len(cr.fetchall()) > 0
def initialize(cr):
""" Initialize a database with for the ORM.
This executes base/base.sql, creates the ir_module_categories (taken
from each module descriptor file), and creates the ir_module_module
and ir_model_data entries.
"""
f = openerp.modules.get_module_resource('base', 'base.sql')
if not f:
m = "File not found: 'base.sql' (provided by module 'base')."
_logger.critical(m)
raise IOError(m)
base_sql_file = openerp.tools.misc.file_open(f)
try:
cr.execute(base_sql_file.read())
cr.commit()
finally:
base_sql_file.close()
for i in openerp.modules.get_modules():
mod_path = openerp.modules.get_module_path(i)
if not mod_path:
continue
# This will raise an exception if no/unreadable descriptor file.
info = openerp.modules.load_information_from_description_file(i)
if not info:
continue
categories = info['category'].split('/')
category_id = create_categories(cr, categories)
if info['installable']:
state = 'uninstalled'
else:
state = 'uninstallable'
cr.execute('INSERT INTO ir_module_module \
(author, website, name, shortdesc, description, \
category_id, auto_install, state, web, license, application, icon, sequence, summary) \
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING id', (
info['author'],
info['website'], i, info['name'],
info['description'], category_id,
info['auto_install'], state,
info['web'],
info['license'],
info['application'], info['icon'],
info['sequence'], info['summary']))
id = cr.fetchone()[0]
cr.execute('INSERT INTO ir_model_data \
(name,model,module, res_id, noupdate) VALUES (%s,%s,%s,%s,%s)', (
'module_'+i, 'ir.module.module', 'base', id, True))
dependencies = info['depends']
for d in dependencies:
cr.execute('INSERT INTO ir_module_module_dependency \
(module_id,name) VALUES (%s, %s)', (id, d))
# Install recursively all auto-installing modules
while True:
cr.execute("""SELECT m.name FROM ir_module_module m WHERE m.auto_install AND state != 'to install'
AND NOT EXISTS (
SELECT 1 FROM ir_module_module_dependency d JOIN ir_module_module mdep ON (d.name = mdep.name)
WHERE d.module_id = m.id AND mdep.state != 'to install'
)""")
to_auto_install = [x[0] for x in cr.fetchall()]
if not to_auto_install: break
cr.execute("""UPDATE ir_module_module SET state='to install' WHERE name in %s""", (tuple(to_auto_install),))
cr.commit()
def create_categories(cr, categories):
""" Create the ir_module_category entries for some categories.
categories is a list of strings forming a single category with its
parent categories, like ['Grand Parent', 'Parent', 'Child'].
Return the database id of the (last) category.
"""
p_id = None
category = []
while categories:
category.append(categories[0])
xml_id = 'module_category_' + ('_'.join(map(lambda x: x.lower(), category))).replace('&', 'and').replace(' ', '_')
# search via xml_id (because some categories are renamed)
cr.execute("SELECT res_id FROM ir_model_data WHERE name=%s AND module=%s AND model=%s",
(xml_id, "base", "ir.module.category"))
c_id = cr.fetchone()
if not c_id:
cr.execute('INSERT INTO ir_module_category \
(name, parent_id) \
VALUES (%s, %s) RETURNING id', (categories[0], p_id))
c_id = cr.fetchone()[0]
cr.execute('INSERT INTO ir_model_data (module, name, res_id, model) \
VALUES (%s, %s, %s, %s)', ('base', xml_id, c_id, 'ir.module.category'))
else:
c_id = c_id[0]
p_id = c_id
categories = categories[1:]
return p_id
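# Worked example (illustrative, not part of the original module): a module whose
# descriptor declares category 'Accounting/Payroll' leads initialize() to call
# create_categories(cr, ['Accounting', 'Payroll']), which finds or creates the
# xml_ids 'module_category_accounting' and 'module_category_accounting_payroll'
# and returns the database id of the child category.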
def has_unaccent(cr):
""" Test if the database has an unaccent function.
    The unaccent function is supposed to be provided by the PostgreSQL unaccent
    contrib module, but any similar function will be picked up by OpenERP.
"""
cr.execute("SELECT proname FROM pg_proc WHERE proname='unaccent'")
return len(cr.fetchall()) > 0
|
UCC-Organism/crosswalk-odroid-ucc
|
refs/heads/master
|
scripts/gyp/finalize_apk.py
|
1
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=F0401
"""Signs and zipaligns APK.
"""
import optparse
import os
import shutil
import sys
from util import build_utils
def SignApk(keystore_path, unsigned_path, signed_path, alias, code):
intermediate_path = unsigned_path + '.copy'
shutil.copy(unsigned_path, intermediate_path)
sign_cmd = [
'jarsigner',
'-sigalg', 'MD5withRSA',
'-digestalg', 'SHA1',
'-keystore', keystore_path,
'-storepass', code,
intermediate_path, alias
]
build_utils.CheckCallDie(sign_cmd)
shutil.move(intermediate_path, signed_path)
def AlignApk(android_sdk_root, unaligned_path, final_path):
align_cmd = [
os.path.join(android_sdk_root, 'build-tools', '20.0.0', 'zipalign'),
'-f', '4', # 4 bytes
unaligned_path,
final_path
]
build_utils.CheckCallDie(align_cmd)
def main():
parser = optparse.OptionParser()
parser.add_option('--android-sdk-root', help='Android sdk root directory.')
parser.add_option('--unsigned-apk-path', help='Path to input unsigned APK.')
parser.add_option('--final-apk-path',
help='Path to output signed and aligned APK.')
parser.add_option('--keystore-path', help='Path to keystore for signing.')
parser.add_option('--keystore-alias', help='Alias name of keystore.')
parser.add_option('--keystore-passcode', help='Passcode of keystore.')
parser.add_option('--stamp', help='Path to touch on success.')
options, _ = parser.parse_args()
signed_apk_path = options.unsigned_apk_path + '.signed.apk'
SignApk(options.keystore_path, options.unsigned_apk_path,
signed_apk_path, options.keystore_alias, options.keystore_passcode)
AlignApk(options.android_sdk_root, signed_apk_path, options.final_apk_path)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main())
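# Illustrative invocation (paths and keystore values are assumptions, not taken
# from the original script); the flags match the optparse options in main():
#
#   finalize_apk.py --android-sdk-root=$ANDROID_HOME \
#       --unsigned-apk-path=out/App-unsigned.apk \
#       --final-apk-path=out/App.apk \
#       --keystore-path=debug.keystore \
#       --keystore-alias=androiddebugkey \
#       --keystore-passcode=android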
|
rooshilp/CMPUT410Lab4
|
refs/heads/master
|
env-lab4/lib/python2.7/site-packages/werkzeug/contrib/sessions.py
|
295
|
# -*- coding: utf-8 -*-
r"""
werkzeug.contrib.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains some helper classes that help one to add session
support to a python WSGI application. For full client-side session
storage see :mod:`~werkzeug.contrib.securecookie` which implements a
secure, client-side session storage.
Application Integration
=======================
::
from werkzeug.contrib.sessions import SessionMiddleware, \
FilesystemSessionStore
app = SessionMiddleware(app, FilesystemSessionStore())
The current session will then appear in the WSGI environment as
`werkzeug.session`. It's recommended not to use the middleware but the
stores directly in the application; for very simple scripts, however, a
session middleware can be sufficient.
This module does not implement methods or ways to check if a session is
expired. That should be done by a cronjob and is storage specific. For
example, to prune unused filesystem sessions one could check the modified
time of the files. If sessions are stored in the database, the new()
method should add an expiration timestamp for the session.
For better flexibility it's recommended to not use the middleware but the
store and session object directly in the application dispatching::
session_store = FilesystemSessionStore()
def application(environ, start_response):
request = Request(environ)
sid = request.cookies.get('cookie_name')
if sid is None:
request.session = session_store.new()
else:
request.session = session_store.get(sid)
response = get_the_response_object(request)
if request.session.should_save:
session_store.save(request.session)
response.set_cookie('cookie_name', request.session.sid)
return response(environ, start_response)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import tempfile
from os import path
from time import time
from random import random
from hashlib import sha1
from pickle import dump, load, HIGHEST_PROTOCOL
from werkzeug.datastructures import CallbackDict
from werkzeug.utils import dump_cookie, parse_cookie
from werkzeug.wsgi import ClosingIterator
from werkzeug.posixemulation import rename
from werkzeug._compat import PY2, text_type
_sha1_re = re.compile(r'^[a-f0-9]{40}$')
def _urandom():
if hasattr(os, 'urandom'):
return os.urandom(30)
return text_type(random()).encode('ascii')
def generate_key(salt=None):
if salt is None:
salt = repr(salt).encode('ascii')
return sha1(b''.join([
salt,
str(time()).encode('ascii'),
_urandom()
])).hexdigest()
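# Illustrative note (not part of the original module): the generated key is a
# 40-character hexadecimal SHA-1 digest, which is exactly the format that
# SessionStore.is_valid_key() accepts via _sha1_re, e.g.:
#
#     key = generate_key()
#     assert _sha1_re.match(key) is not None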
class ModificationTrackingDict(CallbackDict):
__slots__ = ('modified',)
def __init__(self, *args, **kwargs):
def on_update(self):
self.modified = True
self.modified = False
CallbackDict.__init__(self, on_update=on_update)
dict.update(self, *args, **kwargs)
def copy(self):
"""Create a flat copy of the dict."""
missing = object()
result = object.__new__(self.__class__)
for name in self.__slots__:
val = getattr(self, name, missing)
if val is not missing:
setattr(result, name, val)
return result
def __copy__(self):
return self.copy()
class Session(ModificationTrackingDict):
"""Subclass of a dict that keeps track of direct object changes. Changes
in mutable structures are not tracked, for those you have to set
`modified` to `True` by hand.
"""
__slots__ = ModificationTrackingDict.__slots__ + ('sid', 'new')
def __init__(self, data, sid, new=False):
ModificationTrackingDict.__init__(self, data)
self.sid = sid
self.new = new
def __repr__(self):
return '<%s %s%s>' % (
self.__class__.__name__,
dict.__repr__(self),
self.should_save and '*' or ''
)
@property
def should_save(self):
"""True if the session should be saved.
.. versionchanged:: 0.6
By default the session is now only saved if the session is
modified, not if it is new like it was before.
"""
return self.modified
class SessionStore(object):
"""Baseclass for all session stores. The Werkzeug contrib module does not
    implement any useful stores besides the filesystem store; application
developers are encouraged to create their own stores.
:param session_class: The session class to use. Defaults to
:class:`Session`.
"""
def __init__(self, session_class=None):
if session_class is None:
session_class = Session
self.session_class = session_class
def is_valid_key(self, key):
"""Check if a key has the correct format."""
return _sha1_re.match(key) is not None
def generate_key(self, salt=None):
"""Simple function that generates a new session key."""
return generate_key(salt)
def new(self):
"""Generate a new session."""
return self.session_class({}, self.generate_key(), True)
def save(self, session):
"""Save a session."""
def save_if_modified(self, session):
"""Save if a session class wants an update."""
if session.should_save:
self.save(session)
def delete(self, session):
"""Delete a session."""
def get(self, sid):
"""Get a session for this sid or a new session object. This method
has to check if the session key is valid and create a new session if
that wasn't the case.
"""
return self.session_class({}, sid, True)
#: used for temporary files by the filesystem session store
_fs_transaction_suffix = '.__wz_sess'
class FilesystemSessionStore(SessionStore):
"""Simple example session store that saves sessions on the filesystem.
This store works best on POSIX systems and Windows Vista / Windows
Server 2008 and newer.
.. versionchanged:: 0.6
`renew_missing` was added. Previously this was considered `True`,
now the default changed to `False` and it can be explicitly
deactivated.
:param path: the path to the folder used for storing the sessions.
If not provided the default temporary directory is used.
:param filename_template: a string template used to give the session
a filename. ``%s`` is replaced with the
session id.
:param session_class: The session class to use. Defaults to
:class:`Session`.
:param renew_missing: set to `True` if you want the store to
give the user a new sid if the session was
not yet saved.
"""
def __init__(self, path=None, filename_template='werkzeug_%s.sess',
session_class=None, renew_missing=False, mode=0o644):
SessionStore.__init__(self, session_class)
if path is None:
path = tempfile.gettempdir()
self.path = path
if isinstance(filename_template, text_type) and PY2:
filename_template = filename_template.encode(
sys.getfilesystemencoding() or 'utf-8')
assert not filename_template.endswith(_fs_transaction_suffix), \
'filename templates may not end with %s' % _fs_transaction_suffix
self.filename_template = filename_template
self.renew_missing = renew_missing
self.mode = mode
def get_session_filename(self, sid):
# out of the box, this should be a strict ASCII subset but
# you might reconfigure the session object to have a more
# arbitrary string.
if isinstance(sid, text_type) and PY2:
sid = sid.encode(sys.getfilesystemencoding() or 'utf-8')
return path.join(self.path, self.filename_template % sid)
def save(self, session):
fn = self.get_session_filename(session.sid)
fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix,
dir=self.path)
f = os.fdopen(fd, 'wb')
try:
dump(dict(session), f, HIGHEST_PROTOCOL)
finally:
f.close()
try:
rename(tmp, fn)
os.chmod(fn, self.mode)
except (IOError, OSError):
pass
def delete(self, session):
fn = self.get_session_filename(session.sid)
try:
os.unlink(fn)
except OSError:
pass
def get(self, sid):
if not self.is_valid_key(sid):
return self.new()
try:
f = open(self.get_session_filename(sid), 'rb')
except IOError:
if self.renew_missing:
return self.new()
data = {}
else:
try:
try:
data = load(f)
except Exception:
data = {}
finally:
f.close()
return self.session_class(data, sid, False)
def list(self):
"""Lists all sessions in the store.
.. versionadded:: 0.6
"""
before, after = self.filename_template.split('%s', 1)
filename_re = re.compile(r'%s(.{5,})%s$' % (re.escape(before),
re.escape(after)))
result = []
for filename in os.listdir(self.path):
#: this is a session that is still being saved.
if filename.endswith(_fs_transaction_suffix):
continue
match = filename_re.match(filename)
if match is not None:
result.append(match.group(1))
return result
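# Minimal round-trip sketch (illustrative only, not part of the original
# module); it writes to a throwaway temporary directory so no real session
# state is touched.
def _demo_filesystem_store():
    store = FilesystemSessionStore(path=tempfile.mkdtemp())
    session = store.new()
    session['visits'] = 1          # marks the session as modified
    store.save(session)            # pickled atomically via mkstemp + rename
    assert store.get(session.sid)['visits'] == 1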
class SessionMiddleware(object):
"""A simple middleware that puts the session object of a store provided
into the WSGI environ. It automatically sets cookies and restores
sessions.
However a middleware is not the preferred solution because it won't be as
fast as sessions managed by the application itself and will put a key into
the WSGI environment only relevant for the application which is against
the concept of WSGI.
The cookie parameters are the same as for the :func:`~dump_cookie`
function just prefixed with ``cookie_``. Additionally `max_age` is
called `cookie_age` and not `cookie_max_age` because of backwards
compatibility.
"""
def __init__(self, app, store, cookie_name='session_id',
cookie_age=None, cookie_expires=None, cookie_path='/',
cookie_domain=None, cookie_secure=None,
cookie_httponly=False, environ_key='werkzeug.session'):
self.app = app
self.store = store
self.cookie_name = cookie_name
self.cookie_age = cookie_age
self.cookie_expires = cookie_expires
self.cookie_path = cookie_path
self.cookie_domain = cookie_domain
self.cookie_secure = cookie_secure
self.cookie_httponly = cookie_httponly
self.environ_key = environ_key
def __call__(self, environ, start_response):
cookie = parse_cookie(environ.get('HTTP_COOKIE', ''))
sid = cookie.get(self.cookie_name, None)
if sid is None:
session = self.store.new()
else:
session = self.store.get(sid)
environ[self.environ_key] = session
def injecting_start_response(status, headers, exc_info=None):
if session.should_save:
self.store.save(session)
headers.append(('Set-Cookie', dump_cookie(self.cookie_name,
session.sid, self.cookie_age,
self.cookie_expires, self.cookie_path,
self.cookie_domain, self.cookie_secure,
self.cookie_httponly)))
return start_response(status, headers, exc_info)
return ClosingIterator(self.app(environ, injecting_start_response),
lambda: self.store.save_if_modified(session))
|
FRidh/Sea
|
refs/heads/master
|
Sea/actions/__init__.py
|
2
|
"""
A module with actions related to :mod:`Sea.adapter` instances.
"""
import factory
import document
import system
import component
import connection
|
8l/beri
|
refs/heads/master
|
cheritest/trunk/tests/alu/test_mul_div_loop.py
|
1
|
#-
# Copyright (c) 2011 Jonathan Woodruff
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
class test_mul_div_loop(BaseBERITestCase):
def test_a0(self):
'''Test that result of multiply and divide loop test is correct.'''
self.assertRegisterEqual(self.MIPS.s7, 0x0000000000058980, "Final Multiply Result is Incorrect")
self.assertRegisterEqual(self.MIPS.t8, 0x0000000000009D80, "Final Divide Result is Incorrect")
# mul result: 0000000000058980
# div result: 0000000000009D80
|
0k/odoo
|
refs/heads/master
|
addons/base_gengo/wizard/__init__.py
|
434
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_gengo_translations
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
srinia6/RackHD
|
refs/heads/master
|
test/tests/rackhd11/test_rackhd11_api_workflows.py
|
12
|
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import fit_path # NOQA: unused import
import os
import sys
import subprocess
import fit_common
# Local methods
MON_NODES = fit_common.node_select()
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(api_1_1=True)
class rackhd11_api_workflows(fit_common.unittest.TestCase):
def test_api_11_workflows(self):
api_data = fit_common.rackhdapi("/api/1.1/workflows")
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for item in api_data['json']:
# check required fields
for subitem in ['id', 'name', 'updatedAt', 'createdAt']:
if fit_common.VERBOSITY >= 2:
print "Checking:", item['name'], subitem
self.assertGreater(len(item[subitem]), 0, subitem + ' field error')
def test_api_11_workflows_tasks(self):
data_payload = {
"friendlyName": "Shell commands hwtest",
"injectableName": "Task.Linux.Commands.Hwtest",
"implementsTask": "Task.Base.Linux.Commands",
"options": {
"commands": [
{"command": "sudo /opt/test/hwtest",
"format": "json", "source": "hwtest"}
]
},
"properties": {"type": "hwtestRun"}
}
api_data = fit_common.rackhdapi("/api/1.1/workflows/tasks", action="put",
payload=data_payload)
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
def test_api_11_workflows_tasks_library(self):
api_data = fit_common.rackhdapi("/api/1.1/workflows/tasks/library")
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for item in api_data['json']:
if fit_common.VERBOSITY >= 2:
print "Checking:", item["friendlyName"]
self.assertGreater(len(item['injectableName']), 0, 'injectableName field error')
self.assertGreater(len(item['friendlyName']), 0, 'friendlyName field error')
def test_api_11_workflows_tasks_library_ID(self):
api_data = fit_common.rackhdapi("/api/1.1/workflows/library/*")
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
for item in api_data['json']:
if fit_common.VERBOSITY >= 2:
print "Checking:", item["friendlyName"]
self.assertGreater(len(item['injectableName']), 0, 'injectableName field error')
self.assertGreater(len(item['friendlyName']), 0, 'friendlyName field error')
self.assertGreater(len(item['tasks']), 0, 'tasks field error')
def test_api_11_workflows_put_ipmi(self):
data_payload = \
{
"friendlyName": "TestIPMI",
"injectableName": 'Graph.Obm.Ipmi.CreateSettings.Test',
"options": {
"obm-ipmi-task":{
"user": "rackhd",
"password": "rackhd"
}
},
"tasks": [
{
"label": "obm-ipmi-task",
"taskName": "Task.Obm.Ipmi.CreateSettings"
}
]
}
api_data = fit_common.rackhdapi("/api/1.1/workflows", action="put", payload=data_payload)
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
api_data = fit_common.rackhdapi("/api/1.1/workflows/library/" + 'Graph.Obm.Ipmi.CreateSettings.Test')
self.assertEqual(api_data['status'], 200, 'Incorrect HTTP return code, expected 200, got:' + str(api_data['status']))
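# Note (illustrative, not part of the original test): test_api_11_workflows_put_ipmi
# exercises a round trip -- PUT a new graph definition to /api/1.1/workflows, then
# GET it back from /api/1.1/workflows/library/<injectableName>, expecting HTTP 200
# both times.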
if __name__ == '__main__':
fit_common.unittest.main()
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/postfix/while/function.py
|
39
|
def f(a):
a.while<caret>
|
tfroehlich82/erpnext
|
refs/heads/develop
|
erpnext/demo/domains.py
|
7
|
from __future__ import unicode_literals
data = {
'Manufacturing': {
'company_name': 'Wind Power LLC'
},
'Retail': {
'company_name': 'Annapurna Dairy Shop',
},
'Distribution': {
'company_name': 'Soltice Hardware',
},
'Services': {
'company_name': 'Acme Consulting'
},
'Education': {
'company_name': 'Whitmore College'
},
'Healthcare': {
'company_name': 'ABC Hospital Ltd.'
},
'Agriculture': {
'company_name': 'Schrute Farms'
},
'Non Profit': {
'company_name': 'Erpnext Foundation'
}
}
|
kimjaejoong/nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/294_add_service_heartbeat.py
|
72
|
# Copyright (c) 2015 Wind River Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Column, DateTime
BASE_TABLE_NAME = 'services'
NEW_COLUMN_NAME = 'last_seen_up'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
new_column = Column(NEW_COLUMN_NAME, DateTime, nullable=True)
if not hasattr(table.c, NEW_COLUMN_NAME):
table.create_column(new_column)
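# Note (illustrative, not part of the original migration): the hasattr() guard
# makes upgrade() idempotent -- re-running it on a database where 'last_seen_up'
# already exists on 'services' or 'shadow_services' is a no-op.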
|
DevangS/CoralNet
|
refs/heads/master
|
images/migrations/0023_change_percentage_annotation_area_format.py
|
1
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
print (
"-----\n"
"This migration changes the format of percentage annotation areas,\n"
"so that they're semicolon-separated instead of comma-separated.\n"
"The comma-separated format is only for pixel annotation areas now.\n"
"...\n"
)
num_annotation_areas_changed = 0
# We'll assume that only sources (and not images) have comma-separated
# percentage annotation areas. Semicolon-separated percentages were
# introduced at the same time that percentages for image-level annotation
# areas were introduced, so this should be a safe assumption.
all_sources = orm.Source.objects.all()
for source in all_sources:
current_annoarea = source.image_annotation_area
if current_annoarea.find(',') != -1:
source.image_annotation_area = current_annoarea.replace(',',';')
source.save()
num_annotation_areas_changed += 1
print (
"{0} annotation areas have been reformatted.\n"
"-----".format(num_annotation_areas_changed)
)
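    # Worked example (illustrative, not part of the original migration): a
    # source whose image_annotation_area is '0,100,0,100' (percentages) is
    # rewritten to '0;100;0;100' by forwards(); pixel annotation areas keep
    # the comma-separated format and are not touched.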
def backwards(self, orm):
print (
"-----\n"
"This migration rollback doesn't do anything. There doesn't seem to\n"
"be a pressing reason to support the old annotation area format.\n"
"-----"
)
models = {
'annotations.label': {
'Meta': {'object_name': 'Label'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['annotations.LabelGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'})
},
'annotations.labelgroup': {
'Meta': {'object_name': 'LabelGroup'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'annotations.labelset': {
'Meta': {'object_name': 'LabelSet'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'edit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['annotations.Label']", 'symmetrical': 'False'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'images.image': {
'Meta': {'object_name': 'Image'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_robot_annotator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Robot']", 'null': 'True'}),
'metadata': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Metadata']"}),
'original_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'original_height': ('django.db.models.fields.IntegerField', [], {}),
'original_width': ('django.db.models.fields.IntegerField', [], {}),
'point_generation_method': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'process_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.ImageStatus']"}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'images.imagestatus': {
'Meta': {'object_name': 'ImageStatus'},
'annotatedByHuman': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'annotatedByRobot': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featureFileHasHumanLabels': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featuresExtracted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hasRandomPoints': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'preprocessed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'usedInCurrentModel': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'images.metadata': {
'Meta': {'object_name': 'Metadata'},
'annotation_area': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'balance': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'camera': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'blank': 'True'}),
'depth': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'framing': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'group1_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group2_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group3_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group4_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group5_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group6_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'group7_percent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'height_in_cm': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'longitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'photo_date': ('django.db.models.fields.DateField', [], {}),
'photographer': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'}),
'strobes': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'value1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value1']", 'null': 'True'}),
'value2': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value2']", 'null': 'True'}),
'value3': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value3']", 'null': 'True'}),
'value4': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value4']", 'null': 'True'}),
'value5': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Value5']", 'null': 'True'}),
'water_quality': ('django.db.models.fields.CharField', [], {'max_length': '45', 'blank': 'True'})
},
'images.point': {
'Meta': {'object_name': 'Point'},
'annotation_status': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'column': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Image']"}),
'point_number': ('django.db.models.fields.IntegerField', [], {}),
'row': ('django.db.models.fields.IntegerField', [], {})
},
'images.robot': {
'Meta': {'object_name': 'Robot'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path_to_model': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"}),
'time_to_train': ('django.db.models.fields.BigIntegerField', [], {}),
'version': ('django.db.models.fields.IntegerField', [], {'unique': 'True'})
},
'images.source': {
'Meta': {'object_name': 'Source'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_point_generation_method': ('django.db.models.fields.CharField', [], {'default': "'m_200'", 'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_annotation_area': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'image_height_in_cm': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'key1': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key2': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key3': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key4': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key5': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'labelset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['annotations.LabelSet']"}),
'latitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'longitude': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'v'", 'max_length': '1'})
},
'images.sourceinvite': {
'Meta': {'unique_together': "(['recipient', 'source'],)", 'object_name': 'SourceInvite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invites_received'", 'to': "orm['auth.User']"}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invites_sent'", 'to': "orm['auth.User']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"}),
'source_perm': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'images.value1': {
'Meta': {'object_name': 'Value1'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value2': {
'Meta': {'object_name': 'Value2'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value3': {
'Meta': {'object_name': 'Value3'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value4': {
'Meta': {'object_name': 'Value4'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
},
'images.value5': {
'Meta': {'object_name': 'Value5'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Source']"})
}
}
complete_apps = ['images']
|
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8
|
refs/heads/master
|
lib/python2.7/sqlite3/test/dbapi.py
|
69
|
#-*- coding: ISO-8859-1 -*-
# pysqlite2/test/dbapi.py: tests for DB-API compliance
#
# Copyright (C) 2004-2010 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import unittest
import sys
import sqlite3 as sqlite
try:
import threading
except ImportError:
threading = None
class ModuleTests(unittest.TestCase):
def CheckAPILevel(self):
self.assertEqual(sqlite.apilevel, "2.0",
"apilevel is %s, should be 2.0" % sqlite.apilevel)
def CheckThreadSafety(self):
self.assertEqual(sqlite.threadsafety, 1,
"threadsafety is %d, should be 1" % sqlite.threadsafety)
def CheckParamStyle(self):
self.assertEqual(sqlite.paramstyle, "qmark",
"paramstyle is '%s', should be 'qmark'" %
sqlite.paramstyle)
def CheckWarning(self):
self.assertTrue(issubclass(sqlite.Warning, StandardError),
"Warning is not a subclass of StandardError")
def CheckError(self):
self.assertTrue(issubclass(sqlite.Error, StandardError),
"Error is not a subclass of StandardError")
def CheckInterfaceError(self):
self.assertTrue(issubclass(sqlite.InterfaceError, sqlite.Error),
"InterfaceError is not a subclass of Error")
def CheckDatabaseError(self):
self.assertTrue(issubclass(sqlite.DatabaseError, sqlite.Error),
"DatabaseError is not a subclass of Error")
def CheckDataError(self):
self.assertTrue(issubclass(sqlite.DataError, sqlite.DatabaseError),
"DataError is not a subclass of DatabaseError")
def CheckOperationalError(self):
self.assertTrue(issubclass(sqlite.OperationalError, sqlite.DatabaseError),
"OperationalError is not a subclass of DatabaseError")
def CheckIntegrityError(self):
self.assertTrue(issubclass(sqlite.IntegrityError, sqlite.DatabaseError),
"IntegrityError is not a subclass of DatabaseError")
def CheckInternalError(self):
self.assertTrue(issubclass(sqlite.InternalError, sqlite.DatabaseError),
"InternalError is not a subclass of DatabaseError")
def CheckProgrammingError(self):
self.assertTrue(issubclass(sqlite.ProgrammingError, sqlite.DatabaseError),
"ProgrammingError is not a subclass of DatabaseError")
def CheckNotSupportedError(self):
self.assertTrue(issubclass(sqlite.NotSupportedError,
sqlite.DatabaseError),
"NotSupportedError is not a subclass of DatabaseError")
class ConnectionTests(unittest.TestCase):
def setUp(self):
self.cx = sqlite.connect(":memory:")
cu = self.cx.cursor()
cu.execute("create table test(id integer primary key, name text)")
cu.execute("insert into test(name) values (?)", ("foo",))
def tearDown(self):
self.cx.close()
def CheckCommit(self):
self.cx.commit()
def CheckCommitAfterNoChanges(self):
"""
A commit should also work when no changes were made to the database.
"""
self.cx.commit()
self.cx.commit()
def CheckRollback(self):
self.cx.rollback()
def CheckRollbackAfterNoChanges(self):
"""
A rollback should also work when no changes were made to the database.
"""
self.cx.rollback()
self.cx.rollback()
def CheckCursor(self):
cu = self.cx.cursor()
def CheckFailedOpen(self):
YOU_CANNOT_OPEN_THIS = "/foo/bar/bla/23534/mydb.db"
try:
con = sqlite.connect(YOU_CANNOT_OPEN_THIS)
except sqlite.OperationalError:
return
self.fail("should have raised an OperationalError")
def CheckClose(self):
self.cx.close()
def CheckExceptions(self):
# Optional DB-API extension.
self.assertEqual(self.cx.Warning, sqlite.Warning)
self.assertEqual(self.cx.Error, sqlite.Error)
self.assertEqual(self.cx.InterfaceError, sqlite.InterfaceError)
self.assertEqual(self.cx.DatabaseError, sqlite.DatabaseError)
self.assertEqual(self.cx.DataError, sqlite.DataError)
self.assertEqual(self.cx.OperationalError, sqlite.OperationalError)
self.assertEqual(self.cx.IntegrityError, sqlite.IntegrityError)
self.assertEqual(self.cx.InternalError, sqlite.InternalError)
self.assertEqual(self.cx.ProgrammingError, sqlite.ProgrammingError)
self.assertEqual(self.cx.NotSupportedError, sqlite.NotSupportedError)
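# Illustrative use of the extension checked above (not part of the suite):
# the per-connection exception attributes let callers catch driver errors
# without importing the sqlite3 module at the call site, e.g.
#     try:
#         cx.execute("select * from no_such_table")
#     except cx.OperationalError:
#         pass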
class CursorTests(unittest.TestCase):
def setUp(self):
self.cx = sqlite.connect(":memory:")
self.cu = self.cx.cursor()
self.cu.execute("create table test(id integer primary key, name text, income number)")
self.cu.execute("insert into test(name) values (?)", ("foo",))
def tearDown(self):
self.cu.close()
self.cx.close()
def CheckExecuteNoArgs(self):
self.cu.execute("delete from test")
def CheckExecuteIllegalSql(self):
try:
self.cu.execute("select asdf")
self.fail("should have raised an OperationalError")
except sqlite.OperationalError:
return
except:
self.fail("raised wrong exception")
def CheckExecuteTooMuchSql(self):
try:
self.cu.execute("select 5+4; select 4+5")
self.fail("should have raised a Warning")
except sqlite.Warning:
return
except:
self.fail("raised wrong exception")
def CheckExecuteTooMuchSql2(self):
self.cu.execute("select 5+4; -- foo bar")
def CheckExecuteTooMuchSql3(self):
self.cu.execute("""
select 5+4;
/*
foo
*/
""")
def CheckExecuteWrongSqlArg(self):
try:
self.cu.execute(42)
self.fail("should have raised a ValueError")
except ValueError:
return
except:
self.fail("raised wrong exception.")
def CheckExecuteArgInt(self):
self.cu.execute("insert into test(id) values (?)", (42,))
def CheckExecuteArgFloat(self):
self.cu.execute("insert into test(income) values (?)", (2500.32,))
def CheckExecuteArgString(self):
self.cu.execute("insert into test(name) values (?)", ("Hugo",))
def CheckExecuteArgStringWithZeroByte(self):
self.cu.execute("insert into test(name) values (?)", ("Hu\x00go",))
self.cu.execute("select name from test where id=?", (self.cu.lastrowid,))
row = self.cu.fetchone()
self.assertEqual(row[0], "Hu\x00go")
def CheckExecuteWrongNoOfArgs1(self):
# too many parameters
try:
self.cu.execute("insert into test(id) values (?)", (17, "Egon"))
self.fail("should have raised ProgrammingError")
except sqlite.ProgrammingError:
pass
def CheckExecuteWrongNoOfArgs2(self):
        # too few parameters
try:
self.cu.execute("insert into test(id) values (?)")
self.fail("should have raised ProgrammingError")
except sqlite.ProgrammingError:
pass
def CheckExecuteWrongNoOfArgs3(self):
# no parameters, parameters are needed
try:
self.cu.execute("insert into test(id) values (?)")
self.fail("should have raised ProgrammingError")
except sqlite.ProgrammingError:
pass
def CheckExecuteParamList(self):
self.cu.execute("insert into test(name) values ('foo')")
self.cu.execute("select name from test where name=?", ["foo"])
row = self.cu.fetchone()
self.assertEqual(row[0], "foo")
def CheckExecuteParamSequence(self):
class L(object):
def __len__(self):
return 1
def __getitem__(self, x):
assert x == 0
return "foo"
self.cu.execute("insert into test(name) values ('foo')")
self.cu.execute("select name from test where name=?", L())
row = self.cu.fetchone()
self.assertEqual(row[0], "foo")
def CheckExecuteDictMapping(self):
self.cu.execute("insert into test(name) values ('foo')")
self.cu.execute("select name from test where name=:name", {"name": "foo"})
row = self.cu.fetchone()
self.assertEqual(row[0], "foo")
def CheckExecuteDictMapping_Mapping(self):
# Test only works with Python 2.5 or later
if sys.version_info < (2, 5, 0):
return
class D(dict):
def __missing__(self, key):
return "foo"
self.cu.execute("insert into test(name) values ('foo')")
self.cu.execute("select name from test where name=:name", D())
row = self.cu.fetchone()
self.assertEqual(row[0], "foo")
def CheckExecuteDictMappingTooLittleArgs(self):
self.cu.execute("insert into test(name) values ('foo')")
try:
self.cu.execute("select name from test where name=:name and id=:id", {"name": "foo"})
self.fail("should have raised ProgrammingError")
except sqlite.ProgrammingError:
pass
def CheckExecuteDictMappingNoArgs(self):
self.cu.execute("insert into test(name) values ('foo')")
try:
self.cu.execute("select name from test where name=:name")
self.fail("should have raised ProgrammingError")
except sqlite.ProgrammingError:
pass
def CheckExecuteDictMappingUnnamed(self):
self.cu.execute("insert into test(name) values ('foo')")
try:
self.cu.execute("select name from test where name=?", {"name": "foo"})
self.fail("should have raised ProgrammingError")
except sqlite.ProgrammingError:
pass
def CheckClose(self):
self.cu.close()
def CheckRowcountExecute(self):
self.cu.execute("delete from test")
self.cu.execute("insert into test(name) values ('foo')")
self.cu.execute("insert into test(name) values ('foo')")
self.cu.execute("update test set name='bar'")
self.assertEqual(self.cu.rowcount, 2)
def CheckRowcountSelect(self):
"""
pysqlite does not know the rowcount of SELECT statements, because we
        don't fetch all rows after executing the select statement. The rowcount
        is therefore -1.
"""
self.cu.execute("select 5 union select 6")
self.assertEqual(self.cu.rowcount, -1)
def CheckRowcountExecutemany(self):
self.cu.execute("delete from test")
self.cu.executemany("insert into test(name) values (?)", [(1,), (2,), (3,)])
self.assertEqual(self.cu.rowcount, 3)
def CheckTotalChanges(self):
self.cu.execute("insert into test(name) values ('foo')")
self.cu.execute("insert into test(name) values ('foo')")
if self.cx.total_changes < 2:
self.fail("total changes reported wrong value")
    # Checks for executemany:
    # Sequences are required by the DB-API; iterators and generators
    # are pysqlite enhancements.
def CheckExecuteManySequence(self):
self.cu.executemany("insert into test(income) values (?)", [(x,) for x in range(100, 110)])
def CheckExecuteManyIterator(self):
class MyIter:
def __init__(self):
self.value = 5
def next(self):
if self.value == 10:
raise StopIteration
else:
self.value += 1
return (self.value,)
self.cu.executemany("insert into test(income) values (?)", MyIter())
def CheckExecuteManyGenerator(self):
def mygen():
for i in range(5):
yield (i,)
self.cu.executemany("insert into test(income) values (?)", mygen())
def CheckExecuteManyWrongSqlArg(self):
try:
self.cu.executemany(42, [(3,)])
self.fail("should have raised a ValueError")
except ValueError:
return
except:
self.fail("raised wrong exception.")
def CheckExecuteManySelect(self):
try:
self.cu.executemany("select ?", [(3,)])
self.fail("should have raised a ProgrammingError")
except sqlite.ProgrammingError:
return
except:
self.fail("raised wrong exception.")
def CheckExecuteManyNotIterable(self):
try:
self.cu.executemany("insert into test(income) values (?)", 42)
self.fail("should have raised a TypeError")
except TypeError:
return
except Exception, e:
print "raised", e.__class__
self.fail("raised wrong exception.")
def CheckFetchIter(self):
# Optional DB-API extension.
self.cu.execute("delete from test")
self.cu.execute("insert into test(id) values (?)", (5,))
self.cu.execute("insert into test(id) values (?)", (6,))
self.cu.execute("select id from test order by id")
lst = []
for row in self.cu:
lst.append(row[0])
self.assertEqual(lst[0], 5)
self.assertEqual(lst[1], 6)
def CheckFetchone(self):
self.cu.execute("select name from test")
row = self.cu.fetchone()
self.assertEqual(row[0], "foo")
row = self.cu.fetchone()
self.assertEqual(row, None)
def CheckFetchoneNoStatement(self):
cur = self.cx.cursor()
row = cur.fetchone()
self.assertEqual(row, None)
def CheckArraySize(self):
        # must default to 1
self.assertEqual(self.cu.arraysize, 1)
# now set to 2
self.cu.arraysize = 2
# now make the query return 3 rows
self.cu.execute("delete from test")
self.cu.execute("insert into test(name) values ('A')")
self.cu.execute("insert into test(name) values ('B')")
self.cu.execute("insert into test(name) values ('C')")
self.cu.execute("select name from test")
res = self.cu.fetchmany()
self.assertEqual(len(res), 2)
def CheckFetchmany(self):
self.cu.execute("select name from test")
res = self.cu.fetchmany(100)
self.assertEqual(len(res), 1)
res = self.cu.fetchmany(100)
self.assertEqual(res, [])
def CheckFetchmanyKwArg(self):
"""Checks if fetchmany works with keyword arguments"""
self.cu.execute("select name from test")
res = self.cu.fetchmany(size=100)
self.assertEqual(len(res), 1)
def CheckFetchall(self):
self.cu.execute("select name from test")
res = self.cu.fetchall()
self.assertEqual(len(res), 1)
res = self.cu.fetchall()
self.assertEqual(res, [])
def CheckSetinputsizes(self):
self.cu.setinputsizes([3, 4, 5])
def CheckSetoutputsize(self):
self.cu.setoutputsize(5, 0)
def CheckSetoutputsizeNoColumn(self):
self.cu.setoutputsize(42)
def CheckCursorConnection(self):
# Optional DB-API extension.
self.assertEqual(self.cu.connection, self.cx)
def CheckWrongCursorCallable(self):
try:
def f(): pass
cur = self.cx.cursor(f)
self.fail("should have raised a TypeError")
except TypeError:
return
self.fail("should have raised a ValueError")
def CheckCursorWrongClass(self):
class Foo: pass
foo = Foo()
try:
cur = sqlite.Cursor(foo)
self.fail("should have raised a ValueError")
except TypeError:
pass
@unittest.skipUnless(threading, 'This test requires threading.')
class ThreadTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
self.cur = self.con.cursor()
self.cur.execute("create table test(id integer primary key, name text, bin binary, ratio number, ts timestamp)")
def tearDown(self):
self.cur.close()
self.con.close()
def CheckConCursor(self):
def run(con, errors):
try:
cur = con.cursor()
errors.append("did not raise ProgrammingError")
return
except sqlite.ProgrammingError:
return
except:
errors.append("raised wrong exception")
errors = []
t = threading.Thread(target=run, kwargs={"con": self.con, "errors": errors})
t.start()
t.join()
if len(errors) > 0:
self.fail("\n".join(errors))
def CheckConCommit(self):
def run(con, errors):
try:
con.commit()
errors.append("did not raise ProgrammingError")
return
except sqlite.ProgrammingError:
return
except:
errors.append("raised wrong exception")
errors = []
t = threading.Thread(target=run, kwargs={"con": self.con, "errors": errors})
t.start()
t.join()
if len(errors) > 0:
self.fail("\n".join(errors))
def CheckConRollback(self):
def run(con, errors):
try:
con.rollback()
errors.append("did not raise ProgrammingError")
return
except sqlite.ProgrammingError:
return
except:
errors.append("raised wrong exception")
errors = []
t = threading.Thread(target=run, kwargs={"con": self.con, "errors": errors})
t.start()
t.join()
if len(errors) > 0:
self.fail("\n".join(errors))
def CheckConClose(self):
def run(con, errors):
try:
con.close()
errors.append("did not raise ProgrammingError")
return
except sqlite.ProgrammingError:
return
except:
errors.append("raised wrong exception")
errors = []
t = threading.Thread(target=run, kwargs={"con": self.con, "errors": errors})
t.start()
t.join()
if len(errors) > 0:
self.fail("\n".join(errors))
def CheckCurImplicitBegin(self):
def run(cur, errors):
try:
cur.execute("insert into test(name) values ('a')")
errors.append("did not raise ProgrammingError")
return
except sqlite.ProgrammingError:
return
except:
errors.append("raised wrong exception")
errors = []
t = threading.Thread(target=run, kwargs={"cur": self.cur, "errors": errors})
t.start()
t.join()
if len(errors) > 0:
self.fail("\n".join(errors))
def CheckCurClose(self):
def run(cur, errors):
try:
cur.close()
errors.append("did not raise ProgrammingError")
return
except sqlite.ProgrammingError:
return
except:
errors.append("raised wrong exception")
errors = []
t = threading.Thread(target=run, kwargs={"cur": self.cur, "errors": errors})
t.start()
t.join()
if len(errors) > 0:
self.fail("\n".join(errors))
def CheckCurExecute(self):
def run(cur, errors):
try:
cur.execute("select name from test")
errors.append("did not raise ProgrammingError")
return
except sqlite.ProgrammingError:
return
except:
errors.append("raised wrong exception")
errors = []
self.cur.execute("insert into test(name) values ('a')")
t = threading.Thread(target=run, kwargs={"cur": self.cur, "errors": errors})
t.start()
t.join()
if len(errors) > 0:
self.fail("\n".join(errors))
def CheckCurIterNext(self):
def run(cur, errors):
try:
row = cur.fetchone()
errors.append("did not raise ProgrammingError")
return
except sqlite.ProgrammingError:
return
except:
errors.append("raised wrong exception")
errors = []
self.cur.execute("insert into test(name) values ('a')")
self.cur.execute("select name from test")
t = threading.Thread(target=run, kwargs={"cur": self.cur, "errors": errors})
t.start()
t.join()
if len(errors) > 0:
self.fail("\n".join(errors))
class ConstructorTests(unittest.TestCase):
def CheckDate(self):
d = sqlite.Date(2004, 10, 28)
def CheckTime(self):
t = sqlite.Time(12, 39, 35)
def CheckTimestamp(self):
ts = sqlite.Timestamp(2004, 10, 28, 12, 39, 35)
def CheckDateFromTicks(self):
d = sqlite.DateFromTicks(42)
def CheckTimeFromTicks(self):
t = sqlite.TimeFromTicks(42)
def CheckTimestampFromTicks(self):
ts = sqlite.TimestampFromTicks(42)
def CheckBinary(self):
b = sqlite.Binary(chr(0) + "'")
class ExtensionTests(unittest.TestCase):
def CheckScriptStringSql(self):
con = sqlite.connect(":memory:")
cur = con.cursor()
cur.executescript("""
-- bla bla
/* a stupid comment */
create table a(i);
insert into a(i) values (5);
""")
cur.execute("select i from a")
res = cur.fetchone()[0]
self.assertEqual(res, 5)
def CheckScriptStringUnicode(self):
con = sqlite.connect(":memory:")
cur = con.cursor()
cur.executescript(u"""
create table a(i);
insert into a(i) values (5);
select i from a;
delete from a;
insert into a(i) values (6);
""")
cur.execute("select i from a")
res = cur.fetchone()[0]
self.assertEqual(res, 6)
def CheckScriptSyntaxError(self):
con = sqlite.connect(":memory:")
cur = con.cursor()
raised = False
try:
cur.executescript("create table test(x); asdf; create table test2(x)")
except sqlite.OperationalError:
raised = True
self.assertEqual(raised, True, "should have raised an exception")
def CheckScriptErrorNormal(self):
con = sqlite.connect(":memory:")
cur = con.cursor()
raised = False
try:
cur.executescript("create table test(sadfsadfdsa); select foo from hurz;")
except sqlite.OperationalError:
raised = True
self.assertEqual(raised, True, "should have raised an exception")
def CheckConnectionExecute(self):
con = sqlite.connect(":memory:")
result = con.execute("select 5").fetchone()[0]
self.assertEqual(result, 5, "Basic test of Connection.execute")
def CheckConnectionExecutemany(self):
con = sqlite.connect(":memory:")
con.execute("create table test(foo)")
con.executemany("insert into test(foo) values (?)", [(3,), (4,)])
result = con.execute("select foo from test order by foo").fetchall()
self.assertEqual(result[0][0], 3, "Basic test of Connection.executemany")
self.assertEqual(result[1][0], 4, "Basic test of Connection.executemany")
def CheckConnectionExecutescript(self):
con = sqlite.connect(":memory:")
con.executescript("create table test(foo); insert into test(foo) values (5);")
result = con.execute("select foo from test").fetchone()[0]
self.assertEqual(result, 5, "Basic test of Connection.executescript")
class ClosedConTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def CheckClosedConCursor(self):
con = sqlite.connect(":memory:")
con.close()
try:
cur = con.cursor()
self.fail("Should have raised a ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("Should have raised a ProgrammingError")
def CheckClosedConCommit(self):
con = sqlite.connect(":memory:")
con.close()
try:
con.commit()
self.fail("Should have raised a ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("Should have raised a ProgrammingError")
def CheckClosedConRollback(self):
con = sqlite.connect(":memory:")
con.close()
try:
con.rollback()
self.fail("Should have raised a ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("Should have raised a ProgrammingError")
def CheckClosedCurExecute(self):
con = sqlite.connect(":memory:")
cur = con.cursor()
con.close()
try:
cur.execute("select 4")
self.fail("Should have raised a ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("Should have raised a ProgrammingError")
def CheckClosedCreateFunction(self):
con = sqlite.connect(":memory:")
con.close()
def f(x): return 17
try:
con.create_function("foo", 1, f)
self.fail("Should have raised a ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("Should have raised a ProgrammingError")
def CheckClosedCreateAggregate(self):
con = sqlite.connect(":memory:")
con.close()
class Agg:
def __init__(self):
pass
def step(self, x):
pass
def finalize(self):
return 17
try:
con.create_aggregate("foo", 1, Agg)
self.fail("Should have raised a ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("Should have raised a ProgrammingError")
def CheckClosedSetAuthorizer(self):
con = sqlite.connect(":memory:")
con.close()
def authorizer(*args):
return sqlite.DENY
try:
con.set_authorizer(authorizer)
self.fail("Should have raised a ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("Should have raised a ProgrammingError")
def CheckClosedSetProgressCallback(self):
con = sqlite.connect(":memory:")
con.close()
def progress(): pass
try:
con.set_progress_handler(progress, 100)
self.fail("Should have raised a ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("Should have raised a ProgrammingError")
def CheckClosedCall(self):
con = sqlite.connect(":memory:")
con.close()
try:
con()
self.fail("Should have raised a ProgrammingError")
except sqlite.ProgrammingError:
pass
except:
self.fail("Should have raised a ProgrammingError")
class ClosedCurTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def CheckClosed(self):
con = sqlite.connect(":memory:")
cur = con.cursor()
cur.close()
for method_name in ("execute", "executemany", "executescript", "fetchall", "fetchmany", "fetchone"):
if method_name in ("execute", "executescript"):
params = ("select 4 union select 5",)
elif method_name == "executemany":
params = ("insert into foo(bar) values (?)", [(3,), (4,)])
else:
params = []
try:
method = getattr(cur, method_name)
method(*params)
self.fail("Should have raised a ProgrammingError: method " + method_name)
except sqlite.ProgrammingError:
pass
except:
self.fail("Should have raised a ProgrammingError: " + method_name)
def suite():
module_suite = unittest.makeSuite(ModuleTests, "Check")
connection_suite = unittest.makeSuite(ConnectionTests, "Check")
cursor_suite = unittest.makeSuite(CursorTests, "Check")
thread_suite = unittest.makeSuite(ThreadTests, "Check")
constructor_suite = unittest.makeSuite(ConstructorTests, "Check")
ext_suite = unittest.makeSuite(ExtensionTests, "Check")
closed_con_suite = unittest.makeSuite(ClosedConTests, "Check")
closed_cur_suite = unittest.makeSuite(ClosedCurTests, "Check")
return unittest.TestSuite((module_suite, connection_suite, cursor_suite, thread_suite, constructor_suite, ext_suite, closed_con_suite, closed_cur_suite))
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
|
gonboy/sl4a
|
refs/heads/master
|
python/src/Lib/xml/dom/minidom.py
|
60
|
"""\
minidom.py -- a lightweight DOM implementation.
parse("foo.xml")
parseString("<foo><bar/></foo>")
Todo:
=====
* convenience methods for getting elements and text.
* more testing
* bring some of the writer and linearizer code into conformance with this
interface
* SAX 2 namespaces
"""
import xml.dom
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
from xml.dom.minicompat import *
from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
# This is used by the ID-cache invalidation checks; the list isn't
# actually complete, since the nodes being checked will never be the
# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is
# the node being added or removed, not the node being modified.)
#
_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE,
xml.dom.Node.ENTITY_REFERENCE_NODE)
class Node(xml.dom.Node):
namespaceURI = None # this is non-null only for elements and attributes
parentNode = None
ownerDocument = None
nextSibling = None
previousSibling = None
prefix = EMPTY_PREFIX # non-null only for NS elements and attributes
def __nonzero__(self):
return True
def toxml(self, encoding = None):
return self.toprettyxml("", "", encoding)
def toprettyxml(self, indent="\t", newl="\n", encoding = None):
# indent = the indentation string to prepend, per level
# newl = the newline string to append
writer = _get_StringIO()
if encoding is not None:
import codecs
# Can't use codecs.getwriter to preserve 2.0 compatibility
writer = codecs.lookup(encoding)[3](writer)
if self.nodeType == Node.DOCUMENT_NODE:
# Can pass encoding only to document, to put it into XML header
self.writexml(writer, "", indent, newl, encoding)
else:
self.writexml(writer, "", indent, newl)
return writer.getvalue()
def hasChildNodes(self):
if self.childNodes:
return True
else:
return False
def _get_childNodes(self):
return self.childNodes
def _get_firstChild(self):
if self.childNodes:
return self.childNodes[0]
def _get_lastChild(self):
if self.childNodes:
return self.childNodes[-1]
def insertBefore(self, newChild, refChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(newChild.childNodes):
self.insertBefore(c, refChild)
### The DOM does not clearly specify what to return in this case
return newChild
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
if refChild is None:
self.appendChild(newChild)
else:
try:
index = self.childNodes.index(refChild)
except ValueError:
raise xml.dom.NotFoundErr()
if newChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
self.childNodes.insert(index, newChild)
newChild.nextSibling = refChild
refChild.previousSibling = newChild
if index:
node = self.childNodes[index-1]
node.nextSibling = newChild
newChild.previousSibling = node
else:
newChild.previousSibling = None
newChild.parentNode = self
return newChild
def appendChild(self, node):
if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
for c in tuple(node.childNodes):
self.appendChild(c)
### The DOM does not clearly specify what to return in this case
return node
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
elif node.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
if node.parentNode is not None:
node.parentNode.removeChild(node)
_append_child(self, node)
node.nextSibling = None
return node
def replaceChild(self, newChild, oldChild):
if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
refChild = oldChild.nextSibling
self.removeChild(oldChild)
return self.insertBefore(newChild, refChild)
if newChild.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(newChild), repr(self)))
if newChild is oldChild:
return
if newChild.parentNode is not None:
newChild.parentNode.removeChild(newChild)
try:
index = self.childNodes.index(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
self.childNodes[index] = newChild
newChild.parentNode = self
oldChild.parentNode = None
if (newChild.nodeType in _nodeTypes_with_children
or oldChild.nodeType in _nodeTypes_with_children):
_clear_id_cache(self)
newChild.nextSibling = oldChild.nextSibling
newChild.previousSibling = oldChild.previousSibling
oldChild.nextSibling = None
oldChild.previousSibling = None
if newChild.previousSibling:
newChild.previousSibling.nextSibling = newChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
return oldChild
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
if oldChild.nextSibling is not None:
oldChild.nextSibling.previousSibling = oldChild.previousSibling
if oldChild.previousSibling is not None:
oldChild.previousSibling.nextSibling = oldChild.nextSibling
oldChild.nextSibling = oldChild.previousSibling = None
if oldChild.nodeType in _nodeTypes_with_children:
_clear_id_cache(self)
oldChild.parentNode = None
return oldChild
def normalize(self):
L = []
for child in self.childNodes:
if child.nodeType == Node.TEXT_NODE:
data = child.data
if data and L and L[-1].nodeType == child.nodeType:
# collapse text node
node = L[-1]
node.data = node.data + child.data
node.nextSibling = child.nextSibling
child.unlink()
elif data:
if L:
L[-1].nextSibling = child
child.previousSibling = L[-1]
else:
child.previousSibling = None
L.append(child)
else:
# empty text node; discard
child.unlink()
else:
if L:
L[-1].nextSibling = child
child.previousSibling = L[-1]
else:
child.previousSibling = None
L.append(child)
if child.nodeType == Node.ELEMENT_NODE:
child.normalize()
if L:
L[-1].nextSibling = None
self.childNodes[:] = L
def cloneNode(self, deep):
return _clone_node(self, deep, self.ownerDocument or self)
def isSupported(self, feature, version):
return self.ownerDocument.implementation.hasFeature(feature, version)
def _get_localName(self):
# Overridden in Element and Attr where localName can be Non-Null
return None
# Node interfaces from Level 3 (WD 9 April 2002)
def isSameNode(self, other):
return self is other
def getInterface(self, feature):
if self.isSupported(feature, None):
return self
else:
return None
# The "user data" functions use a dictionary that is only present
# if some user data has been set, so be careful not to assume it
# exists.
def getUserData(self, key):
try:
return self._user_data[key][0]
except (AttributeError, KeyError):
return None
def setUserData(self, key, data, handler):
old = None
try:
d = self._user_data
except AttributeError:
d = {}
self._user_data = d
if key in d:
old = d[key][0]
if data is None:
# ignore handlers passed for None
handler = None
if old is not None:
del d[key]
else:
d[key] = (data, handler)
return old
def _call_user_data_handler(self, operation, src, dst):
if hasattr(self, "_user_data"):
for key, (data, handler) in self._user_data.items():
if handler is not None:
handler.handle(operation, key, data, src, dst)
# minidom-specific API:
def unlink(self):
self.parentNode = self.ownerDocument = None
if self.childNodes:
for child in self.childNodes:
child.unlink()
self.childNodes = NodeList()
self.previousSibling = None
self.nextSibling = None
defproperty(Node, "firstChild", doc="First child node, or None.")
defproperty(Node, "lastChild", doc="Last child node, or None.")
defproperty(Node, "localName", doc="Namespace-local name of this node.")
def _append_child(self, node):
    # fast path with fewer checks; usable by DOM builders if careful
childNodes = self.childNodes
if childNodes:
last = childNodes[-1]
node.__dict__["previousSibling"] = last
last.__dict__["nextSibling"] = node
childNodes.append(node)
node.__dict__["parentNode"] = self
def _in_document(node):
# return True iff node is part of a document tree
while node is not None:
if node.nodeType == Node.DOCUMENT_NODE:
return True
node = node.parentNode
return False
def _write_data(writer, data):
"Writes datachars to writer."
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
writer.write(data)
def _get_elements_by_tagName_helper(parent, name, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE and \
(name == "*" or node.tagName == name):
rc.append(node)
_get_elements_by_tagName_helper(node, name, rc)
return rc
def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
for node in parent.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
if ((localName == "*" or node.localName == localName) and
(nsURI == "*" or node.namespaceURI == nsURI)):
rc.append(node)
_get_elements_by_tagName_ns_helper(node, nsURI, localName, rc)
return rc
class DocumentFragment(Node):
nodeType = Node.DOCUMENT_FRAGMENT_NODE
nodeName = "#document-fragment"
nodeValue = None
attributes = None
parentNode = None
_child_node_types = (Node.ELEMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.NOTATION_NODE)
def __init__(self):
self.childNodes = NodeList()
class Attr(Node):
nodeType = Node.ATTRIBUTE_NODE
attributes = None
ownerElement = None
specified = False
_is_id = False
_child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)
def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
prefix=None):
# skip setattr for performance
d = self.__dict__
d["nodeName"] = d["name"] = qName
d["namespaceURI"] = namespaceURI
d["prefix"] = prefix
d['childNodes'] = NodeList()
# Add the single child node that represents the value of the attr
self.childNodes.append(Text())
# nodeValue and value are set elsewhere
def _get_localName(self):
return self.nodeName.split(":", 1)[-1]
def _get_name(self):
return self.name
def _get_specified(self):
return self.specified
def __setattr__(self, name, value):
d = self.__dict__
if name in ("value", "nodeValue"):
d["value"] = d["nodeValue"] = value
d2 = self.childNodes[0].__dict__
d2["data"] = d2["nodeValue"] = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
elif name in ("name", "nodeName"):
d["name"] = d["nodeName"] = value
if self.ownerElement is not None:
_clear_id_cache(self.ownerElement)
else:
d[name] = value
def _set_prefix(self, prefix):
nsuri = self.namespaceURI
if prefix == "xmlns":
if nsuri and nsuri != XMLNS_NAMESPACE:
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix for the wrong namespace")
d = self.__dict__
d['prefix'] = prefix
if prefix is None:
newName = self.localName
else:
newName = "%s:%s" % (prefix, self.localName)
if self.ownerElement:
_clear_id_cache(self.ownerElement)
d['nodeName'] = d['name'] = newName
def _set_value(self, value):
d = self.__dict__
d['value'] = d['nodeValue'] = value
if self.ownerElement:
_clear_id_cache(self.ownerElement)
self.childNodes[0].data = value
def unlink(self):
# This implementation does not call the base implementation
# since most of that is not needed, and the expense of the
# method call is not warranted. We duplicate the removal of
# children, but that's all we needed from the base class.
elem = self.ownerElement
if elem is not None:
del elem._attrs[self.nodeName]
del elem._attrsNS[(self.namespaceURI, self.localName)]
if self._is_id:
self._is_id = False
elem._magic_id_nodes -= 1
self.ownerDocument._magic_id_count -= 1
for child in self.childNodes:
child.unlink()
del self.childNodes[:]
def _get_isId(self):
if self._is_id:
return True
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return False
info = doc._get_elem_info(elem)
if info is None:
return False
if self.namespaceURI:
return info.isIdNS(self.namespaceURI, self.localName)
else:
return info.isId(self.nodeName)
def _get_schemaType(self):
doc = self.ownerDocument
elem = self.ownerElement
if doc is None or elem is None:
return _no_type
info = doc._get_elem_info(elem)
if info is None:
return _no_type
if self.namespaceURI:
return info.getAttributeTypeNS(self.namespaceURI, self.localName)
else:
return info.getAttributeType(self.nodeName)
defproperty(Attr, "isId", doc="True if this attribute is an ID.")
defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
class NamedNodeMap(object):
"""The attribute list is a transient interface to the underlying
dictionaries. Mutations here will change the underlying element's
dictionary.
Ordering is imposed artificially and does not reflect the order of
attributes as found in an input document.
"""
__slots__ = ('_attrs', '_attrsNS', '_ownerElement')
def __init__(self, attrs, attrsNS, ownerElement):
self._attrs = attrs
self._attrsNS = attrsNS
self._ownerElement = ownerElement
def _get_length(self):
return len(self._attrs)
def item(self, index):
try:
return self[self._attrs.keys()[index]]
except IndexError:
return None
def items(self):
L = []
for node in self._attrs.values():
L.append((node.nodeName, node.value))
return L
def itemsNS(self):
L = []
for node in self._attrs.values():
L.append(((node.namespaceURI, node.localName), node.value))
return L
def has_key(self, key):
if isinstance(key, StringTypes):
return self._attrs.has_key(key)
else:
return self._attrsNS.has_key(key)
def keys(self):
return self._attrs.keys()
def keysNS(self):
return self._attrsNS.keys()
def values(self):
return self._attrs.values()
def get(self, name, value=None):
return self._attrs.get(name, value)
__len__ = _get_length
__hash__ = None # Mutable type can't be correctly hashed
def __cmp__(self, other):
if self._attrs is getattr(other, "_attrs", None):
return 0
else:
return cmp(id(self), id(other))
def __getitem__(self, attname_or_tuple):
if isinstance(attname_or_tuple, tuple):
return self._attrsNS[attname_or_tuple]
else:
return self._attrs[attname_or_tuple]
# same as set
def __setitem__(self, attname, value):
if isinstance(value, StringTypes):
try:
node = self._attrs[attname]
except KeyError:
node = Attr(attname)
node.ownerDocument = self._ownerElement.ownerDocument
self.setNamedItem(node)
node.value = value
else:
if not isinstance(value, Attr):
raise TypeError, "value must be a string or Attr object"
node = value
self.setNamedItem(node)
def getNamedItem(self, name):
try:
return self._attrs[name]
except KeyError:
return None
def getNamedItemNS(self, namespaceURI, localName):
try:
return self._attrsNS[(namespaceURI, localName)]
except KeyError:
return None
def removeNamedItem(self, name):
n = self.getNamedItem(name)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrs[n.nodeName]
del self._attrsNS[(n.namespaceURI, n.localName)]
if 'ownerElement' in n.__dict__:
n.__dict__['ownerElement'] = None
return n
else:
raise xml.dom.NotFoundErr()
def removeNamedItemNS(self, namespaceURI, localName):
n = self.getNamedItemNS(namespaceURI, localName)
if n is not None:
_clear_id_cache(self._ownerElement)
del self._attrsNS[(n.namespaceURI, n.localName)]
del self._attrs[n.nodeName]
if 'ownerElement' in n.__dict__:
n.__dict__['ownerElement'] = None
return n
else:
raise xml.dom.NotFoundErr()
def setNamedItem(self, node):
if not isinstance(node, Attr):
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
old = self._attrs.get(node.name)
if old:
old.unlink()
self._attrs[node.name] = node
self._attrsNS[(node.namespaceURI, node.localName)] = node
node.ownerElement = self._ownerElement
_clear_id_cache(node.ownerElement)
return old
def setNamedItemNS(self, node):
return self.setNamedItem(node)
def __delitem__(self, attname_or_tuple):
node = self[attname_or_tuple]
_clear_id_cache(node.ownerElement)
node.unlink()
def __getstate__(self):
return self._attrs, self._attrsNS, self._ownerElement
def __setstate__(self, state):
self._attrs, self._attrsNS, self._ownerElement = state
defproperty(NamedNodeMap, "length",
doc="Number of nodes in the NamedNodeMap.")
AttributeList = NamedNodeMap
class TypeInfo(object):
__slots__ = 'namespace', 'name'
def __init__(self, namespace, name):
self.namespace = namespace
self.name = name
def __repr__(self):
if self.namespace:
return "<TypeInfo %r (from %r)>" % (self.name, self.namespace)
else:
return "<TypeInfo %r>" % self.name
def _get_name(self):
return self.name
def _get_namespace(self):
return self.namespace
_no_type = TypeInfo(None, None)
class Element(Node):
nodeType = Node.ELEMENT_NODE
nodeValue = None
schemaType = _no_type
_magic_id_nodes = 0
_child_node_types = (Node.ELEMENT_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE,
Node.TEXT_NODE,
Node.CDATA_SECTION_NODE,
Node.ENTITY_REFERENCE_NODE)
def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
localName=None):
self.tagName = self.nodeName = tagName
self.prefix = prefix
self.namespaceURI = namespaceURI
self.childNodes = NodeList()
self._attrs = {} # attributes are double-indexed:
self._attrsNS = {} # tagName -> Attribute
# URI,localName -> Attribute
# in the future: consider lazy generation
                           # of attribute objects; this is too tricky
# for now because of headaches with
# namespaces.
def _get_localName(self):
return self.tagName.split(":", 1)[-1]
def _get_tagName(self):
return self.tagName
def unlink(self):
for attr in self._attrs.values():
attr.unlink()
self._attrs = None
self._attrsNS = None
Node.unlink(self)
def getAttribute(self, attname):
try:
return self._attrs[attname].value
except KeyError:
return ""
def getAttributeNS(self, namespaceURI, localName):
try:
return self._attrsNS[(namespaceURI, localName)].value
except KeyError:
return ""
def setAttribute(self, attname, value):
attr = self.getAttributeNode(attname)
if attr is None:
attr = Attr(attname)
# for performance
d = attr.__dict__
d["value"] = d["nodeValue"] = value
d["ownerDocument"] = self.ownerDocument
self.setAttributeNode(attr)
elif value != attr.value:
d = attr.__dict__
d["value"] = d["nodeValue"] = value
if attr.isId:
_clear_id_cache(self)
def setAttributeNS(self, namespaceURI, qualifiedName, value):
prefix, localname = _nssplit(qualifiedName)
attr = self.getAttributeNodeNS(namespaceURI, localname)
if attr is None:
# for performance
attr = Attr(qualifiedName, namespaceURI, localname, prefix)
d = attr.__dict__
d["prefix"] = prefix
d["nodeName"] = qualifiedName
d["value"] = d["nodeValue"] = value
d["ownerDocument"] = self.ownerDocument
self.setAttributeNode(attr)
else:
d = attr.__dict__
if value != attr.value:
d["value"] = d["nodeValue"] = value
if attr.isId:
_clear_id_cache(self)
if attr.prefix != prefix:
d["prefix"] = prefix
d["nodeName"] = qualifiedName
def getAttributeNode(self, attrname):
return self._attrs.get(attrname)
def getAttributeNodeNS(self, namespaceURI, localName):
return self._attrsNS.get((namespaceURI, localName))
def setAttributeNode(self, attr):
if attr.ownerElement not in (None, self):
raise xml.dom.InuseAttributeErr("attribute node already owned")
old1 = self._attrs.get(attr.name, None)
if old1 is not None:
self.removeAttributeNode(old1)
old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
if old2 is not None and old2 is not old1:
self.removeAttributeNode(old2)
_set_attribute_node(self, attr)
if old1 is not attr:
# It might have already been part of this node, in which case
# it doesn't represent a change, and should not be returned.
return old1
if old2 is not attr:
return old2
setAttributeNodeNS = setAttributeNode
def removeAttribute(self, name):
try:
attr = self._attrs[name]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNS(self, namespaceURI, localName):
try:
attr = self._attrsNS[(namespaceURI, localName)]
except KeyError:
raise xml.dom.NotFoundErr()
self.removeAttributeNode(attr)
def removeAttributeNode(self, node):
if node is None:
raise xml.dom.NotFoundErr()
try:
self._attrs[node.name]
except KeyError:
raise xml.dom.NotFoundErr()
_clear_id_cache(self)
node.unlink()
# Restore this since the node is still useful and otherwise
# unlinked
node.ownerDocument = self.ownerDocument
removeAttributeNodeNS = removeAttributeNode
    def hasAttribute(self, name):
        return name in self._attrs
    def hasAttributeNS(self, namespaceURI, localName):
        return (namespaceURI, localName) in self._attrsNS
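    # A minimal usage sketch of the attribute API above (illustrative
    # only; the tag and attribute names are assumptions, not part of
    # this module):
    #
    #   elem = Document().createElement('item')
    #   elem.setAttribute('id', '42')
    #   assert elem.hasAttribute('id')
    #   assert elem.getAttribute('id') == '42'
    #   elem.removeAttribute('id')
    #   assert elem.getAttribute('id') == ''    # missing -> empty string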
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def __repr__(self):
return "<DOM Element: %s at %#x>" % (self.tagName, id(self))
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">%s"%(newl))
for node in self.childNodes:
node.writexml(writer,indent+addindent,addindent,newl)
writer.write("%s</%s>%s" % (indent,self.tagName,newl))
else:
writer.write("/>%s"%(newl))
def _get_attributes(self):
return NamedNodeMap(self._attrs, self._attrsNS, self)
def hasAttributes(self):
if self._attrs:
return True
else:
return False
# DOM Level 3 attributes, based on the 22 Oct 2002 draft
def setIdAttribute(self, name):
idAttr = self.getAttributeNode(name)
self.setIdAttributeNode(idAttr)
def setIdAttributeNS(self, namespaceURI, localName):
idAttr = self.getAttributeNodeNS(namespaceURI, localName)
self.setIdAttributeNode(idAttr)
def setIdAttributeNode(self, idAttr):
if idAttr is None or not self.isSameNode(idAttr.ownerElement):
raise xml.dom.NotFoundErr()
if _get_containing_entref(self) is not None:
raise xml.dom.NoModificationAllowedErr()
if not idAttr._is_id:
idAttr.__dict__['_is_id'] = True
self._magic_id_nodes += 1
self.ownerDocument._magic_id_count += 1
_clear_id_cache(self)
defproperty(Element, "attributes",
doc="NamedNodeMap of attributes on the element.")
defproperty(Element, "localName",
doc="Namespace-local name of this element.")
def _set_attribute_node(element, attr):
_clear_id_cache(element)
element._attrs[attr.name] = attr
element._attrsNS[(attr.namespaceURI, attr.localName)] = attr
# This creates a circular reference, but Element.unlink()
# breaks the cycle since the references to the attribute
# dictionaries are tossed.
attr.__dict__['ownerElement'] = element
class Childless:
"""Mixin that makes childless-ness easy to implement and avoids
the complexity of the Node methods that deal with children.
"""
attributes = None
childNodes = EmptyNodeList()
firstChild = None
lastChild = None
def _get_firstChild(self):
return None
def _get_lastChild(self):
return None
def appendChild(self, node):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes cannot have children")
def hasChildNodes(self):
return False
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
def removeChild(self, oldChild):
raise xml.dom.NotFoundErr(
self.nodeName + " nodes do not have children")
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
self.nodeName + " nodes do not have children")
class ProcessingInstruction(Childless, Node):
nodeType = Node.PROCESSING_INSTRUCTION_NODE
def __init__(self, target, data):
self.target = self.nodeName = target
self.data = self.nodeValue = data
def _get_data(self):
return self.data
def _set_data(self, value):
d = self.__dict__
d['data'] = d['nodeValue'] = value
def _get_target(self):
return self.target
def _set_target(self, value):
d = self.__dict__
d['target'] = d['nodeName'] = value
def __setattr__(self, name, value):
if name == "data" or name == "nodeValue":
self.__dict__['data'] = self.__dict__['nodeValue'] = value
elif name == "target" or name == "nodeName":
self.__dict__['target'] = self.__dict__['nodeName'] = value
else:
self.__dict__[name] = value
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl))
class CharacterData(Childless, Node):
def _get_length(self):
return len(self.data)
__len__ = _get_length
def _get_data(self):
return self.__dict__['data']
def _set_data(self, data):
d = self.__dict__
d['data'] = d['nodeValue'] = data
_get_nodeValue = _get_data
_set_nodeValue = _set_data
def __setattr__(self, name, value):
if name == "data" or name == "nodeValue":
self.__dict__['data'] = self.__dict__['nodeValue'] = value
else:
self.__dict__[name] = value
def __repr__(self):
data = self.data
if len(data) > 10:
dotdotdot = "..."
else:
dotdotdot = ""
return '<DOM %s node "%r%s">' % (
self.__class__.__name__, data[0:10], dotdotdot)
def substringData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
return self.data[offset:offset+count]
def appendData(self, arg):
self.data = self.data + arg
def insertData(self, offset, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if arg:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset:])
def deleteData(self, offset, count):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = self.data[:offset] + self.data[offset+count:]
def replaceData(self, offset, count, arg):
if offset < 0:
raise xml.dom.IndexSizeErr("offset cannot be negative")
if offset >= len(self.data):
raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise xml.dom.IndexSizeErr("count cannot be negative")
if count:
self.data = "%s%s%s" % (
self.data[:offset], arg, self.data[offset+count:])
defproperty(CharacterData, "length", doc="Length of the string data.")
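# Sketch of the CharacterData editing operations above; offsets are
# zero-based and counts are lengths (the concrete text node below is an
# assumption for illustration):
#
#   t = Document().createTextNode('hello world')
#   t.substringData(0, 5)         # -> 'hello'
#   t.insertData(5, ',')          # t.data == 'hello, world'
#   t.deleteData(5, 1)            # t.data == 'hello world'
#   t.replaceData(6, 5, 'dom')    # t.data == 'hello dom'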
class Text(CharacterData):
# Make sure we don't add an instance __dict__ if we don't already
# have one, at least when that's possible:
# XXX this does not work, CharacterData is an old-style class
# __slots__ = ()
nodeType = Node.TEXT_NODE
nodeName = "#text"
attributes = None
def splitText(self, offset):
if offset < 0 or offset > len(self.data):
raise xml.dom.IndexSizeErr("illegal offset value")
newText = self.__class__()
newText.data = self.data[offset:]
newText.ownerDocument = self.ownerDocument
next = self.nextSibling
if self.parentNode and self in self.parentNode.childNodes:
if next is None:
self.parentNode.appendChild(newText)
else:
self.parentNode.insertBefore(newText, next)
self.data = self.data[:offset]
return newText
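    # splitText sketch (hedged; the doc/parent wiring is assumed):
    #
    #   t = doc.createTextNode('foobar')
    #   parent.appendChild(t)
    #   tail = t.splitText(3)     # t.data == 'foo', tail.data == 'bar'
    #   # tail becomes t's next sibling under the same parent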
def writexml(self, writer, indent="", addindent="", newl=""):
_write_data(writer, "%s%s%s"%(indent, self.data, newl))
# DOM Level 3 (WD 9 April 2002)
def _get_wholeText(self):
L = [self.data]
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.insert(0, n.data)
n = n.previousSibling
else:
break
n = self.nextSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
L.append(n.data)
n = n.nextSibling
else:
break
return ''.join(L)
def replaceWholeText(self, content):
# XXX This needs to be seriously changed if minidom ever
# supports EntityReference nodes.
parent = self.parentNode
n = self.previousSibling
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.previousSibling
parent.removeChild(n)
n = next
else:
break
n = self.nextSibling
if not content:
parent.removeChild(self)
while n is not None:
if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
next = n.nextSibling
parent.removeChild(n)
n = next
else:
break
if content:
d = self.__dict__
d['data'] = content
d['nodeValue'] = content
return self
else:
return None
def _get_isWhitespaceInElementContent(self):
if self.data.strip():
return False
elem = _get_containing_element(self)
if elem is None:
return False
info = self.ownerDocument._get_elem_info(elem)
if info is None:
return False
else:
return info.isElementContent()
defproperty(Text, "isWhitespaceInElementContent",
doc="True iff this text node contains only whitespace"
" and is in element content.")
defproperty(Text, "wholeText",
doc="The text of all logically-adjacent text nodes.")
def _get_containing_element(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ELEMENT_NODE:
return c
c = c.parentNode
return None
def _get_containing_entref(node):
c = node.parentNode
while c is not None:
if c.nodeType == Node.ENTITY_REFERENCE_NODE:
return c
c = c.parentNode
return None
class Comment(Childless, CharacterData):
nodeType = Node.COMMENT_NODE
nodeName = "#comment"
def __init__(self, data):
self.data = self.nodeValue = data
def writexml(self, writer, indent="", addindent="", newl=""):
if "--" in self.data:
raise ValueError("'--' is not allowed in a comment node")
writer.write("%s<!--%s-->%s" % (indent, self.data, newl))
class CDATASection(Text):
# Make sure we don't add an instance __dict__ if we don't already
# have one, at least when that's possible:
# XXX this does not work, Text is an old-style class
# __slots__ = ()
nodeType = Node.CDATA_SECTION_NODE
nodeName = "#cdata-section"
def writexml(self, writer, indent="", addindent="", newl=""):
if self.data.find("]]>") >= 0:
raise ValueError("']]>' not allowed in a CDATA section")
writer.write("<![CDATA[%s]]>" % self.data)
class ReadOnlySequentialNamedNodeMap(object):
__slots__ = '_seq',
def __init__(self, seq=()):
# seq should be a list or tuple
self._seq = seq
def __len__(self):
return len(self._seq)
def _get_length(self):
return len(self._seq)
def getNamedItem(self, name):
for n in self._seq:
if n.nodeName == name:
return n
def getNamedItemNS(self, namespaceURI, localName):
for n in self._seq:
if n.namespaceURI == namespaceURI and n.localName == localName:
return n
def __getitem__(self, name_or_tuple):
if isinstance(name_or_tuple, tuple):
node = self.getNamedItemNS(*name_or_tuple)
else:
node = self.getNamedItem(name_or_tuple)
if node is None:
raise KeyError, name_or_tuple
return node
def item(self, index):
if index < 0:
return None
try:
return self._seq[index]
except IndexError:
return None
def removeNamedItem(self, name):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def removeNamedItemNS(self, namespaceURI, localName):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItem(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def setNamedItemNS(self, node):
raise xml.dom.NoModificationAllowedErr(
"NamedNodeMap instance is read-only")
def __getstate__(self):
return [self._seq]
def __setstate__(self, state):
self._seq = state[0]
defproperty(ReadOnlySequentialNamedNodeMap, "length",
doc="Number of entries in the NamedNodeMap.")
class Identified:
"""Mix-in class that supports the publicId and systemId attributes."""
# XXX this does not work, this is an old-style class
# __slots__ = 'publicId', 'systemId'
def _identified_mixin_init(self, publicId, systemId):
self.publicId = publicId
self.systemId = systemId
def _get_publicId(self):
return self.publicId
def _get_systemId(self):
return self.systemId
class DocumentType(Identified, Childless, Node):
nodeType = Node.DOCUMENT_TYPE_NODE
nodeValue = None
name = None
publicId = None
systemId = None
internalSubset = None
def __init__(self, qualifiedName):
self.entities = ReadOnlySequentialNamedNodeMap()
self.notations = ReadOnlySequentialNamedNodeMap()
if qualifiedName:
prefix, localname = _nssplit(qualifiedName)
self.name = localname
self.nodeName = self.name
def _get_internalSubset(self):
return self.internalSubset
def cloneNode(self, deep):
if self.ownerDocument is None:
            # no owner document, so cloning this doctype is allowed
clone = DocumentType(None)
clone.name = self.name
clone.nodeName = self.name
operation = xml.dom.UserDataHandler.NODE_CLONED
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in self.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
clone.notations._seq.append(notation)
n._call_user_data_handler(operation, n, notation)
for e in self.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
clone.entities._seq.append(entity)
                    e._call_user_data_handler(operation, e, entity)
self._call_user_data_handler(operation, self, clone)
return clone
else:
return None
def writexml(self, writer, indent="", addindent="", newl=""):
writer.write("<!DOCTYPE ")
writer.write(self.name)
if self.publicId:
writer.write("%s PUBLIC '%s'%s '%s'"
% (newl, self.publicId, newl, self.systemId))
elif self.systemId:
writer.write("%s SYSTEM '%s'" % (newl, self.systemId))
if self.internalSubset is not None:
writer.write(" [")
writer.write(self.internalSubset)
writer.write("]")
writer.write(">"+newl)
class Entity(Identified, Node):
attributes = None
nodeType = Node.ENTITY_NODE
nodeValue = None
actualEncoding = None
encoding = None
version = None
def __init__(self, name, publicId, systemId, notation):
self.nodeName = name
self.notationName = notation
self.childNodes = NodeList()
self._identified_mixin_init(publicId, systemId)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_encoding(self):
return self.encoding
def _get_version(self):
return self.version
def appendChild(self, newChild):
raise xml.dom.HierarchyRequestErr(
"cannot append children to an entity node")
def insertBefore(self, newChild, refChild):
raise xml.dom.HierarchyRequestErr(
"cannot insert children below an entity node")
def removeChild(self, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot remove children from an entity node")
def replaceChild(self, newChild, oldChild):
raise xml.dom.HierarchyRequestErr(
"cannot replace children of an entity node")
class Notation(Identified, Childless, Node):
nodeType = Node.NOTATION_NODE
nodeValue = None
def __init__(self, name, publicId, systemId):
self.nodeName = name
self._identified_mixin_init(publicId, systemId)
class DOMImplementation(DOMImplementationLS):
_features = [("core", "1.0"),
("core", "2.0"),
("core", "3.0"),
("core", None),
("xml", "1.0"),
("xml", "2.0"),
("xml", "3.0"),
("xml", None),
("ls-load", "3.0"),
("ls-load", None),
]
def hasFeature(self, feature, version):
if version == "":
version = None
return (feature.lower(), version) in self._features
def createDocument(self, namespaceURI, qualifiedName, doctype):
if doctype and doctype.parentNode is not None:
raise xml.dom.WrongDocumentErr(
"doctype object owned by another DOM tree")
doc = self._create_document()
add_root_element = not (namespaceURI is None
and qualifiedName is None
and doctype is None)
if not qualifiedName and add_root_element:
# The spec is unclear what to raise here; SyntaxErr
# would be the other obvious candidate. Since Xerces raises
# InvalidCharacterErr, and since SyntaxErr is not listed
# for createDocument, that seems to be the better choice.
# XXX: need to check for illegal characters here and in
# createElement.
            # DOM Level III clears this up when talking about the return value
            # of this function.  If namespaceURI, qName and DocType are
            # Null, the document is returned without a document element;
            # otherwise, if doctype or namespaceURI is not None, we are
            # back to the above problem.
raise xml.dom.InvalidCharacterErr("Element with no name")
if add_root_element:
prefix, localname = _nssplit(qualifiedName)
if prefix == "xml" \
and namespaceURI != "http://www.w3.org/XML/1998/namespace":
raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
if prefix and not namespaceURI:
raise xml.dom.NamespaceErr(
"illegal use of prefix without namespaces")
element = doc.createElementNS(namespaceURI, qualifiedName)
if doctype:
doc.appendChild(doctype)
doc.appendChild(element)
if doctype:
doctype.parentNode = doctype.ownerDocument = doc
doc.doctype = doctype
doc.implementation = self
return doc
def createDocumentType(self, qualifiedName, publicId, systemId):
doctype = DocumentType(qualifiedName)
doctype.publicId = publicId
doctype.systemId = systemId
return doctype
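    # createDocument/createDocumentType sketch (the names below are
    # assumptions for illustration):
    #
    #   impl = getDOMImplementation()
    #   dt = impl.createDocumentType('html', None, None)
    #   doc = impl.createDocument(None, 'html', dt)
    #   doc.documentElement.tagName           # -> 'html'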
# DOM Level 3 (WD 9 April 2002)
def getInterface(self, feature):
if self.hasFeature(feature, None):
return self
else:
return None
# internal
def _create_document(self):
return Document()
class ElementInfo(object):
"""Object that represents content-model information for an element.
This implementation is not expected to be used in practice; DOM
builders should provide implementations which do the right thing
    using information available to them.
"""
__slots__ = 'tagName',
def __init__(self, name):
self.tagName = name
def getAttributeType(self, aname):
return _no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return _no_type
def isElementContent(self):
return False
def isEmpty(self):
"""Returns true iff this element is declared to have an EMPTY
content model."""
return False
def isId(self, aname):
"""Returns true iff the named attribte is a DTD-style ID."""
return False
def isIdNS(self, namespaceURI, localName):
"""Returns true iff the identified attribute is a DTD-style ID."""
return False
def __getstate__(self):
return self.tagName
def __setstate__(self, state):
self.tagName = state
def _clear_id_cache(node):
if node.nodeType == Node.DOCUMENT_NODE:
node._id_cache.clear()
node._id_search_stack = None
elif _in_document(node):
node.ownerDocument._id_cache.clear()
        node.ownerDocument._id_search_stack = None
class Document(Node, DocumentLS):
_child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)
nodeType = Node.DOCUMENT_NODE
nodeName = "#document"
nodeValue = None
attributes = None
doctype = None
parentNode = None
previousSibling = nextSibling = None
implementation = DOMImplementation()
# Document attributes from Level 3 (WD 9 April 2002)
actualEncoding = None
encoding = None
standalone = None
version = None
strictErrorChecking = False
errorHandler = None
documentURI = None
_magic_id_count = 0
def __init__(self):
self.childNodes = NodeList()
# mapping of (namespaceURI, localName) -> ElementInfo
# and tagName -> ElementInfo
self._elem_info = {}
self._id_cache = {}
self._id_search_stack = None
def _get_elem_info(self, element):
if element.namespaceURI:
key = element.namespaceURI, element.localName
else:
key = element.tagName
return self._elem_info.get(key)
def _get_actualEncoding(self):
return self.actualEncoding
def _get_doctype(self):
return self.doctype
def _get_documentURI(self):
return self.documentURI
def _get_encoding(self):
return self.encoding
def _get_errorHandler(self):
return self.errorHandler
def _get_standalone(self):
return self.standalone
def _get_strictErrorChecking(self):
return self.strictErrorChecking
def _get_version(self):
return self.version
def appendChild(self, node):
if node.nodeType not in self._child_node_types:
raise xml.dom.HierarchyRequestErr(
"%s cannot be child of %s" % (repr(node), repr(self)))
if node.parentNode is not None:
# This needs to be done before the next test since this
# may *be* the document element, in which case it should
# end up re-ordered to the end.
node.parentNode.removeChild(node)
if node.nodeType == Node.ELEMENT_NODE \
and self._get_documentElement():
raise xml.dom.HierarchyRequestErr(
"two document elements disallowed")
return Node.appendChild(self, node)
def removeChild(self, oldChild):
try:
self.childNodes.remove(oldChild)
except ValueError:
raise xml.dom.NotFoundErr()
oldChild.nextSibling = oldChild.previousSibling = None
oldChild.parentNode = None
if self.documentElement is oldChild:
self.documentElement = None
return oldChild
def _get_documentElement(self):
for node in self.childNodes:
if node.nodeType == Node.ELEMENT_NODE:
return node
def unlink(self):
if self.doctype is not None:
self.doctype.unlink()
self.doctype = None
Node.unlink(self)
def cloneNode(self, deep):
if not deep:
return None
clone = self.implementation.createDocument(None, None, None)
clone.encoding = self.encoding
clone.standalone = self.standalone
clone.version = self.version
for n in self.childNodes:
childclone = _clone_node(n, deep, clone)
assert childclone.ownerDocument.isSameNode(clone)
clone.childNodes.append(childclone)
if childclone.nodeType == Node.DOCUMENT_NODE:
assert clone.documentElement is None
elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
assert clone.doctype is None
clone.doctype = childclone
childclone.parentNode = clone
self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED,
self, clone)
return clone
def createDocumentFragment(self):
d = DocumentFragment()
d.ownerDocument = self
return d
def createElement(self, tagName):
e = Element(tagName)
e.ownerDocument = self
return e
def createTextNode(self, data):
if not isinstance(data, StringTypes):
raise TypeError, "node contents must be a string"
t = Text()
t.data = data
t.ownerDocument = self
return t
def createCDATASection(self, data):
if not isinstance(data, StringTypes):
raise TypeError, "node contents must be a string"
c = CDATASection()
c.data = data
c.ownerDocument = self
return c
def createComment(self, data):
c = Comment(data)
c.ownerDocument = self
return c
def createProcessingInstruction(self, target, data):
p = ProcessingInstruction(target, data)
p.ownerDocument = self
return p
def createAttribute(self, qName):
a = Attr(qName)
a.ownerDocument = self
a.value = ""
return a
def createElementNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
e = Element(qualifiedName, namespaceURI, prefix)
e.ownerDocument = self
return e
def createAttributeNS(self, namespaceURI, qualifiedName):
prefix, localName = _nssplit(qualifiedName)
a = Attr(qualifiedName, namespaceURI, localName, prefix)
a.ownerDocument = self
a.value = ""
return a
# A couple of implementation-specific helpers to create node types
# not supported by the W3C DOM specs:
def _create_entity(self, name, publicId, systemId, notationName):
e = Entity(name, publicId, systemId, notationName)
e.ownerDocument = self
return e
def _create_notation(self, name, publicId, systemId):
n = Notation(name, publicId, systemId)
n.ownerDocument = self
return n
def getElementById(self, id):
if id in self._id_cache:
return self._id_cache[id]
if not (self._elem_info or self._magic_id_count):
return None
stack = self._id_search_stack
if stack is None:
# we never searched before, or the cache has been cleared
stack = [self.documentElement]
self._id_search_stack = stack
elif not stack:
# Previous search was completed and cache is still valid;
# no matching node.
return None
result = None
while stack:
node = stack.pop()
# add child elements to stack for continued searching
stack.extend([child for child in node.childNodes
if child.nodeType in _nodeTypes_with_children])
# check this node
info = self._get_elem_info(node)
if info:
# We have to process all ID attributes before
# returning in order to get all the attributes set to
# be IDs using Element.setIdAttribute*().
for attr in node.attributes.values():
if attr.namespaceURI:
if info.isIdNS(attr.namespaceURI, attr.localName):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif info.isId(attr.name):
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif not node._magic_id_nodes:
break
elif attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
elif node._magic_id_nodes == 1:
break
elif node._magic_id_nodes:
for attr in node.attributes.values():
if attr._is_id:
self._id_cache[attr.value] = node
if attr.value == id:
result = node
if result is not None:
break
return result
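    # getElementById sketch: minidom only treats an attribute as an ID if
    # DTD info says so or Element.setIdAttribute*() was called; a bare
    # 'id' attribute is not automatically an ID.  (elem is assumed to be
    # attached under doc.documentElement.)
    #
    #   elem.setAttribute('myid', 'x1')
    #   elem.setIdAttribute('myid')
    #   doc.getElementById('x1') is elem      # -> True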
def getElementsByTagName(self, name):
return _get_elements_by_tagName_helper(self, name, NodeList())
def getElementsByTagNameNS(self, namespaceURI, localName):
return _get_elements_by_tagName_ns_helper(
self, namespaceURI, localName, NodeList())
def isSupported(self, feature, version):
return self.implementation.hasFeature(feature, version)
def importNode(self, node, deep):
if node.nodeType == Node.DOCUMENT_NODE:
raise xml.dom.NotSupportedErr("cannot import document nodes")
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
raise xml.dom.NotSupportedErr("cannot import document type nodes")
return _clone_node(node, deep, self)
def writexml(self, writer, indent="", addindent="", newl="",
encoding = None):
if encoding is None:
writer.write('<?xml version="1.0" ?>'+newl)
else:
writer.write('<?xml version="1.0" encoding="%s"?>%s' % (encoding, newl))
for node in self.childNodes:
node.writexml(writer, indent, addindent, newl)
# DOM Level 3 (WD 9 April 2002)
def renameNode(self, n, namespaceURI, name):
if n.ownerDocument is not self:
raise xml.dom.WrongDocumentErr(
"cannot rename nodes from other documents;\n"
"expected %s,\nfound %s" % (self, n.ownerDocument))
if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
raise xml.dom.NotSupportedErr(
"renameNode() only applies to element and attribute nodes")
if namespaceURI != EMPTY_NAMESPACE:
if ':' in name:
prefix, localName = name.split(':', 1)
if ( prefix == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE):
raise xml.dom.NamespaceErr(
"illegal use of 'xmlns' prefix")
else:
if ( name == "xmlns"
and namespaceURI != xml.dom.XMLNS_NAMESPACE
and n.nodeType == Node.ATTRIBUTE_NODE):
raise xml.dom.NamespaceErr(
"illegal use of the 'xmlns' attribute")
prefix = None
localName = name
else:
prefix = None
localName = None
if n.nodeType == Node.ATTRIBUTE_NODE:
element = n.ownerElement
if element is not None:
is_id = n._is_id
element.removeAttributeNode(n)
else:
element = None
# avoid __setattr__
d = n.__dict__
d['prefix'] = prefix
d['localName'] = localName
d['namespaceURI'] = namespaceURI
d['nodeName'] = name
if n.nodeType == Node.ELEMENT_NODE:
d['tagName'] = name
else:
# attribute node
d['name'] = name
if element is not None:
element.setAttributeNode(n)
if is_id:
element.setIdAttributeNode(n)
# It's not clear from a semantic perspective whether we should
# call the user data handlers for the NODE_RENAMED event since
# we're re-using the existing node. The draft spec has been
# interpreted as meaning "no, don't call the handler unless a
# new node is created."
return n
defproperty(Document, "documentElement",
doc="Top-level element of this document.")
def _clone_node(node, deep, newOwnerDocument):
"""
Clone a node and give it the new owner document.
Called by Node.cloneNode and Document.importNode
"""
if node.ownerDocument.isSameNode(newOwnerDocument):
operation = xml.dom.UserDataHandler.NODE_CLONED
else:
operation = xml.dom.UserDataHandler.NODE_IMPORTED
if node.nodeType == Node.ELEMENT_NODE:
clone = newOwnerDocument.createElementNS(node.namespaceURI,
node.nodeName)
for attr in node.attributes.values():
clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
a.specified = attr.specified
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
clone = newOwnerDocument.createDocumentFragment()
if deep:
for child in node.childNodes:
c = _clone_node(child, deep, newOwnerDocument)
clone.appendChild(c)
elif node.nodeType == Node.TEXT_NODE:
clone = newOwnerDocument.createTextNode(node.data)
elif node.nodeType == Node.CDATA_SECTION_NODE:
clone = newOwnerDocument.createCDATASection(node.data)
elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
clone = newOwnerDocument.createProcessingInstruction(node.target,
node.data)
elif node.nodeType == Node.COMMENT_NODE:
clone = newOwnerDocument.createComment(node.data)
elif node.nodeType == Node.ATTRIBUTE_NODE:
clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
node.nodeName)
clone.specified = True
clone.value = node.value
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
assert node.ownerDocument is not newOwnerDocument
operation = xml.dom.UserDataHandler.NODE_IMPORTED
clone = newOwnerDocument.implementation.createDocumentType(
node.name, node.publicId, node.systemId)
clone.ownerDocument = newOwnerDocument
if deep:
clone.entities._seq = []
clone.notations._seq = []
for n in node.notations._seq:
notation = Notation(n.nodeName, n.publicId, n.systemId)
notation.ownerDocument = newOwnerDocument
clone.notations._seq.append(notation)
if hasattr(n, '_call_user_data_handler'):
n._call_user_data_handler(operation, n, notation)
for e in node.entities._seq:
entity = Entity(e.nodeName, e.publicId, e.systemId,
e.notationName)
entity.actualEncoding = e.actualEncoding
entity.encoding = e.encoding
entity.version = e.version
entity.ownerDocument = newOwnerDocument
clone.entities._seq.append(entity)
if hasattr(e, '_call_user_data_handler'):
                    e._call_user_data_handler(operation, e, entity)
else:
# Note the cloning of Document and DocumentType nodes is
        # implementation specific. minidom handles those cases
# directly in the cloneNode() methods.
raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
    # Check for _call_user_data_handler() since this could conceivably
    # be used with other DOM implementations (one of the FourThought
    # DOMs, perhaps?).
if hasattr(node, '_call_user_data_handler'):
node._call_user_data_handler(operation, node, clone)
return clone
def _nssplit(qualifiedName):
fields = qualifiedName.split(':', 1)
if len(fields) == 2:
return fields
else:
return (None, fields[0])
def _get_StringIO():
# we can't use cStringIO since it doesn't support Unicode strings
from StringIO import StringIO
return StringIO()
def _do_pulldom_parse(func, args, kwargs):
events = func(*args, **kwargs)
toktype, rootNode = events.getEvent()
events.expandNode(rootNode)
events.clear()
return rootNode
def parse(file, parser=None, bufsize=None):
"""Parse a file into a DOM by filename or file object."""
if parser is None and not bufsize:
from xml.dom import expatbuilder
return expatbuilder.parse(file)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parse, (file,),
{'parser': parser, 'bufsize': bufsize})
def parseString(string, parser=None):
"""Parse a file into a DOM from a string."""
if parser is None:
from xml.dom import expatbuilder
return expatbuilder.parseString(string)
else:
from xml.dom import pulldom
return _do_pulldom_parse(pulldom.parseString, (string,),
{'parser': parser})
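# Usage sketch for the two entry points above (the XML literal is an
# assumption for illustration):
#
#   doc = parseString('<root><item id="1">hi</item></root>')
#   doc.documentElement.tagName                              # -> 'root'
#   doc.getElementsByTagName('item')[0].getAttribute('id')   # -> '1'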
def getDOMImplementation(features=None):
if features:
if isinstance(features, StringTypes):
features = domreg._parse_feature_string(features)
for f, v in features:
if not Document.implementation.hasFeature(f, v):
return None
return Document.implementation
|
oasiswork/odoo
|
refs/heads/8.0
|
addons/stock_picking_wave/__init__.py
|
374
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_picking_wave
import wizard
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
assad2012/ggplot
|
refs/heads/master
|
ggplot/stats/stat_bar.py
|
12
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
from .stat import stat
_MSG_LABELS = """There are more than 30 unique values mapped to x.
If you want a histogram instead, use 'geom_histogram()'.
"""
class stat_bar(stat):
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'geom': 'bar', 'position': 'stack',
'width': 0.9, 'drop': False,
'origin': None, 'labels': None}
def _calculate(self, data):
# reorder x according to the labels
new_data = pd.DataFrame()
new_data["x"] = self.labels
        for column in set(data.columns) - set(['x']):
column_dict = dict(zip(data["x"],data[column]))
default = 0 if column == "y" else data[column].values[0]
new_data[column] = [column_dict.get(val, default)
for val in self.labels]
return new_data
def _calculate_global(self, data):
labels = self.params['labels']
        if labels is None:
labels = sorted(set(data['x'].values))
# For a lot of labels, put out a warning
if len(labels) > 30:
self._print_warning(_MSG_LABELS)
        # Remember the labels so _calculate can reindex against them
self.labels = labels
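# A small illustration of what _calculate() does (hedged; the frame below
# is an assumption, not taken from the test suite):
#
#   data = pd.DataFrame({'x': ['a', 'c'], 'y': [1, 3]})
#   # with self.labels == ['a', 'b', 'c'] the result is reindexed to
#   #   x: ['a', 'b', 'c'], y: [1, 0, 3]
#   # missing 'y' values default to 0; other columns default to their
#   # first observed value.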
|
ansible/ansible
|
refs/heads/devel
|
lib/ansible/collections/list.py
|
22
|
# (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from collections import defaultdict
from ansible.errors import AnsibleError
from ansible.collections import is_collection_path
from ansible.module_utils._text import to_bytes
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.display import Display
display = Display()
def list_valid_collection_paths(search_paths=None, warn=False):
"""
    Filter out non-existing or invalid search_paths for collections
    :param search_paths: list of text-string paths; if None, load default config
:param warn: display warning if search_path does not exist
:return: subset of original list
"""
if search_paths is None:
search_paths = []
search_paths.extend(AnsibleCollectionConfig.collection_paths)
for path in search_paths:
b_path = to_bytes(path)
if not os.path.exists(b_path):
# warn for missing, but not if default
if warn:
display.warning("The configured collection path {0} does not exist.".format(path))
continue
if not os.path.isdir(b_path):
if warn:
display.warning("The configured collection path {0}, exists, but it is not a directory.".format(path))
continue
yield path
def list_collection_dirs(search_paths=None, coll_filter=None):
"""
Return paths for the specific collections found in passed or configured search paths
    :param search_paths: list of text-string paths; if None, load default config
    :param coll_filter: limit collections to just the specific namespace or collection; if None all are returned
:return: list of collection directory paths
"""
collection = None
namespace = None
if coll_filter is not None:
if '.' in coll_filter:
try:
(namespace, collection) = coll_filter.split('.')
except ValueError:
raise AnsibleError("Invalid collection pattern supplied: %s" % coll_filter)
else:
namespace = coll_filter
collections = defaultdict(dict)
for path in list_valid_collection_paths(search_paths):
if os.path.basename(path) != 'ansible_collections':
path = os.path.join(path, 'ansible_collections')
b_coll_root = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_coll_root) and os.path.isdir(b_coll_root):
if namespace is None:
namespaces = os.listdir(b_coll_root)
else:
namespaces = [namespace]
for ns in namespaces:
b_namespace_dir = os.path.join(b_coll_root, to_bytes(ns))
if os.path.isdir(b_namespace_dir):
if collection is None:
colls = os.listdir(b_namespace_dir)
else:
colls = [collection]
for mycoll in colls:
# skip dupe collections as they will be masked in execution
if mycoll not in collections[ns]:
b_coll = to_bytes(mycoll)
b_coll_dir = os.path.join(b_namespace_dir, b_coll)
if is_collection_path(b_coll_dir):
collections[ns][mycoll] = b_coll_dir
yield b_coll_dir
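# Usage sketch (hedged; the collection name is an assumption):
#
#   for b_dir in list_collection_dirs(coll_filter='community.general'):
#       print(b_dir)   # bytes path to the matching collection directory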
|
chunyang-wen/dynet
|
refs/heads/master
|
examples/tensorboard/rnnlm-batch.py
|
6
|
import dynet as dy
import time
import random
from pycrayon import CrayonClient
LAYERS = 2
INPUT_DIM = 256 #50 #256
HIDDEN_DIM = 256 # 50 #1024
VOCAB_SIZE = 0
MB_SIZE = 50 # mini batch size
import argparse
from collections import defaultdict
from itertools import count
import sys
import util
class RNNLanguageModel:
def __init__(self, model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder):
        # Char-level RNN (LAYERS, INPUT_DIM, HIDDEN_DIM are set at module level)
self.builder = builder(LAYERS, INPUT_DIM, HIDDEN_DIM, model)
# Lookup parameters for word embeddings
self.lookup = model.add_lookup_parameters((VOCAB_SIZE, INPUT_DIM))
# Softmax weights/biases on top of LSTM outputs
self.R = model.add_parameters((VOCAB_SIZE, HIDDEN_DIM))
self.bias = model.add_parameters((VOCAB_SIZE))
# Build the language model graph
def BuildLMGraph(self, sents):
dy.renew_cg()
# initialize the RNN
init_state = self.builder.initial_state()
# parameters -> expressions
R = dy.parameter(self.R)
bias = dy.parameter(self.bias)
S = vocab.w2i["<s>"]
# get the cids and masks for each step
tot_chars = 0
cids = []
masks = []
for i in range(len(sents[0])):
cids.append([(vocab.w2i[sent[i]] if len(sent) > i else S) for sent in sents])
mask = [(1 if len(sent)>i else 0) for sent in sents]
masks.append(mask)
tot_chars += sum(mask)
# start the rnn with "<s>"
init_ids = cids[0]
        s = init_state.add_input(dy.lookup_batch(self.lookup, init_ids))
losses = []
# feed char vectors into the RNN and predict the next char
for cid, mask in zip(cids[1:], masks[1:]):
score = dy.affine_transform([bias, R, s.output()])
loss = dy.pickneglogsoftmax_batch(score, cid)
# mask the loss if at least one sentence is shorter
if mask[-1] != 1:
mask_expr = dy.inputVector(mask)
mask_expr = dy.reshape(mask_expr, (1,), len(sents))
loss = loss * mask_expr
losses.append(loss)
# update the state of the RNN
cemb = dy.lookup_batch(self.lookup, cid)
s = s.add_input(cemb)
return dy.sum_batches(dy.esum(losses)), tot_chars
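    # Masking sketch: with sents sorted longest-first, e.g. ['abcd', 'ab'],
    # the per-step masks are [1,1], [1,1], [1,0], [1,0]; whenever a mask
    # contains a 0 the batched loss is multiplied by it, so padded
    # positions contribute nothing to the total.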
def sample(self, first=1, nchars=0, stop=-1):
res = [first]
dy.renew_cg()
state = self.builder.initial_state()
R = dy.parameter(self.R)
bias = dy.parameter(self.bias)
cw = first
while True:
x_t = dy.lookup(self.lookup, cw)
state = state.add_input(x_t)
y_t = state.output()
r_t = bias + (R * y_t)
ydist = dy.softmax(r_t)
dist = ydist.vec_value()
rnd = random.random()
for i,p in enumerate(dist):
rnd -= p
if rnd <= 0: break
res.append(i)
cw = i
if cw == stop: break
if nchars and len(res) > nchars: break
return res
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('corpus', help='Path to the corpus file.')
parser.add_argument('crayserver', help='Server location for crayon.')
parser.add_argument('expname', help='Experiment name')
args = parser.parse_args()
# Connect to the server
cc = CrayonClient(hostname=args.crayserver)
    # Create a new experiment
myexp = cc.create_experiment(args.expname)
train = util.CharsCorpusReader(args.corpus, begin="<s>")
vocab = util.Vocab.from_corpus(train)
VOCAB_SIZE = vocab.size()
model = dy.ParameterCollection()
trainer = dy.SimpleSGDTrainer(model)
#lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.SimpleRNNBuilder)
lm = RNNLanguageModel(model, LAYERS, INPUT_DIM, HIDDEN_DIM, VOCAB_SIZE, builder=dy.LSTMBuilder)
train = list(train)
# Sort training sentences in descending order and count minibatches
train.sort(key=lambda x: -len(x))
train_order = [x*MB_SIZE for x in range(int((len(train)-1)/MB_SIZE + 1))]
# Perform training
i = 0
chars = loss = 0.0
for ITER in range(100):
random.shuffle(train_order)
#_start = time.time()
for sid in train_order:
i += 1
#if i % int(50) == 0:
trainer.status()
if chars > 0: print(loss / chars,)
for _ in range(1):
samp = lm.sample(first=vocab.w2i["<s>"],stop=vocab.w2i["\n"])
print("".join([vocab.i2w[c] for c in samp]).strip())
loss = 0.0
chars = 0.0
# train on the minibatch
errs, mb_chars = lm.BuildLMGraph(train[sid: sid + MB_SIZE])
loss += errs.scalar_value()
# Add a scalar value to the experiment for the set of data points named loss evolution
myexp.add_scalar_value("lossevolution", loss)
chars += mb_chars
errs.backward()
trainer.update()
#print "TM:",(time.time() - _start)/chars
print("ITER",ITER,loss)
#print(loss / chars,)
#print "TM:",(time.time() - _start)/len(train)
trainer.status()
# To save the experiment
filename = myexp.to_zip()
print("Save tensorboard experiment at {}".format(filename))
|
odoomrp/odoomrp-wip
|
refs/heads/8.0
|
mrp_subcontracting/models/procurement_order.py
|
25
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
mrp_operation = fields.Many2one(
'mrp.production.workcenter.line', 'MRP Operation')
@api.multi
def make_po(self):
purchase_line_obj = self.env['purchase.order.line']
res = super(ProcurementOrder, self).make_po()
for procurement in self:
if res[procurement.id]:
purchase_line = purchase_line_obj.browse(res[procurement.id])
if (procurement.mrp_operation and
(not purchase_line.order_id.mrp_operation or
procurement.mrp_operation.id !=
purchase_line.order_id.mrp_operation.id)):
purchase_line.order_id.mrp_operation = (
procurement.mrp_operation.id)
procurement.mrp_operation.purchase_order = (
purchase_line.order_id.id)
return res
|
mzdaniel/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/django/http/__init__.py
|
48
|
import datetime
import os
import re
import time
from pprint import pformat
from urllib import urlencode, quote
from urlparse import urljoin
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5, 2.4. Works on Python 2.6 but raises
# PendingDeprecationWarning
from cgi import parse_qsl
import Cookie
# httponly support exists in Python 2.6's Cookie library,
# but not in Python 2.4 or 2.5.
_morsel_supports_httponly = 'httponly' in Cookie.Morsel._reserved
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = Cookie.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
_tc = Cookie.SimpleCookie()
_tc.load('f:oo')
_cookie_allows_colon_in_names = 'Set-Cookie: f:oo=' in _tc.output()
if _morsel_supports_httponly and _cookie_encodes_correctly and _cookie_allows_colon_in_names:
SimpleCookie = Cookie.SimpleCookie
else:
if not _morsel_supports_httponly:
class Morsel(Cookie.Morsel):
def __setitem__(self, K, V):
K = K.lower()
if K == "httponly":
if V:
# The superclass rejects httponly as a key,
# so we jump to the grandparent.
super(Cookie.Morsel, self).__setitem__(K, V)
else:
super(Morsel, self).__setitem__(K, V)
def OutputString(self, attrs=None):
output = super(Morsel, self).OutputString(attrs)
if "httponly" in self:
output += "; httponly"
return output
class SimpleCookie(Cookie.SimpleCookie):
if not _morsel_supports_httponly:
def __set(self, key, real_value, coded_value):
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
def __setitem__(self, key, value):
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
if not _cookie_encodes_correctly:
def value_encode(self, val):
# Some browsers do not support quoted-string from RFC 2109,
# including some versions of Safari and Internet Explorer.
# These browsers split on ';', and some versions of Safari
# are known to split on ', '. Therefore, we encode ';' and ','
# SimpleCookie already does the hard work of encoding and decoding.
# It uses octal sequences like '\\012' for newline etc.
# and non-ASCII chars. We just make use of this mechanism, to
# avoid introducing two encoding schemes which would be confusing
# and especially awkward for javascript.
# NB, contrary to Python docs, value_encode returns a tuple containing
# (real val, encoded_val)
val, encoded = super(SimpleCookie, self).value_encode(val)
encoded = encoded.replace(";", "\\073").replace(",","\\054")
# If encoded now contains any quoted chars, we need double quotes
# around the whole string.
if "\\" in encoded and not encoded.startswith('"'):
encoded = '"' + encoded + '"'
return val, encoded
if not _cookie_allows_colon_in_names:
def load(self, rawdata, ignore_parse_errors=False):
if ignore_parse_errors:
self.bad_cookies = []
self._BaseCookie__set = self._loose_set
super(SimpleCookie, self).load(rawdata)
if ignore_parse_errors:
self._BaseCookie__set = self._strict_set
for key in self.bad_cookies:
del self[key]
_strict_set = Cookie.BaseCookie._BaseCookie__set
def _loose_set(self, key, real_value, coded_value):
try:
self._strict_set(key, real_value, coded_value)
except Cookie.CookieError:
self.bad_cookies.append(key)
dict.__setitem__(self, key, None)
class CompatCookie(SimpleCookie):
def __init__(self, *args, **kwargs):
super(CompatCookie, self).__init__(*args, **kwargs)
import warnings
warnings.warn("CompatCookie is deprecated, use django.http.SimpleCookie instead.",
PendingDeprecationWarning)
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import smart_str, iri_to_uri, force_unicode
from django.utils.http import cookie_date
from django.http.multipartparser import MultiPartParser
from django.conf import settings
from django.core.files import uploadhandler
from utils import *
RESERVED_CHARS="!*'();:@&=+$,/?%#[]"
absolute_http_url_re = re.compile(r"^https?://", re.I)
class Http404(Exception):
pass
class HttpRequest(object):
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
self.path = ''
self.path_info = ''
self.method = None
def __repr__(self):
return '<HttpRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
(pformat(self.GET), pformat(self.POST), pformat(self.COOKIES),
pformat(self.META))
def get_host(self):
"""Returns the HTTP host using the environment or request headers."""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = str(self.META['SERVER_PORT'])
if server_port != (self.is_secure() and '443' or '80'):
host = '%s:%s' % (host, server_port)
return host
def get_full_path(self):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '')
def build_absolute_uri(self, location=None):
"""
Builds an absolute URI from the location and the variables available in
this request. If no location is specified, the absolute URI is built on
``request.get_full_path()``.
"""
if not location:
location = self.get_full_path()
if not absolute_http_url_re.match(location):
current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http',
self.get_host(), self.path)
location = urljoin(current_uri, location)
return iri_to_uri(location)
def is_secure(self):
return os.environ.get("HTTPS") == "on"
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
def _set_encoding(self, val):
"""
Sets the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, it is removed and recreated on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, '_get'):
del self._get
if hasattr(self, '_post'):
del self._post
def _get_encoding(self):
return self._encoding
encoding = property(_get_encoding, _set_encoding)
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
def _set_upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def _get_upload_handlers(self):
if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
upload_handlers = property(_get_upload_handlers, _set_upload_handlers)
def parse_file_upload(self, META, post_data):
"""Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning = "You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
def _get_raw_post_data(self):
if not hasattr(self, '_raw_post_data'):
if self._read_started:
raise Exception("You cannot access raw_post_data after reading from request's data stream")
try:
content_length = int(self.META.get('CONTENT_LENGTH', 0))
except (ValueError, TypeError):
# If CONTENT_LENGTH was empty string or not an integer, don't
# error out. We've also seen None passed in here (against all
# specs, but see ticket #8259), so we handle TypeError as well.
content_length = 0
if content_length:
self._raw_post_data = self.read(content_length)
else:
self._raw_post_data = self.read()
self._stream = StringIO(self._raw_post_data)
return self._raw_post_data
raw_post_data = property(_get_raw_post_data)
def _mark_post_parse_error(self):
self._post = QueryDict('')
self._files = MultiValueDict()
self._post_parse_error = True
def _load_post_and_files(self):
# Populates self._post and self._files
if self.method != 'POST':
self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
return
if self._read_started and not hasattr(self, '_raw_post_data'):
self._mark_post_parse_error()
return
if self.META.get('CONTENT_TYPE', '').startswith('multipart'):
if hasattr(self, '_raw_post_data'):
# Use already read data
data = StringIO(self._raw_post_data)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except:
            # An error occurred while parsing POST data.  Since the
            # request handler might access self.POST while formatting
            # the error, set self._post and self._files to prevent
            # attempts to parse POST data again.
            # Mark that an error occurred.  This allows self.__repr__ to
# be explicit about it instead of simply representing an
# empty POST
self._mark_post_parse_error()
raise
else:
self._post, self._files = QueryDict(self.raw_post_data, encoding=self._encoding), MultiValueDict()
## File-like and iterator interface.
##
## Expects self._stream to be set to an appropriate source of bytes by
## a corresponding request subclass (WSGIRequest or ModPythonRequest).
## Also when request data has already been read by request.POST or
## request.raw_post_data, self._stream points to a StringIO instance
## containing that data.
def read(self, *args, **kwargs):
self._read_started = True
return self._stream.read(*args, **kwargs)
def readline(self, *args, **kwargs):
self._read_started = True
return self._stream.readline(*args, **kwargs)
def xreadlines(self):
while True:
buf = self.readline()
if not buf:
break
yield buf
__iter__ = xreadlines
def readlines(self):
return list(iter(self))
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict that takes a query string when initialized.
This is immutable unless you create a copy of it.
Values retrieved from this class are converted from the given encoding
(DEFAULT_CHARSET by default) to unicode.
"""
    # These are both reset in __init__, but are specified here at the class
    # level so that unpickling will have valid values.
_mutable = True
_encoding = None
def __init__(self, query_string, mutable=False, encoding=None):
MultiValueDict.__init__(self)
if not encoding:
            # *Important*: do not import settings any earlier because of the
            # note in core.handlers.modpython.
from django.conf import settings
encoding = settings.DEFAULT_CHARSET
self.encoding = encoding
for key, value in parse_qsl((query_string or ''), True): # keep_blank_values=True
self.appendlist(force_unicode(key, encoding, errors='replace'),
force_unicode(value, encoding, errors='replace'))
self._mutable = mutable
def _get_encoding(self):
if self._encoding is None:
# *Important*: do not import settings at the module level because
# of the note in core.handlers.modpython.
from django.conf import settings
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
def _set_encoding(self, value):
self._encoding = value
encoding = property(_get_encoding, _set_encoding)
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
value = str_to_unicode(value, self.encoding)
MultiValueDict.__setitem__(self, key, value)
def __delitem__(self, key):
self._assert_mutable()
super(QueryDict, self).__delitem__(key)
def __copy__(self):
result = self.__class__('', mutable=True, encoding=self.encoding)
for key, value in dict.items(self):
dict.__setitem__(result, key, value)
return result
def __deepcopy__(self, memo):
import django.utils.copycompat as copy
result = self.__class__('', mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
list_ = [str_to_unicode(elt, self.encoding) for elt in list_]
MultiValueDict.setlist(self, key, list_)
def setlistdefault(self, key, default_list=()):
self._assert_mutable()
if key not in self:
self.setlist(key, default_list)
return MultiValueDict.getlist(self, key)
def appendlist(self, key, value):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
value = str_to_unicode(value, self.encoding)
MultiValueDict.appendlist(self, key, value)
def update(self, other_dict):
self._assert_mutable()
f = lambda s: str_to_unicode(s, self.encoding)
if hasattr(other_dict, 'lists'):
for key, valuelist in other_dict.lists():
for value in valuelist:
MultiValueDict.update(self, {f(key): f(value)})
else:
d = dict([(f(k), f(v)) for k, v in other_dict.items()])
MultiValueDict.update(self, d)
def pop(self, key, *args):
self._assert_mutable()
return MultiValueDict.pop(self, key, *args)
def popitem(self):
self._assert_mutable()
return MultiValueDict.popitem(self)
def clear(self):
self._assert_mutable()
MultiValueDict.clear(self)
def setdefault(self, key, default=None):
self._assert_mutable()
key = str_to_unicode(key, self.encoding)
default = str_to_unicode(default, self.encoding)
return MultiValueDict.setdefault(self, key, default)
def copy(self):
"""Returns a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self, safe=None):
"""
Returns an encoded string of all query string arguments.
:arg safe: Used to specify characters which do not require quoting, for
example::
>>> q = QueryDict('', mutable=True)
>>> q['next'] = '/a&b/'
>>> q.urlencode()
'next=%2Fa%26b%2F'
>>> q.urlencode(safe='/')
'next=/a%26b/'
"""
output = []
if safe:
encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
else:
encode = lambda k, v: urlencode({k: v})
for k, list_ in self.lists():
k = smart_str(k, self.encoding)
output.extend([encode(k, smart_str(v, self.encoding))
for v in list_])
return '&'.join(output)
def parse_cookie(cookie):
if cookie == '':
return {}
if not isinstance(cookie, Cookie.BaseCookie):
try:
c = SimpleCookie()
c.load(cookie, ignore_parse_errors=True)
except Cookie.CookieError:
# Invalid cookie
return {}
else:
c = cookie
cookiedict = {}
for key in c.keys():
cookiedict[key] = c.get(key).value
return cookiedict
class BadHeaderError(ValueError):
pass
class HttpResponse(object):
"""A basic HTTP response, with content and dictionary-accessed headers."""
status_code = 200
def __init__(self, content='', mimetype=None, status=None,
content_type=None):
# _headers is a mapping of the lower-case name to the original case of
# the header (required for working with legacy systems) and the header
# value. Both the name of the header and its value are ASCII strings.
self._headers = {}
self._charset = settings.DEFAULT_CHARSET
if mimetype:
content_type = mimetype # For backwards compatibility
if not content_type:
content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE,
self._charset)
if not isinstance(content, basestring) and hasattr(content, '__iter__'):
self._container = content
self._is_string = False
else:
self._container = [content]
self._is_string = True
self.cookies = SimpleCookie()
if status:
self.status_code = status
self['Content-Type'] = content_type

    def __str__(self):
"""Full HTTP message, including headers."""
return '\n'.join(['%s: %s' % (key, value)
for key, value in self._headers.values()]) \
+ '\n\n' + self.content

    def _convert_to_ascii(self, *values):
"""Converts all values to ascii strings."""
for value in values:
if isinstance(value, unicode):
try:
value = value.encode('us-ascii')
except UnicodeError, e:
e.reason += ', HTTP response headers must be in US-ASCII format'
raise
else:
value = str(value)
if '\n' in value or '\r' in value:
raise BadHeaderError("Header values can't contain newlines (got %r)" % (value))
yield value

    def __setitem__(self, header, value):
header, value = self._convert_to_ascii(header, value)
self._headers[header.lower()] = (header, value)

    def __delitem__(self, header):
try:
del self._headers[header.lower()]
except KeyError:
pass

    def __getitem__(self, header):
return self._headers[header.lower()][1]

    def has_header(self, header):
"""Case-insensitive check for a header."""
        return header.lower() in self._headers

    __contains__ = has_header

    def items(self):
return self._headers.values()

    def get(self, header, alternate):
return self._headers.get(header.lower(), (None, alternate))[1]

    def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
"""
Sets a cookie.

        ``expires`` can be a string in the correct format or a
``datetime.datetime`` object in UTC. If ``expires`` is a datetime
object then ``max_age`` will be calculated.
"""
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
                delta = expires - datetime.datetime.utcnow()
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]['expires'] = expires
if max_age is not None:
self.cookies[key]['max-age'] = max_age
            # IE requires expires, so set it if it hasn't been set already.
if not expires:
self.cookies[key]['expires'] = cookie_date(time.time() +
max_age)
if path is not None:
self.cookies[key]['path'] = path
if domain is not None:
self.cookies[key]['domain'] = domain
if secure:
self.cookies[key]['secure'] = True
if httponly:
self.cookies[key]['httponly'] = True
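
    # A minimal sketch: passing a ``datetime`` for ``expires`` is converted
    # into a ``max-age`` of roughly the seconds remaining until that moment
    # (the exact value depends on the clock):
    #
    #     >>> import datetime
    #     >>> resp = HttpResponse()
    #     >>> later = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
    #     >>> resp.set_cookie('k', 'v', expires=later)
    #     >>> resp.cookies['k']['max-age']
    #     3600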

    def delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')

    def _get_content(self):
if self.has_header('Content-Encoding'):
return ''.join(self._container)
return smart_str(''.join(self._container), self._charset)

    def _set_content(self, value):
self._container = [value]
self._is_string = True

    content = property(_get_content, _set_content)

    def __iter__(self):
self._iterator = iter(self._container)
return self

    def next(self):
chunk = self._iterator.next()
if isinstance(chunk, unicode):
chunk = chunk.encode(self._charset)
return str(chunk)

    def close(self):
if hasattr(self._container, 'close'):
self._container.close()

    # The remaining methods partially implement the file-like object interface.
# See http://docs.python.org/lib/bltin-file-objects.html
def write(self, content):
if not self._is_string:
raise Exception("This %s instance is not writable" % self.__class__)
self._container.append(content)

    def flush(self):
pass

    def tell(self):
if not self._is_string:
raise Exception("This %s instance cannot tell its position" % self.__class__)
return sum([len(chunk) for chunk in self._container])
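
# A minimal usage sketch: headers use case-insensitive item access, and
# string-based responses support the file-like write() interface.
#
#     >>> resp = HttpResponse("hello", content_type="text/plain")
#     >>> resp['X-Frame-Options'] = 'DENY'
#     >>> 'x-frame-options' in resp
#     True
#     >>> resp.write(" world")
#     >>> resp.content
#     'hello world'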

class HttpResponseRedirect(HttpResponse):
    status_code = 302

    def __init__(self, redirect_to):
        super(HttpResponseRedirect, self).__init__()
        self['Location'] = iri_to_uri(redirect_to)

class HttpResponsePermanentRedirect(HttpResponse):
    status_code = 301

    def __init__(self, redirect_to):
        super(HttpResponsePermanentRedirect, self).__init__()
        self['Location'] = iri_to_uri(redirect_to)

class HttpResponseNotModified(HttpResponse):
    status_code = 304

class HttpResponseBadRequest(HttpResponse):
    status_code = 400

class HttpResponseNotFound(HttpResponse):
    status_code = 404

class HttpResponseForbidden(HttpResponse):
    status_code = 403

class HttpResponseNotAllowed(HttpResponse):
    status_code = 405

    def __init__(self, permitted_methods):
        super(HttpResponseNotAllowed, self).__init__()
        self['Allow'] = ', '.join(permitted_methods)

class HttpResponseGone(HttpResponse):
status_code = 410

class HttpResponseServerError(HttpResponse):
status_code = 500
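
# The subclasses above differ only in status_code plus, where HTTP
# requires it, a mandatory header (Location for redirects, Allow for
# 405). A hypothetical subclass following the same pattern:
#
#     class HttpResponseTeapot(HttpResponse):
#         status_code = 418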

# A backwards compatible alias for HttpRequest.get_host.
def get_host(request):
return request.get_host()

# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_unicode for parsing URLs and form inputs. Thus,
# this slightly more restricted function.
def str_to_unicode(s, encoding):
"""
Converts basestring objects to unicode, using the given encoding. Illegally
    encoded input characters are replaced with the Unicode "unknown" codepoint
    (\ufffd).

    Returns any non-basestring objects without change.
"""
if isinstance(s, str):
return unicode(s, encoding, 'replace')
else:
return s
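
# A minimal behaviour sketch (Python 2 semantics):
#
#     >>> str_to_unicode('caf\xc3\xa9', 'utf-8')
#     u'caf\xe9'
#     >>> str_to_unicode('\xff', 'utf-8')      # undecodable byte -> u'\ufffd'
#     u'\ufffd'
#     >>> str_to_unicode(42, 'utf-8')          # non-basestring passes through
#     42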