repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
saltduck/python-bitcoinlib | refs/heads/master | examples/spend-p2sh-txout.py | 19 | #!/usr/bin/env python3
# Copyright (C) 2014 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.

"""Low-level example of how to spend a P2SH/BIP16 txout"""

import sys
if sys.version_info.major < 3:
    sys.stderr.write('Sorry, Python 3.x required by this example.\n')
    sys.exit(1)

import hashlib

from bitcoin import SelectParams
from bitcoin.core import b2x, lx, COIN, COutPoint, CMutableTxOut, CMutableTxIn, CMutableTransaction, Hash160
from bitcoin.core.script import CScript, OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG, SignatureHash, SIGHASH_ALL
from bitcoin.core.scripteval import VerifyScript, SCRIPT_VERIFY_P2SH
from bitcoin.wallet import CBitcoinAddress, CBitcoinSecret

SelectParams('mainnet')

# Create the (in)famous correct brainwallet secret key.
h = hashlib.sha256(b'correct horse battery staple').digest()
seckey = CBitcoinSecret.from_secret_bytes(h)

# Create a redeemScript. Similar to a scriptPubKey the redeemScript must be
# satisfied for the funds to be spent.
txin_redeemScript = CScript([seckey.pub, OP_CHECKSIG])
print(b2x(txin_redeemScript))

# Create the magic P2SH scriptPubKey format from that redeemScript. You should
# look at the CScript.to_p2sh_scriptPubKey() function in bitcoin.core.script to
# understand what's happening, as well as read BIP16:
# https://github.com/bitcoin/bips/blob/master/bip-0016.mediawiki
txin_scriptPubKey = txin_redeemScript.to_p2sh_scriptPubKey()

# Convert the P2SH scriptPubKey to a base58 Bitcoin address and print it.
# You'll need to send some funds to it to create a txout to spend.
txin_p2sh_address = CBitcoinAddress.from_scriptPubKey(txin_scriptPubKey)
print('Pay to:', str(txin_p2sh_address))

# Same as the txid:vout the createrawtransaction RPC call requires
#
# lx() takes *little-endian* hex and converts it to bytes; in Bitcoin
# transaction hashes are shown little-endian rather than the usual big-endian.
# There's also a corresponding x() convenience function that takes big-endian
# hex and converts it to bytes.
txid = lx('bff785da9f8169f49be92fa95e31f0890c385bfb1bd24d6b94d7900057c617ae')
vout = 0

# Create the txin structure, which includes the outpoint. The scriptSig
# defaults to being empty.
txin = CMutableTxIn(COutPoint(txid, vout))

# Create the txout. This time we create the scriptPubKey from a Bitcoin
# address.
#
# BUGFIX: the output value (nValue) must be an *integer* number of satoshis;
# 0.0005*COIN is a float, which would serialize incorrectly, so convert it
# explicitly with int().
txout = CMutableTxOut(int(0.0005*COIN), CBitcoinAddress('323uf9MgLaSn9T7vDaK1cGAZ2qpvYUuqSp').to_scriptPubKey())

# Create the unsigned transaction.
tx = CMutableTransaction([txin], [txout])

# Calculate the signature hash for that transaction. Note how the script we use
# is the redeemScript, not the scriptPubKey. That's because when the CHECKSIG
# operation happens EvalScript() will be evaluating the redeemScript, so the
# corresponding SignatureHash() function will use that same script when it
# replaces the scriptSig in the transaction being hashed with the script being
# executed.
sighash = SignatureHash(txin_redeemScript, tx, 0, SIGHASH_ALL)

# Now sign it. We have to append the type of signature we want to the end, in
# this case the usual SIGHASH_ALL.
sig = seckey.sign(sighash) + bytes([SIGHASH_ALL])

# Set the scriptSig of our transaction input appropriately.
txin.scriptSig = CScript([sig, txin_redeemScript])

# Verify the signature worked. This calls EvalScript() and actually executes
# the opcodes in the scripts to see if everything worked out. If it doesn't an
# exception will be raised.
VerifyScript(txin.scriptSig, txin_scriptPubKey, tx, 0, (SCRIPT_VERIFY_P2SH,))

# Done! Print the transaction to standard output with the bytes-to-hex
# function.
print(b2x(tx.serialize()))
|
2013Commons/HUE-SHARK | refs/heads/master | desktop/core/ext-py/Django-1.2.3/build/lib.linux-i686-2.7/django/conf/locale/lv/formats.py | 35 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Date/time format definitions for the Latvian (lv) locale.
# Output formats use Django's date-format syntax; backslash-escaped letters
# (e.g. \g\a\d\a) are emitted literally, spelling the Latvian word "gada".
DATE_FORMAT = r'Y. \g\a\d\a j. F'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'Y. \g\a\d\a j. F, H:i:s'
YEAR_MONTH_FORMAT = r'Y. \g. F'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = r'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1 #Monday

# Input (parsing) formats use strftime/strptime directives; Django tries
# them in order, so the most common forms are listed first.
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
    '%H.%M.%S', # '14.30.59'
    '%H.%M', # '14.30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M', # '25.10.06 14:30'
    '%d.%m.%y %H.%M.%S', # '25.10.06 14.30.59'
    '%d.%m.%y %H.%M', # '25.10.06 14.30'
    '%d.%m.%y', # '25.10.06'
)

# Number formatting: comma decimal separator, space-grouped thousands.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
|
sampadsaha5/sympy | refs/heads/master | sympy/polys/tests/test_partfrac.py | 29 | """Tests for algorithms for partial fraction decomposition of rational
functions. """
from sympy.polys.partfrac import (
apart_undetermined_coeffs,
apart,
apart_list, assemble_partfrac_list
)
from sympy import (S, Poly, E, pi, I, Matrix, Eq, RootSum, Lambda,
Symbol, Dummy, factor, together, sqrt, Expr)
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import x, y, a, b, c
def test_apart():
    """apart() on constants, simple rational functions (both full=False and
    full=True), an Eq wrapper, and unsupported multivariate input."""
    assert apart(1) == 1
    assert apart(1, x) == 1

    f, g = (x**2 + 1)/(x + 1), 2/(x + 1) + x - 1

    assert apart(f, full=False) == g
    assert apart(f, full=True) == g

    f, g = 1/(x + 2)/(x + 1), 1/(1 + x) - 1/(2 + x)

    assert apart(f, full=False) == g
    assert apart(f, full=True) == g

    f, g = 1/(x + 1)/(x + 5), -1/(5 + x)/4 + 1/(1 + x)/4

    assert apart(f, full=False) == g
    assert apart(f, full=True) == g

    assert apart((E*x + 2)/(x - pi)*(x - 1), x) == \
        2 - E + E*pi + E*x + (E*pi + 2)*(pi - 1)/(x - pi)

    # apart() maps over both sides of an Eq.
    assert apart(Eq((x**2 + 1)/(x + 1), x), x) == Eq(x - 1 + 2/(x + 1), x)

    # Multivariate denominators are not supported.
    raises(NotImplementedError, lambda: apart(1/(x + 1)/(y + 2)))
def test_apart_matrix():
    """apart() is applied elementwise to Matrix entries."""
    M = Matrix(2, 2, lambda i, j: 1/(x + i + 1)/(x + j))

    assert apart(M) == Matrix([
        [1/x - 1/(x + 1), (x + 1)**(-2)],
        [1/(2*x) - (S(1)/2)/(x + 2), 1/(x + 1) - 1/(x + 2)],
    ])
def test_apart_symbolic():
    """Decomposition with symbolic coefficients a, b, c in the numerator
    and denominator."""
    f = a*x**4 + (2*b + 2*a*c)*x**3 + (4*b*c - a**2 + a*c**2)*x**2 + \
        (-2*a*b + 2*b*c**2)*x - b**2
    g = a**2*x**4 + (2*a*b + 2*c*a**2)*x**3 + (4*a*b*c + b**2 +
        a**2*c**2)*x**2 + (2*c*b**2 + 2*a*b*c**2)*x + b**2*c**2

    assert apart(f/g, x) == 1/a - 1/(x + c)**2 - b**2/(a*(a*x + b)**2)

    assert apart(1/((x + a)*(x + b)*(x + c)), x) == \
        1/((a - c)*(b - c)*(c + x)) - 1/((a - b)*(b - c)*(b + x)) + \
        1/((a - b)*(a - c)*(a + x))
def test_apart_extension():
    """Decomposition over an extension field (Gaussian rationals), via both
    the extension= and gaussian= keyword spellings."""
    f = 2/(x**2 + 1)
    g = I/(x + I) - I/(x - I)

    assert apart(f, extension=I) == g
    assert apart(f, gaussian=True) == g

    f = x/((x - 2)*(x + I))

    # Round-trip: recombining the partial fractions recovers f.
    assert factor(together(apart(f))) == f
def test_apart_full():
    """full=True expresses factors irreducible over the rationals via
    RootSum, where full=False leaves them untouched."""
    f = 1/(x**2 + 1)

    assert apart(f, full=False) == f
    assert apart(f, full=True) == \
        -RootSum(x**2 + 1, Lambda(a, a/(x - a)), auto=False)/2

    f = 1/(x**3 + x + 1)

    assert apart(f, full=False) == f
    assert apart(f, full=True) == \
        RootSum(x**3 + x + 1,
                Lambda(a, (6*a**2/31 - 9*a/31 + S(4)/31)/(x - a)), auto=False)

    f = 1/(x**5 + 1)

    assert apart(f, full=False) == \
        (-S(1)/5)*((x**3 - 2*x**2 + 3*x - 4)/(x**4 - x**3 + x**2 -
                                              x + 1)) + (S(1)/5)/(x + 1)
    assert apart(f, full=True) == \
        -RootSum(x**4 - x**3 + x**2 - x + 1,
                 Lambda(a, a/(x - a)), auto=False)/5 + (S(1)/5)/(x + 1)
def test_apart_undetermined_coeffs():
    """Direct tests of the undetermined-coefficients backend on Poly input,
    including a parametrized domain ZZ[a,b]."""
    p = Poly(2*x - 3)
    q = Poly(x**9 - x**8 - x**6 + x**5 - 2*x**2 + 3*x - 1)
    r = (-x**7 - x**6 - x**5 + 4)/(x**8 - x**5 - 2*x + 1) + 1/(x - 1)

    assert apart_undetermined_coeffs(p, q) == r

    p = Poly(1, x, domain='ZZ[a,b]')
    q = Poly((x + a)*(x + b), x, domain='ZZ[a,b]')
    r = 1/((a - b)*(b + x)) - 1/((a - b)*(a + x))

    assert apart_undetermined_coeffs(p, q) == r
def test_apart_list():
    """apart_list() returns a structured result:
    (common coefficient, polynomial part, list of rational parts)."""
    from sympy.utilities.iterables import numbered_symbols

    w0, w1, w2 = Symbol("w0"), Symbol("w1"), Symbol("w2")
    _a = Dummy("a")

    f = (-2*x - 2*x**2) / (3*x**2 - 6*x)
    assert apart_list(f, x, dummies=numbered_symbols("w")) == (-1,
        Poly(S(2)/3, x, domain='QQ'),
        [(Poly(w0 - 2, w0, domain='ZZ'), Lambda(_a, 2), Lambda(_a, -_a + x), 1)])

    assert apart_list(2/(x**2-2), x, dummies=numbered_symbols("w")) == (1,
        Poly(0, x, domain='ZZ'),
        [(Poly(w0**2 - 2, w0, domain='ZZ'),
          Lambda(_a, _a/2),
          Lambda(_a, -_a + x), 1)])

    f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    assert apart_list(f, x, dummies=numbered_symbols("w")) == (1,
        Poly(0, x, domain='ZZ'),
        [(Poly(w0 - 2, w0, domain='ZZ'), Lambda(_a, 4), Lambda(_a, -_a + x), 1),
         (Poly(w1**2 - 1, w1, domain='ZZ'), Lambda(_a, -3*_a - 6), Lambda(_a, -_a + x), 2),
         (Poly(w2 + 1, w2, domain='ZZ'), Lambda(_a, -4), Lambda(_a, -_a + x), 1)])
def test_assemble_partfrac_list():
    """Recombining an apart_list() result, including a hand-built structured
    decomposition with explicit roots instead of a Poly."""
    f = 36 / (x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
    pfd = apart_list(f)
    assert assemble_partfrac_list(pfd) == -4/(x + 1) - 3/(x + 1)**2 - 9/(x - 1)**2 + 4/(x - 2)

    a = Dummy("a")
    pfd = (1, Poly(0, x, domain='ZZ'), [([sqrt(2),-sqrt(2)], Lambda(a, a/2), Lambda(a, -a + x), 1)])
    assert assemble_partfrac_list(pfd) == -1/(sqrt(2)*(x + sqrt(2))) + 1/(sqrt(2)*(x - sqrt(2)))
@XFAIL
def test_noncommutative_pseudomultivariate():
    """Known failure: apart() does not recurse into noncommutative wrappers."""
    # apart doesn't go inside noncommutative expressions
    class foo(Expr):
        is_commutative=False
    e = x/(x + x*y)
    c = 1/(1 + y)
    assert apart(e + foo(e)) == c + foo(c)
    assert apart(e*foo(e)) == c*foo(c)
def test_noncommutative():
    """apart() handles the commutative part while leaving a noncommutative
    term untouched."""
    class foo(Expr):
        is_commutative=False
    e = x/(x + x*y)
    c = 1/(1 + y)
    assert apart(e + foo()) == c + foo()
def test_issue_5798():
    """Regression test for sympy issue 5798 (recombination of a sum of
    partial fractions)."""
    assert apart(
        2*x/(x**2 + 1) - (x - 1)/(2*(x**2 + 1)) + 1/(2*(x + 1)) - 2/x) == \
        (3*x + 1)/(x**2 + 1)/2 + 1/(x + 1)/2 - 2/x
|
kmoocdev2/edx-platform | refs/heads/real_2019 | lms/djangoapps/certificates/management/commands/regenerate_user.py | 15 | """Django management command to force certificate regeneration for one user"""
import copy
import logging
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from opaque_keys.edx.keys import CourseKey
from six import text_type
from badges.events.course_complete import get_completion_badge
from badges.utils import badges_enabled
from lms.djangoapps.certificates.api import regenerate_user_certificates
from xmodule.modulestore.django import modulestore
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Management command to recreate the certificate for
    a given user in a given course.
    """

    help = """Put a request on the queue to recreate the certificate for a particular user in a particular course."""

    def add_arguments(self, parser):
        """Declare the command-line options accepted by this command."""
        parser.add_argument('-n', '--noop',
                            action='store_true',
                            dest='noop',
                            help="Don't grade or add certificate requests to the queue")
        parser.add_argument('--insecure',
                            action='store_true',
                            dest='insecure',
                            help="Don't use https for the callback url to the LMS, useful in http test environments")
        parser.add_argument('-c', '--course',
                            metavar='COURSE_ID',
                            dest='course',
                            required=True,
                            help='The course id (e.g., mit/6-002x/circuits-and-electronics) for which the student '
                                 'named in <username> should be graded')
        parser.add_argument('-u', '--user',
                            metavar='USERNAME',
                            dest='username',
                            required=True,
                            help='The username or email address for whom grading and certification should be requested')
        parser.add_argument('-G', '--grade',
                            metavar='GRADE',
                            dest='grade_value',
                            default=None,
                            help='The grade string, such as "Distinction", which is passed to the certificate agent')
        parser.add_argument('-T', '--template',
                            metavar='TEMPLATE',
                            dest='template_file',
                            default=None,
                            help='The template file used to render this certificate, like "QMSE01-distinction.pdf"')

    def handle(self, *args, **options):
        """Look up the student and course, clear any completion badge, and
        queue a certificate-regeneration task (unless --noop was given).

        Raises User.DoesNotExist if the user is not enrolled in the course.
        """
        # Scrub the username from the log message
        cleaned_options = copy.copy(options)
        if 'username' in cleaned_options:
            cleaned_options['username'] = '<USERNAME>'
        LOGGER.info(
            (
                u"Starting to create tasks to regenerate certificates "
                u"with arguments %s and options %s"
            ),
            text_type(args),
            text_type(cleaned_options)
        )

        # try to parse out the course from the serialized form
        course_id = CourseKey.from_string(options['course'])

        # --user accepts either a username or an email address; enrollment in
        # the course is enforced by the courseenrollment join in the query.
        user = options['username']
        if '@' in user:
            student = User.objects.get(email=user, courseenrollment__course_id=course_id)
        else:
            student = User.objects.get(username=user, courseenrollment__course_id=course_id)

        course = modulestore().get_course(course_id, depth=2)

        if not options['noop']:
            LOGGER.info(
                (
                    u"Adding task to the XQueue to generate a certificate "
                    u"for student %s in course '%s'."
                ),
                student.id,
                course_id
            )

            # Remove any existing completion badge so a fresh one can be
            # issued alongside the regenerated certificate.
            if badges_enabled() and course.issue_badges:
                badge_class = get_completion_badge(course_id, student)
                badge = badge_class.get_for_user(student)

                if badge:
                    badge.delete()
                    LOGGER.info(u"Cleared badge for student %s.", student.id)

            # Add the certificate request to the queue
            ret = regenerate_user_certificates(
                student, course_id, course=course,
                forced_grade=options['grade_value'],
                template_file=options['template_file'],
                insecure=options['insecure']
            )

            LOGGER.info(
                (
                    u"Added a certificate regeneration task to the XQueue "
                    u"for student %s in course '%s'. "
                    u"The new certificate status is '%s'."
                ),
                student.id,
                text_type(course_id),
                ret
            )
        else:
            LOGGER.info(
                (
                    u"Skipping certificate generation for "
                    u"student %s in course '%s' "
                    u"because the noop flag is set."
                ),
                student.id,
                text_type(course_id)
            )

        LOGGER.info(
            (
                u"Finished regenerating certificates command for "
                u"user %s and course '%s'."
            ),
            student.id,
            text_type(course_id)
        )
|
FlorentChamault/My_sickbeard | refs/heads/master | lib/bencode/BTL.py | 278 | class BTFailure(Exception):
pass
|
2014c2g5/cd0505 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/unittest/suite.py | 748 | """TestSuite"""
import sys
from . import case
from . import util
__unittest = True
def _call_if_exists(parent, attr):
    """Invoke ``parent.<attr>()`` when the attribute exists; otherwise no-op."""
    getattr(parent, attr, lambda: None)()
class BaseTestSuite(object):
    """A simple test suite that doesn't provide class or module shared fixtures.
    """
    def __init__(self, tests=()):
        # Tests run in insertion order; addTests validates each entry.
        self._tests = []
        self.addTests(tests)

    def __repr__(self):
        return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))

    def __eq__(self, other):
        # Suites are equal when they hold equal tests in the same order.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return list(self) == list(other)

    def __ne__(self, other):
        return not self == other

    def __iter__(self):
        return iter(self._tests)

    def countTestCases(self):
        """Return the total number of test cases, recursing into sub-suites."""
        cases = 0
        for test in self:
            cases += test.countTestCases()
        return cases

    def addTest(self, test):
        """Add a single test (case or suite instance) after sanity checks."""
        # sanity checks
        if not callable(test):
            raise TypeError("{} is not callable".format(repr(test)))
        # Reject classes (as opposed to instances) of TestCase/TestSuite.
        if isinstance(test, type) and issubclass(test,
                                                 (case.TestCase, TestSuite)):
            raise TypeError("TestCases and TestSuites must be instantiated "
                            "before passing them to addTest()")
        self._tests.append(test)

    def addTests(self, tests):
        """Add an iterable of tests; a bare string is rejected explicitly."""
        if isinstance(tests, str):
            raise TypeError("tests must be an iterable of tests, not a string")
        for test in tests:
            self.addTest(test)

    def run(self, result):
        """Run every test, honouring result.shouldStop for early exit."""
        for test in self:
            if result.shouldStop:
                break
            test(result)
        return result

    def __call__(self, *args, **kwds):
        return self.run(*args, **kwds)

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        for test in self:
            test.debug()
class TestSuite(BaseTestSuite):
    """A test suite is a composite test consisting of a number of TestCases.

    For use, create an instance of TestSuite, then add test case instances.
    When all tests have been added, the suite can be passed to a test
    runner, such as TextTestRunner. It will run the individual test cases
    in the order in which they were added, aggregating the results. When
    subclassing, do not forget to call the base class constructor.
    """

    def run(self, result, debug=False):
        """Run all tests, managing class- and module-level fixtures.

        The outermost call (tracked via result._testRunEntered) is also
        responsible for tearing down the final class/module at the end.
        """
        topLevel = False
        if getattr(result, '_testRunEntered', False) is False:
            result._testRunEntered = topLevel = True

        for test in self:
            if result.shouldStop:
                break

            if _isnotsuite(test):
                # A leaf test: handle any class/module boundary crossing.
                self._tearDownPreviousClass(test, result)
                self._handleModuleFixture(test, result)
                self._handleClassSetUp(test, result)
                result._previousTestClass = test.__class__

                # Skip tests whose class or module fixture failed to set up.
                if (getattr(test.__class__, '_classSetupFailed', False) or
                        getattr(result, '_moduleSetUpFailed', False)):
                    continue

            if not debug:
                test(result)
            else:
                test.debug()

        if topLevel:
            self._tearDownPreviousClass(None, result)
            self._handleModuleTearDown(result)
            result._testRunEntered = False
        return result

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        debug = _DebugResult()
        self.run(debug, True)

    ################################
    # Fixture-management helpers (private).

    def _handleClassSetUp(self, test, result):
        """Call setUpClass when execution enters a new test class."""
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if result._moduleSetUpFailed:
            return
        if getattr(currentClass, "__unittest_skip__", False):
            return

        try:
            currentClass._classSetupFailed = False
        except TypeError:
            # test may actually be a function
            # so its class will be a builtin-type
            pass

        setUpClass = getattr(currentClass, 'setUpClass', None)
        if setUpClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                setUpClass()
            except Exception as e:
                # In debug mode, propagate the failure immediately.
                if isinstance(result, _DebugResult):
                    raise
                currentClass._classSetupFailed = True
                className = util.strclass(currentClass)
                errorName = 'setUpClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _get_previous_module(self, result):
        """Return the module name of the previously-run test class, if any."""
        previousModule = None
        previousClass = getattr(result, '_previousTestClass', None)
        if previousClass is not None:
            previousModule = previousClass.__module__
        return previousModule

    def _handleModuleFixture(self, test, result):
        """Tear down the previous module and call setUpModule on the new one."""
        previousModule = self._get_previous_module(result)
        currentModule = test.__class__.__module__
        if currentModule == previousModule:
            return

        self._handleModuleTearDown(result)

        result._moduleSetUpFailed = False
        try:
            module = sys.modules[currentModule]
        except KeyError:
            return
        setUpModule = getattr(module, 'setUpModule', None)
        if setUpModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                setUpModule()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                result._moduleSetUpFailed = True
                errorName = 'setUpModule (%s)' % currentModule
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _addClassOrModuleLevelException(self, result, exception, errorName):
        """Record a fixture failure against a synthetic _ErrorHolder test.

        Must be called from inside an except block (relies on sys.exc_info()).
        """
        error = _ErrorHolder(errorName)
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None and isinstance(exception, case.SkipTest):
            addSkip(error, str(exception))
        else:
            result.addError(error, sys.exc_info())

    def _handleModuleTearDown(self, result):
        """Call tearDownModule on the previously-run module, if applicable."""
        previousModule = self._get_previous_module(result)
        if previousModule is None:
            return
        if result._moduleSetUpFailed:
            return

        try:
            module = sys.modules[previousModule]
        except KeyError:
            return

        tearDownModule = getattr(module, 'tearDownModule', None)
        if tearDownModule is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                tearDownModule()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                errorName = 'tearDownModule (%s)' % previousModule
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')

    def _tearDownPreviousClass(self, test, result):
        """Call tearDownClass on the previous class when leaving it."""
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if getattr(previousClass, '_classSetupFailed', False):
            return
        if getattr(result, '_moduleSetUpFailed', False):
            return
        if getattr(previousClass, "__unittest_skip__", False):
            return

        tearDownClass = getattr(previousClass, 'tearDownClass', None)
        if tearDownClass is not None:
            _call_if_exists(result, '_setupStdout')
            try:
                tearDownClass()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                className = util.strclass(previousClass)
                errorName = 'tearDownClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)
            finally:
                _call_if_exists(result, '_restoreStdout')
class _ErrorHolder(object):
    """
    Placeholder for a TestCase inside a result. As far as a TestResult
    is concerned, this looks exactly like a unit test. Used to insert
    arbitrary errors into a test suite run.
    """
    # Inspired by the ErrorHolder from Twisted:
    # http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py

    # attribute used by TestResult._exc_info_to_string
    failureException = None

    def __init__(self, description):
        self.description = description

    def __repr__(self):
        return "<ErrorHolder description={!r}>".format(self.description)

    def __str__(self):
        return self.id()

    def id(self):
        """The description doubles as the test id."""
        return self.description

    def shortDescription(self):
        return None

    def countTestCases(self):
        """An error holder contributes no actual test cases."""
        return 0

    def run(self, result):
        # could call result.addError(...) - but this test-like object
        # shouldn't be run anyway
        pass

    def __call__(self, result):
        return self.run(result)
def _isnotsuite(test):
    "A crude way to tell apart testcases and suites with duck-typing"
    # Suites are iterable; individual test cases are not.
    try:
        iter(test)
        return False
    except TypeError:
        return True
class _DebugResult(object):
    "Used by the TestSuite to hold previous class when running in debug."
    # Minimal stand-in for a TestResult: just the attributes that
    # TestSuite.run() and the fixture helpers read.
    _previousTestClass = None
    _moduleSetUpFailed = False
    shouldStop = False
|
IPVL/Arun-SocketExample | refs/heads/master | __init__.py | 1 | __author__ = 'arun'
|
allotria/intellij-community | refs/heads/master | python/testData/paramInfo/ReassignedInstanceMethod.py | 83 | class Foo:
def moo(self, a, b, c):
pass
foo = Foo()
qoo = foo
qoo.moo(<arg1>1, <arg2>2, <arg3>"bamboo")
|
google-research/language | refs/heads/master | language/labs/exemplar_decoding/__init__.py | 8 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
|
ybellavance/python-for-android | refs/heads/master | python3-alpha/python3-src/Tools/i18n/makelocalealias.py | 48 | #!/usr/bin/env python3
"""
Convert the X11 locale.alias file into a mapping dictionary suitable
for locale.py.
Written by Marc-Andre Lemburg <mal@genix.com>, 2004-12-10.
"""
import locale
# Location of the alias file
LOCALE_ALIAS = '/usr/share/X11/locale/locale.alias'
def parse(filename):
    """Parse an X11 locale.alias file into a {locale: alias} mapping.

    Each data line has the form ``<locale>: <alias>``.  Comment and blank
    lines are skipped.  Locale names are lower-cased, one-letter locales
    other than 'c' are dropped, encodings are normalised (dashes and
    underscores removed), and UTF-8 entries are ignored since that
    encoding is assumed to be available for every locale.
    """
    data = {}
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(filename) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            if line[:1] == '#':
                continue
            # Renamed from 'locale' to avoid shadowing the module import.
            locale_name, alias = line.split()
            # Strip ':'
            if locale_name[-1] == ':':
                locale_name = locale_name[:-1]
            # Lower-case locale
            locale_name = locale_name.lower()
            # Ignore one letter locale mappings (except for 'c')
            if len(locale_name) == 1 and locale_name != 'c':
                continue
            # Normalize encoding, if given
            if '.' in locale_name:
                lang, encoding = locale_name.split('.')[:2]
                encoding = encoding.replace('-', '')
                encoding = encoding.replace('_', '')
                locale_name = lang + '.' + encoding
                if encoding.lower() == 'utf8':
                    # Ignore UTF-8 mappings - this encoding should be
                    # available for all locales
                    continue
            data[locale_name] = alias
    return data
def pprint(data):
    """Print the mapping, sorted by key, as lines of a dict literal."""
    for key, value in sorted(data.items()):
        print(' %-40s%r,' % ('%r:' % key, value))
def print_differences(data, olddata):
    """Report entries of olddata that were removed from or changed in data."""
    for key in sorted(olddata):
        if key not in data:
            print('# removed %r' % key)
        elif data[key] != olddata[key]:
            print('# updated %r -> %r to %r' %
                  (key, olddata[key], data[key]))
    # Additions are not mentioned
if __name__ == '__main__':
    # Start from the mapping currently shipped in locale.py, overlay the
    # system X11 alias file, report what changed, then print the merged
    # dictionary in a form suitable for pasting back into locale.py.
    data = locale.locale_alias.copy()
    data.update(parse(LOCALE_ALIAS))
    print_differences(data, locale.locale_alias)
    print()
    print('locale_alias = {')
    pprint(data)
    print('}')
|
sandeepkoduri/GAE-html-to-pdf | refs/heads/master | libs/PIL/FpxImagePlugin.py | 14 | #
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library.
# $Id$
#
# FlashPix support for PIL
#
# History:
# 97-01-25 fl Created (reads uncompressed RGB images only)
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from PIL import Image, ImageFile, _binary
import olefile
__version__ = "0.1"
i32 = _binary.i32le
i8 = _binary.i8
# we map from colour field tuples to (mode, rawmode) descriptors
MODES = {
    # opacity
    # BUGFIX: the key must be a one-element tuple -- a bare parenthesized
    # int would never match the `MODES[tuple(colors)]` lookup.
    (0x00007ffe,): ("A", "L"),
    # monochrome
    (0x00010000,): ("L", "L"),
    (0x00018000, 0x00017ffe): ("RGBA", "LA"),
    # photo YCC
    (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"),
    (0x00028000, 0x00028001, 0x00028002, 0x00027ffe): ("RGBA", "YCCA;P"),
    # standard RGB (NIFRGB)
    (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"),
    (0x00038000, 0x00038001, 0x00038002, 0x00037ffe): ("RGBA", "RGBA"),
}
#
# --------------------------------------------------------------------
def _accept(prefix):
    # A FlashPix file is an OLE compound document: check the 8-byte magic.
    return prefix[:8] == olefile.MAGIC
##
# Image plugin for the FlashPix images.
class FpxImageFile(ImageFile.ImageFile):
    """Image plugin for FlashPix images stored in OLE containers.

    Only reading is supported; tiles may be raw, 'fill' (not implemented)
    or JPEG compressed.
    """

    format = "FPX"
    format_description = "FlashPix"

    def _open(self):
        """Validate the OLE container and open the first image index."""
        #
        # read the OLE directory and see if this is a likely
        # to be a FlashPix file

        try:
            self.ole = olefile.OleFileIO(self.fp)
        except IOError:
            raise SyntaxError("not an FPX file; invalid OLE file")

        # FlashPix files carry a fixed root storage CLSID.
        if self.ole.root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B":
            raise SyntaxError("not an FPX file; bad root CLSID")

        self._open_index(1)

    def _open_index(self, index=1):
        """Read the Image Contents property set for one data object store,
        setting size, mode/rawmode and any JPEG tables."""
        #
        # get the Image Contents Property Set

        prop = self.ole.getproperties([
            "Data Object Store %06d" % index,
            "\005Image Contents"
        ])

        # size (highest resolution)

        self.size = prop[0x1000002], prop[0x1000003]

        size = max(self.size)
        i = 1
        # NOTE(review): on Python 3, `size / 2` yields a float; the loop still
        # terminates and maxid comes out the same, but integer halving (//)
        # is presumably what was intended -- confirm.
        while size > 64:
            size = size / 2
            i += 1
        self.maxid = i - 1

        # mode. instead of using a single field for this, flashpix
        # requires you to specify the mode for each channel in each
        # resolution subimage, and leaves it to the decoder to make
        # sure that they all match. for now, we'll cheat and assume
        # that this is always the case.

        id = self.maxid << 16

        s = prop[0x2000002 | id]

        colors = []
        for i in range(i32(s, 4)):
            # note: for now, we ignore the "uncalibrated" flag
            colors.append(i32(s, 8+i*4) & 0x7fffffff)

        self.mode, self.rawmode = MODES[tuple(colors)]

        # load JPEG tables, if any
        self.jpeg = {}
        for i in range(256):
            id = 0x3000001 | (i << 16)
            if id in prop:
                self.jpeg[i] = prop[id]

        # print(len(self.jpeg), "tables loaded")

        self._open_subimage(1, self.maxid)

    def _open_subimage(self, index=1, subimage=0):
        """Build the tile descriptors for one resolution subimage."""
        #
        # setup tile descriptors for a given subimage

        stream = [
            "Data Object Store %06d" % index,
            "Resolution %04d" % subimage,
            "Subimage 0000 Header"
        ]

        fp = self.ole.openstream(stream)

        # skip prefix
        fp.read(28)

        # header stream
        s = fp.read(36)

        size = i32(s, 4), i32(s, 8)
        # tilecount = i32(s, 12)
        tilesize = i32(s, 16), i32(s, 20)
        # channels = i32(s, 24)
        offset = i32(s, 28)
        length = i32(s, 32)

        # print(size, self.mode, self.rawmode)

        if size != self.size:
            raise IOError("subimage mismatch")

        # get tile descriptors
        fp.seek(28 + offset)
        s = fp.read(i32(s, 12) * length)

        x = y = 0
        xsize, ysize = size
        xtile, ytile = tilesize
        self.tile = []

        # Each fixed-length descriptor yields one tile; tiles are laid out
        # left-to-right, top-to-bottom.
        for i in range(0, len(s), length):

            compression = i32(s, i+8)

            if compression == 0:
                self.tile.append(("raw", (x, y, x+xtile, y+ytile),
                                  i32(s, i) + 28, (self.rawmode)))

            elif compression == 1:

                # FIXME: the fill decoder is not implemented
                self.tile.append(("fill", (x, y, x+xtile, y+ytile),
                                  i32(s, i) + 28, (self.rawmode, s[12:16])))

            elif compression == 2:

                internal_color_conversion = i8(s[14])
                jpeg_tables = i8(s[15])
                rawmode = self.rawmode

                if internal_color_conversion:
                    # The image is stored as usual (usually YCbCr).
                    if rawmode == "RGBA":
                        # For "RGBA", data is stored as YCbCrA based on
                        # negative RGB. The following trick works around
                        # this problem :
                        jpegmode, rawmode = "YCbCrK", "CMYK"
                    else:
                        jpegmode = None  # let the decoder decide
                else:
                    # The image is stored as defined by rawmode
                    jpegmode = rawmode

                self.tile.append(("jpeg", (x, y, x+xtile, y+ytile),
                                  i32(s, i) + 28, (rawmode, jpegmode)))

                # FIXME: jpeg tables are tile dependent; the prefix
                # data must be placed in the tile descriptor itself!

                if jpeg_tables:
                    self.tile_prefix = self.jpeg[jpeg_tables]

            else:
                raise IOError("unknown/invalid compression")

            x = x + xtile
            if x >= xsize:
                x, y = 0, y + ytile
                if y >= ysize:
                    break  # isn't really required

        self.stream = stream
        self.fp = None

    def load(self):
        """Open the subimage data stream lazily, then defer to the base loader."""
        if not self.fp:
            self.fp = self.ole.openstream(self.stream[:2] +
                                          ["Subimage 0000 Data"])

        return ImageFile.ImageFile.load(self)
#
# --------------------------------------------------------------------
Image.register_open(FpxImageFile.format, FpxImageFile, _accept)
Image.register_extension(FpxImageFile.format, ".fpx")
|
wangyum/tensorflow | refs/heads/master | tensorflow/python/ops/tensor_array_ops.py | 10 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorArray: a dynamically sized array of Tensors.
@@TensorArray
"""
# Mixture of pep8 and non-pep8 names, so disable pylint bad-name
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import tf_should_use
def _maybe_set_device(handle_op, value_t):
    """Colocate `handle_op` with `value_t`'s device when the handle has none.

    Mutates `handle_op` in place via the private `Operation._set_device`
    (hence the self-deprecating sign below).
    """
    # NOTE(ebrevdo): Do not try this at home, kids
    # _______________________________________________
    # | I WILL NOT ACCESS PRIVATE METHODS ^^^^^^^^\ |
    # | I WILL NOT ACCESS PRIVATE METHODS | | |
    # | I WILL NOT ACCESS PRIVATE METHODS |_ __ | |
    # | I WILL NOT ACCESS PRIVATE METHODS (.(. ) | |
    # | I WILL NOT ACCESS PRIVATE (_ ) |
    # | \\ /___/' / |
    # | _\\_ \ | |
    # | (( ) /====| |
    # | \ <.__._- \ |
    # |___________________________ <//___. ||
    #
    if not handle_op.device and value_t.device:
        handle_op._set_device(value_t.device)  # pylint: disable=protected-access
# TensorArray object accesses many of the hidden generated ops, but is
# in fact built to wrap these methods.
# pylint: disable=protected-access
class TensorArray(object):
  """Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays.

  This class is meant to be used with dynamic iteration primitives such as
  `while_loop` and `map_fn`. It supports gradient back-propagation via special
  "flow" control flow dependencies.
  """

  def __init__(self,
               dtype,
               size=None,
               dynamic_size=None,
               clear_after_read=None,
               tensor_array_name=None,
               handle=None,
               flow=None,
               infer_shape=True,
               element_shape=None,
               name=None):
    """Construct a new TensorArray or wrap an existing TensorArray handle.

    A note about the parameter `name`:

    The name of the `TensorArray` (even if passed in) is uniquified: each time
    a new `TensorArray` is created at runtime it is assigned its own name for
    the duration of the run. This avoids name collisions if a `TensorArray`
    is created within a `while_loop`.

    Args:
      dtype: (required) data type of the TensorArray.
      size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
        Required if handle is not provided.
      dynamic_size: (optional) Python bool: If true, writes to the TensorArray
        can grow the TensorArray past its initial size. Default: False.
      clear_after_read: Boolean (optional, default: True). If True, clear
        TensorArray values after reading them. This disables read-many
        semantics, but allows early release of memory.
      tensor_array_name: (optional) Python string: the name of the TensorArray.
        This is used when creating the TensorArray handle. If this value is
        set, handle should be None.
      handle: (optional) A `Tensor` handle to an existing TensorArray. If this
        is set, tensor_array_name should be None.
      flow: (optional) A float `Tensor` scalar coming from an existing
        `TensorArray.flow`.
      infer_shape: (optional, default: True) If True, shape inference
        is enabled. In this case, all elements must have the same shape.
      element_shape: (optional, default: None) A `TensorShape` object specifying
        the shape constraints of each of the elements of the TensorArray.
        Need not be fully defined.
      name: A name for the operation (optional).

    Raises:
      ValueError: if both handle and tensor_array_name are provided.
      TypeError: if handle is provided but is not a Tensor.
    """
    # Wrapping an existing handle and building a new array are mutually
    # exclusive; the construction-time options only make sense for the latter.
    if handle is not None and tensor_array_name:
      raise ValueError(
          "Cannot construct with both handle and tensor_array_name")
    if handle is not None and not isinstance(handle, ops.Tensor):
      raise TypeError("Handle must be a Tensor")
    if handle is None and size is None:
      raise ValueError("Size must be provided if handle is not provided")
    if handle is not None and size is not None:
      raise ValueError("Cannot provide both a handle and size "
                       "at the same time")
    if handle is not None and element_shape is not None:
      raise ValueError("Cannot provide both a handle and element_shape "
                       "at the same time")
    if handle is not None and dynamic_size is not None:
      raise ValueError("Cannot provide both a handle and dynamic_size "
                       "at the same time")
    if handle is not None and clear_after_read is not None:
      raise ValueError("Cannot provide both a handle and clear_after_read "
                       "at the same time")

    if clear_after_read is None:
      clear_after_read = True
    dynamic_size = dynamic_size or False

    self._dtype = dtype
    # Record the current static shape for the array elements. The element
    # shape is defined either by `element_shape` or the shape of the tensor
    # of the first write. If `infer_shape` is true, all writes checks for
    # shape equality.
    # NOTE: `_element_shape` is a list used as a mutable cell (empty or of
    # length one) so that the copies made by `identity()`, `grad()`, `write()`,
    # etc. share — and jointly refine — the same inferred shape.
    if element_shape is None:
      self._infer_shape = infer_shape
      self._element_shape = []
    else:
      self._infer_shape = True
      self._element_shape = [tensor_shape.TensorShape(element_shape)]
    with ops.name_scope(name, "TensorArray", [handle, size, flow]) as scope:
      if handle is not None:
        self._handle = handle
        if flow is None:
          raise ValueError("flow must not be None if handle is not None.")
        self._flow = flow
      else:
        # Construct the TensorArray with an empty device. The first
        # write into the TensorArray from a Tensor with a set device
        # will retroactively set the device value of this op.
        with ops.device(None), ops.colocate_with(None, ignore_existing=True):
          self._handle, self._flow = gen_data_flow_ops._tensor_array_v3(
              dtype=dtype,
              size=size,
              element_shape=element_shape,
              dynamic_size=dynamic_size,
              clear_after_read=clear_after_read,
              tensor_array_name=tensor_array_name,
              name=scope)

  @property
  def flow(self):
    """The flow `Tensor` forcing ops leading to this TensorArray state."""
    return self._flow

  @property
  def dtype(self):
    """The data type of this TensorArray."""
    return self._dtype

  @property
  def handle(self):
    """The reference to the TensorArray."""
    return self._handle

  def _merge_element_shape(self, shape):
    """Changes the element shape of the array given a shape to merge with.

    Args:
      shape: A `TensorShape` object to merge with.

    Raises:
      ValueError: if the provided shape is incompatible with the current
          element shape of the `TensorArray`.
    """
    if self._element_shape:
      if not shape.is_compatible_with(self._element_shape[0]):
        raise ValueError(
            "Inconsistent shapes: saw %s but expected %s "
            "(and infer_shape=True)" % (shape, self._element_shape[0]))
      # Mutate in place so every TensorArray sharing this list sees the merge.
      self._element_shape[0] = self._element_shape[0].merge_with(shape)
    else:
      self._element_shape.append(shape)

  def identity(self):
    """Returns a TensorArray with the same content and properties.

    Returns:
      A new TensorArray object with flow that ensures the control dependencies
      from the contexts will become control dependencies for writes, reads, etc.
      Use this object for all subsequent operations.
    """
    flow = array_ops.identity(self._flow)
    ta = TensorArray(dtype=self._dtype, handle=self._handle, flow=flow,
                     infer_shape=self._infer_shape)
    # Share the element-shape cell so shape inference stays in sync.
    ta._element_shape = self._element_shape
    return ta

  def grad(self, source, flow=None, name=None):
    """Returns a `TensorArray` wrapping the gradient array for `source`.

    Args:
      source: Python string, name of the gradient source.
      flow: (optional) float `Tensor` to gate creation on; defaults to
          `self.flow`.
      name: A name for the operation (optional).

    Returns:
      A `TensorArray` object wrapping the gradient handle.
    """
    # tensor_array_grad requires a flow input when forward
    # TensorArrays are dynamically sized. This forces the creation
    # of the grad TensorArray only once the final forward array's size
    # is fixed.
    if flow is None:
      flow = self.flow
    with ops.name_scope(name, "TensorArrayGrad", [self._handle]):
      with ops.colocate_with(self._handle):
        g_handle, unused_flow = gen_data_flow_ops._tensor_array_grad_v3(
            handle=self._handle, source=source, flow_in=flow, name=name)
        with ops.control_dependencies([g_handle]):
          flow = array_ops.identity(flow, name="gradient_flow")
        g = TensorArray(
            dtype=self._dtype,
            handle=g_handle,
            flow=flow,
            infer_shape=self._infer_shape)
        g._element_shape = self._element_shape
        return g

  def read(self, index, name=None):
    """Read the value at location `index` in the TensorArray.

    Args:
      index: 0-D. int32 tensor with the index to read from.
      name: A name for the operation (optional).

    Returns:
      The tensor at index `index`.
    """
    with ops.colocate_with(self._handle):
      value = gen_data_flow_ops._tensor_array_read_v3(
          handle=self._handle,
          index=index,
          flow_in=self._flow,
          dtype=self._dtype,
          name=name)
    if self._element_shape:
      # Propagate the statically-known element shape onto the read value.
      value.set_shape(self._element_shape[0].dims)
    return value

  @tf_should_use.should_use_result
  def write(self, index, value, name=None):
    """Write `value` into index `index` of the TensorArray.

    Args:
      index: 0-D. int32 scalar with the index to write to.
      value: N-D. Tensor of type `dtype`. The Tensor to write to this index.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the write occurs.
      Use this object for all subsequent operations.

    Raises:
      ValueError: if there are more writers than specified.
    """
    with ops.name_scope(name, "TensorArrayWrite", [self._handle, index, value]):
      value = ops.convert_to_tensor(value, name="value")
      # The first write from a tensor with a set device retroactively places
      # the whole array on that device.
      _maybe_set_device(self._handle.op, value)
      with ops.colocate_with(self._handle):
        flow_out = gen_data_flow_ops._tensor_array_write_v3(
            handle=self._handle,
            index=index,
            value=value,
            flow_in=self._flow,
            name=name)
      ta = TensorArray(dtype=self._dtype, handle=self._handle, flow=flow_out)
      ta._infer_shape = self._infer_shape
      ta._element_shape = self._element_shape
      if ta._infer_shape:
        ta._merge_element_shape(value.get_shape())
      return ta

  def stack(self, name=None):
    """Return the values in the TensorArray as a stacked `Tensor`.

    All of the values must have been written and their shapes must all match.
    If input shapes have rank-`R`, then output shape will have rank-`(R+1)`.

    Args:
      name: A name for the operation (optional).

    Returns:
      All the tensors in the TensorArray stacked into one tensor.
    """
    with ops.colocate_with(self._handle):
      with ops.name_scope(name, "TensorArrayStack", [self._handle]):
        # stack is simply gather over the full index range [0, size).
        return self.gather(math_ops.range(0, self.size()), name=name)

  def gather(self, indices, name=None):
    """Return selected values in the TensorArray as a packed `Tensor`.

    All of selected values must have been written and their shapes
    must all match.

    Args:
      indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If
        the `TensorArray` is not dynamic, `max_value=size()`.
      name: A name for the operation (optional).

    Returns:
      The tensors in the `TensorArray` selected by `indices`, packed into one
      tensor.
    """
    with ops.colocate_with(self._handle):
      if self._element_shape:
        element_shape = self._element_shape[0]
      else:
        element_shape = tensor_shape.TensorShape(None)
      value = gen_data_flow_ops._tensor_array_gather_v3(
          handle=self._handle,
          indices=indices,
          flow_in=self._flow,
          dtype=self._dtype,
          name=name,
          element_shape=element_shape)
      if self._element_shape and self._element_shape[0].dims is not None:
        # Leading None: the number of gathered indices is not statically known.
        value.set_shape([None] + self._element_shape[0].dims)
      return value

  def concat(self, name=None):
    """Return the values in the TensorArray as a concatenated `Tensor`.

    All of the values must have been written, their ranks must match, and
    their shapes must all match for all dimensions except the first.

    Args:
      name: A name for the operation (optional).

    Returns:
      All the tensors in the TensorArray concatenated into one tensor.
    """
    if self._element_shape and self._element_shape[0].dims is not None:
      element_shape_except0 = (
          tensor_shape.TensorShape(self._element_shape[0].dims[1:]))
    else:
      element_shape_except0 = tensor_shape.TensorShape(None)
    with ops.colocate_with(self._handle):
      value, _ = gen_data_flow_ops._tensor_array_concat_v3(
          handle=self._handle,
          flow_in=self._flow,
          dtype=self._dtype,
          name=name,
          element_shape_except0=element_shape_except0)
      if self._element_shape and self._element_shape[0].dims is not None:
        value.set_shape([None] + self._element_shape[0].dims[1:])
      return value

  @tf_should_use.should_use_result
  def unstack(self, value, name=None):
    """Unstack the values of a `Tensor` in the TensorArray.

    If input value shapes have rank-`R`, then the output TensorArray will
    contain elements whose shapes are rank-`(R-1)`.

    Args:
      value: (N+1)-D. Tensor of type `dtype`. The Tensor to unstack.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the unstack occurs.
      Use this object for all subsequent operations.

    Raises:
      ValueError: if the shape inference fails.
    """
    with ops.name_scope(name, "TensorArrayUnstack", [self._handle, value]):
      num_elements = array_ops.shape(value)[0]
      # unstack is scatter over the contiguous index range [0, num_elements).
      return self.scatter(
          indices=math_ops.range(0, num_elements), value=value, name=name)

  @tf_should_use.should_use_result
  def scatter(self, indices, value, name=None):
    """Scatter the values of a `Tensor` in specific indices of a `TensorArray`.

    Args:
      indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If
        the `TensorArray` is not dynamic, `max_value=size()`.
      value: (N+1)-D. Tensor of type `dtype`. The Tensor to unpack.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the scatter occurs.
      Use this object for all subsequent operations.

    Raises:
      ValueError: if the shape inference fails.
    """
    with ops.name_scope(name, "TensorArrayScatter",
                        [self._handle, value, indices]):
      value = ops.convert_to_tensor(value, name="value")
      _maybe_set_device(self._handle.op, value)
      with ops.colocate_with(self._handle):
        flow_out = gen_data_flow_ops._tensor_array_scatter_v3(
            handle=self._handle,
            indices=indices,
            value=value,
            flow_in=self._flow,
            name=name)
      ta = TensorArray(dtype=self._dtype, handle=self._handle, flow=flow_out)
      ta._infer_shape = self._infer_shape
      ta._element_shape = self._element_shape
      if ta._infer_shape:
        # inputs[2] of the scatter op is `value`; each element drops the
        # leading (scatter) axis of the packed value.
        val_shape = flow_out.op.inputs[2].get_shape()
        element_shape = tensor_shape.unknown_shape()
        if val_shape.dims is not None:
          element_shape = tensor_shape.TensorShape(val_shape.dims[1:])
        ta._merge_element_shape(element_shape)
      return ta

  @tf_should_use.should_use_result
  def split(self, value, lengths, name=None):
    """Split the values of a `Tensor` into the TensorArray.

    Args:
      value: (N+1)-D. Tensor of type `dtype`. The Tensor to split.
      lengths: 1-D. int32 vector with the lengths to use when splitting
        `value` along its first dimension.
      name: A name for the operation (optional).

    Returns:
      A new TensorArray object with flow that ensures the split occurs.
      Use this object for all subsequent operations.

    Raises:
      ValueError: if the shape inference fails.
    """
    with ops.name_scope(name, "TensorArraySplit",
                        [self._handle, value, lengths]):
      value = ops.convert_to_tensor(value, name="value")
      _maybe_set_device(self._handle.op, value)
      lengths_64 = math_ops.to_int64(lengths)
      with ops.colocate_with(self._handle):
        flow_out = gen_data_flow_ops._tensor_array_split_v3(
            handle=self._handle,
            value=value,
            lengths=lengths_64,
            flow_in=self._flow,
            name=name)
      ta = TensorArray(dtype=self._dtype, handle=self._handle, flow=flow_out)
      ta._infer_shape = self._infer_shape
      ta._element_shape = self._element_shape
      if ta._infer_shape:
        # inputs[1] of the split op is `value`; inputs[2] is `lengths`.
        val_shape = flow_out.op.inputs[1].get_shape()
        clengths = tensor_util.constant_value(flow_out.op.inputs[2])
        element_shape = tensor_shape.unknown_shape()
        if val_shape.dims is not None:
          # The element shape is only statically inferable when every split
          # has the same constant length.
          if clengths is not None and clengths.max() == clengths.min():
            element_shape = tensor_shape.TensorShape([clengths[0]] +
                                                     val_shape.dims[1:])
        ta._merge_element_shape(element_shape)
      return ta

  def size(self, name=None):
    """Return the size of the TensorArray."""
    with ops.colocate_with(self._handle):
      return gen_data_flow_ops._tensor_array_size_v3(
          handle=self._handle, flow_in=self.flow, name=name)

  @tf_should_use.should_use_result
  def close(self, name=None):
    """Close the current TensorArray."""
    with ops.colocate_with(self._handle):
      return gen_data_flow_ops._tensor_array_close_v3(
          handle=self._handle, name=name)
# pylint: enable=protected-access
|
fontify/fontify | refs/heads/master | wsgi.py | 1 | from hello import app as application
if __name__ == "__main__":
    # Run Flask's built-in development server when executed directly; under a
    # real WSGI server only the module-level `application` object is used.
    application.run()
|
xmbcrios/xmbcrios.repository | refs/heads/master | plugin.video.SportsDevil/lib/utils/github/InputGitTreeElement.py | 7 | # -*- coding: utf-8 -*-
# Copyright 2012 Vincent Jacques
# vincent@vincent-jacques.net
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import GithubObject
class InputGitTreeElement(object):
    """Input value object describing one element of a git tree to create.

    `content` and `sha` are optional; omitted fields (left at
    `GithubObject.NotSet`) are excluded from the serialized identity.
    """

    def __init__(self, path, mode, type, content=GithubObject.NotSet, sha=GithubObject.NotSet):
        self.__path = path
        self.__mode = mode
        self.__type = type
        self.__content = content
        self.__sha = sha

    @property
    def _identity(self):
        """Dict representation suitable for the GitHub create-tree API."""
        identity = {
            "path": self.__path,
            "mode": self.__mode,
            "type": self.__type,
        }
        # Only include the optional fields that were actually provided.
        for key, value in (("sha", self.__sha), ("content", self.__content)):
            if value is not GithubObject.NotSet:
                identity[key] = value
        return identity
|
omni5cience/django-inlineformfield | refs/heads/master | .tox/py27/lib/python2.7/site-packages/IPython/core/magics/__init__.py | 23 | """Implementation of all the magic functions built into IPython.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from ..magic import Magics, magics_class
from .auto import AutoMagics
from .basic import BasicMagics
from .code import CodeMagics, MacroToEdit
from .config import ConfigMagics
from .deprecated import DeprecatedMagics
from .display import DisplayMagics
from .execution import ExecutionMagics
from .extension import ExtensionMagics
from .history import HistoryMagics
from .logging import LoggingMagics
from .namespace import NamespaceMagics
from .osm import OSMagics
from .pylab import PylabMagics
from .script import ScriptMagics
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
@magics_class
class UserMagics(Magics):
    """Placeholder for user-defined magics to be added at runtime.

    All magics are eventually merged into a single namespace at runtime, but we
    use this class to isolate the magics defined dynamically by the user into
    their own class.
    """
|
SummaLabs/DLS | refs/heads/master | app/backend/core/models-keras-2x-api/lightweight_layers/layers_basic.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import keras.layers.merge
###############################################
# default_data_format = 'channels_last' #'th'
#FIXME: temporary Theano-channel ordering (channels_first), in feature needd merge to tensorflow ordering (channels_last)
default_data_format = 'channels_first'
###############################################
class LW_Layer(object):
    """Base class for lightweight (shape-only) layer stand-ins.

    These classes mirror Keras layers but only implement static shape
    propagation, so model topologies can be analyzed without instantiating
    real layers.
    """

    # Optional declared input shape; set by LW_InputLayer, None elsewhere.
    input_shape = None

    def get_config(self):
        """Lightweight layers carry no configuration."""
        pass

    def get_output_shape_for(self, input_shape):
        """Default shape transform: identity (output shape == input shape)."""
        return input_shape


class LW_InputLayer(LW_Layer):
    """Entry-point layer that simply records the declared input shape."""

    def __init__(self, input_shape=None):
        self.input_shape = input_shape


class LW_Merge(LW_Layer):
    """Shape propagation for merging several inputs.

    Supported modes: 'sum', 'mul', 'ave', 'max', 'concat', 'dot', 'cos'.
    """

    def __init__(self, layers=None, mode='sum', concat_axis=-1, dot_axes=-1):
        self.layers = layers
        self.mode = mode
        self.concat_axis = concat_axis
        self.dot_axes = dot_axes

    def get_output_shape_for(self, input_shape):
        """Compute the merged output shape.

        Args:
            input_shape: sequence of shape tuples, one per merged input.

        Returns:
            The output shape as a tuple.

        Raises:
            ValueError: if `self.mode` is not a supported merge mode.
        """
        input_shapes = input_shape
        if self.mode in ['sum', 'mul', 'ave', 'max']:
            # Element-wise modes: all tuples in input_shapes should be the same.
            return tuple(input_shapes[0])
        if self.mode == 'concat':
            output_shape = list(input_shapes[0])
            for shape in input_shapes[1:]:
                if output_shape[self.concat_axis] is None or shape[self.concat_axis] is None:
                    # Any unknown size on the concat axis makes it unknown.
                    output_shape[self.concat_axis] = None
                    break
                output_shape[self.concat_axis] += shape[self.concat_axis]
            return tuple(output_shape)
        if self.mode in ['dot', 'cos']:
            shape1 = list(input_shapes[0])
            shape2 = list(input_shapes[1])
            # Normalize dot_axes to one non-negative axis per input.
            if isinstance(self.dot_axes, int):
                if self.dot_axes < 0:
                    axes = [self.dot_axes % len(shape1), self.dot_axes % len(shape2)]
                else:
                    axes = [self.dot_axes] * 2
            else:
                axes = self.dot_axes
            # Drop the contracted axes and the second input's batch axis.
            shape1.pop(axes[0])
            shape2.pop(axes[1])
            shape2.pop(0)
            output_shape = shape1 + shape2
            if len(output_shape) == 1:
                output_shape += [1]
            return tuple(output_shape)
        # BUGFIX: unknown modes previously fell through and returned None
        # silently; fail loudly instead.
        raise ValueError('Unsupported merge mode: %r' % (self.mode,))


class LW_Flatten(LW_Layer):
    """Shape propagation for flattening all non-batch dimensions."""

    def get_output_shape_for(self, input_shape):
        """Return (batch, product of the remaining dimensions).

        Raises:
            Exception: if any non-batch dimension is undefined (None or 0).
        """
        if not all(input_shape[1:]):
            # BUGFIX: the message previously left the "(got ..." paren
            # unbalanced; closed it.
            raise Exception('The shape of the input to "Flatten" '
                            'is not fully defined '
                            '(got ' + str(input_shape[1:]) + '). '
                            'Make sure to pass a complete "input_shape" '
                            'or "batch_input_shape" argument to the first '
                            'layer in your model.')
        tprod = 1
        for dim in input_shape[1:]:
            tprod *= dim
        return (input_shape[0], tprod)


class LW_Dense(LW_Layer):
    """Shape propagation for a fully-connected layer with `units` outputs."""

    def __init__(self, units):
        self.output_dim = units

    def get_output_shape_for(self, input_shape):
        # Dense expects 2-D (batch, features) input.
        assert input_shape and len(input_shape) == 2
        return (input_shape[0], self.output_dim)


class LW_Activation(LW_Layer):
    """Activations do not change shape; inherits the identity transform."""
    pass


if __name__ == '__main__':
    testMergeLayer = LW_Merge(mode='concat')
    print ('----')
medixdev/electron | refs/heads/master | script/install-sysroot.py | 140 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script to install a Debian Wheezy sysroot for making official Google Chrome
# Linux builds.
# The sysroot is needed to make Chrome work for Debian Wheezy.
# This script can be run manually but is more often run as part of gclient
# hooks. When run from hooks this script should be a no-op on non-linux
# platforms.
# The sysroot image could be constructed from scratch based on the current
# state or Debian Wheezy but for consistency we currently use a pre-built root
# image. The image will normally need to be rebuilt every time chrome's build
# dependancies are changed.
import hashlib
import platform
import optparse
import os
import re
import shutil
import subprocess
import sys
from lib.util import get_host_arch
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
URL_PREFIX = 'https://github.com'
URL_PATH = 'atom/debian-sysroot-image-creator/releases/download'
REVISION_AMD64 = 264817
REVISION_I386 = 'v0.2.0'
REVISION_ARM = 'v0.1.0'
TARBALL_AMD64 = 'debian_wheezy_amd64_sysroot.tgz'
TARBALL_I386 = 'debian_wheezy_i386_sysroot.tgz'
TARBALL_ARM = 'debian_wheezy_arm_sysroot.tgz'
TARBALL_AMD64_SHA1SUM = '74b7231e12aaf45c5c5489d9aebb56bd6abb3653'
TARBALL_I386_SHA1SUM = 'f5b2ceaeb3f7e6bc2058733585fe877d002b5fa7'
TARBALL_ARM_SHA1SUM = '72e668c57b8591e108759584942ddb6f6cee1322'
SYSROOT_DIR_AMD64 = 'debian_wheezy_amd64-sysroot'
SYSROOT_DIR_I386 = 'debian_wheezy_i386-sysroot'
SYSROOT_DIR_ARM = 'debian_wheezy_arm-sysroot'
valid_archs = ('arm', 'i386', 'amd64')
def GetSha1(filename):
  """Return the hex SHA-1 digest of |filename|, read in 1 MB chunks."""
  digest = hashlib.sha1()
  with open(filename, 'rb') as f:
    # Stream the file in 1 MB chunks so it never has to fit in memory.
    for chunk in iter(lambda: f.read(1024 * 1024), b''):
      digest.update(chunk)
  return digest.hexdigest()
def DetectArch(gyp_defines):
  """Return the sysroot architecture to install: 'amd64', 'i386' or 'arm'.

  Prefers an explicit target_arch in GYP_DEFINES; otherwise falls back to the
  detected host architecture. Returns None when the host arch is unrecognized.
  """
  # Check for optional target_arch and only install for that architecture.
  # If target_arch is not specified, then only install for the host
  # architecture.
  if 'target_arch=x64' in gyp_defines:
    return 'amd64'
  elif 'target_arch=ia32' in gyp_defines:
    return 'i386'
  elif 'target_arch=arm' in gyp_defines:
    return 'arm'

  detected_host_arch = get_host_arch()
  if detected_host_arch == 'x64':
    return 'amd64'
  elif detected_host_arch == 'ia32':
    return 'i386'
  elif detected_host_arch == 'arm':
    return 'arm'
  else:
    print "Unknown host arch: %s" % detected_host_arch

  return None
def main():
  """Download and unpack the sysroot tarball if it is missing or stale.

  Returns 0 on success (or when no sysroot is needed), 1 on failure.
  NOTE: reads the module-global `options` parsed in the __main__ block.
  """
  if options.linux_only:
    # This argument is passed when run from the gclient hooks.
    # In this case we return early on non-linux platforms.
    if not sys.platform.startswith('linux'):
      return 0

  gyp_defines = os.environ.get('GYP_DEFINES', '')

  if options.arch:
    target_arch = options.arch
  else:
    target_arch = DetectArch(gyp_defines)
    if not target_arch:
      print 'Unable to detect host architecture'
      return 1

  if options.linux_only and target_arch != 'arm':
    # When run from runhooks, only install the sysroot for an Official Chrome
    # Linux build, except on ARM where we always use a sysroot.
    defined = ['branding=Chrome', 'buildtype=Official']
    undefined = ['chromeos=1']
    for option in defined:
      if option not in gyp_defines:
        return 0
    for option in undefined:
      if option in gyp_defines:
        return 0

  # The sysroot directory should match the one specified in build/common.gypi.
  # TODO(thestig) Consider putting this else where to avoid having to recreate
  # it on every build.
  linux_dir = os.path.join(SCRIPT_DIR, '..', 'vendor')
  # Pick the per-architecture tarball name, checksum and release tag.
  if target_arch == 'amd64':
    sysroot = os.path.join(linux_dir, SYSROOT_DIR_AMD64)
    tarball_filename = TARBALL_AMD64
    tarball_sha1sum = TARBALL_AMD64_SHA1SUM
    revision = REVISION_AMD64
  elif target_arch == 'arm':
    sysroot = os.path.join(linux_dir, SYSROOT_DIR_ARM)
    tarball_filename = TARBALL_ARM
    tarball_sha1sum = TARBALL_ARM_SHA1SUM
    revision = REVISION_ARM
  elif target_arch == 'i386':
    sysroot = os.path.join(linux_dir, SYSROOT_DIR_I386)
    tarball_filename = TARBALL_I386
    tarball_sha1sum = TARBALL_I386_SHA1SUM
    revision = REVISION_I386
  else:
    print 'Unknown architecture: %s' % target_arch
    assert(False)

  url = '%s/%s/%s/%s' % (URL_PREFIX, URL_PATH, revision, tarball_filename)

  # The .stamp file records the URL last installed; if it matches, the
  # sysroot is already up to date and nothing needs to be downloaded.
  stamp = os.path.join(sysroot, '.stamp')
  if os.path.exists(stamp):
    with open(stamp) as s:
      if s.read() == url:
        print 'Debian Wheezy %s root image already up-to-date: %s' % \
            (target_arch, sysroot)
        return 0

  print 'Installing Debian Wheezy %s root image: %s' % (target_arch, sysroot)
  if os.path.isdir(sysroot):
    shutil.rmtree(sysroot)
  os.mkdir(sysroot)
  tarball = os.path.join(sysroot, tarball_filename)
  print 'Downloading %s' % url
  sys.stdout.flush()
  sys.stderr.flush()
  subprocess.check_call(['curl', '--fail', '-L', url, '-o', tarball])
  # Verify the download against the pinned checksum before unpacking.
  sha1sum = GetSha1(tarball)
  if sha1sum != tarball_sha1sum:
    print 'Tarball sha1sum is wrong.'
    print 'Expected %s, actual: %s' % (tarball_sha1sum, sha1sum)
    return 1
  subprocess.check_call(['tar', 'xf', tarball, '-C', sysroot])
  os.remove(tarball)

  # Write the stamp last so a partial install is retried next run.
  with open(stamp, 'w') as s:
    s.write(url)
  return 0
if __name__ == '__main__':
  parser = optparse.OptionParser('usage: %prog [OPTIONS]')
  parser.add_option('--linux-only', action='store_true',
                    default=False, help='Only install sysroot for official '
                    'Linux builds')
  parser.add_option('--arch', type='choice', choices=valid_archs,
                    help='Sysroot architecture: %s' % ', '.join(valid_archs))
  # `options` is intentionally left module-global: main() reads it directly.
  options, _ = parser.parse_args()
  sys.exit(main())
|
MayankGo/ec2-api | refs/heads/stable/kilo | ec2api/tests/functional/scenario/base.py | 1 | # Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log
from tempest_lib.common.utils import data_utils
from ec2api.tests.functional import base
from ec2api.tests.functional import config
CONF = config.CONF
LOG = log.getLogger(__name__)
class BaseScenarioTest(base.EC2TestCase):
    """Base class with helpers for EC2 API scenario tests.

    Wraps common setup steps (instances, addresses, security groups, VPC
    networking) and registers cleanups so the created resources are released
    after each test.
    """

    def run_instance(self, **kwargs):
        """Launch a single instance, wait until running, return its id."""
        kwargs.setdefault('ImageId', CONF.aws.image_id)
        kwargs.setdefault('InstanceType', CONF.aws.instance_type)
        kwargs.setdefault('Placement', {'AvailabilityZone': CONF.aws.aws_zone})
        # Always launch exactly one instance.
        kwargs['MinCount'] = 1
        kwargs['MaxCount'] = 1
        resp, data = self.client.RunInstances(*[], **kwargs)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        instance_id = data['Instances'][0]['InstanceId']
        self.addResourceCleanUp(self.client.TerminateInstances,
                                InstanceIds=[instance_id])
        # NOTE(review): final_set=('running') is a plain string, not a
        # 1-tuple (missing comma) — confirm the waiter accepts a string.
        self.get_instance_waiter().wait_available(instance_id,
                                                  final_set=('running'))
        return instance_id

    def get_instance_ip(self, instance_id):
        """Return a public IP for the instance, associating one if needed."""
        instance = self.get_instance(instance_id)
        public_ip = instance.get('PublicIpAddress')
        if public_ip:
            return public_ip

        # No public IP yet: allocate one (EIP for VPC instances) and attach it.
        alloc_id, public_ip = self.allocate_address('VpcId' in instance)

        kwargs = {'InstanceId': instance_id}
        if 'VpcId' in instance:
            kwargs['AllocationId'] = alloc_id
        else:
            kwargs['PublicIp'] = public_ip
        resp, data = self.client.AssociateAddress(*[], **kwargs)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        if 'VpcId' in instance:
            self.addResourceCleanUp(self.client.DisassociateAddress,
                                    AssociationId=data['AssociationId'])
        else:
            self.addResourceCleanUp(self.client.DisassociateAddress,
                                    PublicIp=public_ip)

        return public_ip

    def allocate_address(self, is_vpc):
        """Allocate an address; returns (allocation_id, public_ip).

        `allocation_id` is only meaningful for VPC (domain='vpc') addresses.
        """
        kwargs = dict()
        if is_vpc:
            kwargs['Domain'] = 'vpc'
        resp, data = self.client.AllocateAddress(*[], **kwargs)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        alloc_id = data.get('AllocationId')
        public_ip = data['PublicIp']
        if is_vpc:
            self.addResourceCleanUp(self.client.ReleaseAddress,
                                    AllocationId=alloc_id)
        else:
            self.addResourceCleanUp(self.client.ReleaseAddress,
                                    PublicIp=public_ip)

        return alloc_id, public_ip

    def create_key_pair(self, key_name):
        """Create a key pair and return its private key material."""
        resp, data = self.client.CreateKeyPair(KeyName=key_name)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        self.addResourceCleanUp(self.client.DeleteKeyPair, KeyName=key_name)
        return data.get('KeyMaterial')

    def create_standard_security_group(self):
        """Create a security group allowing ICMP and SSH from anywhere."""
        name = data_utils.rand_name('sgName')
        desc = data_utils.rand_name('sgDesc')
        kwargs = {'GroupName': name, 'Description': desc}
        resp, data = self.client.CreateSecurityGroup(*[], **kwargs)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        self.addResourceCleanUp(self.client.DeleteSecurityGroup,
                                GroupName=name)
        # NOTE(review): short fixed delay, presumably to let the new group
        # propagate before rules are added — confirm necessity.
        time.sleep(2)

        kwargs = {
            'GroupName': name,
            'IpPermissions': [{
                'IpProtocol': 'icmp',
                'FromPort': -1,
                'ToPort': -1,
                'IpRanges': [{
                    'CidrIp': '0.0.0.0/0'
                }],
            }, {
                'IpProtocol': 'tcp',
                'FromPort': 22,
                'ToPort': 22,
                'IpRanges': [{
                    'CidrIp': '0.0.0.0/0'
                }],
            }]
        }
        resp, data = self.client.AuthorizeSecurityGroupIngress(*[], **kwargs)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))

        return name

    def prepare_vpc_default_security_group(self, vpc_id):
        """Open the VPC's default security group to all ingress traffic."""
        resp, data = self.client.DescribeSecurityGroups(
            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])
        self.assertEqual(200, resp.status_code)
        self.assertEqual(1, len(data['SecurityGroups']))
        group_id = data['SecurityGroups'][0]['GroupId']
        kwargs = {
            'GroupId': group_id,
            'IpPermissions': [{
                'IpProtocol': '-1',
                'FromPort': -1,
                'ToPort': -1,
                'IpRanges': [{
                    'CidrIp': '0.0.0.0/0'
                }],
            }]
        }
        resp, data = self.client.AuthorizeSecurityGroupIngress(*[], **kwargs)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))

    def prepare_route(self, vpc_id, gw_id):
        """Add a default (0.0.0.0/0) route through `gw_id` to the VPC."""
        resp, data = self.client.DescribeRouteTables(
            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])
        self.assertEqual(200, resp.status_code)
        self.assertEqual(1, len(data['RouteTables']))
        kwargs = {
            'DestinationCidrBlock': '0.0.0.0/0',
            'RouteTableId': data['RouteTables'][0]['RouteTableId'],
            'GatewayId': gw_id
        }
        resp, data = self.client.CreateRoute(*[], **kwargs)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))

    def create_vpc_and_subnet(self, cidr):
        """Create a VPC and a subnet with the same CIDR; return their ids."""
        resp, data = self.client.CreateVpc(CidrBlock=cidr)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        vpc_id = data['Vpc']['VpcId']
        self.addResourceCleanUp(self.client.DeleteVpc, VpcId=vpc_id)
        self.get_vpc_waiter().wait_available(vpc_id)
        resp, data = self.client.CreateSubnet(VpcId=vpc_id, CidrBlock=cidr,
                                              AvailabilityZone=CONF.aws.aws_zone)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        subnet_id = data['Subnet']['SubnetId']
        self.addResourceCleanUp(self.client.DeleteSubnet, SubnetId=subnet_id)

        return vpc_id, subnet_id

    def create_network_interface(self, subnet_id):
        """Create a network interface in `subnet_id`; return its id."""
        resp, data = self.client.CreateNetworkInterface(SubnetId=subnet_id)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        ni_id = data['NetworkInterface']['NetworkInterfaceId']
        self.addResourceCleanUp(self.client.DeleteNetworkInterface,
                                NetworkInterfaceId=ni_id)
        self.get_network_interface_waiter().wait_available(ni_id)

        return ni_id

    def create_and_attach_internet_gateway(self, vpc_id):
        """Create an internet gateway, attach it to the VPC, return its id."""
        resp, data = self.client.CreateInternetGateway()
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        gw_id = data['InternetGateway']['InternetGatewayId']
        self.addResourceCleanUp(self.client.DeleteInternetGateway,
                                InternetGatewayId=gw_id)
        resp, data = self.client.AttachInternetGateway(VpcId=vpc_id,
                                                       InternetGatewayId=gw_id)
        self.assertEqual(200, resp.status_code, base.EC2ErrorConverter(data))
        self.addResourceCleanUp(self.client.DetachInternetGateway,
                                VpcId=vpc_id,
                                InternetGatewayId=gw_id)

        return gw_id
|
jmartu/testing | refs/heads/master | venv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__init__.py | 1178 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from .__about__ import (
__author__, __copyright__, __email__, __license__, __summary__, __title__,
__uri__, __version__
)
# Explicit public API: only the metadata dunders re-exported from __about__.
__all__ = [
    "__title__", "__summary__", "__uri__", "__version__", "__author__",
    "__email__", "__license__", "__copyright__",
]
|
BillKeenan/lets-encrypt-preview | refs/heads/master | letsencrypt/cli.py | 1 | """Let's Encrypt CLI."""
# TODO: Sanity check all input. Be sure to avoid shell code etc...
import argparse
import atexit
import functools
import logging
import logging.handlers
import os
import pkg_resources
import sys
import time
import traceback
import configargparse
import OpenSSL
import zope.component
import zope.interface.exceptions
import zope.interface.verify
from acme import client as acme_client
from acme import jose
import letsencrypt
from letsencrypt import account
from letsencrypt import colored_logging
from letsencrypt import configuration
from letsencrypt import constants
from letsencrypt import client
from letsencrypt import crypto_util
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt import log
from letsencrypt import reporter
from letsencrypt import storage
from letsencrypt.display import util as display_util
from letsencrypt.display import ops as display_ops
from letsencrypt.errors import Error, PluginSelectionError, CertStorageError
from letsencrypt.plugins import disco as plugins_disco
logger = logging.getLogger(__name__)
# Argparse's help formatting has a lot of unhelpful peculiarities, so we want
# to replace as much of it as we can...
# This is the stub to include in help generated by argparse
SHORT_USAGE = """
letsencrypt [SUBCOMMAND] [options] [-d domain] [-d domain] ...
The Let's Encrypt agent can obtain and install HTTPS/TLS/SSL certificates. By
default, it will attempt to use a webserver both for obtaining and installing
the cert. Major SUBCOMMANDS are:
(default) run Obtain & install a cert in your current webserver
certonly Obtain cert, but do not install it (aka "auth")
install Install a previously obtained cert in a server
revoke Revoke a previously obtained certificate
rollback Rollback server configuration changes made during install
config_changes Show changes made to server config during installation
"""
# This is the short help for letsencrypt --help, where we disable argparse
# altogether
USAGE = SHORT_USAGE + """Choice of server plugins for obtaining and installing cert:
%s
--standalone Run a standalone webserver for authentication
%s
OR use different servers to obtain (authenticate) the cert and then install it:
--authenticator standalone --installer apache
More detailed help:
-h, --help [topic] print this message, or detailed help on a topic;
the available topics are:
all, automation, paths, security, testing, or any of the subcommands or
plugins (certonly, install, nginx, apache, standalone, etc)
"""
def usage_strings(plugins):
    """Build the (long, short) usage strings.

    Built lazily (at call time) so the plugin registry is already
    initialised when we decide how to describe nginx/apache support.
    """
    nginx_doc = (
        "--nginx Use the Nginx plugin for authentication & installation"
        if "nginx" in plugins else
        "(nginx support is experimental, buggy, and not installed by default)")
    apache_doc = (
        "--apache Use the Apache plugin for authentication & installation"
        if "apache" in plugins else
        "(the apache plugin is not installed)")
    return USAGE % (apache_doc, nginx_doc), SHORT_USAGE
def _find_domains(args, installer):
if args.domains is None:
domains = display_ops.choose_names(installer)
else:
domains = args.domains
if not domains:
raise Error("Please specify --domains, or --installer that "
"will help in domain names autodiscovery")
return domains
def _determine_account(args, config):
    """Determine which account to use.

    In order to make the renewer (configuration de/serialization) happy,
    if ``args.account`` is ``None``, it will be updated based on the
    user input. Same for ``args.email``.

    :param argparse.Namespace args: CLI arguments
    :param letsencrypt.interface.IConfig config: Configuration object
    :param .AccountStorage account_storage: Account storage.

    :returns: Account and optionally ACME client API (biproduct of new
        registration).
    :rtype: `tuple` of `letsencrypt.account.Account` and
        `acme.client.Client`
    """
    account_storage = account.AccountFileStorage(config)
    acme = None

    if args.account is not None:
        # Explicit account id given; any load failure propagates.
        acc = account_storage.load(args.account)
    else:
        accounts = account_storage.find_all()
        if len(accounts) > 1:
            # Several known accounts: let the user pick interactively.
            acc = display_ops.choose_account(accounts)
        elif len(accounts) == 1:
            acc = accounts[0]
        else:  # no account registered yet
            if args.email is None:
                args.email = display_ops.get_email()
            if not args.email:  # get_email might return ""
                args.email = None

            def _tos_cb(regr):
                # Terms-of-service callback: auto-accept with --agree-tos,
                # otherwise prompt the user.
                if args.tos:
                    return True
                msg = ("Please read the Terms of Service at {0}. You "
                       "must agree in order to register with the ACME "
                       "server at {1}".format(
                           regr.terms_of_service, config.server))
                return zope.component.getUtility(interfaces.IDisplay).yesno(
                    msg, "Agree", "Cancel")

            try:
                acc, acme = client.register(
                    config, account_storage, tos_cb=_tos_cb)
            except Error as error:
                logger.debug(error, exc_info=True)
                raise Error(
                    "Unable to register an account with ACME server")

    # Record the chosen/created account id back on args (see docstring).
    args.account = acc.id
    return acc, acme
def _init_le_client(args, config, authenticator, installer):
    """Construct a `client.Client`, resolving an account when needed.

    An ACME account (possibly freshly registered) is only required when
    an authenticator will actually be used; install-only invocations
    pass ``acc=None, acme=None``.
    """
    if authenticator is None:
        acc = acme = None
    else:
        # if authenticator was given, then we will need account...
        acc, acme = _determine_account(args, config)
        logger.debug("Picked account: %r", acc)
        # XXX
        #crypto_util.validate_key_csr(acc.key)
    return client.Client(config, acc, authenticator, installer, acme=acme)
def _find_duplicative_certs(config, domains):
    """Find existing certs that duplicate the request.

    :returns: (cert with exactly the requested names, cert whose names
        are a subset of the request); either element may be ``None``.
    """
    identical_names_cert = subset_names_cert = None

    cli_config = configuration.RenewerConfiguration(config)
    configs_dir = cli_config.renewal_configs_dir
    # Verify the directory is there
    le_util.make_or_verify_dir(configs_dir, mode=0o755, uid=os.geteuid())

    requested_names = set(domains)
    for renewal_file in os.listdir(configs_dir):
        full_path = os.path.join(configs_dir, renewal_file)
        try:
            candidate_lineage = storage.RenewableCert(full_path, cli_config)
        except (CertStorageError, IOError):
            logger.warning("Renewal configuration file %s is broken. "
                           "Skipping.", full_path)
            continue
        # TODO: Handle these differently depending on whether they are
        # expired or still valid?
        candidate_names = set(candidate_lineage.names())
        if candidate_names == requested_names:
            identical_names_cert = candidate_lineage
        elif candidate_names.issubset(requested_names):
            subset_names_cert = candidate_lineage

    return identical_names_cert, subset_names_cert
def _treat_as_renewal(config, domains):
    """Determine whether or not the call should be treated as a renewal.

    :returns: RenewableCert or None if renewal shouldn't occur.
    :rtype: :class:`.storage.RenewableCert`

    :raises .Error: If the user would like to rerun the client again.
    """
    renewal = False

    # Considering the possibility that the requested certificate is
    # related to an existing certificate. (config.duplicate, which
    # is set with --duplicate, skips all of this logic and forces any
    # kind of certificate to be obtained with renewal = False.)
    if not config.duplicate:
        ident_names_cert, subset_names_cert = _find_duplicative_certs(
            config, domains)
        # I am not sure whether that correctly reads the systemwide
        # configuration file.
        question = None
        if ident_names_cert is not None:
            question = (
                "You have an existing certificate that contains exactly the "
                "same domains you requested (ref: {0}){br}{br}Do you want to "
                "renew and replace this certificate with a newly-issued one?"
            ).format(ident_names_cert.configfile.filename, br=os.linesep)
        elif subset_names_cert is not None:
            question = (
                "You have an existing certificate that contains a portion of "
                "the domains you requested (ref: {0}){br}{br}It contains these "
                "names: {1}{br}{br}You requested these names for the new "
                "certificate: {2}.{br}{br}Do you want to replace this existing "
                "certificate with the new certificate?"
            ).format(subset_names_cert.configfile.filename,
                     ", ".join(subset_names_cert.names()),
                     ", ".join(domains),
                     br=os.linesep)

        if question is None:
            # We aren't in a duplicative-names situation at all, so we don't
            # have to tell or ask the user anything about this.
            pass
        elif config.renew_by_default or zope.component.getUtility(
                interfaces.IDisplay).yesno(question, "Replace", "Cancel"):
            renewal = True
        else:
            # User declined replacement: explain how to force a duplicate
            # cert, then abort with an error.
            reporter_util = zope.component.getUtility(interfaces.IReporter)
            reporter_util.add_message(
                "To obtain a new certificate that {0} an existing certificate "
                "in its domain-name coverage, you must use the --duplicate "
                "option.{br}{br}For example:{br}{br}{1} --duplicate {2}".format(
                    "duplicates" if ident_names_cert is not None else
                    "overlaps with",
                    sys.argv[0], " ".join(sys.argv[1:]),
                    br=os.linesep
                ),
                reporter_util.HIGH_PRIORITY)
            raise Error(
                "User did not use proper CLI and would like "
                "to reinvoke the client.")

    # renewal can only be True when not config.duplicate, so the lineage
    # variables are always bound here when they are read.
    if renewal:
        return ident_names_cert if ident_names_cert is not None else subset_names_cert
    return None
def _report_new_cert(cert_path, fullchain_path):
    """Reports the creation of a new certificate to the user.

    :param str cert_path: path to cert
    :param str fullchain_path: path to full chain
    """
    expiry = crypto_util.notAfter(cert_path).date()
    reporter_util = zope.component.getUtility(interfaces.IReporter)
    if fullchain_path:
        # Print the path to fullchain.pem because that's what modern webservers
        # (Nginx and Apache2.4) will want.
        and_chain = "and chain have"
        path = fullchain_path
    else:
        # Unless we're in .csr mode and there really isn't one.
        # Fix: no trailing space here -- the format string below already
        # separates "{0}" and "been", so "has " produced "has  been".
        and_chain = "has"
        path = cert_path
    # XXX Perhaps one day we could detect the presence of known old webservers
    # and say something more informative here.
    msg = ("Congratulations! Your certificate {0} been saved at {1}."
           " Your cert will expire on {2}. To obtain a new version of the "
           "certificate in the future, simply run Let's Encrypt again."
           .format(and_chain, path, expiry))
    reporter_util.add_message(msg, reporter_util.MEDIUM_PRIORITY)
def _auth_from_domains(le_client, config, domains, plugins):
    """Authenticate and enroll certificate.

    Renews an existing lineage when one matches the requested domains,
    otherwise obtains and enrolls a brand-new certificate.

    :returns: the (new or updated) certificate lineage
    :raises Error: if a new certificate could not be obtained
    """
    # Note: This can raise errors... caught above us though.
    lineage = _treat_as_renewal(config, domains)

    if lineage is not None:
        # Renewal path: fetch a fresh cert and graft it onto the lineage.
        # TODO: schoen wishes to reuse key - discussion
        # https://github.com/letsencrypt/letsencrypt/pull/777/files#r40498574
        new_certr, new_chain, new_key, _ = le_client.obtain_certificate(domains)
        # TODO: Check whether it worked! <- or make sure errors are thrown (jdk)
        lineage.save_successor(
            lineage.latest_common_version(), OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_PEM, new_certr.body),
            new_key.pem, crypto_util.dump_pyopenssl_chain(new_chain))

        lineage.update_all_links_to(lineage.latest_common_version())
        # TODO: Check return value of save_successor
        # TODO: Also update lineage renewal config with any relevant
        # configuration values from this attempt? <- Absolutely (jdkasten)
    else:
        # TREAT AS NEW REQUEST
        lineage = le_client.obtain_and_enroll_certificate(domains, plugins)
        if not lineage:
            raise Error("Certificate could not be obtained")

    _report_new_cert(lineage.cert, lineage.fullchain)

    return lineage
def set_configurator(previously, now):
    """Merge two configurator requests, insisting that they agree.

    Setting configurators multiple ways is okay, as long as they all
    request the same plugin.

    :param str previously: previously identified request for the
        installer/authenticator (may be falsy)
    :param str now: the request currently being processed (may be None)
    :returns: the effective request
    :raises PluginSelectionError: if two non-empty requests conflict
    """
    if now is None:
        # we're not actually setting anything
        return previously
    if previously and previously != now:
        msg = "Too many flags setting configurators/installers/authenticators {0} -> {1}"
        raise PluginSelectionError(msg.format(repr(previously), repr(now)))
    return now
def diagnose_configurator_problem(cfg_type, requested, plugins):
    """Raise the most helpful error message about an unavailable plugin.

    :param str cfg_type: either "installer" or "authenticator"
    :param str requested: the plugin that was requested
    :param .PluginsRegistry plugins: available plugins

    :raises PluginSelectionError: always
    """
    if requested and requested not in plugins:
        msg = "The requested {0} plugin does not appear to be installed".format(requested)
    elif requested:
        # Present but broken: surface the plugin's recorded problem.
        msg = ("The {0} plugin is not working; there may be problems with "
               "your existing configuration.\nThe error was: {1!r}"
               .format(requested, plugins[requested].problem))
    elif cfg_type == "installer":
        if os.path.exists("/etc/debian_version"):
            # Debian... installers are at least possible
            msg = ('No installers seem to be present and working on your system; '
                   'fix that or try running letsencrypt with the "certonly" command')
        else:
            # XXX update this logic as we make progress on #788 and nginx support
            msg = ('No installers are available on your OS yet; try running '
                   '"letsencrypt-auto certonly" to get a cert you can install manually')
    else:
        msg = "{0} could not be determined or is not installed".format(cfg_type)
    raise PluginSelectionError(msg)
def choose_configurator_plugins(args, config, plugins, verb):
    """
    Figure out which configurator we're going to use.

    :param str verb: subcommand being run ("run", "certonly" or "install")
    :returns: (installer, authenticator); either may be None when not needed
    :raises error.PluginSelectionError: if there was a problem
    """

    # Which plugins do we need?
    need_inst = need_auth = (verb == "run")
    if verb == "certonly":
        need_auth = True
    if verb == "install":
        need_inst = True
        if args.authenticator:
            logger.warn("Specifying an authenticator doesn't make sense in install mode")

    # Which plugins did the user request? --configurator seeds both,
    # then the more specific flags override/conflict via set_configurator.
    req_inst = req_auth = args.configurator
    req_inst = set_configurator(req_inst, args.installer)
    req_auth = set_configurator(req_auth, args.authenticator)
    if args.nginx:
        req_inst = set_configurator(req_inst, "nginx")
        req_auth = set_configurator(req_auth, "nginx")
    if args.apache:
        req_inst = set_configurator(req_inst, "apache")
        req_auth = set_configurator(req_auth, "apache")
    if args.standalone:
        req_auth = set_configurator(req_auth, "standalone")
    logger.debug("Requested authenticator %s and installer %s", req_auth, req_inst)

    # Try to meet the user's request and/or ask them to pick plugins
    authenticator = installer = None
    if verb == "run" and req_auth == req_inst:
        # Unless the user has explicitly asked for different auth/install,
        # only consider offering a single choice
        authenticator = installer = display_ops.pick_configurator(config, req_inst, plugins)
    else:
        if need_inst or req_inst:
            installer = display_ops.pick_installer(config, req_inst, plugins)
        if need_auth:
            authenticator = display_ops.pick_authenticator(config, req_auth, plugins)
    logger.debug("Selected authenticator %s and installer %s", authenticator, installer)

    # A needed plugin couldn't be selected: raise a descriptive error.
    if need_inst and not installer:
        diagnose_configurator_problem("installer", req_inst, plugins)
    if need_auth and not authenticator:
        diagnose_configurator_problem("authenticator", req_auth, plugins)

    return installer, authenticator
# TODO: Make run as close to auth + install as possible
# Possible difficulties: args.csr was hacked into auth
def run(args, config, plugins):  # pylint: disable=too-many-branches,too-many-locals
    """Obtain a certificate and install it.

    :returns: an error string on plugin-selection failure, else None
    """
    try:
        installer, authenticator = choose_configurator_plugins(args, config, plugins, "run")
    except PluginSelectionError as e:
        # Fix: "except X, e" is Python-2-only syntax; "as" works on 2.6+ and
        # 3.x.  str(e) replaces the deprecated e.message attribute.
        return str(e)

    domains = _find_domains(args, installer)

    # TODO: Handle errors from _init_le_client?
    le_client = _init_le_client(args, config, authenticator, installer)

    lineage = _auth_from_domains(le_client, config, domains, plugins)

    le_client.deploy_certificate(
        domains, lineage.privkey, lineage.cert,
        lineage.chain, lineage.fullchain)

    le_client.enhance_config(domains, args.redirect)

    # A single stored version means this was a fresh installation,
    # otherwise we just renewed an existing lineage.
    if len(lineage.available_versions("cert")) == 1:
        display_ops.success_installation(domains)
    else:
        display_ops.success_renewal(domains)
def obtaincert(args, config, plugins):
    """Authenticate & obtain cert, but do not install it.

    :returns: an error string on invalid flag combination or
        plugin-selection failure, else None
    """
    if args.domains is not None and args.csr is not None:
        # TODO: --csr could have a priority, when --domains is
        # supplied, check if CSR matches given domains?
        return "--domains and --csr are mutually exclusive"

    try:
        # installers are used in auth mode to determine domain names
        installer, authenticator = choose_configurator_plugins(args, config, plugins, "certonly")
    except PluginSelectionError as e:
        # Fix: "except X, e" is Python-2-only syntax; str(e) replaces the
        # deprecated e.message attribute.
        return str(e)

    # TODO: Handle errors from _init_le_client?
    le_client = _init_le_client(args, config, authenticator, installer)

    # This is a special case; cert and chain are simply saved
    if args.csr is not None:
        certr, chain = le_client.obtain_certificate_from_csr(le_util.CSR(
            file=args.csr[0], data=args.csr[1], form="der"))
        cert_path, _, cert_fullchain = le_client.save_certificate(
            certr, chain, args.cert_path, args.chain_path, args.fullchain_path)
        _report_new_cert(cert_path, cert_fullchain)
    else:
        domains = _find_domains(args, installer)
        _auth_from_domains(le_client, config, domains, plugins)
def install(args, config, plugins):
    """Install a previously obtained cert in a server.

    :returns: an error string on plugin-selection failure, else None
    """
    # XXX: Update for renewer/RenewableCert
    try:
        installer, _ = choose_configurator_plugins(args, config,
                                                   plugins, "install")
    except PluginSelectionError as e:
        # Fix: "except X, e" is Python-2-only syntax; str(e) replaces the
        # deprecated e.message attribute.
        return str(e)

    domains = _find_domains(args, installer)
    le_client = _init_le_client(
        args, config, authenticator=None, installer=installer)
    assert args.cert_path is not None  # required=True in the subparser
    le_client.deploy_certificate(
        domains, args.key_path, args.cert_path, args.chain_path,
        args.fullchain_path)
    le_client.enhance_config(domains, args.redirect)
def revoke(args, config, unused_plugins):  # TODO: coop with renewal config
    """Revoke a previously obtained certificate.

    ``args.cert_path`` (and ``args.key_path`` for the revoke verb) are
    ``(filename, contents)`` tuples produced by the ``read_file``
    argparse type (see ``_paths_parser``).
    """
    if args.key_path is not None:  # revocation by cert key
        logger.debug("Revoking %s using cert key %s",
                     args.cert_path[0], args.key_path[0])
        acme = acme_client.Client(
            config.server, key=jose.JWK.load(args.key_path[1]))
    else:  # revocation by account key
        logger.debug("Revoking %s using Account Key", args.cert_path[0])
        acc, _ = _determine_account(args, config)
        # pylint: disable=protected-access
        acme = client._acme_from_config_key(config, acc.key)
    acme.revoke(jose.ComparableX509(crypto_util.pyopenssl_load_certificate(
        args.cert_path[1])[0]))
def rollback(args, config, plugins):
    """Rollback server configuration changes made during install.

    Delegates to `client.rollback`, reverting ``args.checkpoints``
    configuration checkpoints with the selected installer.
    """
    client.rollback(args.installer, args.checkpoints, config, plugins)
def config_changes(unused_args, config, unused_plugins):
    """Show changes made to server config during installation

    View checkpoints and associated configuration changes.
    """
    client.view_config_changes(config)
def plugins_cmd(args, config, plugins):  # TODO: Use IDisplay rather than print
    """List server software plugins.

    Always filters by the requested interfaces; with --init the plugins
    are additionally initialized and verified, and with --prepare they
    are prepared as well.
    """
    logger.debug("Expected interfaces: %s", args.ifaces)

    ifaces = args.ifaces if args.ifaces is not None else []
    visible = plugins.visible().ifaces(ifaces)
    logger.debug("Filtered plugins: %r", visible)

    if not (args.init or args.prepare):
        return

    visible.init(config)
    verified = visible.verify(ifaces)
    logger.debug("Verified plugins: %r", verified)

    if not args.prepare:
        return

    verified.prepare()
    prepared = verified.available()
    logger.debug("Prepared plugins: %s", prepared)
def read_file(filename, mode="rb"):
    """Returns the given file's contents.

    :param str filename: Filename
    :param str mode: open mode (see `open`)

    :returns: A tuple of filename and its contents
    :rtype: tuple

    :raises argparse.ArgumentTypeError: File does not exist or is not readable.
    """
    try:
        # Fix: use a context manager so the file descriptor is closed
        # deterministically (the original leaked it until GC).
        with open(filename, mode) as handle:
            return filename, handle.read()
    except IOError as exc:
        raise argparse.ArgumentTypeError(exc.strerror)
def flag_default(name):
    """Default value for CLI flag.

    :param str name: key into ``constants.CLI_DEFAULTS``
    """
    return constants.CLI_DEFAULTS[name]
def config_help(name, hidden=False):
    """Help message for `.IConfig` attribute.

    Returns ``argparse.SUPPRESS`` when *hidden*, so the option is kept
    out of --help output.
    """
    return argparse.SUPPRESS if hidden else interfaces.IConfig[name].__doc__
class SilentParser(object):  # pylint: disable=too-few-public-methods
    """Silent wrapper around argparse.

    A mini parser wrapper that doesn't print help for its
    arguments. This is needed for the use of callbacks to define
    arguments within plugins.
    """

    def __init__(self, parser):
        self.parser = parser

    def add_argument(self, *args, **kwargs):
        """Forward to the wrapped parser, suppressing any help text."""
        silenced = dict(kwargs, help=argparse.SUPPRESS)
        self.parser.add_argument(*args, **silenced)
class HelpfulArgumentParser(object):
    """Argparse Wrapper.

    This class wraps argparse, adding the ability to make --help less
    verbose, and request help on specific subcategories at a time, eg
    'letsencrypt --help security' for security options.

    NOTE: this class uses Python 2 constructs (``print`` statement,
    ``dict.iteritems``, list + ``dict.keys()``), matching the rest of
    this module.
    """

    # Maps verbs/subcommands to the functions that implement them
    VERBS = {"auth": obtaincert, "certonly": obtaincert,
             "config_changes": config_changes, "everything": run,
             "install": install, "plugins": plugins_cmd,
             "revoke": revoke, "rollback": rollback, "run": run}

    # List of topics for which additional help can be provided
    HELP_TOPICS = ["all", "security",
                   "paths", "automation", "testing"] + VERBS.keys()

    def __init__(self, args, plugins):
        plugin_names = [name for name, _p in plugins.iteritems()]
        self.help_topics = self.HELP_TOPICS + plugin_names + [None]
        usage, short_usage = usage_strings(plugins)
        self.parser = configargparse.ArgParser(
            usage=short_usage,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            args_for_setting_config_path=["-c", "--config"],
            default_config_files=flag_default("config_files"))

        # This is the only way to turn off overly verbose config flag documentation
        self.parser._add_config_file_help = False  # pylint: disable=protected-access
        self.silent_parser = SilentParser(self.parser)

        self.args = args
        self.determine_verb()
        help1 = self.prescan_for_flag("-h", self.help_topics)
        help2 = self.prescan_for_flag("--help", self.help_topics)
        # In Python 2, bool < str when compared, so max() prefers a topic
        # string over a bare True -- the assert guards that assumption.
        assert max(True, "a") == "a", "Gravity changed direction"
        self.help_arg = max(help1, help2)
        if self.help_arg is True:
            # just --help with no topic; avoid argparse altogether
            print usage
            sys.exit(0)
        self.visible_topics = self.determine_help_topics(self.help_arg)
        #print self.visible_topics
        self.groups = {}  # elements are added by .add_group()

    def parse_args(self):
        """Parses command line arguments and returns the result.

        :returns: parsed command line arguments
        :rtype: argparse.Namespace
        """
        parsed_args = self.parser.parse_args(self.args)
        # Attach the implementing function for the detected verb so the
        # caller can simply invoke parsed_args.func(...).
        parsed_args.func = self.VERBS[self.verb]
        return parsed_args

    def determine_verb(self):
        """Determines the verb/subcommand provided by the user.

        This function works around some of the limitations of argparse.
        """
        if "-h" in self.args or "--help" in self.args:
            # all verbs double as help arguments; don't get them confused
            self.verb = "help"
            return

        for i, token in enumerate(self.args):
            if token in self.VERBS:
                verb = token
                # "auth" and "everything" are aliases.
                if verb == "auth":
                    verb = "certonly"
                if verb == "everything":
                    verb = "run"
                self.verb = verb
                # Remove the verb so argparse doesn't see it as positional.
                self.args.pop(i)
                return

        self.verb = "run"

    def prescan_for_flag(self, flag, possible_arguments):
        """Checks cli input for flags.

        Check for a flag, which accepts a fixed set of possible arguments, in
        the command line; we will use this information to configure argparse's
        help correctly. Return the flag's argument, if it has one that matches
        the sequence @possible_arguments; otherwise return whether the flag is
        present.
        """
        if flag not in self.args:
            return False
        pos = self.args.index(flag)
        try:
            nxt = self.args[pos + 1]
            if nxt in possible_arguments:
                return nxt
        except IndexError:
            # flag was the last token; treat as bare flag
            pass
        return True

    def add(self, topic, *args, **kwargs):
        """Add a new command line argument.

        @topic is required, to indicate which part of the help will document
        it, but can be None for `always documented'.
        """
        if self.visible_topics[topic]:
            if topic in self.groups:
                group = self.groups[topic]
                group.add_argument(*args, **kwargs)
            else:
                self.parser.add_argument(*args, **kwargs)
        else:
            # Topic not requested: register the flag but hide its help.
            kwargs["help"] = argparse.SUPPRESS
            self.parser.add_argument(*args, **kwargs)

    def add_group(self, topic, **kwargs):
        """
        This has to be called once for every topic; but we leave those calls
        next to the argument definitions for clarity. Return something
        arguments can be added to if necessary, either the parser or an argument
        group.
        """
        if self.visible_topics[topic]:
            #print "Adding visible group " + topic
            group = self.parser.add_argument_group(topic, **kwargs)
            self.groups[topic] = group
            return group
        else:
            #print "Invisible group " + topic
            return self.silent_parser

    def add_plugin_args(self, plugins):
        """
        Let each of the plugins add its own command line arguments, which
        may or may not be displayed as help topics.
        """
        for name, plugin_ep in plugins.iteritems():
            parser_or_group = self.add_group(name, description=plugin_ep.description)
            #print parser_or_group
            plugin_ep.plugin_cls.inject_parser_options(parser_or_group, name)

    def determine_help_topics(self, chosen_topic):
        """
        The user may have requested help on a topic, return a dict of which
        topics to display. @chosen_topic has prescan_for_flag's return type

        :returns: dict
        """
        # topics maps each topic to whether it should be documented by
        # argparse on the command line
        if chosen_topic == "auth":
            chosen_topic = "certonly"
        if chosen_topic == "everything":
            chosen_topic = "run"
        if chosen_topic == "all":
            return dict([(t, True) for t in self.help_topics])
        elif not chosen_topic:
            return dict([(t, False) for t in self.help_topics])
        else:
            return dict([(t, t == chosen_topic) for t in self.help_topics])
def prepare_and_parse_args(plugins, args):
    """Returns parsed command line arguments.

    :param .PluginsRegistry plugins: available plugins
    :param list args: command line arguments with the program name removed

    :returns: parsed command line arguments
    :rtype: argparse.Namespace
    """
    helpful = HelpfulArgumentParser(args, plugins)

    # --help is automatically provided by argparse
    helpful.add(
        None, "-v", "--verbose", dest="verbose_count", action="count",
        default=flag_default("verbose_count"), help="This flag can be used "
        "multiple times to incrementally increase the verbosity of output, "
        "e.g. -vvv.")
    helpful.add(
        None, "-t", "--text", dest="text_mode", action="store_true",
        help="Use the text output instead of the curses UI.")
    helpful.add(None, "-m", "--email", help=config_help("email"))
    # positional arg shadows --domains, instead of appending, and
    # --domains is useful, because it can be stored in config
    #for subparser in parser_run, parser_auth, parser_install:
    #    subparser.add_argument("domains", nargs="*", metavar="domain")
    helpful.add(None, "-d", "--domains", metavar="DOMAIN", action="append")
    helpful.add(
        None, "--duplicate", dest="duplicate", action="store_true",
        help="Allow getting a certificate that duplicates an existing one")

    helpful.add_group(
        "automation",
        description="Arguments for automating execution & other tweaks")
    helpful.add(
        "automation", "--version", action="version",
        version="%(prog)s {0}".format(letsencrypt.__version__),
        help="show program's version number and exit")
    helpful.add(
        "automation", "--renew-by-default", action="store_true",
        # Fix: help text read "a superset of a a previously attained cert"
        # (duplicated article across the string continuation).
        help="Select renewal by default when domains are a superset of "
             "a previously attained cert")
    helpful.add(
        "automation", "--agree-dev-preview", action="store_true",
        help="Agree to the Let's Encrypt Developer Preview Disclaimer")
    helpful.add(
        "automation", "--agree-tos", dest="tos", action="store_true",
        help="Agree to the Let's Encrypt Subscriber Agreement")
    helpful.add(
        "automation", "--account", metavar="ACCOUNT_ID",
        help="Account ID to use")

    helpful.add_group(
        "testing", description="The following flags are meant for "
        "testing purposes only! Do NOT change them, unless you "
        "really know what you're doing!")
    helpful.add(
        "testing", "--debug", action="store_true",
        help="Show tracebacks if the program exits abnormally")
    helpful.add(
        "testing", "--no-verify-ssl", action="store_true",
        help=config_help("no_verify_ssl"),
        default=flag_default("no_verify_ssl"))
    helpful.add(
        "testing", "--dvsni-port", type=int, default=flag_default("dvsni_port"),
        help=config_help("dvsni_port"))
    helpful.add("testing", "--http-01-port", dest="http01_port", type=int,
                help=config_help("http01_port"))

    helpful.add_group(
        "security", description="Security parameters & server settings")
    helpful.add(
        "security", "--rsa-key-size", type=int, metavar="N",
        default=flag_default("rsa_key_size"), help=config_help("rsa_key_size"))
    helpful.add(
        "security", "--redirect", action="store_true",
        help="Automatically redirect all HTTP traffic to HTTPS for the newly "
             "authenticated vhost.", dest="redirect", default=None)
    helpful.add(
        "security", "--no-redirect", action="store_false",
        help="Do not automatically redirect all HTTP traffic to HTTPS for the newly "
             "authenticated vhost.", dest="redirect", default=None)
    helpful.add(
        "security", "--strict-permissions", action="store_true",
        help="Require that all configuration files are owned by the current "
             "user; only needed if your config is somewhere unsafe like /tmp/")

    _create_subparsers(helpful)
    _paths_parser(helpful)
    # _plugins_parsing should be the last thing to act upon the main
    # parser (--help should display plugin-specific options last)
    _plugins_parsing(helpful, plugins)

    return helpful.parse_args()
def _create_subparsers(helpful):
    """Declare per-subcommand help groups and their specific flags."""
    helpful.add_group("certonly", description="Options for modifying how a cert is obtained")
    helpful.add_group("install", description="Options for modifying how a cert is deployed")
    helpful.add_group("revoke", description="Options for revocation of certs")
    helpful.add_group("rollback", description="Options for reverting config changes")
    helpful.add_group("plugins", description="Plugin options")

    helpful.add("certonly",
                "--csr", type=read_file,
                help="Path to a Certificate Signing Request (CSR) in DER"
                     " format; note that the .csr file *must* contain a Subject"
                     " Alternative Name field for each domain you want certified.")
    helpful.add("rollback",
                "--checkpoints", type=int, metavar="N",
                default=flag_default("rollback_checkpoints"),
                help="Revert configuration N number of checkpoints.")
    helpful.add("plugins",
                "--init", action="store_true", help="Initialize plugins.")
    helpful.add("plugins",
                "--prepare", action="store_true", help="Initialize and prepare plugins.")
    helpful.add("plugins",
                "--authenticators", action="append_const", dest="ifaces",
                const=interfaces.IAuthenticator, help="Limit to authenticator plugins only.")
    helpful.add("plugins",
                "--installers", action="append_const", dest="ifaces",
                const=interfaces.IInstaller, help="Limit to installer plugins only.")
def _paths_parser(helpful):
    """Declare path- and server-related arguments.

    --cert-path and --key-path change their argparse type and
    required-ness depending on the verb being executed, so the current
    verb is consulted to decide how (and under which help section)
    they are registered.
    """
    add = helpful.add
    verb = helpful.verb
    if verb == "help":
        # When showing help, document the flags for the topic requested.
        verb = helpful.help_arg
    helpful.add_group(
        "paths", description="Arguments changing execution paths & servers")

    cph = "Path to where cert is saved (with auth --csr), installed from or revoked."
    section = "paths"
    if verb in ("install", "revoke", "certonly"):
        section = verb
    if verb == "certonly":
        add(section, "--cert-path", default=flag_default("auth_cert_path"), help=cph)
    elif verb == "revoke":
        # revoke reads the cert file itself, hence type=read_file.
        add(section, "--cert-path", type=read_file, required=True, help=cph)
    else:
        add(section, "--cert-path", help=cph, required=(verb == "install"))

    section = "paths"
    if verb in ("install", "revoke"):
        section = verb
    # revoke --key-path reads a file, install --key-path takes a string
    add(section, "--key-path", type=((verb == "revoke" and read_file) or str),
        required=(verb == "install"),
        help="Path to private key for cert creation or revocation (if account key is missing)")

    default_cp = None
    if verb == "certonly":
        default_cp = flag_default("auth_chain_path")
    add("paths", "--fullchain-path", default=default_cp,
        help="Accompanying path to a full certificate chain (cert plus chain).")
    add("paths", "--chain-path", default=default_cp,
        help="Accompanying path to a certificate chain.")
    add("paths", "--config-dir", default=flag_default("config_dir"),
        help=config_help("config_dir"))
    add("paths", "--work-dir", default=flag_default("work_dir"),
        help=config_help("work_dir"))
    add("paths", "--logs-dir", default=flag_default("logs_dir"),
        help="Logs directory.")
    add("paths", "--server", default=flag_default("server"),
        help=config_help("server"))
def _plugins_parsing(helpful, plugins):
    # Registers the plugin-selection flags and then every discovered plugin's
    # own flags (which are prefixed with the plugin name).
    helpful.add_group(
        "plugins", description="Let's Encrypt client supports an "
        "extensible plugins architecture. See '%(prog)s plugins' for a "
        "list of all available plugins and their names. You can force "
        "a particular plugin by setting options provided below. Further "
        "down this help message you will find plugin-specific options "
        "(prefixed by --{plugin_name}).")
    helpful.add(
        "plugins", "-a", "--authenticator", help="Authenticator plugin name.")
    helpful.add(
        "plugins", "-i", "--installer", help="Installer plugin name (also used to find domains).")
    helpful.add(
        "plugins", "--configurator", help="Name of the plugin that is "
        "both an authenticator and an installer. Should not be used "
        "together with --authenticator or --installer.")
    helpful.add("plugins", "--apache", action="store_true",
                help="Obtain and install certs using Apache")
    helpful.add("plugins", "--nginx", action="store_true",
                help="Obtain and install certs using Nginx")
    helpful.add("plugins", "--standalone", action="store_true",
                help='Obtain certs using a "standalone" webserver.')
    # things should not be reorder past/pre this comment:
    # plugins_group should be displayed in --help before plugin
    # specific groups (so that plugins_group.description makes sense)
    helpful.add_plugin_args(plugins)
def setup_log_file_handler(args, logfile, fmt):
    """Build a DEBUG-level rotating file handler for debug logging.

    :param args: parsed CLI namespace; only ``args.logs_dir`` is read.
    :param str logfile: base name of the log file inside ``args.logs_dir``.
    :param str fmt: ``logging.Formatter`` format string.
    :returns: ``(handler, log_file_path)`` tuple.
    """
    path = os.path.join(args.logs_dir, logfile)
    file_handler = logging.handlers.RotatingFileHandler(
        path, maxBytes=2 ** 20, backupCount=10)
    # Rotate once per invocation. Rollover only works when maxBytes and
    # backupCount are nonzero, so maxBytes is set large enough (1MB) that a
    # single CLI run should never trigger a second rollover on its own.
    file_handler.doRollover()  # TODO: creates empty letsencrypt.log.1 file
    file_handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter(fmt=fmt)
    formatter.converter = time.gmtime  # timestamps in UTC, not localtime
    file_handler.setFormatter(formatter)
    return file_handler, path
def _cli_log_handler(args, level, fmt):
    """Create the console-side log handler.

    Text mode gets a colored stream handler using *fmt*; otherwise a
    dialog-box handler is used with a bare-message format.
    """
    if args.text_mode:
        cli_handler = colored_logging.StreamHandler()
        cli_handler.setFormatter(logging.Formatter(fmt))
    else:
        cli_handler = log.DialogHandler()
        # dialog box is small, display as less as possible
        cli_handler.setFormatter(logging.Formatter("%(message)s"))
    cli_handler.setLevel(level)
    return cli_handler
def setup_logging(args, cli_handler_factory, logfile):
    """Wire up root logging with one console and one rotating file handler."""
    fmt = "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
    # Each -v lowers the threshold by 10, making output noisier.
    level = -args.verbose_count * 10
    file_handler, log_file_path = setup_log_file_handler(
        args, logfile=logfile, fmt=fmt)
    cli_handler = cli_handler_factory(args, level, fmt)
    # TODO: use fileConfig?
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)  # handlers, not the root, do the filtering
    root.addHandler(cli_handler)
    root.addHandler(file_handler)
    logger.debug("Root logging level set at %d", level)
    logger.info("Saving debug log to %s", log_file_path)
def _handle_exception(exc_type, exc_value, trace, args):
    """Logs exceptions and reports them to the user.

    Args is used to determine how to display exceptions to the user. In
    general, if args.debug is True, then the full exception and traceback is
    shown to the user, otherwise it is suppressed. If args itself is None,
    then the traceback and exception is attempted to be written to a logfile.
    If this is successful, the traceback is suppressed, otherwise it is shown
    to the user. sys.exit is always called with a nonzero status.
    """
    logger.debug(
        "Exiting abnormally:%s%s",
        os.linesep,
        "".join(traceback.format_exception(exc_type, exc_value, trace)))
    if issubclass(exc_type, Exception) and (args is None or not args.debug):
        if args is None:
            # Crash happened before/amid argument parsing, so logging is not
            # configured yet; dump the traceback to a file directly.
            logfile = "letsencrypt.log"
            try:
                with open(logfile, "w") as logfd:
                    traceback.print_exception(
                        exc_type, exc_value, trace, file=logfd)
            except:  # pylint: disable=bare-except
                # Could not even write the logfile; last resort is stderr.
                sys.exit("".join(
                    traceback.format_exception(exc_type, exc_value, trace)))
        if issubclass(exc_type, Error):
            # Expected client error: its message is already user-facing.
            sys.exit(exc_value)
        else:
            # Tell the user a bit about what happened, without overwhelming
            # them with a full traceback
            msg = ("An unexpected error occurred.\n" +
                   traceback.format_exception_only(exc_type, exc_value)[0] +
                   "Please see the ")
            if args is None:
                # `logfile` was assigned above in the args-is-None branch.
                msg += "logfile '{0}' for more details.".format(logfile)
            else:
                msg += "logfiles in {0} for more details.".format(args.logs_dir)
            sys.exit(msg)
    else:
        # Non-Exception (e.g. KeyboardInterrupt) or --debug: show everything.
        sys.exit("".join(
            traceback.format_exception(exc_type, exc_value, trace)))
def main(cli_args=None):
    """Command line argument parsing and main script execution.

    :param cli_args: command line arguments (defaults to ``sys.argv[1:]``).
    :returns: result of the subcommand function selected by the parser.
    """
    if cli_args is None:
        # Read argv lazily at call time. The old default `cli_args=sys.argv[1:]`
        # was evaluated once at import time, freezing argv as it was when the
        # module was first imported.
        cli_args = sys.argv[1:]
    sys.excepthook = functools.partial(_handle_exception, args=None)
    # note: arg parser internally handles --help (and exits afterwards)
    plugins = plugins_disco.PluginsRegistry.find_all()
    args = prepare_and_parse_args(plugins, cli_args)
    config = configuration.NamespaceConfig(args)
    zope.component.provideUtility(config)
    # Setup logging ASAP, otherwise "No handlers could be found for
    # logger ..." TODO: this should be done before plugins discovery
    for directory in config.config_dir, config.work_dir:
        le_util.make_or_verify_dir(
            directory, constants.CONFIG_DIRS_MODE, os.geteuid(),
            "--strict-permissions" in cli_args)
    # TODO: logs might contain sensitive data such as contents of the
    # private key! #525
    le_util.make_or_verify_dir(
        args.logs_dir, 0o700, os.geteuid(), "--strict-permissions" in cli_args)
    setup_logging(args, _cli_log_handler, logfile='letsencrypt.log')
    logger.debug("letsencrypt version: %s", letsencrypt.__version__)
    # do not log `args`, as it contains sensitive data (e.g. revoke --key)!
    logger.debug("Arguments: %r", cli_args)
    logger.debug("Discovered plugins: %r", plugins)
    # Now that the logfile exists, route unhandled exceptions through it.
    sys.excepthook = functools.partial(_handle_exception, args=args)
    # Displayer
    if args.text_mode:
        displayer = display_util.FileDisplay(sys.stdout)
    else:
        displayer = display_util.NcursesDisplay()
    zope.component.provideUtility(displayer)
    # Reporter
    report = reporter.Reporter()
    zope.component.provideUtility(report)
    atexit.register(report.atexit_print_messages)
    # TODO: remove developer preview prompt for the launch
    if not config.agree_dev_preview:
        disclaimer = pkg_resources.resource_string("letsencrypt", "DISCLAIMER")
        if not zope.component.getUtility(interfaces.IDisplay).yesno(
                disclaimer, "Agree", "Cancel"):
            raise Error("Must agree to TOS")
    if os.geteuid() != 0:
        logger.warning(
            "Root (sudo) is required to run most of letsencrypt functionality.")
        # check must be done after arg parsing as --help should work
        # w/o root; on the other hand, e.g. "letsencrypt run
        # --authenticator dns" or "letsencrypt plugins" does not
        # require root as well
        #return (
        #    "{0}Root is required to run letsencrypt. Please use sudo.{0}"
        #    .format(os.linesep))
    return args.func(args, config, plugins)
if __name__ == "__main__":
    err_string = main()
    if err_string:
        # logger.warn is a deprecated alias; use warning().
        logger.warning("Exiting with message %s", err_string)
    sys.exit(err_string)  # pragma: no cover
|
luotao1/Paddle | refs/heads/develop | python/paddle/amp/grad_scaler.py | 1 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.dygraph.amp import AmpScaler
__all__ = ['GradScaler']
class GradScaler(AmpScaler):
    """
    Loss scaler for Auto-Mixed-Precision (AMP) training in dynamic graph mode.

    A ``GradScaler`` multiplies the loss by a (possibly dynamic) scaling
    factor before backpropagation so small gradients do not underflow, and
    handles unscaling and update-skipping during the optimizer step. It
    exposes two methods: ``scale()`` multiplies the loss by the scale ratio,
    and ``minimize()`` replaces ``optimizer.minimize()``. It is commonly used
    together with `paddle.amp.auto_cast` in dynamic graph mode.

    Args:
        enable(bool, optional): Enable loss scaling or not. Default is True.
        init_loss_scaling (float, optional): The initial loss scaling factor. Default is 2**15.
        incr_ratio(float, optional): The multiplier to use when increasing the loss
                                     scaling. Default is 2.0.
        decr_ratio(float, optional): The less-than-one-multiplier to use when decreasing
                                     the loss scaling. Default is 0.5.
        incr_every_n_steps(int, optional): Increases loss scaling every n consecutive
                                           steps with finite gradients. Default is 1000.
        decr_every_n_nan_or_inf(int, optional): Decreases loss scaling every n
                                                accumulated steps with nan or inf gradients. Default is 2.
        use_dynamic_loss_scaling(bool, optional): Whether to use dynamic loss scaling.
                                                  If False, the fixed loss scaling is used. Default is True.
    Returns:
        An GradScaler object.

    Examples:
        .. code-block:: python

            import paddle

            model = paddle.nn.Conv2D(3, 2, 3, bias_attr=True)
            optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
            scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
            data = paddle.rand([10, 3, 32, 32])
            with paddle.amp.auto_cast():
                conv = model(data)
                loss = paddle.mean(conv)
            scaled = scaler.scale(loss)         # scale the loss
            scaled.backward()                   # do backward
            scaler.minimize(optimizer, scaled)  # update parameters
    """

    def __init__(self,
                 enable=True,
                 init_loss_scaling=2.**15,
                 incr_ratio=2.0,
                 decr_ratio=0.5,
                 incr_every_n_steps=1000,
                 decr_every_n_nan_or_inf=2,
                 use_dynamic_loss_scaling=True):
        # All bookkeeping lives in AmpScaler; this subclass only provides the
        # public ``paddle.amp`` API surface and its documentation.
        super(GradScaler, self).__init__(
            enable, init_loss_scaling, incr_ratio, decr_ratio,
            incr_every_n_steps, decr_every_n_nan_or_inf,
            use_dynamic_loss_scaling)

    def scale(self, var):
        """
        Multiply ``var`` (usually the loss) by the current scale factor.

        If this instance is not enabled, ``var`` is returned unmodified.

        Args:
            var (Tensor): The tensor to scale.
        Returns:
            The scaled tensor, or the original tensor when scaling is disabled.
        """
        return super(GradScaler, self).scale(var)

    def minimize(self, optimizer, *args, **kwargs):
        """
        Drop-in replacement for ``optimizer.minimize()`` under AMP.

        If the scaled gradients contain NaN or Inf, the parameter update is
        skipped. Otherwise the gradients are first unscaled, the parameters
        are updated, and finally the loss scaling ratio is adjusted.

        Args:
            optimizer(Optimizer): The optimizer used to update parameters.
            args: Arguments, which will be forward to `optimizer.minimize()`.
            kwargs: Keyword arguments, which will be forward to `optimizer.minimize()`.
        """
        return super(GradScaler, self).minimize(optimizer, *args, **kwargs)
|
da1z/intellij-community | refs/heads/master | python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_0/_pkg1_0_0/_pkg1_0_0_1/_pkg1_0_0_1_0/_mod1_0_0_1_0_4.py | 30 | name1_0_0_1_0_4_0 = None
# Auto-generated placeholder names exercised by star-import completion tests;
# all are bound to None in a single chained assignment.
name1_0_0_1_0_4_1 = name1_0_0_1_0_4_2 = name1_0_0_1_0_4_3 = name1_0_0_1_0_4_4 = None
thebigmunch/gmusicapi | refs/heads/develop | gmusicapi/_version.py | 1 | # -*- coding: utf-8 -*-
# Package version string; the u-prefix keeps it unicode under Python 2.
__version__ = u"11.0.4-rc.1"
|
procangroup/edx-platform | refs/heads/master | cms/djangoapps/contentstore/debug_file_uploader.py | 25 | import time
from django.core.files.uploadhandler import FileUploadHandler
class DebugFileUploader(FileUploadHandler):
    """Deliberately slow upload handler for exercising upload-progress UI.

    Sleeps one second per received chunk. If the request carries a
    ``fail_at`` query parameter, raises once more than that many bytes
    have been received (a ``fail_at`` of 0 never triggers).
    """

    def __init__(self, request=None):
        super(DebugFileUploader, self).__init__(request)
        self.count = 0  # total bytes received so far

    def receive_data_chunk(self, raw_data, start):
        time.sleep(1)  # artificially slow the upload down
        self.count += len(raw_data)
        fail_threshold = None
        if 'fail_at' in self.request.GET:
            fail_threshold = int(self.request.GET.get('fail_at'))
        if fail_threshold and self.count > fail_threshold:
            raise Exception('Triggered fail')
        return raw_data

    def file_complete(self, file_size):
        # None tells Django this handler did not produce a file, so the
        # next handler in the chain is consulted.
        return None
|
jacksonwilliams/arsenalsuite | refs/heads/master | cpp/lib/PyQt4/examples/designer/plugins/python/polygonwidgetplugin.py | 20 | #!/usr/bin/env python
"""
polygonwidgetplugin.py
A polygon widget custom widget plugin for Qt Designer.
Copyright (C) 2006 David Boddie <david@boddie.org.uk>
Copyright (C) 2005-2006 Trolltech ASA. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from PyQt4 import QtGui, QtDesigner
from polygonwidget import PolygonWidget
class PolygonWidgetPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin):
    """Qt Designer custom-widget plugin exposing PolygonWidget.

    Implements the QDesignerCustomWidgetPlugin interface via PyQt's
    plugin base class so Designer can list, create and serialize the
    widget.
    """

    def __init__(self, parent=None):
        super(PolygonWidgetPlugin, self).__init__(parent)
        self.initialized = False

    def initialize(self, core):
        # One-time setup guard: Designer may call initialize() repeatedly.
        if self.initialized:
            return
        self.initialized = True

    def isInitialized(self):
        return self.initialized

    def createWidget(self, parent):
        # Factory used by Designer to instantiate the custom widget.
        return PolygonWidget(parent)

    def name(self):
        # Class name of the widget this plugin provides.
        return "PolygonWidget"

    def group(self):
        # Widget-box group shown in Designer.
        return "PyQt Examples"

    def icon(self):
        return QtGui.QIcon(_logo_pixmap)

    def toolTip(self):
        return ""

    def whatsThis(self):
        return ""

    def isContainer(self):
        # Not a container: cannot host child widgets in Designer.
        return False

    def domXml(self):
        # Default XML instance description used for each created widget.
        return '<widget class="PolygonWidget" name="polygonWidget" />\n'

    def includeFile(self):
        # Module (importable path) that defines the widget class.
        return "polygonwidget"
# XPM image data for the 16x16 plugin icon.
# Header row is "width height ncolors chars-per-pixel"; the next 46 rows map
# one character each to a color, and the final 16 rows are the pixel grid.
_logo_16x16_xpm = [
    "16 16 46 1",
    ". c #a5a5dc",
    "l c #a69fd6",
    "k c #a7a5da",
    "h c #a7a6dc",
    "a c #a7a7de",
    "Q c #a8a5da",
    "s c #a9a7d7",
    "R c #a9a9e0",
    "z c #abaad4",
    "E c #afafda",
    "M c #afafdb",
    "K c #b0a8e2",
    "o c #b1afe4",
    "p c #b2b2d7",
    "# c #b2b2ed",
    "i c #b39eb6",
    "F c #b3b3e1",
    "e c #b4b4ef",
    "t c #b58bab",
    "d c #b6b6f2",
    "n c #b798b8",
    "P c #b798b9",
    "c c #b8b6f2",
    "D c #b8b89c",
    "m c #b9648d",
    "J c #ba84b0",
    "A c #bdbdfb",
    "f c #bfbffe",
    "g c #c06996",
    "b c #c0c0ff",
    "B c #cbb889",
    "L c #cbb989",
    "O c #cfcf87",
    "I c #d09585",
    "w c #d0cf86",
    "x c #dede81",
    "G c #e8e87c",
    "q c #edde7b",
    "N c #f1e07b",
    "v c #f2e07b",
    "H c #f6e57c",
    "j c #fb917e",
    "u c #ffb580",
    "r c #ffda80",
    "C c #fffe80",
    "y c #ffff80",
    ".##############a",
    "#bbbbbbbbcdbbbbe",
    "#bbbbbbbfghbbbbe",
    "#bbbbbbbijkbbbbe",
    "#blmnobpqrsbbbbe",
    "#bbtuvwxyyzbbbbe",
    "#bbABCyyyyDEfbbe",
    "#bbbFGyyyyyHIJKe",
    "#bbbFGyyyyyHIJKe",
    "#bbALCyyyyDMfbbe",
    "#bbtuNOxyyzbbbbe",
    "#blmPobpqrsbbbbe",
    "#bbbbbbbijQbbbbe",
    "#bbbbbbbfghbbbbe",
    "#bbbbbbbbcdbbbbe",
    "aeeeeeeeeeeeeeeR"]
# Built once at import time; reused by PolygonWidgetPlugin.icon().
_logo_pixmap = QtGui.QPixmap(_logo_16x16_xpm)
|
hack4impact/maps4all-jlc-sp2 | refs/heads/master | app/descriptor/__init__.py | 4 | from flask import Blueprint
# Blueprint grouping all descriptor-related routes; registered on the app
# by the application factory.
descriptor = Blueprint('descriptor', __name__)
# Imported for side effects: attaches view functions to the blueprint.
# Deliberately placed after the Blueprint definition (presumably so views
# can import `descriptor` without a circular import -- confirm).
from . import views  # noqa
|
noslenfa/tdjangorest | refs/heads/master | uw/lib/python2.7/site-packages/django/contrib/auth/forms.py | 59 | from __future__ import unicode_literals
from django import forms
from django.forms.util import flatatt
from django.template import loader
from django.utils.datastructures import SortedDict
from django.utils.html import format_html, format_html_join
from django.utils.http import int_to_base36
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.models import User
from django.contrib.auth.hashers import UNUSABLE_PASSWORD, identify_hasher
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
# Number of leading password characters left visible by mask_password().
UNMASKED_DIGITS_TO_SHOW = 6


def mask_password(p):
    """Return *p* with all but the first few characters replaced by '*'.

    (PEP 8 E731: a named lambda replaced with a proper def.)
    """
    return "%s%s" % (p[:UNMASKED_DIGITS_TO_SHOW],
                     "*" * max(len(p) - UNMASKED_DIGITS_TO_SHOW, 0))
class ReadOnlyPasswordHashWidget(forms.Widget):
    # Renders a read-only summary of a stored password hash instead of an
    # <input>, so the raw hash is never exposed for editing.
    def render(self, name, value, attrs):
        # `value` is the encoded password hash from the user record.
        encoded = value
        final_attrs = self.build_attrs(attrs)
        if not encoded or encoded == UNUSABLE_PASSWORD:
            summary = mark_safe("<strong>%s</strong>" % ugettext("No password set."))
        else:
            try:
                hasher = identify_hasher(encoded)
            except ValueError:
                # Unknown algorithm or corrupt hash: show a warning rather
                # than failing the whole admin page.
                summary = mark_safe("<strong>%s</strong>" % ugettext(
                    "Invalid password format or unknown hashing algorithm."))
            else:
                # One "<key>: <value>" pair per entry of the hasher's safe
                # summary (algorithm, iterations, masked salt/hash); values
                # are HTML-escaped by format_html_join.
                summary = format_html_join('',
                    "<strong>{0}</strong>: {1} ",
                    ((ugettext(key), value)
                     for key, value in hasher.safe_summary(encoded).items())
                )
        return format_html("<div{0}>{1}</div>", flatatt(final_attrs), summary)
class ReadOnlyPasswordHashField(forms.Field):
    """Form field paired with ReadOnlyPasswordHashWidget; never required."""
    widget = ReadOnlyPasswordHashWidget

    def __init__(self, *args, **kwargs):
        # The widget renders no input, so there is nothing to require.
        if "required" not in kwargs:
            kwargs["required"] = False
        super(ReadOnlyPasswordHashField, self).__init__(*args, **kwargs)

    def bound_data(self, data, initial):
        # Always return initial because the widget doesn't
        # render an input field.
        return initial
class UserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given username and
    password.
    """
    error_messages = {
        'duplicate_username': _("A user with that username already exists."),
        'password_mismatch': _("The two password fields didn't match."),
    }
    # NOTE: field declaration order determines rendering order.
    username = forms.RegexField(label=_("Username"), max_length=30,
        regex=r'^[\w.@+-]+$',
        help_text=_("Required. 30 characters or fewer. Letters, digits and "
                    "@/./+/-/_ only."),
        error_messages={
            'invalid': _("This value may contain only letters, numbers and "
                         "@/./+/-/_ characters.")})
    password1 = forms.CharField(label=_("Password"),
        widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"),
        widget=forms.PasswordInput,
        help_text=_("Enter the same password as above, for verification."))
    class Meta:
        model = User
        fields = ("username",)
    def clean_username(self):
        # Since User.username is unique, this check is redundant,
        # but it sets a nicer error message than the ORM. See #13147.
        username = self.cleaned_data["username"]
        try:
            User._default_manager.get(username=username)
        except User.DoesNotExist:
            # No existing user: the username is available.
            return username
        raise forms.ValidationError(self.error_messages['duplicate_username'])
    def clean_password2(self):
        # Runs after password1 was cleaned, so both values are available.
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'])
        return password2
    def save(self, commit=True):
        # Hash the password via set_password() instead of storing raw input.
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    # Admin "change user" form: username is editable; the password is shown
    # only as a read-only hash summary with a link to the change form.
    username = forms.RegexField(
        label=_("Username"), max_length=30, regex=r"^[\w.@+-]+$",
        help_text=_("Required. 30 characters or fewer. Letters, digits and "
                    "@/./+/-/_ only."),
        error_messages={
            'invalid': _("This value may contain only letters, numbers and "
                         "@/./+/-/_ characters.")})
    password = ReadOnlyPasswordHashField(label=_("Password"),
        help_text=_("Raw passwords are not stored, so there is no way to see "
                    "this user's password, but you can change the password "
                    "using <a href=\"password/\">this form</a>."))
    class Meta:
        model = User
    def __init__(self, *args, **kwargs):
        super(UserChangeForm, self).__init__(*args, **kwargs)
        f = self.fields.get('user_permissions', None)
        if f is not None:
            # Avoid one query per permission when rendering the widget.
            f.queryset = f.queryset.select_related('content_type')
    def clean_password(self):
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value
        return self.initial["password"]
class AuthenticationForm(forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that accepts
    username/password logins.
    """
    username = forms.CharField(max_length=254)
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    error_messages = {
        'invalid_login': _("Please enter a correct %(username)s and password. "
                           "Note that both fields may be case-sensitive."),
        'no_cookies': _("Your Web browser doesn't appear to have cookies "
                        "enabled. Cookies are required for logging in."),
        'inactive': _("This account is inactive."),
    }
    def __init__(self, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.request = request
        self.user_cache = None  # populated by clean() on successful auth
        super(AuthenticationForm, self).__init__(*args, **kwargs)
        # Set the label for the "username" field: the label comes from the
        # active (possibly custom) user model's USERNAME_FIELD, not a
        # hard-coded "username".
        UserModel = get_user_model()
        self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
        if not self.fields['username'].label:
            self.fields['username'].label = capfirst(self.username_field.verbose_name)
    def clean(self):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        if username and password:
            self.user_cache = authenticate(username=username,
                                           password=password)
            if self.user_cache is None:
                raise forms.ValidationError(
                    self.error_messages['invalid_login'] % {
                        'username': self.username_field.verbose_name
                    })
            elif not self.user_cache.is_active:
                raise forms.ValidationError(self.error_messages['inactive'])
        self.check_for_test_cookie()
        return self.cleaned_data
    def check_for_test_cookie(self):
        # Only meaningful when a request was passed to __init__.
        if self.request and not self.request.session.test_cookie_worked():
            raise forms.ValidationError(self.error_messages['no_cookies'])
    def get_user_id(self):
        if self.user_cache:
            return self.user_cache.id
        return None
    def get_user(self):
        # None until clean() has authenticated successfully.
        return self.user_cache
class PasswordResetForm(forms.Form):
    # Step one of the password-reset flow: collect an email address and send
    # one-time reset links to every matching active account.
    error_messages = {
        'unknown': _("That email address doesn't have an associated "
                     "user account. Are you sure you've registered?"),
        'unusable': _("The user account associated with this email "
                      "address cannot reset the password."),
    }
    email = forms.EmailField(label=_("Email"), max_length=254)
    def clean_email(self):
        """
        Validates that an active user exists with the given email address.
        """
        UserModel = get_user_model()
        email = self.cleaned_data["email"]
        self.users_cache = UserModel._default_manager.filter(email__iexact=email)
        if not len(self.users_cache):
            raise forms.ValidationError(self.error_messages['unknown'])
        if not any(user.is_active for user in self.users_cache):
            # none of the filtered users are active
            raise forms.ValidationError(self.error_messages['unknown'])
        if any((user.password == UNUSABLE_PASSWORD)
               for user in self.users_cache):
            raise forms.ValidationError(self.error_messages['unusable'])
        return email
    def save(self, domain_override=None,
             subject_template_name='registration/password_reset_subject.txt',
             email_template_name='registration/password_reset_email.html',
             use_https=False, token_generator=default_token_generator,
             from_email=None, request=None):
        """
        Generates a one-use only link for resetting password and sends to the
        user.
        """
        from django.core.mail import send_mail
        for user in self.users_cache:
            if not domain_override:
                current_site = get_current_site(request)
                site_name = current_site.name
                domain = current_site.domain
            else:
                site_name = domain = domain_override
            # Template context for both the subject and the body templates.
            c = {
                'email': user.email,
                'domain': domain,
                'site_name': site_name,
                'uid': int_to_base36(user.pk),
                'user': user,
                'token': token_generator.make_token(user),
                'protocol': use_https and 'https' or 'http',
            }
            subject = loader.render_to_string(subject_template_name, c)
            # Email subject *must not* contain newlines (header injection)
            subject = ''.join(subject.splitlines())
            email = loader.render_to_string(email_template_name, c)
            send_mail(subject, email, from_email, [user.email])
class SetPasswordForm(forms.Form):
    """
    A form that lets a user set his/her password without entering the
    old password.
    """
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }
    new_password1 = forms.CharField(label=_("New password"),
                                    widget=forms.PasswordInput)
    new_password2 = forms.CharField(label=_("New password confirmation"),
                                    widget=forms.PasswordInput)
    def __init__(self, user, *args, **kwargs):
        # `user` is the account whose password save() will update.
        self.user = user
        super(SetPasswordForm, self).__init__(*args, **kwargs)
    def clean_new_password2(self):
        # Runs after new_password1 was cleaned, so both are available here.
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(
                    self.error_messages['password_mismatch'])
        return password2
    def save(self, commit=True):
        # Hashes the new password; persists only when commit=True.
        self.user.set_password(self.cleaned_data['new_password1'])
        if commit:
            self.user.save()
        return self.user
class PasswordChangeForm(SetPasswordForm):
    """
    A form that lets a user change his/her password by entering
    their old password.
    """
    # Extends the parent's messages with the old-password failure case.
    error_messages = dict(SetPasswordForm.error_messages, **{
        'password_incorrect': _("Your old password was entered incorrectly. "
                                "Please enter it again."),
    })
    old_password = forms.CharField(label=_("Old password"),
                                   widget=forms.PasswordInput)
    def clean_old_password(self):
        """
        Validates that the old_password field is correct.
        """
        old_password = self.cleaned_data["old_password"]
        if not self.user.check_password(old_password):
            raise forms.ValidationError(
                self.error_messages['password_incorrect'])
        return old_password
# Re-order the inherited fields so "Old password" is rendered first
# (declared fields from the subclass would otherwise come last).
PasswordChangeForm.base_fields = SortedDict([
    (k, PasswordChangeForm.base_fields[k])
    for k in ['old_password', 'new_password1', 'new_password2']
])
class AdminPasswordChangeForm(forms.Form):
    """
    A form used to change the password of a user in the admin interface.
    """
    # No old-password check: admins may reset any user's password.
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }
    password1 = forms.CharField(label=_("Password"),
                                widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password (again)"),
                                widget=forms.PasswordInput)
    def __init__(self, user, *args, **kwargs):
        # `user` is the account whose password save() will update.
        self.user = user
        super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)
    def clean_password2(self):
        # Runs after password1 was cleaned, so both values are available.
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(
                    self.error_messages['password_mismatch'])
        return password2
    def save(self, commit=True):
        """
        Saves the new password.
        """
        self.user.set_password(self.cleaned_data["password1"])
        if commit:
            self.user.save()
        return self.user
|
ryanbhayward/games-puzzles-algorithms | refs/heads/master | old/lib/games_puzzles_algorithms/games/fake_game_state.py | 1 | class FakeGameState(object):
    def __init__(self):
        self._player_to_act = 0  # index (0 or 1) of the player to move next
        self._actions = []  # history of actions played, oldest first
    def __str__(self):
        # Debug representation: player to act (PTA) plus full action history.
        return "FakeGameState:\n PTA: {},\n actions taken: {}".format(
            self._player_to_act,
            self._actions)
def num_legal_actions(self):
return len(list(self.legal_actions()))
def legal_actions(self):
if self.is_terminal():
return []
else:
for a in range(2): yield a
def play(self, action):
'''Apply the given action.
`action` must be in the set of legal actions
(see `legal_actions`).
Return `self`.
'''
self._actions.append(action)
self.set_player_to_act(int(not self._player_to_act))
return self
    def __enter__(self):
        '''Context-manager entry; pairs with `__exit__` to auto-undo.

        Enables:
        ```
        with state.play(action):
            # Do something with `state` after `action` has been applied;
            # `action` is automatically undone on exit.
        ```
        Note: returns None, so `with state.play(a) as x` binds None to x.
        '''
        pass
    def __exit__(self,
                 exception_type,
                 exception_val,
                 exception_traceback):
        '''Context-manager exit: undo the last action.

        Enables:
        ```
        with state.play(action):
            # Do something with `state` after `action` has been applied;
            # `action` is automatically undone on exit.
        ```
        Returns None (falsy), so exceptions are never suppressed.
        '''
        self.undo()
def undo(self):
'''Reverse the effect of the last action that was played'''
if len(self._actions) > 0:
self._actions.pop()
self.set_player_to_act(int(not self._player_to_act))
def last_action(self):
if len(self._actions) > 0:
return self._actions[-1]
    def set_player_to_act(self, player):
        # Setter used by play()/undo() to toggle whose turn it is.
        self._player_to_act = player
    def player_to_act(self):
        # Index of the player whose turn it is (0 or 1).
        return self._player_to_act
    def is_terminal(self):
        # The fake game always ends after exactly three actions.
        return len(self._actions) >= 3
def score(self, player):
'''An example arbitrary score function'''
if self.is_terminal():
if self._actions[0] == 0:
if self._actions[1] == 0:
return [2, -2][player]
else:
return [-3, 3][player]
else:
if self._actions[1] == 0:
return [-3, 3][player]
else:
return [2, -2][player]
    def player_who_acted_last(self):
        # play() toggles the turn, so the previous actor is the other player.
        return int(not self._player_to_act)
|
MediaBrowser/Emby.Kodi | refs/heads/master | libraries/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py | 2057 | try:
# Python 3.2+
from ssl import CertificateError, match_hostname
except ImportError:
try:
# Backport of the function from a pypi module
from backports.ssl_match_hostname import CertificateError, match_hostname
except ImportError:
# Our vendored copy
from ._implementation import CertificateError, match_hostname
# Not needed, but documenting what we provide.
__all__ = ('CertificateError', 'match_hostname')
|
dojojon/pygame | refs/heads/master | week5/highscore.py | 1 |
def get_high_score():
    """Read the saved high score from high_score.txt in the current directory.

    Returns 0 when the file is missing, unreadable, or does not contain an
    integer. Prints a status message in every case.
    """
    # Default high score
    high_score = 0
    try:
        # `with` closes the file even if int() raises, unlike the old
        # open/read/close sequence which leaked the handle on ValueError.
        with open("high_score.txt", "r") as high_score_file:
            high_score = int(high_score_file.read())
    except IOError:
        # Error reading file, no high score
        print("There is no high score yet.")
    except ValueError:
        # There's a file there, but we don't understand the number.
        print("I'm confused. Starting with no high score.")
    else:
        print("The high score is", high_score)
    return high_score
def save_high_score(new_high_score):
    """Persist new_high_score to high_score.txt (best effort).

    Prints a message instead of raising when the file cannot be written.
    """
    try:
        # Write the file to disk; `with` closes the handle even if the
        # write itself fails (the original leaked it in that case).
        with open("high_score.txt", "w") as high_score_file:
            high_score_file.write(str(new_high_score))
    except IOError:
        # Hm, can't write it.
        print("Unable to save the high score.")
def is_high_score_and_save(score):
    """Save `score` when it beats the stored record; report whether it did."""
    # Compare against the persisted record and only write when we beat it.
    beats_record = score > get_high_score()
    if beats_record:
        save_high_score(score)
    return beats_record
def main():
    """ Main program is here. """
    # Score defaults to 0 when the user's input is not a number.
    current_score = 0
    try:
        current_score = int(input("What is your score? "))
    except ValueError:
        print("I don't understand what you typed.")
    # Persist and announce the result.
    if is_high_score_and_save(current_score):
        print("Yea! New high score!")
    else:
        print("Better luck next time.")
# Call the main function, start up the game
# Run the game only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
|
CMPUT410W15T02/CMPUT410W15-project | refs/heads/master | testenv/lib/python2.7/site-packages/django/contrib/messages/context_processors.py | 705 | from django.contrib.messages.api import get_messages
from django.contrib.messages.constants import DEFAULT_LEVELS
def messages(request):
    """Return a lazy 'messages' context variable plus the level constants."""
    context = {'messages': get_messages(request)}
    context['DEFAULT_MESSAGE_LEVELS'] = DEFAULT_LEVELS
    return context
|
jaruba/chromium.src | refs/heads/nw12 | chrome/common/extensions/docs/server2/api_categorizer_test.py | 87 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from api_categorizer import APICategorizer
from compiled_file_system import CompiledFileSystem
from extensions_paths import CHROME_EXTENSIONS
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
def _ToTestData(obj):
'''Transforms |obj| into test data by turning a list of files into an object
mapping that file to its contents (derived from its name).
'''
return dict((name, name) for name in obj)
# Mock file-system layout used by APICategorizerTest: mirrors the parts of the
# Chromium extensions docs tree that APICategorizer inspects (feature JSON
# files plus the public apps/extensions template listings).
_TEST_DATA = {
  'api': {
    '_api_features.json': '{}',
    '_manifest_features.json': '{}',
    '_permission_features.json': '{}',
  },
  'docs': {
    'templates': {
      'json': {
        'api_availabilities.json': '{}',
        'manifest.json': '{}',
        'permissions.json': '{}',
      },
      'public': {
        'apps': _ToTestData([
          'alarms.html',
          'app_window.html',
          'experimental_bluetooth.html',
          'experimental_power.html',
          'storage.html',
          'sockets_udp.html'
        ]),
        'extensions': _ToTestData([
          'alarms.html',
          'browserAction.html',
          'experimental_history.html',
          'experimental_power.html',
          'infobars.html',
          'storage.html',
          'sockets_udp.html'
        ]),
      },
    },
  }
}
class APICategorizerTest(unittest.TestCase):
  '''Exercises APICategorizer.GetCategory over the mock docs tree.'''

  def setUp(self):
    self._test_file_system = TestFileSystem(_TEST_DATA,
                                            relative_to=CHROME_EXTENSIONS)
    self._compiled_file_system = CompiledFileSystem.Factory(
        ObjectStoreCreator.ForTest())

  def testGetAPICategory(self):
    def assertGetEqual(expected, category, only_on=None):
      # Check both platforms unless |only_on| pins a specific one.
      for platform in ('apps', 'extensions'):
        effective_platform = platform if only_on is None else only_on
        categorizer = APICategorizer(self._test_file_system,
                                     self._compiled_file_system,
                                     effective_platform)
        self.assertEqual(expected, categorizer.GetCategory(category))
    assertGetEqual('chrome', 'alarms')
    assertGetEqual('private', 'musicManagerPrivate')
    assertGetEqual('private', 'notDocumentedApi')
    assertGetEqual('experimental', 'experimental.bluetooth', only_on='apps')
    assertGetEqual('experimental', 'experimental.history', only_on='extensions')
# Allow running this test file directly with `python api_categorizer_test.py`.
if __name__ == '__main__':
  unittest.main()
|
daajoe/doorbell | refs/heads/master | pjproject/tests/pjsua/scripts-pesq/200_codec_l16_16000_stereo.py | 59 | # $Id: 200_codec_l16_16000_stereo.py 2075 2008-06-27 16:18:13Z nanang $
#
from inc_cfg import *
# Extra pjsua arguments: fall back to the null audio device when the test
# machine has no sound hardware. HAS_SND_DEV, TestParam and InstanceParam all
# come from inc_cfg's star import.
ADD_PARAM = ""
if (HAS_SND_DEV == 0):
    ADD_PARAM += "--null-audio"
# Call with L16/16000/2 codec
test_param = TestParam(
    "PESQ defaults pjsua settings",
    [
        InstanceParam("UA1", ADD_PARAM + " --stereo --max-calls=1 --clock-rate 16000 --add-codec L16/16000/2 --play-file wavs/input.2.16.wav"),
        InstanceParam("UA2", "--null-audio --stereo --max-calls=1 --clock-rate 16000 --add-codec L16/16000/2 --rec-file wavs/tmp.2.16.wav --auto-answer 200")
    ]
    )
# No explicit PESQ score threshold: let the test framework use its default.
pesq_threshold = None
|
gurneyalex/OpenUpgrade | refs/heads/master | addons/hr_evaluation/report/hr_evaluation_report.py | 44 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class hr_evaluation_report(osv.Model):
    """Read-only appraisal statistics model backed by a SQL view.

    _auto = False means no table is created for this model; init() builds the
    hr_evaluation_report view that serves as its data source instead.
    """
    _name = "hr.evaluation.report"
    _description = "Evaluations Statistics"
    _auto = False
    _columns = {
        'create_date': fields.date('Create Date', readonly=True),
        'delay_date': fields.float('Delay to Start', digits=(16, 2), readonly=True),
        'overpass_delay': fields.float('Overpassed Deadline', digits=(16, 2), readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'deadline': fields.date("Deadline", readonly=True),
        'request_id': fields.many2one('survey.user_input', 'Request_id', readonly=True),
        'closed': fields.date("closed", readonly=True),
        'year': fields.char('Year', size=4, readonly=True),
        'month': fields.selection([('01', 'January'), ('02', 'February'), ('03', 'March'), ('04', 'April'),
                                   ('05', 'May'), ('06', 'June'), ('07', 'July'), ('08', 'August'), ('09', 'September'),
                                   ('10', 'October'), ('11', 'November'), ('12', 'December')], 'Month', readonly=True),
        'plan_id': fields.many2one('hr_evaluation.plan', 'Plan', readonly=True),
        'employee_id': fields.many2one('hr.employee', "Employee", readonly=True),
        # NOTE(review): "bellow" is a typo in a user-visible label, but it is a
        # translated runtime string; fixing it would orphan existing
        # translations, so it is deliberately left as-is here.
        'rating': fields.selection([
            ('0', 'Significantly bellow expectations'),
            ('1', 'Did not meet expectations'),
            ('2', 'Meet expectations'),
            ('3', 'Exceeds expectations'),
            ('4', 'Significantly exceeds expectations'),
        ], "Overall Rating", readonly=True),
        'nbr': fields.integer('# of Requests', readonly=True),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('wait', 'Plan In Progress'),
            ('progress', 'Final Validation'),
            ('done', 'Done'),
            ('cancel', 'Cancelled'),
        ], 'Status', readonly=True),
    }
    _order = 'create_date desc'

    def init(self, cr):
        """(Re)create the hr_evaluation_report SQL view backing this model.

        Aggregates evaluation interviews joined to their parent evaluation,
        grouping per evaluation attributes; delay_date/overpass_delay are the
        average age in days of the create date / deadline relative to today.
        """
        tools.drop_view_if_exists(cr, 'hr_evaluation_report')
        cr.execute("""
            create or replace view hr_evaluation_report as (
                 select
                     min(l.id) as id,
                     date_trunc('day',s.create_date) as create_date,
                     to_char(s.create_date, 'YYYY-MM-DD') as day,
                     s.employee_id,
                     l.request_id,
                     s.plan_id,
                     s.rating,
                     s.date as deadline,
                     s.date_close as closed,
                     to_char(s.create_date, 'YYYY') as year,
                     to_char(s.create_date, 'MM') as month,
                     count(l.*) as nbr,
                     s.state,
                     avg(extract('epoch' from age(s.create_date,CURRENT_DATE)))/(3600*24) as delay_date,
                     avg(extract('epoch' from age(s.date,CURRENT_DATE)))/(3600*24) as overpass_delay
                 from
                     hr_evaluation_interview l
                 LEFT JOIN
                     hr_evaluation_evaluation s on (s.id=l.evaluation_id)
                 GROUP BY
                     s.create_date,
                     date_trunc('day',s.create_date),
                     to_char(s.create_date, 'YYYY-MM-DD'),
                     to_char(s.create_date, 'YYYY'),
                     to_char(s.create_date, 'MM'),
                     s.state,
                     s.employee_id,
                     s.date,
                     s.date_close,
                     l.request_id,
                     s.rating,
                     s.plan_id
                )
            """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
leonty/ansible-modules-core | refs/heads/devel | cloud/openstack/os_server_actions.py | 54 | #!/usr/bin/python
# coding: utf-8 -*-
# Copyright (c) 2015, Jesse Keating <jlk@derpops.bike>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
# Module documentation rendered by ansible-doc.
# FIX: `action` was documented with `default: present`, but "present" is not
# among its choices and argument_spec in main() declares action as
# required=True with no default — document it as required instead.
DOCUMENTATION = '''
---
module: os_server_actions
short_description: Perform actions on Compute Instances from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Jesse Keating (@j2sol)"
description:
   - Perform server actions on an existing compute instance from OpenStack.
     This module does not return any data other than changed true/false.
options:
   server:
     description:
        - Name or ID of the instance
     required: true
   wait:
     description:
        - If the module should wait for the instance action to be performed.
     required: false
     default: 'yes'
   timeout:
     description:
        - The amount of time the module should wait for the instance to perform
          the requested action.
     required: false
     default: 180
   action:
     description:
        - Perform the given action. The lock and unlock actions always return
          changed as the servers API does not provide lock status.
     choices: [stop, start, pause, unpause, lock, unlock, suspend, resume]
     required: true
requirements:
    - "python >= 2.6"
    - "shade"
'''
EXAMPLES = '''
# Pauses a compute instance
- os_server_actions:
action: pause
auth:
auth_url: https://mycloud.openstack.blueboxgrid.com:5001/v2.0
username: admin
password: admin
project_name: admin
server: vm1
timeout: 200
'''
# Maps each supported action to the nova server status it should produce
# once the action has completed; _wait() polls for this status.
_action_map = {'stop': 'SHUTOFF',
               'start': 'ACTIVE',
               'pause': 'PAUSED',
               'unpause': 'ACTIVE',
               'lock': 'ACTIVE', # API doesn't show lock/unlock status
               'unlock': 'ACTIVE',
               'suspend': 'SUSPENDED',
               'resume': 'ACTIVE',}

# Actions that require an operator (admin) cloud connection in main().
_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock']
def _wait(timeout, cloud, server, action):
    """Wait for the server to reach the desired state for the given action.

    Polls the server until its status matches _action_map[action]. Raises
    shade.OpenStackCloudException if the server lands in ERROR, and lets
    _iterate_timeout raise when `timeout` seconds elapse.
    """
    for count in shade._utils._iterate_timeout(
            timeout,
            "Timeout waiting for server to complete %s" % action):
        try:
            server = cloud.get_server(server.id)
        except Exception:
            # Transient API failure: keep polling until the timeout fires.
            continue

        if server.status == _action_map[action]:
            return

        if server.status == 'ERROR':
            # FIX: the original called module.fail_json() here, but `module`
            # is a local variable of main() and is not visible in this scope,
            # so hitting this branch raised NameError instead of reporting the
            # failure. Raising OpenStackCloudException lets main()'s except
            # block surface the message via module.fail_json.
            raise shade.OpenStackCloudException(
                "Server reached ERROR state while attempting to %s" % action)
def _system_state_change(action, status):
    """Check if system state would change."""
    # The state changes unless the server is already in the target status.
    return status != _action_map[action]
def main():
    """Ansible entry point: apply the requested action to an OpenStack server.

    Exits via module.exit_json/fail_json; each action branch terminates the
    process, so the if/elif fall-through never continues past a taken branch.
    """
    argument_spec = openstack_full_argument_spec(
        server=dict(required=True),
        action=dict(required=True, choices=['stop', 'start', 'pause', 'unpause',
                                            'lock', 'unlock', 'suspend', 'resume']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    action = module.params['action']
    wait = module.params['wait']
    timeout = module.params['timeout']

    try:
        # Admin-only actions need an operator (admin) cloud connection.
        if action in _admin_actions:
            cloud = shade.operator_cloud(**module.params)
        else:
            cloud = shade.openstack_cloud(**module.params)
        server = cloud.get_server(module.params['server'])
        if not server:
            module.fail_json(msg='Could not find server %s' % server)
        status = server.status

        # In check mode, only report whether the action would change state.
        if module.check_mode:
            module.exit_json(changed=_system_state_change(action, status))

        if action == 'stop':
            if not _system_state_change(action, status):
                module.exit_json(changed=False)
            cloud.nova_client.servers.stop(server=server.id)
            if wait:
                _wait(timeout, cloud, server, action)
                module.exit_json(changed=True)

        if action == 'start':
            if not _system_state_change(action, status):
                module.exit_json(changed=False)
            cloud.nova_client.servers.start(server=server.id)
            if wait:
                _wait(timeout, cloud, server, action)
                module.exit_json(changed=True)

        if action == 'pause':
            if not _system_state_change(action, status):
                module.exit_json(changed=False)
            cloud.nova_client.servers.pause(server=server.id)
            if wait:
                _wait(timeout, cloud, server, action)
                module.exit_json(changed=True)

        elif action == 'unpause':
            if not _system_state_change(action, status):
                module.exit_json(changed=False)
            cloud.nova_client.servers.unpause(server=server.id)
            if wait:
                _wait(timeout, cloud, server, action)
                module.exit_json(changed=True)

        elif action == 'lock':
            # lock doesn't set a state, just do it
            cloud.nova_client.servers.lock(server=server.id)
            module.exit_json(changed=True)

        elif action == 'unlock':
            # unlock doesn't set a state, just do it
            cloud.nova_client.servers.unlock(server=server.id)
            module.exit_json(changed=True)

        elif action == 'suspend':
            if not _system_state_change(action, status):
                module.exit_json(changed=False)
            cloud.nova_client.servers.suspend(server=server.id)
            if wait:
                _wait(timeout, cloud, server, action)
                module.exit_json(changed=True)

        elif action == 'resume':
            if not _system_state_change(action, status):
                module.exit_json(changed=False)
            cloud.nova_client.servers.resume(server=server.id)
            if wait:
                _wait(timeout, cloud, server, action)
                module.exit_json(changed=True)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message, extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
# Entry point when executed by Ansible (or directly as a script).
if __name__ == '__main__':
    main()
|
crlang/sublime-text---front-end-config | refs/heads/master | Data/Packages/WakaTime/packages/wakatime/packages/chardet/sbcsgroupprober.py | 273 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
# from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
from .langturkishmodel import Latin5TurkishModel
class SBCSGroupProber(CharSetGroupProber):
    """Group prober bundling every single-byte charset model chardet knows.

    Each SingleByteCharSetProber scores the input against one
    language/encoding model; the group keeps the best-scoring candidate.
    """

    def __init__(self):
        super(SBCSGroupProber, self).__init__()
        self.probers = [
            SingleByteCharSetProber(Win1251CyrillicModel),
            SingleByteCharSetProber(Koi8rModel),
            SingleByteCharSetProber(Latin5CyrillicModel),
            SingleByteCharSetProber(MacCyrillicModel),
            SingleByteCharSetProber(Ibm866Model),
            SingleByteCharSetProber(Ibm855Model),
            SingleByteCharSetProber(Latin7GreekModel),
            SingleByteCharSetProber(Win1253GreekModel),
            SingleByteCharSetProber(Latin5BulgarianModel),
            SingleByteCharSetProber(Win1251BulgarianModel),
            # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250)
            # after we retrain model.
            # SingleByteCharSetProber(Latin2HungarianModel),
            # SingleByteCharSetProber(Win1250HungarianModel),
            SingleByteCharSetProber(TIS620ThaiModel),
            SingleByteCharSetProber(Latin5TurkishModel),
        ]
        # Hebrew needs an extra arbiter: HebrewProber decides between logical
        # (windows-1255) and visual ordering based on the scores the two
        # single-byte probers feed back into it.
        hebrew_prober = HebrewProber()
        logical_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel,
                                                        False, hebrew_prober)
        visual_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, True,
                                                       hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober)
        self.probers.extend([hebrew_prober, logical_hebrew_prober,
                             visual_hebrew_prober])
        self.reset()
|
jonathan-beard/edx-platform | refs/heads/master | cms/djangoapps/contentstore/views/tests/test_checklists.py | 104 | """ Unit tests for checklist methods in views.py. """
from contentstore.utils import reverse_course_url
from contentstore.views.checklist import expand_checklist_action_url
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.django import modulestore
import json
from contentstore.tests.utils import CourseTestCase
class ChecklistTestCase(CourseTestCase):
    """ Test for checklist get and put methods. """

    def setUp(self):
        """ Creates the test course. """
        super(ChecklistTestCase, self).setUp()
        self.course = CourseFactory.create(org='mitX', number='333', display_name='Checklists Course')
        self.checklists_url = self.get_url()

    def get_url(self, checklist_index=None):
        """ Returns the checklists handler URL, scoped to one checklist when an index is given. """
        url_args = {'checklist_index': checklist_index} if checklist_index else None
        return reverse_course_url('checklists_handler', self.course.id, kwargs=url_args)

    def get_persisted_checklists(self):
        """ Returns the checklists as persisted in the modulestore. """
        return modulestore().get_item(self.course.location).checklists

    def compare_checklists(self, persisted, request):
        """
        Handles url expansion as possible difference and descends into guts
        """
        self.assertEqual(persisted['short_description'], request['short_description'])
        # Expand the persisted side first so both sides carry expanded URLs.
        expanded_checklist = expand_checklist_action_url(self.course, persisted)
        for pers, req in zip(expanded_checklist['items'], request['items']):
            self.assertEqual(pers['short_description'], req['short_description'])
            self.assertEqual(pers['long_description'], req['long_description'])
            self.assertEqual(pers['is_checked'], req['is_checked'])
            self.assertEqual(pers['action_url'], req['action_url'])
            self.assertEqual(pers['action_text'], req['action_text'])
            self.assertEqual(pers['action_external'], req['action_external'])

    def test_get_checklists(self):
        """ Tests the get checklists method and URL expansion. """
        response = self.client.get(self.checklists_url)
        self.assertContains(response, "Getting Started With Studio")
        # Verify expansion of action URL happened.
        self.assertContains(response, 'course_team/mitX/333/Checklists_Course')
        # Verify persisted checklist does NOT have expanded URL.
        checklist_0 = self.get_persisted_checklists()[0]
        self.assertEqual('ManageUsers', get_action_url(checklist_0, 0))
        payload = response.content
        # Now delete the checklists from the course and verify they get repopulated (for courses
        # created before checklists were introduced).
        self.course.checklists = None
        # Save the changed `checklists` to the underlying KeyValueStore before updating the modulestore
        self.course.save()
        modulestore().update_item(self.course, self.user.id)
        self.assertEqual(self.get_persisted_checklists(), None)
        response = self.client.get(self.checklists_url)
        self.assertEqual(payload, response.content)

    def test_get_checklists_html(self):
        """ Tests getting the HTML template for the checklists page). """
        response = self.client.get(self.checklists_url, HTTP_ACCEPT='text/html')
        self.assertContains(response, "Getting Started With Studio")
        # The HTML generated will define the handler URL (for use by the Backbone model).
        self.assertContains(response, self.checklists_url)

    def test_update_checklists_no_index(self):
        """ No checklist index, should return all of them. """
        returned_checklists = json.loads(self.client.get(self.checklists_url).content)
        # Verify that persisted checklists do not have expanded action URLs.
        # compare_checklists will verify that returned_checklists DO have expanded action URLs.
        pers = self.get_persisted_checklists()
        self.assertEqual('CourseOutline', get_first_item(pers[1]).get('action_url'))
        for pay, resp in zip(pers, returned_checklists):
            self.compare_checklists(pay, resp)

    def test_update_checklists_index_ignored_on_get(self):
        """ Checklist index ignored on get. """
        update_url = self.get_url(1)
        returned_checklists = json.loads(self.client.get(update_url).content)
        for pay, resp in zip(self.get_persisted_checklists(), returned_checklists):
            self.compare_checklists(pay, resp)

    def test_update_checklists_post_no_index(self):
        """ No checklist index, will error on post. """
        response = self.client.post(self.checklists_url)
        self.assertContains(response, 'Could not save checklist', status_code=400)

    def test_update_checklists_index_out_of_range(self):
        """ Checklist index out of range, will error on post. """
        update_url = self.get_url(100)
        response = self.client.post(update_url)
        self.assertContains(response, 'Could not save checklist', status_code=400)

    def test_update_checklists_index(self):
        """ Check that an update of a particular checklist works. """
        update_url = self.get_url(1)
        payload = self.course.checklists[1]
        self.assertFalse(get_first_item(payload).get('is_checked'))
        self.assertEqual('CourseOutline', get_first_item(payload).get('action_url'))
        get_first_item(payload)['is_checked'] = True
        returned_checklist = json.loads(self.client.ajax_post(update_url, payload).content)
        self.assertTrue(get_first_item(returned_checklist).get('is_checked'))
        persisted_checklist = self.get_persisted_checklists()[1]
        # Verify that persisted checklist does not have expanded action URLs.
        # compare_checklists will verify that returned_checklist DOES have expanded action URLs.
        self.assertEqual('CourseOutline', get_first_item(persisted_checklist).get('action_url'))
        self.compare_checklists(persisted_checklist, returned_checklist)

    def test_update_checklists_delete_unsupported(self):
        """ Delete operation is not supported. """
        update_url = self.get_url(100)
        response = self.client.delete(update_url)
        self.assertEqual(response.status_code, 405)

    def test_expand_checklist_action_url(self):
        """
        Tests the method to expand checklist action url.
        """
        def test_expansion(checklist, index, stored, expanded):
            """
            Tests that the expected expanded value is returned for the item at the given index.
            Also verifies that the original checklist is not modified.
            """
            self.assertEqual(get_action_url(checklist, index), stored)
            expanded_checklist = expand_checklist_action_url(self.course, checklist)
            self.assertEqual(get_action_url(expanded_checklist, index), expanded)
            # Verify no side effect in the original list.
            self.assertEqual(get_action_url(checklist, index), stored)
        test_expansion(self.course.checklists[0], 0, 'ManageUsers', '/course_team/mitX/333/Checklists_Course')
        test_expansion(self.course.checklists[1], 1, 'CourseOutline', '/course/mitX/333/Checklists_Course')
        test_expansion(self.course.checklists[2], 0, 'http://help.edge.edx.org/', 'http://help.edge.edx.org/')
def get_first_item(checklist):
    """ Returns the first item from the checklist. """
    items = checklist['items']
    return items[0]
def get_action_url(checklist, index):
    """
    Returns the action_url for the item at the specified index in the given checklist.
    """
    item = checklist['items'][index]
    return item['action_url']
|
hoheinzollern/tracker | refs/heads/master | tests/functional-tests/600-applications-camera.py | 3 | #!/usr/bin/python
#
# Copyright (C) 2011, Nokia Corporation <ivan.frade@nokia.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
"""
Tests trying to simulate the behaviour of applications working with tracker
"""
import sys,os,dbus
import unittest
import time
import random
import string
import datetime
import shutil
import fcntl
from common.utils import configuration as cfg
import unittest2 as ut
from common.utils.applicationstest import CommonTrackerApplicationTest as CommonTrackerApplicationTest
from common.utils.helpers import log
# Seconds to wait for tracker-miner-fs to go idle after touching files.
MINER_FS_IDLE_TIMEOUT = 30
class TrackerCameraPicturesApplicationTests (CommonTrackerApplicationTest):
    """Simulates a camera application writing pictures.

    The app pre-inserts metadata into the tracker store before/while writing
    the file to disk; each test then checks the miner does not create a
    duplicate resource for the same URL.
    """

    def test_01_camera_picture (self):
        """
        Camera simulation:

        1. Create resource in the store for the new file
        2. Write the file
        3. Wait for miner-fs to index it
        4. Ensure no duplicates are found
        """
        fileurn = "tracker://test_camera_picture_01/" + str(random.randint (0,100))
        origin_filepath = os.path.join (self.get_data_dir (), self.get_test_image ())
        dest_filepath = os.path.join (self.get_dest_dir (), self.get_test_image ())
        dest_fileuri = "file://" + dest_filepath

        # Insert new resource in the store, including nie:mimeType and nie:url
        insert = """
        INSERT { <%s> a nie:InformationElement,
                        nie:DataObject,
                        nfo:Image,
                        nfo:Media,
                        nfo:Visual,
                        nmm:Photo
        }

        DELETE { <%s> nie:mimeType ?_1 }
        WHERE { <%s> nie:mimeType ?_1 }

        INSERT { <%s> a rdfs:Resource ;
                      nie:mimeType \"image/jpeg\"
        }

        DELETE { <%s> nie:url ?_2 }
        WHERE { <%s> nie:url ?_2 }

        INSERT { <%s> a rdfs:Resource ;
                      nie:url \"%s\"
        }
        """ % (fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, dest_fileuri)
        self.tracker.update (insert)
        self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)

        # Copy the image to the dest path
        self.slowcopy_file (origin_filepath, dest_filepath)
        assert os.path.exists (dest_filepath)
        self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
        # Still exactly one resource for the URL: the miner merged, not duplicated.
        self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)

        # Clean the new file so the test directory is as before
        log ("Remove and wait")
        os.remove (dest_filepath)
        self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
        self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 0)

    def test_02_camera_picture_geolocation (self):
        """
        Camera simulation:

        1. Create resource in the store for the new file
        2. Set nlo:location
        2. Write the file
        3. Wait for miner-fs to index it
        4. Ensure no duplicates are found
        """
        fileurn = "tracker://test_camera_picture_02/" + str(random.randint (0,100))
        dest_filepath = os.path.join (self.get_dest_dir (), self.get_test_image ())
        dest_fileuri = "file://" + dest_filepath

        geolocationurn = "tracker://test_camera_picture_02_geolocation/" + str(random.randint (0,100))
        postaladdressurn = "tracker://test_camera_picture_02_postaladdress/" + str(random.randint (0,100))

        # Insert new resource in the store, including nie:mimeType and nie:url
        insert = """
        INSERT { <%s> a nie:InformationElement,
                        nie:DataObject,
                        nfo:Image,
                        nfo:Media,
                        nfo:Visual,
                        nmm:Photo
        }

        DELETE { <%s> nie:mimeType ?_1 }
        WHERE { <%s> nie:mimeType ?_1 }

        INSERT { <%s> a rdfs:Resource ;
                      nie:mimeType \"image/jpeg\"
        }

        DELETE { <%s> nie:url ?_2 }
        WHERE { <%s> nie:url ?_2 }

        INSERT { <%s> a rdfs:Resource ;
                      nie:url \"%s\"
        }
        """ % (fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, dest_fileuri)
        self.tracker.update (insert)
        self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)

        # FIRST, open the file for writing, and just write some garbage, to simulate that
        # we already started recording the video...
        fdest = open (dest_filepath, 'wb')
        fdest.write ("some garbage written here")
        fdest.write ("to simulate we're recording something...")
        fdest.seek (0)

        # SECOND, set slo:location
        location_insert = """
        INSERT { <%s> a nco:PostalAddress ;
                      nco:country \"SPAIN\" ;
                      nco:locality \"Tres Cantos\"
        }

        INSERT { <%s> a slo:GeoLocation ;
                      slo:postalAddress <%s>
        }

        INSERT { <%s> a rdfs:Resource ;
                      slo:location <%s>
        }
        """ % (postaladdressurn, geolocationurn, postaladdressurn, fileurn, geolocationurn)
        self.tracker.update (location_insert)

        #THIRD, start copying the image to the dest path
        original_file = os.path.join (self.get_data_dir (),self.get_test_image ())
        self.slowcopy_file_fd (original_file, fdest)
        fdest.close ()
        assert os.path.exists (dest_filepath)

        # FOURTH, ensure we have only 1 resource
        self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
        self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)

        # Clean the new file so the test directory is as before
        log ("Remove and wait")
        os.remove (dest_filepath)
        self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
        self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 0)
class TrackerCameraVideosApplicationTests (CommonTrackerApplicationTest):
    def test_01_camera_video (self):
        """
        Camera video recording simulation:

        1. Create resource in the store for the new file
        2. Write the file
        3. Wait for miner-fs to index it
        4. Ensure no duplicates are found
        """
        fileurn = "tracker://test_camera_video_01/" + str(random.randint (0,100))
        origin_filepath = os.path.join (self.get_data_dir (), self.get_test_video ())
        dest_filepath = os.path.join (self.get_dest_dir (), self.get_test_video ())
        dest_fileuri = "file://" + dest_filepath

        # Insert new resource in the store, including nie:mimeType and nie:url
        insert = """
        INSERT { <%s> a nie:InformationElement,
                        nie:DataObject,
                        nfo:Video,
                        nfo:Media,
                        nfo:Visual,
                        nmm:Video
        }

        DELETE { <%s> nie:mimeType ?_1 }
        WHERE { <%s> nie:mimeType ?_1 }

        INSERT { <%s> a rdfs:Resource ;
                      nie:mimeType \"video/mp4\"
        }

        DELETE { <%s> nie:url ?_2 }
        WHERE { <%s> nie:url ?_2 }

        INSERT { <%s> a rdfs:Resource ;
                      nie:url \"%s\"
        }
        """ % (fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, dest_fileuri)
        self.tracker.update (insert)
        self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)

        # Copy the image to the dest path
        self.slowcopy_file (origin_filepath, dest_filepath)
        assert os.path.exists (dest_filepath)
        self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
        # The miner must merge with the pre-inserted resource, not duplicate it.
        self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)

        # Clean the new file so the test directory is as before
        log ("Remove and wait")
        os.remove (dest_filepath)
        self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
        self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 0)
def test_02_camera_video_geolocation (self):
"""
Camera simulation:
1. Create resource in the store for the new file
2. Set nlo:location
2. Write the file
3. Wait for miner-fs to index it
4. Ensure no duplicates are found
"""
fileurn = "tracker://test_camera_video_02/" + str(random.randint (0,100))
origin_filepath = os.path.join (self.get_data_dir (), self.get_test_video ())
dest_filepath = os.path.join (self.get_dest_dir (), self.get_test_video ())
dest_fileuri = "file://" + dest_filepath
geolocationurn = "tracker://test_camera_video_02_geolocation/" + str(random.randint (0,100))
postaladdressurn = "tracker://test_camera_video_02_postaladdress/" + str(random.randint (0,100))
# Insert new resource in the store, including nie:mimeType and nie:url
insert = """
INSERT { <%s> a nie:InformationElement,
nie:DataObject,
nfo:Video,
nfo:Media,
nfo:Visual,
nmm:Video
}
DELETE { <%s> nie:mimeType ?_1 }
WHERE { <%s> nie:mimeType ?_1 }
INSERT { <%s> a rdfs:Resource ;
nie:mimeType \"video/mp4\"
}
DELETE { <%s> nie:url ?_2 }
WHERE { <%s> nie:url ?_2 }
INSERT { <%s> a rdfs:Resource ;
nie:url \"%s\"
}
""" % (fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, dest_fileuri)
self.tracker.update (insert)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)
# FIRST, open the file for writing, and just write some garbage, to simulate that
# we already started recording the video...
fdest = open (dest_filepath, 'wb')
fdest.write ("some garbage written here")
fdest.write ("to simulate we're recording something...")
fdest.seek (0)
# SECOND, set slo:location
location_insert = """
INSERT { <%s> a nco:PostalAddress ;
nco:country \"SPAIN\" ;
nco:locality \"Tres Cantos\"
}
INSERT { <%s> a slo:GeoLocation ;
slo:postalAddress <%s>
}
INSERT { <%s> a rdfs:Resource ;
slo:location <%s>
}
""" % (postaladdressurn, geolocationurn, postaladdressurn, fileurn, geolocationurn)
self.tracker.update (location_insert)
#THIRD, start copying the image to the dest path
self.slowcopy_file_fd (origin_filepath, fdest)
fdest.close ()
assert os.path.exists (dest_filepath)
# FOURTH, ensure we have only 1 resource
self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)
# Clean the new file so the test directory is as before
log ("Remove and wait")
os.remove (dest_filepath)
self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 0)
if __name__ == "__main__":
    # `ut` is the unittest-module alias imported at the top of this file.
    ut.main()
|
allendaicool/thrust | refs/heads/master | performance/build/testsuite.py | 28 | """functions that generate reports and figures using the .xml output from the performance tests"""
__all__ = ['TestSuite', 'parse_testsuite_xml']
class TestSuite:
    """A named collection of performance tests executed on one platform."""

    def __init__(self, name, platform, tests):
        self.name = name
        self.platform = platform
        self.tests = tests

    def __repr__(self):
        # Pretty-print the triple so nested platform/test dicts stay legible.
        from pprint import pformat
        return 'TestSuite' + pformat((self.name, self.platform, self.tests))
class Test:
    """One test case: its name, input variables and measured results."""

    def __init__(self, name, variables, results):
        self.name = name
        self.variables = variables
        self.results = results

    def __repr__(self):
        triple = (self.name, self.variables, self.results)
        return 'Test' + repr(triple)
def scalar_element(element):
    """Return the element's 'value' attribute coerced to the narrowest type.

    Tries int first, then float, and finally falls back to the raw string
    (or None when the attribute is absent).
    """
    value = element.get('value')
    # Narrowed from bare `except:` — a bare clause would also swallow
    # KeyboardInterrupt/SystemExit. int()/float() raise ValueError for
    # non-numeric strings and TypeError when value is None.
    try:
        return int(value)
    except (TypeError, ValueError):
        try:
            return float(value)
        except (TypeError, ValueError):
            return value
def parse_testsuite_platform(et):
    """Extract the <platform>/<device> description from the report tree.

    Returns {'device': {'name': ..., <property name>: <scalar value>, ...}}.
    """
    device_element = et.find('platform').find('device')
    device = {'name': device_element.get('name')}
    for prop in device_element.findall('property'):
        device[prop.get('name')] = scalar_element(prop)
    return {'device': device}
def parse_testsuite_tests(et):
testsuite_tests = {}
for test_element in et.findall('test'):
# test name
test_name = test_element.get('name')
# test variables: name -> value
test_variables = {}
for variable_element in test_element.findall('variable'):
test_variables[variable_element.get('name')] = scalar_element(variable_element)
# test results: name -> (value, units)
test_results = {}
for result_element in test_element.findall('result'):
# TODO make this a thing that can be converted to its first element when treated like a number
test_results[result_element.get('name')] = scalar_element(result_element)
testsuite_tests[test_name] = Test(test_name, test_variables, test_results)
return testsuite_tests
def parse_testsuite_xml(filename):
    """Parse a performance-test XML report file into a TestSuite."""
    from xml.etree import ElementTree
    tree = ElementTree.parse(filename)
    return TestSuite(
        tree.getroot().get('name'),
        parse_testsuite_platform(tree),
        parse_testsuite_tests(tree))
|
AOSPU/external_chromium_org_tools_gyp | refs/heads/android-5.0/py3 | test/small/gyptest-small.py | 205 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Runs small tests.
"""
import imp
import os
import sys
import unittest
import TestGyp
test = TestGyp.TestGyp()

# Add pylib to the import path (so tests can import their dependencies).
# This is consistent with the path.append done in the top file "gyp".
sys.path.append(os.path.join(test._cwd, 'pylib'))

# Add new test suites here.
files_to_test = [
  'pylib/gyp/MSVSSettings_test.py',
  'pylib/gyp/easy_xml_test.py',
  'pylib/gyp/generator/msvs_test.py',
  'pylib/gyp/generator/ninja_test.py',
  'pylib/gyp/generator/xcode_test.py',
  'pylib/gyp/common_test.py',
  'pylib/gyp/input_test.py',
]

# Load each file as a module and collect its tests, one suite per file.
suites = []
for path in files_to_test:
  # The module name is the file's base name without its extension.
  module_name = os.path.splitext(os.path.basename(path))[0]
  module = imp.load_source(module_name, os.path.join(test._cwd, path))
  suites.append(unittest.defaultTestLoader.loadTestsFromModule(module))

# Run everything as one combined suite; any failure or error fails the
# wrapping gyp test.
result = unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(suites))
if result.failures or result.errors:
  test.fail_test()

test.pass_test()
|
nattayamairittha/MeanStack | refs/heads/master | MyProject/node_modules/node-sass-middleware/node_modules/node-sass/node_modules/node-gyp/gyp/PRESUBMIT.py | 1369 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
# Files excluded from the pylint run entirely (legacy SCons-derived code
# that does not follow Google style).
PYLINT_BLACKLIST = [
  # TODO: fix me.
  # From SCons, not done in google style.
  'test/lib/TestCmd.py',
  'test/lib/TestCommon.py',
  'test/lib/TestGyp.py',
]


# Pylint message IDs suppressed for the whole project; each entry keeps the
# original maintainers' one-line rationale.
PYLINT_DISABLED_WARNINGS = [
  # TODO: fix me.
  # Many tests include modules they don't use.
  'W0611',
  # Possible unbalanced tuple unpacking with sequence.
  'W0632',
  # Attempting to unpack a non-sequence.
  'W0633',
  # Include order doesn't properly include local files?
  'F0401',
  # Some use of built-in names.
  'W0622',
  # Some unused variables.
  'W0612',
  # Operator not preceded/followed by space.
  'C0323',
  'C0322',
  # Unnecessary semicolon.
  'W0301',
  # Unused argument.
  'W0613',
  # String has no effect (docstring in wrong place).
  'W0105',
  # map/filter on lambda could be replaced by comprehension.
  'W0110',
  # Use of eval.
  'W0123',
  # Comma not followed by space.
  'C0324',
  # Access to a protected member.
  'W0212',
  # Bad indent.
  'W0311',
  # Line too long.
  'C0301',
  # Undefined variable.
  'E0602',
  # Not exception type specified.
  'W0702',
  # No member of that name.
  'E1101',
  # Dangerous default {}.
  'W0102',
  # Cyclic import.
  'R0401',
  # Others, too many to sort.
  'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
  'R0201', 'E0101', 'C0321',
  # ************* Module copy
  # W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
  'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook run on upload: just the standard project-wide checks."""
  return list(
      input_api.canned_checks.PanProjectChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  # Presubmit hook run on commit: project-wide checks with a year-aware
  # license-header regex, a tree-open check against the gyp status app,
  # and a pylint pass over the repository.
  # NOTE(review): uses xrange, so this file targets Python 2.
  report = []

  # Accept any year number from 2009 to the current year.
  current_year = int(input_api.time.strftime('%Y'))
  allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
  years_re = '(' + '|'.join(allowed_years) + ')'

  # The (c) is deprecated, but tolerate it until it's removed from all files.
  license = (
      r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
      r'.*? Use of this source code is governed by a BSD-style license that '
      r'can be\n'
      r'.*? found in the LICENSE file\.\n'
  ) % {
      'year': years_re,
  }

  report.extend(input_api.canned_checks.PanProjectChecks(
      input_api, output_api, license_header=license))
  report.extend(input_api.canned_checks.CheckTreeIsOpen(
      input_api, output_api,
      'http://gyp-status.appspot.com/status',
      'http://gyp-status.appspot.com/current'))

  # Run pylint with the local pylib/test libraries temporarily prepended to
  # sys.path; the original path is restored even if pylint raises.
  import os
  import sys
  old_sys_path = sys.path
  try:
    sys.path = ['pylib', 'test/lib'] + sys.path
    blacklist = PYLINT_BLACKLIST
    if sys.platform == 'win32':
      # Normalize separators and re-escape backslashes, since the blacklist
      # entries are matched against paths as patterns.
      blacklist = [os.path.normpath(x).replace('\\', '\\\\')
                   for x in PYLINT_BLACKLIST]
    report.extend(input_api.canned_checks.RunPylint(
        input_api,
        output_api,
        black_list=blacklist,
        disabled_warnings=PYLINT_DISABLED_WARNINGS))
  finally:
    sys.path = old_sys_path

  return report
# Trybots that should run the default test suite for each change.
TRYBOTS = [
  'linux_try',
  'mac_try',
  'win_try',
]


def GetPreferredTryMasters(_, change):
  """Map the 'client.gyp' master to the default tests on every trybot."""
  # A fresh set per bot, matching the original dict comprehension.
  return {'client.gyp': {bot: {'defaulttests'} for bot in TRYBOTS}}
|
TRUFIL/erpnext | refs/heads/develop | erpnext/patches/v9_2/remove_company_from_patient.py | 12 | import frappe
def execute():
    """Patch: drop the obsolete `company` column from tabPatient.

    Runs only when the Patient DocType exists and the column is still
    present, so the patch is safe to re-run.
    """
    if not frappe.db.exists("DocType", "Patient"):
        return
    if 'company' not in frappe.db.get_table_columns("Patient"):
        return
    frappe.db.sql("alter table `tabPatient` drop column company")
|
rahul67/hue | refs/heads/master | apps/pig/src/pig/tests.py | 4 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_equal, assert_false
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access
from hadoop import pseudo_hdfs4
from hadoop.pseudo_hdfs4 import is_live_cluster
from liboozie.oozie_api_tests import OozieServerProvider
from oozie.tests import OozieBase
from pig.models import create_or_update_script, PigScript
from pig.api import OozieApi, get
class TestPigBase(object):
  """Shared fixture for the Pig app tests: a non-superuser test client with
  access to the 'pig' app, plus a canned script definition."""

  # Template attributes for a minimal saved Pig script.
  SCRIPT_ATTRS = {
      'id': 1000,
      'name': 'Test',
      'script': 'A = LOAD "$data"; STORE A INTO "$output";',
      'parameters': [],
      'resources': [],
      'hadoopProperties': []
  }

  def setUp(self):
    self.c = make_logged_in_client(is_superuser=False)
    grant_access("test", "test", "pig")
    self.user = User.objects.get(username='test')

  def create_script(self):
    # Delegates to the module-level create_script helper (the name resolves
    # to the global function, not this method, inside the method body).
    return create_script(self.user)
def create_script(user, xattrs=None):
  """Create or update the canned test Pig script owned by *user*.

  *xattrs*, when given, overrides entries of TestPigBase.SCRIPT_ATTRS.
  """
  attrs = dict(TestPigBase.SCRIPT_ATTRS)
  attrs['user'] = user
  if xattrs is not None:
    attrs.update(xattrs)
  return create_or_update_script(**attrs)
class TestMock(TestPigBase):
  """Unit tests for the Pig app that run without a live Hadoop cluster."""

  def test_create_script(self):
    pig_script = self.create_script()
    assert_equal('Test', pig_script.dict['name'])

  def test_check_hcatalogs_sharelib(self):
    # The Oozie workflow must carry the hcatalog sharelib property only
    # when the script actually uses HCatLoader.
    api = get(None, None, self.user)
    pig_script = self.create_script()

    # Regular
    wf = api._create_workflow(pig_script, '[]')
    assert_false({'name': u'oozie.action.sharelib.for.pig', 'value': u'pig,hcatalog'} in wf.find_all_parameters(), wf.find_all_parameters())

    # With HCat
    pig_script.update_from_dict({
        'script':"""
a = LOAD 'sample_07' USING org.apache.hcatalog.pig.HCatLoader();
dump a;
"""})
    pig_script.save()

    wf = api._create_workflow(pig_script, '[]')
    assert_true({'name': u'oozie.action.sharelib.for.pig', 'value': u'pig,hcatalog'} in wf.find_all_parameters(), wf.find_all_parameters())

  def test_editor_view(self):
    response = self.c.get(reverse('pig:app'))
    assert_true('Unsaved script' in response.content)

  def test_save(self):
    attrs = {'user': self.user,}
    attrs.update(TestPigBase.SCRIPT_ATTRS)
    # The save endpoint expects the collection fields JSON-encoded.
    attrs['parameters'] = json.dumps(TestPigBase.SCRIPT_ATTRS['parameters'])
    attrs['resources'] = json.dumps(TestPigBase.SCRIPT_ATTRS['resources'])
    attrs['hadoopProperties'] = json.dumps(TestPigBase.SCRIPT_ATTRS['hadoopProperties'])

    # Save
    self.c.post(reverse('pig:save'), data=attrs, follow=True)
    # Update
    self.c.post(reverse('pig:save'), data=attrs, follow=True)

  def parse_oozie_logs(self):
    # NOTE(review): not named test_*, so nose does not collect it
    # automatically — presumably intentional; confirm.
    # Compares the filtered output of _match_logs against the raw
    # OOZIE_LOGS fixture defined at the bottom of this module.
    api = get(None, None, self.user)

    assert_equal(
        '''Run pig script using PigRunner.run() for Pig version 0.8+
Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported)
compiled Jun 30 2013, 03:40:22
Run pig script using PigRunner.run() for Pig version 0.8+
2013-10-09 17:30:39,709 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported) compiled Jun 30 2013, 03:40:22
2013-10-09 17:30:39,709 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported) compiled Jun 30 2013, 03:40:22
2013-10-09 17:30:39,710 [main] INFO org.apache.pig.Main - Logging error messages to: /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/pig-job_1381360805876_0001.log
2013-10-09 17:30:39,710 [main] INFO org.apache.pig.Main - Logging error messages to: /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/pig-job_1381360805876_0001.log
2013-10-09 17:30:39,739 [main] WARN org.apache.hadoop.conf.Configuration - dfs.df.interval is deprecated. Instead, use fs.df.interval
2013-10-09 17:30:39,739 [main] WARN org.apache.hadoop.conf.Configuration - mapred.task.tracker.http.address is deprecated. Instead, use mapreduce.tasktracker.http.address
2013-10-09 17:30:39,833 [main] INFO org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to map-reduce job tracker at: localhost:8032
hdfs://localhost:8020/user/romain/.Trash <dir>
hdfs://localhost:8020/user/romain/examples <dir>
hdfs://localhost:8020/user/romain/tweets <dir>
hdfs://localhost:8020/user/romain/wordcount.jar<r 1> 3165
hdfs://localhost:8020/user/romain/words <dir>
hdfs://localhost:8020/user/romain/yelp <dir>''', api._match_logs({'logs': [None, OOZIE_LOGS]}))
class TestWithHadoop(OozieBase):
  """Integration tests that submit real Pig jobs through Oozie against the
  shared pseudo-HDFS test cluster."""

  def setUp(self):
    super(TestWithHadoop, self).setUp()
    # FIXME (HUE-2562): The tests unfortunately require superuser at the
    # moment, but should be rewritten to not need it.
    self.c = make_logged_in_client(is_superuser=True)
    grant_access("test", "test", "pig")
    self.user = User.objects.get(username='test')
    self.c.post(reverse('pig:install_examples'))

    self.cluster = pseudo_hdfs4.shared_cluster()
    self.api = OozieApi(self.cluster.fs, self.cluster.jt, self.user)

  def test_create_workflow(self):
    # Exercise every kind of script attribute: named parameters, raw
    # "-param" pass-throughs, flags, file/archive resources and Hadoop
    # properties — then check each one lands in the generated Pig action.
    xattrs = {
      'parameters': [
        {'name': 'output', 'value': self.cluster.fs_prefix + '/test_pig_script_workflow'},
        {'name': '-param', 'value': 'input=/data'}, # Alternative way for params
        {'name': '-optimizer_off', 'value': 'SplitFilter'},
        {'name': '-v', 'value': ''},
       ],
      'resources': [
        {'type': 'file', 'value': '/tmp/file'},
        {'type': 'archive', 'value': '/tmp/file.zip'},
      ],
      'hadoopProperties': [
        {'name': 'mapred.map.tasks.speculative.execution', 'value': 'false'},
        {'name': 'mapred.job.queue', 'value': 'fast'},
      ]
    }

    pig_script = create_script(self.user, xattrs)
    output_path = self.cluster.fs_prefix + '/test_pig_script_2'
    # Submission-time parameters override/extend the script's own.
    params = json.dumps([
      {'name': 'output', 'value': output_path},
    ])

    workflow = self.api._create_workflow(pig_script, params)
    pig_action = workflow.start.get_child('to').get_full_node()

    assert_equal([
        {u'type': u'argument', u'value': u'-param'}, {u'type': u'argument', u'value': u'output=%s' % output_path},
        {u'type': u'argument', u'value': u'-param'}, {u'type': u'argument', u'value': u'input=/data'},
        {u'type': u'argument', u'value': u'-optimizer_off'}, {u'type': u'argument', u'value': u'SplitFilter'},
        {u'type': u'argument', u'value': u'-v'},
    ], pig_action.get_params())

    assert_equal([
        {u'name': u'mapred.map.tasks.speculative.execution', u'value': u'false'},
        {u'name': u'mapred.job.queue', u'value': u'fast'},
    ], pig_action.get_properties())

    assert_equal(['/tmp/file'], pig_action.get_files())

    assert_equal([
        {u'dummy': u'', u'name': u'/tmp/file.zip'},
    ], pig_action.get_archives())

  def wait_until_completion(self, pig_script_id, timeout=300.0, step=5, expected_status='SUCCEEDED'):
    # Poll the watch endpoint until the Oozie workflow leaves PREP/RUNNING
    # or *timeout* seconds elapse; raise (after stopping the job) when the
    # final status differs from *expected_status*.
    script = PigScript.objects.get(id=pig_script_id)
    job_id = script.dict['job_id']

    response = self.c.get(reverse('pig:watch', args=[job_id]))
    response = json.loads(response.content)

    start = time.time()

    while response['workflow']['status'] in ['PREP', 'RUNNING'] and time.time() - start < timeout:
      time.sleep(step)
      response = self.c.get(reverse('pig:watch', args=[job_id]))
      response = json.loads(response.content)

    logs = OozieServerProvider.oozie.get_job_log(job_id)

    if response['workflow']['status'] != expected_status:
      msg = "[%d] %s took more than %d to complete or %s: %s" % (time.time(), job_id, timeout, response['workflow']['status'], logs)
      # Best effort: kill the runaway job before failing the test.
      self.api.stop(job_id)
      raise Exception(msg)

    return pig_script_id

  def test_submit(self):
    if is_live_cluster():
      raise SkipTest('HUE-2909: Skipping because test is not reentrant')

    # 1100713 is the id of an example script installed by
    # pig:install_examples in setUp.
    script = PigScript.objects.get(id=1100713)
    script_dict = script.dict

    post_data = {
      'id': script.id,
      'name': script_dict['name'],
      'script': script_dict['script'],
      'user': script.owner,
      'parameters': json.dumps(script_dict['parameters']),
      'resources': json.dumps(script_dict['resources']),
      'hadoopProperties': json.dumps(script_dict['hadoopProperties']),
      'submissionVariables': json.dumps([{"name": "output", "value": self.cluster.fs_prefix + '/test_pig_script_submit'}]),
    }

    response = self.c.post(reverse('pig:run'), data=post_data, follow=True)
    job_id = json.loads(response.content)['id']

    self.wait_until_completion(job_id)

  def test_stop(self):
    script = PigScript.objects.get(id=1100713)
    script_dict = script.dict

    post_data = {
      'id': script.id,
      'name': script_dict['name'],
      'script': script_dict['script'],
      'user': script.owner,
      'parameters': json.dumps(script_dict['parameters']),
      'resources': json.dumps(script_dict['resources']),
      'hadoopProperties': json.dumps(script_dict['hadoopProperties']),
      'submissionVariables': json.dumps([{"name": "output", "value": self.cluster.fs_prefix + '/test_pig_script_stop'}]),
    }

    submit_response = self.c.post(reverse('pig:run'), data=post_data, follow=True)
    script = PigScript.objects.get(id=json.loads(submit_response.content)['id'])
    assert_true(script.dict['job_id'], script.dict)

    # Kill the running job, then expect the workflow to finish as KILLED.
    self.c.post(reverse('pig:stop'), data={'id': script.id}, follow=True)
    self.wait_until_completion(json.loads(submit_response.content)['id'], expected_status='KILLED')
OOZIE_LOGS =""" Log Type: stdout
Log Length: 117627
Oozie Launcher starts
Heart beat
Starting the execution of prepare actions
Completed the execution of prepare actions successfully
Files in current dir:/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/.
======================
File: commons-cli-1.2.jar
File: antlr-runtime-3.4.jar
File: stringtemplate-3.2.1.jar
File: script.pig
File: jyson-1.0.2.jar
Oozie Java/Map-Reduce/Pig action launcher-job configuration
=================================================================
Workflow job id : 0000000-131009162028638-oozie-oozi-W
Workflow action id: 0000000-131009162028638-oozie-oozi-W@pig
Classpath :
------------------------
/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002
/etc/hadoop/conf
/usr/lib/hadoop/hadoop-nfs-2.1.0-cdh5.0.0-SNAPSHOT.jar
/usr/lib/hadoop/hadoop-common-2.1.0-cdh5.0.0-SNAPSHOT.jar
/usr/lib/hadoop/hadoop-auth-2.1.0-cdh5.0.0-SNAPSHOT.jar
/usr/lib/hadoop/hadoop-common.jar
/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/jyson-1.0.2.jar
------------------------
Main class : org.apache.oozie.action.hadoop.PigMain
Maximum output : 2048
Arguments :
Java System Properties:
------------------------
#
#Wed Oct 09 17:30:39 PDT 2013
java.runtime.name=Java(TM) SE Runtime Environment
awt.toolkit=sun.awt.X11.XToolkit
java.vm.info=mixed mode
java.version=1.7.0_40
java.ext.dirs=/usr/lib/jvm/java-7-oracle/jre/lib/ext\:/usr/java/packages/lib/ext
sun.boot.class.path=/usr/lib/jvm/java-7-oracle/jre/lib/resources.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/rt.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/sunrsasign.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/jsse.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/jce.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/charsets.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/jfr.jar\:/usr/lib/jvm/java-7-oracle/jre/classes
java.vendor=Oracle Corporation
file.separator=/
oozie.launcher.job.id=job_1381360805876_0001
oozie.action.stats.properties=/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/stats.properties
java.vendor.url.bug=http\://bugreport.sun.com/bugreport/
sun.io.unicode.encoding=UnicodeLittle
sun.cpu.endian=little
sun.cpu.isalist=
------------------------
=================================================================
>>> Invoking Main class now >>>
Oozie Pig action configuration
=================================================================
------------------------
Setting env property for mapreduce.job.credentials.binary to:/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/container_tokens
------------------------
pig.properties:
--------------------
mapreduce.job.ubertask.enable : false
yarn.resourcemanager.max-completed-applications : 10000
yarn.resourcemanager.delayed.delegation-token.removal-interval-ms : 30000
yarn.nodemanager.delete.debug-delay-sec : 0
hadoop.ssl.require.client.cert : false
dfs.datanode.max.transfer.threads : 4096
--------------------
Pig script [script.pig] content:
------------------------
ls
------------------------
Current (local) dir = /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002
Pig command arguments :
-file
script.pig
-log4jconf
/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/piglog4j.properties
-logfile
pig-job_1381360805876_0001.log
=================================================================
>>> Invoking Pig command line now >>>
Run pig script using PigRunner.run() for Pig version 0.8+
Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported)
compiled Jun 30 2013, 03:40:22
Run pig script using PigRunner.run() for Pig version 0.8+
2013-10-09 17:30:39,709 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported) compiled Jun 30 2013, 03:40:22
2013-10-09 17:30:39,709 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported) compiled Jun 30 2013, 03:40:22
2013-10-09 17:30:39,710 [main] INFO org.apache.pig.Main - Logging error messages to: /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/pig-job_1381360805876_0001.log
2013-10-09 17:30:39,710 [main] INFO org.apache.pig.Main - Logging error messages to: /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/pig-job_1381360805876_0001.log
2013-10-09 17:30:39,739 [main] WARN org.apache.hadoop.conf.Configuration - dfs.df.interval is deprecated. Instead, use fs.df.interval
2013-10-09 17:30:39,739 [main] WARN org.apache.hadoop.conf.Configuration - mapred.task.tracker.http.address is deprecated. Instead, use mapreduce.tasktracker.http.address
2013-10-09 17:30:39,833 [main] INFO org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to map-reduce job tracker at: localhost:8032
hdfs://localhost:8020/user/romain/.Trash <dir>
hdfs://localhost:8020/user/romain/examples <dir>
hdfs://localhost:8020/user/romain/tweets <dir>
hdfs://localhost:8020/user/romain/wordcount.jar<r 1> 3165
hdfs://localhost:8020/user/romain/words <dir>
hdfs://localhost:8020/user/romain/yelp <dir>
<<< Invocation of Pig command completed <<<
Hadoop Job IDs executed by Pig:
<<< Invocation of Main class completed <<<
Oozie Launcher ends
2013-10-09 17:30:40,009 [main] INFO org.apache.hadoop.mapred.Task - Task:attempt_1381360805876_0001_m_000000_0 is done. And is in the process of committing
2013-10-09 17:30:40,087 [main] INFO org.apache.hadoop.mapred.Task - Task attempt_1381360805876_0001_m_000000_0 is allowed to commit now
2013-10-09 17:30:40,094 [main] INFO org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter - Saved output of task 'attempt_1381360805876_0001_m_000000_0' to hdfs://localhost:8020/user/romain/oozie-oozi/0000000-131009162028638-oozie-oozi-W/pig--pig/output/_temporary/1/task_1381360805876_0001_m_000000
2013-10-09 17:30:40,153 [main] INFO org.apache.hadoop.mapred.Task - Task 'attempt_1381360805876_0001_m_000000_0' done.
2013-10-09 17:30:40,254 [main] INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl - Stopping MapTask metrics system...
2013-10-09 17:30:40,257 [main] INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl - MapTask metrics system stopped.
2013-10-09 17:30:40,257 [main] INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl - MapTask metrics system shutdown complete.
"""
|
cherrygirl/micronaet7 | refs/heads/master | menuitem_project/__openerp__.py | 2 | # -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
# Odoo (ex OpenERP) addon manifest: declarative metadata only, no code.
{
    'name': 'Menuitem project',
    'version': '0.1',
    'category': 'Statistic',
    'description': """
    Module that create a list of standard group for show or hide
    top / root menu
    Extra project
    """,
    'author': 'Micronaet s.r.l.',
    'website': 'http://www.micronaet.it',
    'license': 'AGPL-3',
    # Requires the base menuitem addon plus the stock project app.
    'depends': [
        'menuitem_base',
        'project',
    ],
    'init_xml': [],
    'demo_xml': [],
    # XML files loaded on install/update.
    'data': [
        'view/menuitem.xml',
    ],
    'active': False,
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
darkobas/android_kernel_oneplus_msm8974 | refs/heads/n | arch/ia64/scripts/unwcheck.py | 13143 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Python 2 script (print statements, long literals below).
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# Allow overriding the readelf binary, e.g. with a cross-toolchain one.
readelf = os.getenv("READELF", "readelf")

# Matches function headers in `readelf -u` output: "<name>: [0xSTART-0xEND]".
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches unwind-region length annotations, e.g. "... rlen=123".
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # A function's unwind regions must cover exactly its instruction-slot
    # count; report (and count) any mismatch. `start`/`end` are module
    # globals set by the parsing loop below, used when the function had
    # no symbolic name.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
num_funcs = 0
num_errors = 0

# State for the function currently being scanned in the readelf output.
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: validate the previous function first.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64 instruction bundles hold 3 slots per 16 bytes.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Validate the last function (no following header triggers the check).
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
|
michelesr/gasistafelice | refs/heads/master | gasistafelice/gf/settings.py | 2 | """
Django settings for gasistafelice project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import locale
import consts
from django.utils.translation import ugettext_lazy as _
from datetime import timedelta
# Deployment environment selector: 'prod', 'stage', anything else = dev.
ENV = os.getenv('APP_ENV', 'dev')

if ENV == 'prod':
    # Production: debugging off, real SMTP delivery, hosts restricted to the
    # configured server name (falls back to '*' — TODO confirm that fallback
    # is intentional, since '*' disables host checking).
    DEBUG = False
    TEMPLATE_DEBUG = False
    EMAIL_DEBUG = False
    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
    ALLOWED_HOSTS = [os.getenv('APP_SERVER_NAME', '*')]
elif ENV == 'stage':
    # Staging: debug enabled but mail still goes through SMTP.
    DEBUG = True
    TEMPLATE_DEBUG = True
    EMAIL_DEBUG = True
    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
    ALLOWED_HOSTS = []
else:
    # Development (default): debug enabled, mail written to a file.
    DEBUG = True
    TEMPLATE_DEBUG = True
    EMAIL_DEBUG = True
    EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
    ALLOWED_HOSTS = []

FORM_DEBUG = False
# Destination used by the file-based email backend (dev only).
EMAIL_FILE_PATH = os.getenv('APP_EMAIL_FILE_PATH', '/dev/stdout')
# NOTE(review): environment variables are strings, so any non-empty value of
# APP_PROFILING is truthy — confirm consumers treat this as a flag.
PROFILING = os.getenv('APP_PROFILING', False)
ACCOUNT_ACTIVATION_DAYS = 2
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
VERSION = __version__ = file(os.path.join(PROJECT_ROOT, 'VERSION')).read().strip()
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('POSTGRES_DBNAME', 'app'),
'USER': os.getenv('POSTGRES_USER', 'app'),
'PASSWORD': os.getenv('POSTGRES_PASSWORD', 'app'),
'HOST': os.getenv('POSTGRES_HOST', 'db'),
'PORT': os.getenv('POSTGRES_PORT', 5432),
}
}
AUTHENTICATION_BACKENDS = (
'gf.base.backends.AuthenticationParamRoleBackend',
'flexi_auth.backends.ParamRoleBackend',
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
)
}
JWT_AUTH = {
'JWT_AUTH_HEADER_PREFIX': 'Bearer',
'JWT_VERIFY_EXPIRATION': False,
'JWT_EXPIRATION_DELTA': timedelta(hours=6)
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Rome'
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'it'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
DATETIME_INPUT_FORMATS = ('%m/%d/%Y %H:%M', '%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M', '%Y-%m-%d', '%m/%d/%Y %H:%M:%S', '%m/%d/%Y',
'%m/%d/%y %H:%M:%S', '%m/%d/%y %H:%M', '%m/%d/%y')
TIME_INPUT_FORMATS = ('%H:%M', '%H:%M:%S')
DATE_INPUT_FORMATS = ('%d/%m/%Y', '%Y-%m-%d', '%m/%d/%y', '%b %d %Y',
'%b %d, %Y', '%d %b %Y', '%d %b, %Y', '%B %d %Y',
'%B %d, %Y', '%d %B %Y', '%d %B, %Y')
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ' '
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '26lk413y7^-z^t$#u(xh4uv@+##0jh)&_wxzqho655)eux33@k'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'middleware.ResourceMiddleware',
'middleware.UpdateRequestUserMiddleware',
# 'django.middleware.transaction.TransactionMiddleware',
)
ROOT_URLCONF = 'gf.urls'
TEMPLATE_DIRS = (
PROJECT_ROOT + "/rest/templates",
PROJECT_ROOT + "/templates",
)
# MUST BE A LIST IN ORDER TO APPEND APPS (see django-rosetta below)
INSTALLED_APPS = [
'permissions',
'workflows',
#'history',
'reversion',
'flexi_auth',
'simple_accounting',
'gf.base',
'gf.supplier',
'gf.gas',
#'admin',
'rest',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'des',
'users',
'django.contrib.messages',
'django.contrib.admin',
#'django.contrib.comments',
'django_comments',
'django.contrib.staticfiles',
'localejs',
#TO DJ17'des_notification',
#TO DJ17'notification',
#TO DJ17'registration',
'captcha',
'ajax_select',
'gdxp',
'rest_framework',
'rest_framework.authtoken',
'api_v1',
'api_remote',
#'django.contrib.staticfiles',
]
#INSTALLED_APPS.insert(0, 'django_extensions')
# Enable django-rosetta (in-browser translation editing) only when the
# package is actually installed.
try:
    import rosetta
except ImportError:
    pass
else:
    INSTALLED_APPS.append('rosetta')
FIXTURE_DIRS = (
os.path.join(PROJECT_ROOT, 'fixtures/auth/'),
os.path.join(PROJECT_ROOT, 'fixtures/base/'),
os.path.join(PROJECT_ROOT, 'fixtures/des/'),
os.path.join(PROJECT_ROOT, 'fixtures/supplier/'),
os.path.join(PROJECT_ROOT, 'fixtures/gas/'),
)
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'simple'
},
'logfile':{
'level':'INFO',
'class':'logging.StreamHandler',
'formatter': 'simple'
},
'logfile_debug':{
'level':'DEBUG',
'class':'logging.StreamHandler',
'formatter': 'verbose'
},
# 'mail_admins': {
# 'level': 'ERROR',
# 'class': 'django.utils.log.AdminEmailHandler',
# }
},
'loggers': {
'django': {
'handlers':['null'],
'propagate': True,
'level':'INFO',
},
'django.request': {
'handlers': ['logfile'],
'level': 'ERROR',
'propagate': False,
},
'gasistafelice': {
'handlers': ['console', 'logfile', 'logfile_debug'],
'level': 'DEBUG',
}
}
}
THEME = "milky"
AUTH_PROFILE_MODULE = 'users.UserProfile'
RESOURCE_PAGE_BLOCKS = {
'site' : [{
'name' : 'people',
'descr' : 'Partecipanti',
'blocks' : ['gas_list', 'suppliers_report', 'persons'] #, 'suppliers'
},{
'name' : 'order',
'descr' : 'Ordini',
'blocks' : ['open_orders', 'des_pacts']
},{
'name' : 'info',
'descr' : 'Scheda del DES',
'blocks' : ['details', 'categories']
},{
'name' : 'accounting',
'descr' : 'Conto',
'blocks' : ['balance', 'site_transactions']
},{
'name' : 'archive',
'descr' : 'Archivio',
'blocks' : ['stored_orders']
}],
'gas' : [{
'name' : 'orders',
'descr': 'Ordini',
'blocks': ['open_orders', 'closed_orders', 'prepared_orders'],
},{
'name' : 'suppliers',
'descr': 'Patti',
'blocks': ['gas_pacts', 'categories'],
},{
'name' : 'info',
'descr' : 'Scheda del GAS',
'blocks' : ['gas_details', 'gasmembers'],
},{
'name' : 'admin',
'descr' : 'Admin',
'blocks' : ['gas_users'], # 'users' Referrer and roles
},{
'name' : 'accounting',
'descr' : 'Conto',
'blocks' : ['balance_gas', 'insolutes_orders', 'transactions', 'recharge', 'fee']
},{
'name' : 'archive',
'descr' : 'Archivio',
'blocks' : ['stored_orders'] #TODO transactions for Transact_gasmembers, Transact_suppliers
}],
'gasmember': [{
'name' : 'orders',
'descr': 'Ordinare',
'blocks': ['order'], #This can be filtered in order block, 'open_orders','closed_orders'],
},{
'name' : 'basket',
'descr' : 'Paniere',
'blocks' : ['basket', 'basket_sent']
},{
'name' : 'info',
'descr' : 'Scheda del gasista',
'blocks' : ['gasmember_details', 'person_details']
},{
'name' : 'accounting',
'descr' : 'Conto',
'blocks' : ['balance_gm', 'gasmember_transactions']
}],
'supplier' : [{
'name' : 'products',
'descr': 'Prodotti',
'blocks': ['stocks']
},{
'name' : 'orders',
'descr': 'Ordini',
'blocks': ['open_orders', 'prepared_orders', 'supplier_pacts']
},{
'name' : 'info',
'descr': 'Scheda del fornitore',
'blocks': ['supplier_details', 'categories', 'closed_orders']
},{
'name' : 'admin',
'descr' : 'Admin',
'blocks' : ['supplier_users'], # 'users' Referrer and roles
},{
'name' : 'accounting',
'descr' : 'Conto',
'blocks' : ['balance', 'transactions']
},{
'name' : 'archive',
'descr' : 'Archivio',
'blocks' : ['stored_orders']
}],
'order' : [{
'name' : 'info',
'descr': 'Ordine',
'blocks': ['order_details', 'order_report']
},{
'name' : 'registration',
'descr': 'Registrazione',
'blocks': ['order_invoice', 'curtail']
# },{
# 'name' : 'pay',
# 'descr': 'Pagamento',
# 'blocks': ['order_insolute']
}],
'person' : [{
'name': 'info',
'descr': 'Scheda della persona',
'blocks' : ['person_details', 'person_gasmembers']
}],
'pact' : [{
'name' : 'stock',
'descr': 'Prodotti',
'blocks': ['open_orders', 'gasstocks']
},{
'name': 'info',
'descr': 'Scheda del patto',
'blocks' : ['pact_details', 'closed_orders']
},{
'name' : 'accounting',
'descr' : 'Conto',
'blocks' : ['balance_pact', 'insolutes_orders', 'transactions']
},{
'name' : 'archive',
'descr' : 'Archivio',
'blocks' : ['stored_orders']
}],
'stock' : [{
'name': 'info',
'descr': 'Scheda del prodotto',
'blocks' : ['stock_details', 'open_orders']
}],
'place' : [{
'name': 'info',
'descr': 'Scheda del luogo',
'blocks' : ['details']
}],
}
LOGIN_URL = "/gasistafelice/accounts/login/"
LOGIN_REDIRECT_URL = "/gasistafelice"
LOGOUT_URL = "/gasistafelice/accounts/logout/"
CAN_CHANGE_CONFIGURATION_VIA_WEB = False
ENABLE_OLAP_REPORTS = False
DATE_FMT = "%d/%m/%Y"
LONG_DATE_FMT = "%A %d %B %Y"
MEDIUM_DATE_FMT = "%a %d %b"
MEDIUM_DATETIME_FMT = "%a %d %b %H:%M"
LONG_DATETIME_FMT = "%A %d %B %Y %H:%M"
SHORT_DATE_FMT = "%Y-%m-%d"
locale.setlocale(locale.LC_ALL, os.getenv('APP_LOCALE', 'it_IT.UTF-8'))
#DOMTHU:
#locale.setlocale(locale.LC_ALL, 'it_IT.ISO-8859-1')
#locale.setlocale(locale.LC_ALL, 'it_IT.1252')
#locale.setlocale(locale.LC_ALL, 'it_IT') #by default is .ISO8859-1
#locale.setlocale(locale.LC_ALL, ('it_IT', 'ISO-8859-1'))
#locale.setlocale(locale.LC_ALL, ('it_IT', 'UTF-8'))
# From: http://en.wikipedia.org/wiki/Postal_code
# A postal code (known in various countries as a post code, postcode, or ZIP code)
# is a series of letters and/or digits appended to a postal address for the purpose of sorting mail.
#
# DES is a usually a small land, so limit postal codes to validate only if int(zipcode) succeed
# could be a good practice
VALIDATE_NUMERICAL_ZIPCODES = True
# Bootstrap options used when initialising a fresh site/database; all values
# can be overridden via environment variables.
INIT_OPTIONS = {
    'domain': '{}:{}'.format(
        os.getenv('APP_SERVER_NAME', 'ordini.desmacerata.it'),
        os.getenv('APP_HTTP_PORT', '80'),
    ),
    'sitename': os.getenv('APP_SITE_NAME', 'DES Macerata'),
    'sitedescription': os.getenv('APP_SITE_DESC', "Gestione degli ordini per il Distretto di Economia Solidale della Provincia di Macerata (DES-MC)"),
    # Superuser bootstrap credentials.
    'su_username': os.getenv('APP_ADMIN_USER', 'admin'),
    'su_name': os.getenv('APP_ADMIN_NAME', 'Referente informatico'),
    'su_surname': os.getenv('APP_ADMIN_SURNAME', 'del DES-MC'),
    'su_email': os.getenv('APP_ADMIN_EMAIL', ''),
    # NOTE(review): key casing is inconsistent with the other 'su_*' keys
    # ('su_PASSWORD' vs 'su_username'); dict keys are runtime behavior, so
    # confirm against the consumer before renaming.
    'su_PASSWORD': os.getenv('APP_ADMIN_PW', 'admin'),
}
MAINTENANCE_MODE = os.getenv('APP_MAINTENANCE_MODE', False)
# --- WARNING: changing following parameters implies fixtures adaptation --
# Default category for all uncategorized products
DEFAULT_CATEGORY_CATCHALL = 'Non definita' #fixtures/supplier/initial_data.json
JUNK_EMAIL = 'devnull@desmacerata.it'
EMAIL_DEBUG_ADDR = os.getenv('APP_EMAIL_DEBUG_ADDR', 'gftest@gasistafelice.org')
DEFAULT_SENDER_EMAIL = 'gasistafelice@desmacerata.it'
SUPPORT_EMAIL="dev@gasistafelice.org"
# Superuser username
#------ AUTH settings
from flexi_auth_settings import *
#------ ACCOUNTING settings
from simple_accounting_settings import *
#------ NOTIFICATION settings
DEFAULT_FROM_EMAIL = os.getenv('APP_DEFAULT_FROM_EMAIL', 'gasistafelice@desmacerata.it')
NOTIFICATION_BACKENDS = (
("email", "notification.backends.email.EmailBackend"),
)
#------ CAPTCHA settings
CAPTCHA_FONT_SIZE = 40
# Fix: this setting was previously misspelled 'APTCHA_LETTER_ROTATION', so
# django-simple-captcha never saw it and silently used its default rotation.
CAPTCHA_LETTER_ROTATION = (-25, 25)
# Keep the old misspelled name as an alias in case anything referenced it.
APTCHA_LETTER_ROTATION = CAPTCHA_LETTER_ROTATION
#-------------------------------------------------------------------------------
# Ajax_select settings
AJAX_LOOKUP_CHANNELS = {
'placechannel' : ( 'gf.base.forms.lookups' , 'PlaceLookup'),
'personchannel' : ( 'gf.base.forms.lookups' , 'PersonLookup')
}
# magically include jqueryUI/js/css
AJAX_SELECT_BOOTSTRAP = False
#AJAX_SELECT_INLINES = 'inline'
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'final_static')
#------------------------------------------------------------------------------
#The path where the profiling files are stored
PROFILE_LOG_BASE = os.getenv('APP_PROFILE_LOG_BASE', '/tmp/profiling')
|
ngc224/totransfer | refs/heads/master | totranslator/config.py | 1 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os

# Package identity.
application_name = 'totranslator'
version = '0.0.1'

# Per-user dotfile path (e.g. ~/.totranslator). os.path.expanduser() is used
# instead of os.environ['HOME'], which raises KeyError when HOME is unset
# (e.g. on Windows or in stripped-down service environments); behavior is
# unchanged whenever HOME is set.
filepath_user = os.path.join(os.path.expanduser('~'), '.' + application_name)

# Endpoint of the Bing / Microsoft Translator API (Azure DataMarket).
api_request_url = 'http://api.datamarket.azure.com/Bing/MicrosoftTranslator/v1/Translate'
|
cloudbase/nova-virtualbox | refs/heads/virtualbox_driver | nova/api/ec2/__init__.py | 2 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Starting point for routing EC2 requests.
"""
import hashlib
from oslo_config import cfg
from oslo_context import context as common_context
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import timeutils
import requests
import six
import webob
import webob.dec
import webob.exc
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
from nova import context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova.openstack.common import memorycache
from nova import wsgi
LOG = logging.getLogger(__name__)

# Configuration options for the EC2 API layer; registered on the global
# CONF object below.
ec2_opts = [
    cfg.IntOpt('lockout_attempts',
               default=5,
               help='Number of failed auths before lockout.'),
    cfg.IntOpt('lockout_minutes',
               default=15,
               help='Number of minutes to lockout if triggered.'),
    cfg.IntOpt('lockout_window',
               default=15,
               help='Number of minutes for lockout window.'),
    cfg.StrOpt('keystone_ec2_url',
               default='http://localhost:5000/v2.0/ec2tokens',
               help='URL to get token from ec2 request.'),
    cfg.BoolOpt('ec2_private_dns_show_ip',
                default=False,
                help='Return the IP address as private dns hostname in '
                     'describe instances'),
    cfg.BoolOpt('ec2_strict_validation',
                default=True,
                help='Validate security group names'
                     ' according to EC2 specification'),
    cfg.IntOpt('ec2_timestamp_expiry',
               default=300,
               help='Time in seconds before ec2 timestamp expires'),
    cfg.BoolOpt('keystone_ec2_insecure', default=False, help='Disable SSL '
                'certificate verification.'),
]

CONF = cfg.CONF
CONF.register_opts(ec2_opts)
# Options defined elsewhere that this module reads.
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
CONF.import_group('ssl', 'nova.openstack.common.sslutils')
# Fault Wrapper around all EC2 requests
class FaultWrapper(wsgi.Middleware):
    """Calls the middleware stack, captures any exceptions into faults."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Delegate to the wrapped application; any uncaught exception is
        # logged and converted to an EC2-style 500 fault instead of
        # propagating up to the WSGI server.
        try:
            return req.get_response(self.application)
        except Exception as ex:
            LOG.exception(_LE("FaultWrapper: %s"), ex)
            return faults.Fault(webob.exc.HTTPInternalServerError())
class RequestLogging(wsgi.Middleware):
    """Access-Log akin logging for all EC2 API requests."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Time the downstream request and emit one access-log style line.
        start = timeutils.utcnow()
        rv = req.get_response(self.application)
        self.log_request_completion(rv, req, start)
        return rv

    def log_request_completion(self, response, request, start):
        """Log one completed request: timing, peer, action and status."""
        apireq = request.environ.get('ec2.request', None)
        if apireq:
            controller = apireq.controller
            action = apireq.action
        else:
            # Request never made it through Requestify (e.g. auth failure).
            controller = None
            action = None
        ctxt = request.environ.get('nova.context', None)
        delta = timeutils.utcnow() - start
        seconds = delta.seconds
        microseconds = delta.microseconds
        LOG.info(
            "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
            seconds,
            microseconds,
            request.remote_addr,
            request.method,
            "%s%s" % (request.script_name, request.path_info),
            controller,
            action,
            response.status_int,
            request.user_agent,
            request.content_type,
            response.content_type,
            context=ctxt)  # noqa
class Lockout(wsgi.Middleware):
    """Lockout for x minutes on y failed auths in a z minute period.

    x = lockout_timeout flag
    y = lockout_window flag
    z = lockout_attempts flag

    Uses memcached if lockout_memcached_servers flag is set, otherwise it
    uses a very simple in-process cache. Due to the simplicity of
    the implementation, the timeout window is started with the first
    failed request, so it will block if there are x failed logins within
    that period.

    There is a possible race condition where simultaneous requests could
    sneak in before the lockout hits, but this is extremely rare and would
    only result in a couple of extra failed attempts.
    """

    def __init__(self, application):
        """middleware can use fake for testing."""
        # Failure counters live in memcache (or its in-process stand-in).
        self.mc = memorycache.get_client()
        super(Lockout, self).__init__(application)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        access_key = str(req.params['AWSAccessKeyId'])
        failures_key = "authfailures-%s" % access_key
        failures = int(self.mc.get(failures_key) or 0)
        # Already locked out: reject before even attempting authentication.
        if failures >= CONF.lockout_attempts:
            detail = _("Too many failed authentications.")
            raise webob.exc.HTTPForbidden(explanation=detail)
        res = req.get_response(self.application)
        # A 403 from downstream counts as one more failed authentication.
        if res.status_int == 403:
            failures = self.mc.incr(failures_key)
            if failures is None:
                # NOTE(vish): To use incr, failures has to be a string.
                self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
            elif failures >= CONF.lockout_attempts:
                LOG.warning(_LW('Access key %(access_key)s has had '
                                '%(failures)d failed authentications and '
                                'will be locked out for %(lock_mins)d '
                                'minutes.'),
                            {'access_key': access_key,
                             'failures': failures,
                             'lock_mins': CONF.lockout_minutes})
                # Refresh the entry so the lockout lasts lockout_minutes
                # from now.
                self.mc.set(failures_key, str(failures),
                            time=CONF.lockout_minutes * 60)
        return res
class EC2KeystoneAuth(wsgi.Middleware):
    """Authenticate an EC2 request with keystone and convert to context."""

    def _get_signature(self, req):
        """Extract the signature from the request.

        This can be a get/post variable or for version 4 also in a header
        called 'Authorization'.

        - params['Signature'] == version 0,1,2,3
        - params['X-Amz-Signature'] == version 4
        - header 'Authorization' == version 4
        """
        sig = req.params.get('Signature') or req.params.get('X-Amz-Signature')
        if sig is None and 'Authorization' in req.headers:
            # v4 header form: "... Signature=<hex>, ..."
            auth_str = req.headers['Authorization']
            sig = auth_str.partition("Signature=")[2].split(',')[0]
        return sig

    def _get_access(self, req):
        """Extract the access key identifier.

        For version 0/1/2/3 this is passed as the AccessKeyId parameter, for
        version 4 it is either an X-Amz-Credential parameter or a Credential=
        field in the 'Authorization' header string.
        """
        access = req.params.get('AWSAccessKeyId')
        if access is None:
            # v4 credential scope looks like "<access>/<date>/<region>/...".
            cred_param = req.params.get('X-Amz-Credential')
            if cred_param:
                access = cred_param.split("/")[0]
        if access is None and 'Authorization' in req.headers:
            auth_str = req.headers['Authorization']
            cred_str = auth_str.partition("Credential=")[2].split(',')[0]
            access = cred_str.split("/")[0]
        return access

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # NOTE(alevine) We need to calculate the hash here because
        # subsequent access to request modifies the req.body so the hash
        # calculation will yield invalid results.
        body_hash = hashlib.sha256(req.body).hexdigest()
        request_id = common_context.generate_request_id()
        signature = self._get_signature(req)
        if not signature:
            msg = _("Signature not provided")
            return faults.ec2_error_response(request_id, "AuthFailure", msg,
                                             status=400)
        access = self._get_access(req)
        if not access:
            msg = _("Access key not provided")
            return faults.ec2_error_response(request_id, "AuthFailure", msg,
                                             status=400)
        if 'X-Amz-Signature' in req.params or 'Authorization' in req.headers:
            # v4 signatures are verified from headers/body, not query args.
            auth_params = {}
        else:
            # Make a copy of args for authentication and signature
            # verification
            auth_params = dict(req.params)
            # Not part of authentication args
            auth_params.pop('Signature', None)
        # Credentials payload forwarded to keystone for verification.
        cred_dict = {
            'access': access,
            'signature': signature,
            'host': req.host,
            'verb': req.method,
            'path': req.path,
            'params': auth_params,
            'headers': req.headers,
            'body_hash': body_hash
        }
        # Payload key depends on whether the configured endpoint is the
        # dedicated ec2tokens URL or a generic keystone auth URL.
        if "ec2" in CONF.keystone_ec2_url:
            creds = {'ec2Credentials': cred_dict}
        else:
            creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
        creds_json = jsonutils.dumps(creds)
        headers = {'Content-Type': 'application/json'}
        # TLS verification/client-cert options from config.
        verify = not CONF.keystone_ec2_insecure
        if verify and CONF.ssl.ca_file:
            verify = CONF.ssl.ca_file
        cert = None
        if CONF.ssl.cert_file and CONF.ssl.key_file:
            cert = (CONF.ssl.cert_file, CONF.ssl.key_file)
        elif CONF.ssl.cert_file:
            cert = CONF.ssl.cert_file
        response = requests.request('POST', CONF.keystone_ec2_url,
                                    data=creds_json, headers=headers,
                                    verify=verify, cert=cert)
        status_code = response.status_code
        if status_code != 200:
            # Propagate keystone's status code as an EC2 AuthFailure.
            msg = response.reason
            return faults.ec2_error_response(request_id, "AuthFailure", msg,
                                             status=status_code)
        result = response.json()
        # Pull identity data out of the keystone v2 token response.
        try:
            token_id = result['access']['token']['id']
            user_id = result['access']['user']['id']
            project_id = result['access']['token']['tenant']['id']
            user_name = result['access']['user'].get('name')
            project_name = result['access']['token']['tenant'].get('name')
            roles = [role['name'] for role
                     in result['access']['user']['roles']]
        except (AttributeError, KeyError) as e:
            LOG.error(_LE("Keystone failure: %s"), e)
            msg = _("Failure parsing response from keystone: %s") % e
            return faults.ec2_error_response(request_id, "AuthFailure", msg,
                                             status=400)
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For',
                                             remote_address)
        catalog = result['access']['serviceCatalog']
        # Build the nova request context and stash it for later middleware.
        ctxt = context.RequestContext(user_id,
                                      project_id,
                                      user_name=user_name,
                                      project_name=project_name,
                                      roles=roles,
                                      auth_token=token_id,
                                      remote_address=remote_address,
                                      service_catalog=catalog)
        req.environ['nova.context'] = ctxt
        return self.application
class NoAuth(wsgi.Middleware):
    """Add user:project as 'nova.context' to WSGI environ."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if 'AWSAccessKeyId' not in req.params:
            raise webob.exc.HTTPBadRequest()
        # Access key is interpreted as "user:project"; project falls back to
        # the user id when no ':' is present.
        user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
        project_id = project_id or user_id
        remote_address = req.remote_addr
        if CONF.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        # NOTE: context is created with is_admin=True — this middleware
        # performs no authentication and is a development aid only.
        ctx = context.RequestContext(user_id,
                                     project_id,
                                     is_admin=True,
                                     remote_address=remote_address)
        req.environ['nova.context'] = ctx
        return self.application
class Requestify(wsgi.Middleware):
    """Turn query parameters into an APIRequest in the WSGI environ."""

    def __init__(self, app, controller):
        super(Requestify, self).__init__(app)
        # Controller is given as a dotted path and instantiated here.
        self.controller = importutils.import_object(controller)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Not all arguments are mandatory with v4 signatures, as some data is
        # passed in the header, not query arguments.
        required_args = ['Action', 'Version']
        non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
                    'SignatureVersion', 'Version', 'Timestamp']
        args = dict(req.params)
        try:
            # Reject stale requests to limit replay attacks.
            expired = ec2utils.is_ec2_timestamp_expired(req.params,
                                    expires=CONF.ec2_timestamp_expiry)
            if expired:
                msg = _("Timestamp failed validation.")
                LOG.debug("Timestamp failed validation")
                raise webob.exc.HTTPForbidden(explanation=msg)
            # Raise KeyError if omitted
            action = req.params['Action']
            # Fix bug lp:720157 for older (version 1) clients
            # If not present assume v4
            version = req.params.get('SignatureVersion', 4)
            if int(version) == 1:
                non_args.remove('SignatureMethod')
                if 'SignatureMethod' in args:
                    args.pop('SignatureMethod')
            # Strip auth/bookkeeping parameters from the action arguments.
            for non_arg in non_args:
                if non_arg in required_args:
                    # Remove, but raise KeyError if omitted
                    args.pop(non_arg)
                else:
                    args.pop(non_arg, None)
        except KeyError:
            raise webob.exc.HTTPBadRequest()
        except exception.InvalidRequest as err:
            raise webob.exc.HTTPBadRequest(explanation=six.text_type(err))
        LOG.debug('action: %s', action)
        for key, value in args.items():
            LOG.debug('arg: %(key)s\t\tval: %(value)s',
                      {'key': key, 'value': value})

        # Success!
        api_request = apirequest.APIRequest(self.controller, action,
                                            req.params['Version'], args)
        req.environ['ec2.request'] = api_request
        return self.application
class Authorizer(wsgi.Middleware):
    """Authorize an EC2 API request.

    Return a 401 if ec2.controller and ec2.action in WSGI environ may not be
    executed in nova.context.
    """

    def __init__(self, application):
        super(Authorizer, self).__init__(application)
        # Static map of controller -> action -> roles allowed to run it.
        # Actions missing from the map default to ['none'] (admin only).
        self.action_roles = {
            'CloudController': {
                'DescribeAvailabilityZones': ['all'],
                'DescribeRegions': ['all'],
                'DescribeSnapshots': ['all'],
                'DescribeKeyPairs': ['all'],
                'CreateKeyPair': ['all'],
                'DeleteKeyPair': ['all'],
                'DescribeSecurityGroups': ['all'],
                'ImportKeyPair': ['all'],
                'AuthorizeSecurityGroupIngress': ['netadmin'],
                'RevokeSecurityGroupIngress': ['netadmin'],
                'CreateSecurityGroup': ['netadmin'],
                'DeleteSecurityGroup': ['netadmin'],
                'GetConsoleOutput': ['projectmanager', 'sysadmin'],
                'DescribeVolumes': ['projectmanager', 'sysadmin'],
                'CreateVolume': ['projectmanager', 'sysadmin'],
                'AttachVolume': ['projectmanager', 'sysadmin'],
                'DetachVolume': ['projectmanager', 'sysadmin'],
                'DescribeInstances': ['all'],
                'DescribeAddresses': ['all'],
                'AllocateAddress': ['netadmin'],
                'ReleaseAddress': ['netadmin'],
                'AssociateAddress': ['netadmin'],
                'DisassociateAddress': ['netadmin'],
                'RunInstances': ['projectmanager', 'sysadmin'],
                'TerminateInstances': ['projectmanager', 'sysadmin'],
                'RebootInstances': ['projectmanager', 'sysadmin'],
                'UpdateInstance': ['projectmanager', 'sysadmin'],
                'StartInstances': ['projectmanager', 'sysadmin'],
                'StopInstances': ['projectmanager', 'sysadmin'],
                'DeleteVolume': ['projectmanager', 'sysadmin'],
                'DescribeImages': ['all'],
                'DeregisterImage': ['projectmanager', 'sysadmin'],
                'RegisterImage': ['projectmanager', 'sysadmin'],
                'DescribeImageAttribute': ['all'],
                'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
                'UpdateImage': ['projectmanager', 'sysadmin'],
                'CreateImage': ['projectmanager', 'sysadmin'],
            },
            'AdminController': {
                # All actions have the same permission: ['none'] (the default)
                # superusers will be allowed to run them
                # all others will get HTTPUnauthorized.
            },
        }

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        context = req.environ['nova.context']
        controller = req.environ['ec2.request'].controller.__class__.__name__
        action = req.environ['ec2.request'].action
        allowed_roles = self.action_roles[controller].get(action, ['none'])
        if self._matches_any_role(context, allowed_roles):
            return self.application
        else:
            LOG.info(_LI('Unauthorized request for controller=%(controller)s '
                         'and action=%(action)s'),
                     {'controller': controller, 'action': action},
                     context=context)
            raise webob.exc.HTTPUnauthorized()

    def _matches_any_role(self, context, roles):
        """Return True if any role in roles is allowed in context."""
        # Admins bypass the role check entirely.
        if context.is_admin:
            return True
        if 'all' in roles:
            return True
        if 'none' in roles:
            return False
        return any(role in context.roles for role in roles)
class Validator(wsgi.Middleware):
    # Validate EC2 request arguments against per-name validator functions.

    # NOTE: the block below executes at class-definition time; it installs
    # validate_ec2_id and the default validator map onto the shared
    # `validator` module (module-level side effect), not onto instances.
    def validate_ec2_id(val):
        # An EC2 id must be a string and parse via ec2_id_to_id.
        if not validator.validate_str()(val):
            return False
        try:
            ec2utils.ec2_id_to_id(val)
        except exception.InvalidEc2Id:
            return False
        return True

    validator.validate_ec2_id = validate_ec2_id

    # Map of request-argument name -> validation callable.
    validator.DEFAULT_VALIDATOR = {
        'instance_id': validator.validate_ec2_id,
        'volume_id': validator.validate_ec2_id,
        'image_id': validator.validate_ec2_id,
        'attribute': validator.validate_str(),
        'image_location': validator.validate_image_path,
        'public_ip': netutils.is_valid_ipv4,
        'region_name': validator.validate_str(),
        'group_name': validator.validate_str(max_length=255),
        'group_description': validator.validate_str(max_length=255),
        'size': validator.validate_int(),
        'user_data': validator.validate_user_data
    }

    def __init__(self, application):
        super(Validator, self).__init__(application)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Reject the request with 400 if any argument fails validation.
        if validator.validate(req.environ['ec2.request'].args,
                              validator.DEFAULT_VALIDATOR):
            return self.application
        else:
            raise webob.exc.HTTPBadRequest()
def exception_to_ec2code(ex):
    """Helper to extract EC2 error code from exception.

    For other than EC2 exceptions (those without ec2_code attribute),
    use exception name.
    """
    # Prefer an explicit ec2_code attribute; otherwise fall back to the
    # exception class name.
    return getattr(ex, 'ec2_code', type(ex).__name__)
def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
    """Return an EC2 error response based on passed exception and log
    the exception on an appropriate log level:

        * DEBUG: expected errors
        * ERROR: unexpected errors

    All expected errors are treated as client errors and 4xx HTTP
    status codes are always returned for them.

    Unexpected 5xx errors may contain sensitive information,
    suppress their messages for security.
    """
    if not code:
        code = exception_to_ec2code(ex)
    # Exceptions may carry an HTTP status in their `code` attribute.
    status = getattr(ex, 'code', None)
    if not status:
        status = 500

    if unexpected:
        log_fun = LOG.error
        log_msg = _LE("Unexpected %(ex_name)s raised: %(ex_str)s")
    else:
        log_fun = LOG.debug
        log_msg = "%(ex_name)s raised: %(ex_str)s"
        # NOTE(jruzicka): For compatibility with EC2 API, treat expected
        # exceptions as client (4xx) errors. The exception error code is 500
        # by default and most exceptions inherit this from NovaException even
        # though they are actually client errors in most cases.
        if status >= 500:
            status = 400

    context = req.environ['nova.context']
    request_id = context.request_id
    log_msg_args = {
        'ex_name': type(ex).__name__,
        'ex_str': ex
    }
    log_fun(log_msg, log_msg_args, context=context)

    # Expose the exception message to the client only when it is safe
    # (expected error, or a client-status unexpected one).
    if ex.args and not message and (not unexpected or status < 500):
        message = unicode(ex.args[0])

    if unexpected:
        # Log filtered environment for unexpected errors.
        # NOTE(review): this relies on Python 2 keys() returning a snapshot
        # list; under Python 3 it must become list(env.keys()) to avoid
        # mutating the dict while iterating.
        env = req.environ.copy()
        for k in env.keys():
            if not isinstance(env[k], six.string_types):
                env.pop(k)
        log_fun(_LE('Environment: %s'), jsonutils.dumps(env))
    if not message:
        message = _('Unknown error occurred.')
    return faults.ec2_error_response(request_id, code, message, status=status)
class Executor(wsgi.Application):
    """Execute an EC2 API request.

    Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and
    'ec2.action_args' (all variables in WSGI environ.) Returns an XML
    response, or a 400 upon failure.
    """

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        context = req.environ['nova.context']
        api_request = req.environ['ec2.request']
        try:
            result = api_request.invoke(context)
        # Not-found exceptions are rewritten so the message shows the
        # EC2-style id (i-..., vol-..., snap-...) instead of the internal id.
        except exception.InstanceNotFound as ex:
            ec2_id = ec2utils.id_to_ec2_inst_id(ex.kwargs['instance_id'])
            message = ex.msg_fmt % {'instance_id': ec2_id}
            return ec2_error_ex(ex, req, message=message)
        except exception.VolumeNotFound as ex:
            ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
            message = ex.msg_fmt % {'volume_id': ec2_id}
            return ec2_error_ex(ex, req, message=message)
        except exception.SnapshotNotFound as ex:
            ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
            message = ex.msg_fmt % {'snapshot_id': ec2_id}
            return ec2_error_ex(ex, req, message=message)
        # Known/expected failures: translate to EC2 client errors.
        except (exception.CannotDisassociateAutoAssignedFloatingIP,
                exception.FloatingIpAssociated,
                exception.FloatingIpNotFound,
                exception.ImageNotActive,
                exception.InvalidInstanceIDMalformed,
                exception.InvalidVolumeIDMalformed,
                exception.InvalidKeypair,
                exception.InvalidParameterValue,
                exception.InvalidPortRange,
                exception.InvalidVolume,
                exception.KeyPairExists,
                exception.KeypairNotFound,
                exception.MissingParameter,
                exception.NoFloatingIpInterface,
                exception.NoMoreFixedIps,
                exception.Forbidden,
                exception.QuotaError,
                exception.SecurityGroupExists,
                exception.SecurityGroupLimitExceeded,
                exception.SecurityGroupRuleExists,
                exception.VolumeUnattached,
                # Following aren't translated to valid EC2 errors.
                exception.ImageNotFound,
                exception.ImageNotFoundEC2,
                exception.InvalidAttribute,
                exception.InvalidRequest,
                exception.NotFound) as ex:
            return ec2_error_ex(ex, req)
        # Anything else is unexpected: logged at ERROR, message suppressed.
        except Exception as ex:
            return ec2_error_ex(ex, req, unexpected=True)
        else:
            # Success: serialize the controller result as XML.
            resp = webob.Response()
            resp.status = 200
            resp.headers['Content-Type'] = 'text/xml'
            resp.body = str(result)
            return resp
|
dnozay/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/contrib/contenttypes/management.py | 315 | from django.contrib.contenttypes.models import ContentType
from django.db.models import get_apps, get_models, signals
from django.utils.encoding import smart_unicode
def update_contenttypes(app, created_models, verbosity=2, **kwargs):
    """
    Creates content types for models in the given app, removing any model
    entries that no longer have a matching model class.

    ``app`` is an installed app's models module; ``created_models`` is
    accepted for post_syncdb signal compatibility but is not used here.
    Stale entries are only deleted after interactive confirmation
    (``kwargs['interactive']``); otherwise they are reported and kept.
    """
    ContentType.objects.clear_cache()
    # The app label is the next-to-last component of the models module
    # path, e.g. 'myapp' for 'myapp.models'.
    content_types = list(ContentType.objects.filter(app_label=app.__name__.split('.')[-2]))
    app_models = get_models(app)
    if not app_models:
        return
    for klass in app_models:
        opts = klass._meta
        try:
            # Existing entry: remove it from the stale-candidates list.
            ct = ContentType.objects.get(app_label=opts.app_label,
                                         model=opts.object_name.lower())
            content_types.remove(ct)
        except ContentType.DoesNotExist:
            # No entry yet for this model: create one.
            ct = ContentType(name=smart_unicode(opts.verbose_name_raw),
                             app_label=opts.app_label, model=opts.object_name.lower())
            ct.save()
            if verbosity >= 2:
                print "Adding content type '%s | %s'" % (ct.app_label, ct.model)
    # The presence of any remaining content types means the supplied app has an
    # undefined model. Confirm that the content type is stale before deletion.
    if content_types:
        if kwargs.get('interactive', False):
            content_type_display = '\n'.join(['    %s | %s' % (ct.app_label, ct.model) for ct in content_types])
            ok_to_delete = raw_input("""The following content types are stale and need to be deleted:
%s
Any objects related to these content types by a foreign key will also
be deleted. Are you sure you want to delete these content types?
If you're unsure, answer 'no'.
    Type 'yes' to continue, or 'no' to cancel: """ % content_type_display)
        else:
            # Non-interactive runs never delete; they only report below.
            ok_to_delete = False
        if ok_to_delete == 'yes':
            for ct in content_types:
                if verbosity >= 2:
                    print "Deleting stale content type '%s | %s'" % (ct.app_label, ct.model)
                ct.delete()
        else:
            if verbosity >= 2:
                print "Stale content types remain."
def update_all_contenttypes(verbosity=2, **kwargs):
    """Run :func:`update_contenttypes` for every installed application."""
    for installed_app in get_apps():
        update_contenttypes(installed_app, None, verbosity, **kwargs)
# Keep ContentType rows in sync after every syncdb run.
signals.post_syncdb.connect(update_contenttypes)
if __name__ == "__main__":
    # Allow running this module directly to refresh all apps' content types.
    update_all_contenttypes()
|
yannrouillard/weboob | refs/heads/master | modules/indeed/__init__.py | 3 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .backend import IndeedBackend
# Public API of the indeed module: only the backend class is exported.
__all__ = ['IndeedBackend']
|
satanas/Turpial | refs/heads/development | ez_setup.py | 3 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
# Version of setuptools to bootstrap by default, and the PyPI directory
# (keyed by the running Python's major.minor) the eggs are fetched from.
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good MD5 digests for each downloadable setuptools egg, keyed by
# egg file name; used to reject corrupted or tampered downloads.
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
    'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
    'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
    'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
    'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
    'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
    'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
    'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
    'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
    'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
    'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
    'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
    'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
    'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    """Return `data` unchanged if its MD5 digest matches the registry.

    Exits the process with status 2 when `egg_name` is in ``md5_data``
    and the digest of `data` does not match, to avoid installing a
    corrupted or tampered egg.  Unknown egg names pass through unverified.
    """
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available.  If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required.  If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # Whether pkg_resources/setuptools were already imported determines if
    # an in-process upgrade is possible (it isn't, once imported).
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg, put it first on sys.path, and record it so
        # setuptools can later install itself from that file.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        # No setuptools at all: bootstrap from a download.
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        # Python 2 except syntax: an older setuptools is already installed.
        if was_imported:
            # Can't swap out a live setuptools; tell the user to upgrade.
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            # Not imported yet, so the stale module entry can be dropped
            # and the new egg downloaded and used instead.
            del pkg_resources, sys.modules['pkg_resources'] # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.

    The downloaded data is MD5-checked via ``_validate_md5`` before being
    written to disk.
    """
    import urllib2
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            # Close both handles even if validation or the write failed.
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall

    ``argv`` holds extra easy_install arguments.  The freshly downloaded
    bootstrap egg is always removed again afterwards.
    """
    try:
        import setuptools
    except ImportError:
        # No setuptools at all: download an egg and let its own
        # easy_install install it, then delete the temporary egg.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg])   # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # 0.0.1 is a known broken placeholder release.
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed version is too old: upgrade using a downloaded egg.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry

    Hashes each given egg file and rewrites the ``md5_data`` dict literal
    inside this very source file in place.
    """
    import re
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    # Re-render the dict body, sorted by egg name.
    data = ["    %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    # Locate the md5_data literal in this file's own source text.
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    # Splice the regenerated entries into the source and write it back.
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
if __name__=='__main__':
    # `ez_setup.py --md5update EGG...` refreshes the digest table;
    # any other invocation installs or upgrades setuptools.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
|
childehuang/ngoenzraiz | refs/heads/master | setup.py | 1 | # -*- coding: utf-8 -*-
import hashlib
import redis

# Connection settings for the local redis instance that will hold the
# generated credentials.
REDIS_HOST = "localhost"
REDIS_PORT = 6379

# ADMIN_PASSWORD is read interactively below; AUTH_VALUE and COOKIE_SECRET
# are then derived from it via MD5.
ADMIN_PASSWORD = None
AUTH_VALUE = None
COOKIE_SECRET = None

# PEP 8: compare to None with `is`/`is not`, never `==`.
if ADMIN_PASSWORD is None:
    # Keep prompting until the password is at least 12 characters long.
    while True:
        pswd = input("请输入12位管理密码:")
        if len(pswd) < 12:
            print("密码过于简单!")
            continue
        ADMIN_PASSWORD = pswd
        break
if AUTH_VALUE is None:
    # NOTE(review): MD5 is acceptable here only as an opaque token
    # derivation; it is NOT a secure password hash.
    mdstr = str(ADMIN_PASSWORD).encode("utf-8")
    AUTH_VALUE = hashlib.md5(mdstr).hexdigest()
if COOKIE_SECRET is None:
    # Cookie secret is derived from password + auth token.
    mdstr = str(ADMIN_PASSWORD + AUTH_VALUE).encode("utf-8")
    COOKIE_SECRET = hashlib.md5(mdstr).hexdigest()
print("ADMIN_PASSWORD:", ADMIN_PASSWORD)
print("AUTH_VALUE:", AUTH_VALUE)
print("COOKIE_SECRET:", COOKIE_SECRET)

# NOTE(review): the raw password is stored in plaintext in redis; consider
# storing only the derived AUTH_VALUE if the raw password is not needed.
r = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT)
r.set("password", ADMIN_PASSWORD)
r.set("auth_value", AUTH_VALUE)
r.set("cookie_secret", COOKIE_SECRET)
|
kenshay/ImageScripter | refs/heads/master | Script_Runner/PYTHON/Lib/site-packages/pkg_resources/__init__.py | 20 | # coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import itertools
import inspect
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
from pkg_resources.extern import six
from pkg_resources.extern.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from . import py31compat
from pkg_resources.extern import appdirs
from pkg_resources.extern import packaging
__import__('pkg_resources.extern.packaging.version')
__import__('pkg_resources.extern.packaging.specifiers')
__import__('pkg_resources.extern.packaging.requirements')
__import__('pkg_resources.extern.packaging.markers')
# pkg_resources supports Python 2 and Python >= 3.3 only.
if (3, 0) < sys.version_info < (3, 3):
    raise RuntimeError("Python 3.3 or later is required")
if six.PY2:
    # Those builtin exceptions are only defined in Python 3
    PermissionError = None
    NotADirectoryError = None
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
class PEP440Warning(RuntimeWarning):
    """
    Used when there is an issue with a version or specifier not complying with
    PEP 440.  This is a warning category (a ``RuntimeWarning`` subclass),
    not an exception meant to be raised and caught.
    """
def parse_version(v):
    """Parse *v* into a PEP 440 ``Version``, or a ``LegacyVersion`` when
    the string does not conform to PEP 440."""
    try:
        parsed = packaging.version.Version(v)
    except packaging.version.InvalidVersion:
        parsed = packaging.version.LegacyVersion(v)
    return parsed
# Maps each registered global's name to its state "type" ('dict', 'object', ...).
_state_vars = {}


def _declare_state(vartype, **kw):
    """Install the given values as module globals and record each one's
    state type so __getstate__/__setstate__ know how to snapshot them."""
    globals().update(kw)
    for var_name in kw:
        _state_vars[var_name] = vartype
def __getstate__():
    """Snapshot every registered state variable using its _sget_* helper."""
    module_globals = globals()
    return {
        name: module_globals['_sget_' + kind](module_globals[name])
        for name, kind in _state_vars.items()
    }
def __setstate__(state):
    """Restore a snapshot produced by __getstate__ via the _sset_* helpers."""
    module_globals = globals()
    for name, value in state.items():
        module_globals['_sset_' + _state_vars[name]](name, module_globals[name], value)
    return state
def _sget_dict(val):
    """Snapshot a dict state variable (shallow copy)."""
    return val.copy()


def _sset_dict(key, ob, state):
    """Restore a dict state variable in place."""
    ob.clear()
    ob.update(state)


def _sget_object(val):
    """Delegate snapshotting to the object's own __getstate__."""
    return val.__getstate__()


def _sset_object(key, ob, state):
    """Delegate restoration to the object's own __setstate__."""
    ob.__setstate__(state)


def _sget_none(*args):
    """State type 'none': nothing to capture."""
    return None


def _sset_none(*args):
    """State type 'none': nothing to restore."""
    return None
def get_supported_platform():
    """Return this platform's maximum compatible version.

    distutils.util.get_platform() normally reports the minimum version
    of Mac OS X that would be required to *use* extensions produced by
    distutils.  But what we want when checking compatibility is to know the
    version of Mac OS X that we are *running*.  To allow usage of packages that
    explicitly require a newer version of Mac OS X, we must also know the
    current version of the OS.

    If this condition occurs for any other platform with a version in its
    platform strings, this function should be extended accordingly.
    """
    plat = get_build_platform()
    m = macosVersionString.match(plat)
    if m is not None and sys.platform == "darwin":
        try:
            # Substitute the *running* macOS version for the build version,
            # keeping the original architecture suffix.
            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
        except ValueError:
            # not Mac OS X
            pass
    return plat
# Public API of pkg_resources; anything not listed here is private.
__all__ = [
    # Basic resource access and distribution/entry point discovery
    'require', 'run_script', 'get_provider', 'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info',
    'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',
    # Environmental control
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',
    # Primary implementation classes
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',
    # Exceptions
    'ResolutionError', 'VersionConflict', 'DistributionNotFound',
    'UnknownExtra', 'ExtractionError',
    # Warnings
    'PEP440Warning',
    # Parsing functions and string utilities
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
    # filesystem utilities
    'ensure_directory', 'normalize_path',
    # Distribution "precedence" constants
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
    # "Provider" interfaces, implementations, and registration/lookup APIs
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',
    # Deprecated/backward compatibility only
    'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""

    def __repr__(self):
        # e.g. "VersionConflict(dist, req)"
        return '{}{!r}'.format(type(self).__name__, self.args)


class VersionConflict(ResolutionError):
    """
    An already-installed version conflicts with the requested version.

    Should be initialized with the installed Distribution and the requested
    Requirement.
    """

    _template = "{self.dist} is installed but {self.req} is required"

    @property
    def dist(self):
        """The installed Distribution (first positional argument)."""
        return self.args[0]

    @property
    def req(self):
        """The conflicting Requirement (second positional argument)."""
        return self.args[1]

    def report(self):
        """Return a human-readable description of the conflict."""
        return self._template.format(self=self)

    def with_context(self, required_by):
        """
        If required_by is non-empty, return a version of self that is a
        ContextualVersionConflict.
        """
        if required_by:
            return ContextualVersionConflict(*(self.args + (required_by,)))
        return self


class ContextualVersionConflict(VersionConflict):
    """
    A VersionConflict that accepts a third parameter, the set of the
    requirements that required the installed Distribution.
    """

    _template = VersionConflict._template + ' by {self.required_by}'

    @property
    def required_by(self):
        """The requirements that pulled in the installed Distribution."""
        return self.args[2]


class DistributionNotFound(ResolutionError):
    """A requested distribution was not found"""

    _template = ("The '{self.req}' distribution was not found "
                 "and is required by {self.requirers_str}")

    @property
    def req(self):
        """The Requirement that could not be satisfied."""
        return self.args[0]

    @property
    def requirers(self):
        """The requirers collection (may be empty or None)."""
        return self.args[1]

    @property
    def requirers_str(self):
        if self.requirers:
            return ', '.join(self.requirers)
        return 'the application'

    def report(self):
        """Return a human-readable description of the missing distribution."""
        return self._template.format(self=self)

    def __str__(self):
        return self.report()


class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
# Registry mapping loader types to IResourceProvider factories
# (populated via register_loader_type).
_provider_factories = {}
# Running Python's "major.minor" as a string, e.g. '2.7'.
PY_MAJOR = sys.version[:3]
# Distribution "precedence" constants: higher values win when several
# distributions of the same project are available.
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.  Later registrations
    for the same type overwrite earlier ones.
    """
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # Requirement: resolve to the active Distribution (itself a
        # provider), activating it via require() if necessary.
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    try:
        module = sys.modules[moduleOrReq]
    except KeyError:
        # Not imported yet; import it so its __loader__ is available.
        __import__(moduleOrReq)
        module = sys.modules[moduleOrReq]
    # Pick the provider factory registered for this module's loader type.
    loader = getattr(module, '__loader__', None)
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
    """Return the running Mac OS X version as a list of string components.

    NOTE: the mutable default argument is intentional — it memoizes the
    detected version for the lifetime of the process.
    """
    if not _cache:
        version = platform.mac_ver()[0]
        # fallback for MacPorts
        if version == '':
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                if hasattr(plistlib, 'readPlist'):
                    plist_content = plistlib.readPlist(plist)
                    if 'ProductVersion' in plist_content:
                        version = plist_content['ProductVersion']
        _cache.append(version.split('.'))
    return _cache[0]
def _macosx_arch(machine):
    """Map uname machine names for PowerPC Macs to the 'ppc' arch tag;
    all other machine names pass through unchanged."""
    if machine in ('PowerPC', 'Power_Macintosh'):
        return 'ppc'
    return machine
def get_build_platform():
    """Return this platform's string for platform-specific distributions

    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    try:
        # Python 2.7 or >=3.2
        from sysconfig import get_platform
    except ImportError:
        from distutils.util import get_platform
    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        # Older Pythons on macOS may report a non-'macosx-' tag; rebuild
        # one from the detected OS version and machine type.
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (
                int(version[0]), int(version[1]),
                _macosx_arch(machine),
            )
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat
# Patterns for platform tags like 'macosx-10.6-intel' and the
# pre-setuptools-0.6 style 'darwin-8.11.1-i386'.
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
    """Can code for the `provided` platform run on the `required` platform?

    Returns true if either platform is ``None``, or the platforms are equal.

    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided == required:
        # easy case
        return True
    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)
        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                # NOTE(review): lexicographic *string* comparison, not
                # numeric ('10.10' < '10.3' here); only reached for legacy
                # darwin-7/8 tagged eggs — confirm acceptable before reuse.
                if dversion == 7 and macosversion >= "10.3" or \
                        dversion == 8 and macosversion >= "10.4":
                    return True
            # egg isn't macosx or legacy darwin
            return False
        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
                provMac.group(3) != reqMac.group(3):
            return False
        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False
        return True
    # XXX Linux and other platforms' special cases should go here
    return False
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # Run the script inside the *caller's* global namespace, cleared of
    # everything except __name__, so it behaves like a directly-run script.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Normalize step by step: string -> Requirement -> Distribution.
    if isinstance(dist, six.string_types):
        dist = Requirement.parse(dist)
    if isinstance(dist, Requirement):
        dist = get_provider(dist)
    if not isinstance(dist, Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist
# Thin convenience wrappers that first normalize `dist` via
# get_distribution(), then delegate to the Distribution methods.
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError"""
    return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
    """Interface specification for access to a distribution's metadata.

    Note: the methods below are declared without ``self`` — this class is
    a documentation-only protocol description, not meant to be
    instantiated or subclassed directly for behavior.
    """
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines

        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory?  (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources

    Like its base, this is a protocol description; methods are declared
    without ``self``.
    """
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`

        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`

        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`

        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory?  (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        # Parallel bookkeeping: `entries` preserves path order, `entry_keys`
        # maps each path entry to the project keys found there, and
        # `by_key` maps project key -> active Distribution.
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        # Activation callbacks; add() notifies them via _added_new()
        # (presumably one call per newly added distribution — see add()).
        self.callbacks = []
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    @classmethod
    def _build_master(cls):
        """
        Prepare the master working set.
        """
        ws = cls()
        try:
            from __main__ import __requires__
        except ImportError:
            # The main program does not list any requirements
            return ws
        # ensure the requirements are met
        try:
            ws.require(__requires__)
        except VersionConflict:
            # sys.path's defaults conflict with __requires__; rebuild a
            # working set from the requirements alone (rewrites sys.path).
            return cls._build_from_requirements(__requires__)
        return ws
    @classmethod
    def _build_from_requirements(cls, req_spec):
        """
        Build a working set from a requirement spec. Rewrites sys.path.
        """
        # try it without defaults already on sys.path
        # by starting with an empty path
        ws = cls([])
        reqs = parse_requirements(req_spec)
        dists = ws.resolve(reqs, Environment())
        for dist in dists:
            ws.add(dist)
        # add any missing entries from sys.path
        for entry in sys.path:
            if entry not in ws.entries:
                ws.add_entry(entry)
        # then copy back to sys.path
        sys.path[:] = ws.entries
        return ws
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it

        ``find_distributions(entry, True)`` is used to find distributions
        corresponding to the path entry, and they are added.  `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            # insert=False: the entry itself was just appended above.
            self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`

        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`.  But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            # XXX add more info
            raise VersionConflict(dist, req)
        return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
def run_script(self, requires, script_name):
    """Locate distribution for `requires` and run `script_name` script"""
    # Replace the *caller's* global namespace so the script executes as
    # though it were the caller's module body.
    caller_globals = sys._getframe(1).f_globals
    module_name = caller_globals['__name__']
    caller_globals.clear()
    caller_globals['__name__'] = module_name
    dist = self.require(requires)[0]
    dist.run_script(script_name, caller_globals)
def __iter__(self):
    """Yield distributions for non-duplicate projects in the working set.

    Yield order follows the order in which the items' path entries were
    added to the working set.
    """
    seen = {}
    for entry in self.entries:
        keys = self.entry_keys.get(entry)
        if keys is None:
            # workaround a cache issue
            continue
        for key in keys:
            if key in seen:
                continue
            seen[key] = 1
            yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
    """Add `dist` to working set, associated with `entry`

    If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
    On exit from this routine, `entry` is added to the end of the working
    set's ``.entries`` (if it wasn't already present).

    `dist` is only added to the working set if it's for a project that
    doesn't already have a distribution in the set, unless `replace=True`.
    If it's added, any callbacks registered with the ``subscribe()`` method
    will be called.
    """
    if insert:
        # Note: runs *before* the entry default below is applied, so
        # insert_on receives the raw entry argument (possibly None).
        dist.insert_on(self.entries, entry, replace=replace)

    if entry is None:
        entry = dist.location
    # Track the dist's key both under the explicit entry and under its
    # own location (they may differ, e.g. for a nested egg).
    keys = self.entry_keys.setdefault(entry, [])
    keys2 = self.entry_keys.setdefault(dist.location, [])
    if not replace and dist.key in self.by_key:
        # ignore hidden distros
        return

    self.by_key[dist.key] = dist
    if dist.key not in keys:
        keys.append(dist.key)
    if dist.key not in keys2:
        keys2.append(dist.key)
    # notify subscribers registered via subscribe()
    self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
            replace_conflicting=False, extras=None):
    """List all distributions needed to (recursively) meet `requirements`

    `requirements` must be a sequence of ``Requirement`` objects.  `env`,
    if supplied, should be an ``Environment`` instance.  If
    not supplied, it defaults to all distributions available within any
    entry or distribution in the working set.  `installer`, if supplied,
    will be invoked with each requirement that cannot be met by an
    already-installed distribution; it should return a ``Distribution`` or
    ``None``.

    Unless `replace_conflicting=True`, raises a VersionConflict exception
    if
    any requirements are found on the path that have the correct name but
    the wrong version.  Otherwise, if an `installer` is supplied it will be
    invoked to obtain the correct version of the requirement and activate
    it.

    `extras` is a list of the extras to be used with these requirements.
    This is important because extra requirements may look like `my_req;
    extra = "my_extra"`, which would otherwise be interpreted as a purely
    optional requirement.  Instead, we want to be able to assert that these
    requirements are truly required.
    """
    # set up the stack
    requirements = list(requirements)[::-1]
    # set of processed requirements
    processed = {}
    # key -> dist
    best = {}
    to_activate = []

    req_extras = _ReqExtras()

    # Mapping of requirement to set of distributions that required it;
    # useful for reporting info about conflicts.
    # (Keys are Requirement objects, values are sets of project names.)
    required_by = collections.defaultdict(set)

    while requirements:
        # process dependencies breadth-first
        req = requirements.pop(0)
        if req in processed:
            # Ignore cyclic or redundant dependencies
            continue

        if not req_extras.markers_pass(req, extras):
            # environment marker excludes this req for the active extras
            continue

        dist = best.get(req.key)
        if dist is None:
            # Find the best distribution and add it to the map
            dist = self.by_key.get(req.key)
            if dist is None or (dist not in req and replace_conflicting):
                ws = self
                if env is None:
                    if dist is None:
                        env = Environment(self.entries)
                    else:
                        # Use an empty environment and workingset to avoid
                        # any further conflicts with the conflicting
                        # distribution
                        env = Environment([])
                        ws = WorkingSet([])
                dist = best[req.key] = env.best_match(
                    req, ws, installer,
                    replace_conflicting=replace_conflicting
                )
                if dist is None:
                    requirers = required_by.get(req, None)
                    raise DistributionNotFound(req, requirers)
            to_activate.append(dist)
        if dist not in req:
            # Oops, the "best" so far conflicts with a dependency
            dependent_req = required_by[req]
            raise VersionConflict(dist, req).with_context(dependent_req)

        # push the new requirements onto the stack
        new_requirements = dist.requires(req.extras)[::-1]
        requirements.extend(new_requirements)

        # Register the new requirements needed by req
        for new_requirement in new_requirements:
            required_by[new_requirement].add(req.project_name)
            req_extras[new_requirement] = req.extras

        processed[req] = True

    # return list of distros to activate
    return to_activate
def find_plugins(
        self, plugin_env, full_env=None, installer=None, fallback=True):
    """Find all activatable distributions in `plugin_env`

    Example usage::

        distributions, errors = working_set.find_plugins(
            Environment(plugin_dirlist)
        )
        # add plugins+libs to sys.path
        map(working_set.add, distributions)
        # display errors
        print('Could not load', errors)

    The `plugin_env` should be an ``Environment`` instance that contains
    only distributions that are in the project's "plugin directory" or
    directories. The `full_env`, if supplied, should be an ``Environment``
    contains all currently-available distributions.  If `full_env` is not
    supplied, one is created automatically from the ``WorkingSet`` this
    method is called on, which will typically mean that every directory on
    ``sys.path`` will be scanned for distributions.

    `installer` is a standard installer callback as used by the
    ``resolve()`` method. The `fallback` flag indicates whether we should
    attempt to resolve older versions of a plugin if the newest version
    cannot be resolved.

    This method returns a 2-tuple: (`distributions`, `error_info`), where
    `distributions` is a list of the distributions found in `plugin_env`
    that were loadable, along with any other distributions that are needed
    to resolve their dependencies. `error_info` is a dictionary mapping
    unloadable plugin distributions to an exception instance describing the
    error that occurred. Usually this will be a ``DistributionNotFound`` or
    ``VersionConflict`` instance.
    """
    plugin_projects = list(plugin_env)
    # scan project names in alphabetic order
    plugin_projects.sort()

    error_info = {}
    # dict used as an ordered set of resolved distributions
    distributions = {}
    if full_env is None:
        env = Environment(self.entries)
        env += plugin_env
    else:
        env = full_env + plugin_env
    # resolve against a disposable copy so failures don't mutate self
    shadow_set = self.__class__([])
    # put all our entries in shadow_set
    list(map(shadow_set.add, self))
    for project_name in plugin_projects:
        for dist in plugin_env[project_name]:
            req = [dist.as_requirement()]
            try:
                resolvees = shadow_set.resolve(req, env, installer)
            except ResolutionError as v:
                # save error info
                error_info[dist] = v
                if fallback:
                    # try the next older version of project
                    continue
                else:
                    # give up on this project, keep going
                    break
            else:
                list(map(shadow_set.add, resolvees))
                distributions.update(dict.fromkeys(resolvees))
                # success, no need to try any more versions of this project
                break

    distributions = list(distributions)
    distributions.sort()

    return distributions, error_info
def require(self, *requirements):
    """Ensure that distributions matching `requirements` are activated.

    `requirements` must be a string or a (possibly-nested) sequence
    thereof, specifying the distributions and versions required.  Returns
    the sequence of distributions that needed to be activated to fulfill
    the requirements; all relevant distributions are included, even if
    they were already active in this working set.
    """
    resolved = self.resolve(parse_requirements(requirements))
    # Activate every resolved distribution, including already-active ones.
    list(map(self.add, resolved))
    return resolved
def subscribe(self, callback, existing=True):
    """Invoke `callback` for all distributions.

    With `existing=True` (the default), the callback is also invoked
    immediately for every distribution already in the working set.
    Registering the same callback twice is a no-op.
    """
    if callback not in self.callbacks:
        self.callbacks.append(callback)
        if existing:
            # Replay the current contents for the new subscriber.
            for dist in self:
                callback(dist)
def _added_new(self, dist):
    # Notify every registered subscriber of the newly activated dist.
    for cb in self.callbacks:
        cb(dist)
def __getstate__(self):
    # Snapshot all mutable state as shallow copies so the pickled form
    # is isolated from later mutation of this working set.
    return (
        list(self.entries),
        self.entry_keys.copy(),
        self.by_key.copy(),
        list(self.callbacks),
    )
def __setstate__(self, e_k_b_c):
    # Restore from the 4-tuple produced by __getstate__, copying each
    # piece so the restored set doesn't share state with the source.
    entries, keys, by_key, callbacks = e_k_b_c
    self.entries = list(entries)
    self.entry_keys = keys.copy()
    self.by_key = by_key.copy()
    self.callbacks = list(callbacks)
class _ReqExtras(dict):
    """Map each requirement to the extras that demanded it."""

    def markers_pass(self, req, extras=None):
        """
        Evaluate markers for req against each extra that demanded it.

        Return False if the req has a marker and it fails evaluation
        for every candidate extra.  Otherwise, return True.
        """
        if not req.marker:
            return True
        # Try the recorded extras plus any explicitly supplied ones
        # (or a single None placeholder when none were supplied).
        candidate_extras = self.get(req, ()) + (extras or (None,))
        return any(
            req.marker.evaluate({'extra': extra})
            for extra in candidate_extras
        )
class Environment(object):
    """Searchable snapshot of distributions on a search path"""

    def __init__(
            self, search_path=None, platform=get_supported_platform(),
            python=PY_MAJOR):
        """Snapshot distributions available on a search path

        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.

        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with.  If
        unspecified, it defaults to the current platform.  `python` is an
        optional string naming the desired version of Python (e.g. ``'3.3'``);
        it defaults to the current version.

        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.

        Note: the `platform` default is evaluated once, at class definition
        time (standard Python default-argument semantics).
        """
        # key (lowercased project name) -> list of Distributions, newest first
        self._distmap = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)

    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?

        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        # A dist whose py_version is None is treated as version-agnostic.
        py_compat = (
            self.python is None
            or dist.py_version is None
            or dist.py_version == self.python
        )
        return py_compat and compatible_platforms(dist.platform, self.platform)

    def remove(self, dist):
        """Remove `dist` from the environment"""
        # Raises KeyError/ValueError if the dist was never added.
        self._distmap[dist.key].remove(dist)

    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment

        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items.  If not
        supplied, ``sys.path`` is used.  Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path

        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)

    def __getitem__(self, project_name):
        """Return a newest-to-oldest list of distributions for `project_name`

        Uses case-insensitive `project_name` comparison, assuming all the
        project's distributions use their project's name converted to all
        lowercase as their key.
        """
        distribution_key = project_name.lower()
        return self._distmap.get(distribution_key, [])

    def add(self, dist):
        """Add `dist` if we ``can_add()`` it and it has not already been added
        """
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key, [])
            if dist not in dists:
                dists.append(dist)
                # keep newest-first ordering for __getitem__ / best_match
                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)

    def best_match(
            self, req, working_set, installer=None, replace_conflicting=False):
        """Find distribution best matching `req` and usable on `working_set`

        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active.  (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.)  If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`.  If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        try:
            dist = working_set.find(req)
        except VersionConflict:
            if not replace_conflicting:
                raise
            # fall through and search the environment instead
            dist = None
        if dist is not None:
            return dist
        # self[req.key] is newest-first, so the first match is the best one
        for dist in self[req.key]:
            if dist in req:
                return dist
        # try to download/install
        return self.obtain(req, installer)

    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)

        Obtain a distro that matches requirement (e.g. via download).  In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead.  This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)

    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            # skip keys whose distribution lists have been emptied by remove()
            if self[key]:
                yield key

    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other, Distribution):
            self.add(other)
        elif isinstance(other, Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self

    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # platform=None/python=None so nothing is filtered out of the union
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
# XXX backward compatibility: legacy alias for Environment, kept for code
# that still imports AvailableDistributions.
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource

    The following attributes are available from instances of this exception:

    manager
        The resource manager that raised this exception

    cache_path
        The base directory for resource extraction

    original_error
        The exception instance that caused extraction to fail
    """
class ResourceManager:
    """Manage resource extraction and packages"""

    # Base directory for extraction; None means use get_default_cache().
    extraction_path = None

    def __init__(self):
        # paths handed out by get_cache_path, tracked for possible cleanup
        self.cached_files = {}

    def resource_exists(self, package_or_requirement, resource_name):
        """Does the named resource exist?"""
        return get_provider(package_or_requirement).has_resource(resource_name)

    def resource_isdir(self, package_or_requirement, resource_name):
        """Is the named resource an existing directory?"""
        return get_provider(package_or_requirement).resource_isdir(
            resource_name
        )

    def resource_filename(self, package_or_requirement, resource_name):
        """Return a true filesystem path for specified resource"""
        return get_provider(package_or_requirement).get_resource_filename(
            self, resource_name
        )

    def resource_stream(self, package_or_requirement, resource_name):
        """Return a readable file-like object for specified resource"""
        return get_provider(package_or_requirement).get_resource_stream(
            self, resource_name
        )

    def resource_string(self, package_or_requirement, resource_name):
        """Return specified resource as a string"""
        return get_provider(package_or_requirement).get_resource_string(
            self, resource_name
        )

    def resource_listdir(self, package_or_requirement, resource_name):
        """List the contents of the named resource directory"""
        return get_provider(package_or_requirement).resource_listdir(
            resource_name
        )

    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # Called from inside an except block; wrap the active exception.
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()

        tmpl = textwrap.dedent("""
            Can't extract file(s) to egg cache

            The following error occurred while trying to extract file(s)
            to the Python egg cache:

              {old_exc}

            The Python egg cache directory is currently set to:

              {cache_path}

            Perhaps your account does not have write access to this directory?
            You can change the cache directory by setting the PYTHON_EGG_CACHE
            environment variable to point to an accessible directory.
            """).lstrip()
        err = ExtractionError(tmpl.format(**locals()))
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err

    def get_cache_path(self, archive_name, names=()):
        """Return absolute location in cache for `archive_name` and `names`

        The parent directory of the resulting path will be created if it does
        not already exist.  `archive_name` should be the base filename of the
        enclosing egg (which may not be the name of the enclosing zipfile!),
        including its ".egg" extension.  `names`, if provided, should be a
        sequence of path name parts "under" the egg's extraction location.

        This method should only be called by resource providers that need to
        obtain an extraction location, and only for names they intend to
        extract, as it tracks the generated names for possible cleanup later.
        """
        extract_path = self.extraction_path or get_default_cache()
        target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
        try:
            _bypass_ensure_directory(target_path)
        except Exception:
            # translate any directory-creation failure into ExtractionError
            self.extraction_error()

        self._warn_unsafe_extraction_path(extract_path)

        self.cached_files[target_path] = 1
        return target_path

    @staticmethod
    def _warn_unsafe_extraction_path(path):
        """
        If the default extraction path is overridden and set to an insecure
        location, such as /tmp, it opens up an opportunity for an attacker to
        replace an extracted file with an unauthorized payload. Warn the user
        if a known insecure location is used.

        See Distribute #375 for more details.
        """
        if os.name == 'nt' and not path.startswith(os.environ['windir']):
            # On Windows, permissions are generally restrictive by default
            # and temp directories are not writable by other users, so
            # bypass the warning.
            return
        mode = os.stat(path).st_mode
        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
            msg = (
                "%s is writable by group/others and vulnerable to attack "
                "when "
                "used with get_resource_filename. Consider a more secure "
                "location (set with .set_extraction_path or the "
                "PYTHON_EGG_CACHE environment variable)." % path
            )
            warnings.warn(msg, UserWarning)

    def postprocess(self, tempname, filename):
        """Perform any platform-specific postprocessing of `tempname`

        This is where Mac header rewrites should be done; other platforms don't
        have anything special they should do.

        Resource providers should call this method ONLY after successfully
        extracting a compressed resource.  They must NOT call it on resources
        that are already in the filesystem.

        `tempname` is the current (temporary) name of the file, and `filename`
        is the name it will be renamed to by the caller after this routine
        returns.
        """
        if os.name == 'posix':
            # Make the resource executable
            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
            os.chmod(tempname, mode)

    def set_extraction_path(self, path):
        """Set the base path where resources will be extracted to, if needed.

        If you do not call this routine before any extractions take place, the
        path defaults to the return value of ``get_default_cache()``.  (Which
        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
        platform-specific fallbacks.  See that routine's documentation for more
        details.)

        Resources are extracted to subdirectories of this path based upon
        information given by the ``IResourceProvider``.  You may set this to a
        temporary directory, but then you must call ``cleanup_resources()`` to
        delete the extracted files when done.  There is no guarantee that
        ``cleanup_resources()`` will be able to remove all extracted files.

        (Note: you may not change the extraction path for a given resource
        manager once resources have been extracted, unless you first call
        ``cleanup_resources()``.)
        """
        if self.cached_files:
            raise ValueError(
                "Can't change extraction path, files already extracted"
            )

        self.extraction_path = path

    def cleanup_resources(self, force=False):
        """
        Delete all extracted resource files and directories, returning a list
        of the file and directory names that could not be successfully removed.
        This function does not have any concurrency protection, so it should
        generally only be called when the extraction path is a temporary
        directory exclusive to a single process.  This method is not
        automatically called; you must call it explicitly or register it as an
        ``atexit`` function if you wish to ensure cleanup of a temporary
        directory used for extractions.
        """
        # XXX not implemented; intentionally a no-op in the base class
def get_default_cache():
    """
    Return the ``PYTHON_EGG_CACHE`` environment variable
    or a platform-relevant user cache dir for an app
    named "Python-Eggs".
    """
    env_cache = os.environ.get('PYTHON_EGG_CACHE')
    if env_cache:
        return env_cache
    return appdirs.user_cache_dir(appname='Python-Eggs')
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name.

    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    illegal_run = re.compile('[^A-Za-z0-9.]+')
    return illegal_run.sub('-', name)
def safe_version(version):
    """
    Convert an arbitrary string to a standard version string
    """
    try:
        # normalize the version per PEP 440 when possible
        return str(packaging.version.Version(version))
    except packaging.version.InvalidVersion:
        # Fall back to a lenient escape for legacy version strings.
        sanitized = version.replace(' ', '.')
        return re.sub('[^A-Za-z0-9.]+', '-', sanitized)
def safe_extra(extra):
    """Convert an arbitrary string to a standard 'extra' name.

    Any runs of non-alphanumeric characters are replaced with a single '_',
    and the result is always lowercased.
    """
    normalized = re.sub('[^A-Za-z0-9.-]+', '_', extra)
    return normalized.lower()
def to_filename(name):
    """Convert a project or version name to its filename-escaped form.

    Any '-' characters are currently replaced with '_'.
    """
    return '_'.join(name.split('-'))
def invalid_marker(text):
    """
    Validate text as a PEP 508 environment marker; return an exception
    if invalid or False otherwise.
    """
    try:
        evaluate_marker(text)
    except SyntaxError as e:
        # Strip location info that is meaningless for a marker string.
        e.filename = None
        e.lineno = None
        return e
    else:
        return False
def evaluate_marker(text, extra=None):
    """
    Evaluate a PEP 508 environment marker.

    Return a boolean indicating the marker result in this environment.
    Raise SyntaxError if marker is invalid.

    This implementation uses the 'pyparsing' module.
    """
    try:
        result = packaging.markers.Marker(text).evaluate()
    except packaging.markers.InvalidMarker as e:
        # present invalid markers to callers uniformly as SyntaxError
        raise SyntaxError(e)
    return result
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""

    # egg_name/egg_info stay None here; EggProvider._setup_prefix fills
    # them in for egg-based providers.
    egg_name = None
    egg_info = None
    loader = None

    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))

    def get_resource_filename(self, manager, resource_name):
        return self._fn(self.module_path, resource_name)

    def get_resource_stream(self, manager, resource_name):
        return io.BytesIO(self.get_resource_string(manager, resource_name))

    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))

    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))

    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info, name))

    def get_metadata(self, name):
        if not self.egg_info:
            return ""
        value = self._get(self._fn(self.egg_info, name))
        # _get returns bytes; decode on Python 3 only (2/3 compatibility)
        return value.decode('utf-8') if six.PY3 else value

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))

    def resource_isdir(self, resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))

    def metadata_isdir(self, name):
        return self.egg_info and self._isdir(self._fn(self.egg_info, name))

    def resource_listdir(self, resource_name):
        return self._listdir(self._fn(self.module_path, resource_name))

    def metadata_listdir(self, name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info, name))
        return []

    def run_script(self, script_name, namespace):
        script = 'scripts/' + script_name
        if not self.has_metadata(script):
            raise ResolutionError(
                "Script {script!r} not found in metadata at {self.egg_info!r}"
                .format(**locals()),
            )
        # normalize line endings so compile() accepts the source
        script_text = self.get_metadata(script).replace('\r\n', '\n')
        script_text = script_text.replace('\r', '\n')
        script_filename = self._fn(self.egg_info, script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            source = open(script_filename).read()
            code = compile(source, script_filename, 'exec')
            exec(code, namespace, namespace)
        else:
            # no real file on disk: seed linecache so tracebacks can still
            # show the script's source lines
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text, script_filename, 'exec')
            exec(script_code, namespace, namespace)

    # The _has/_isdir/_listdir primitives must be supplied by a subclass
    # registered for the concrete loader type.
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )

    def _fn(self, base, resource_name):
        # join a '/'-separated resource name onto base using os separators
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base

    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
# Fallback registration: any object type without a more specific provider
# gets NullProvider.
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        # we assume here that our metadata may be nested inside a "basket"
        # of multiple eggs; that's why we use module_path instead of .archive
        path = self.module_path
        old = None
        # walk upward one path component at a time; the loop stops when
        # os.path.split no longer changes the path (filesystem root)
        while path != old:
            if _is_egg_path(path):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            old = path
            path, base = os.path.split(path)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        # Stream straight from the filesystem; no extraction necessary.
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        with open(path, 'rb') as stream:
            data = stream.read()
        return data

    @classmethod
    def _register(cls):
        # If SourceFileLoader is unavailable, register against type(None)
        # so the registration is a harmless no-op.
        loader_cls = getattr(
            importlib_machinery, 'SourceFileLoader', type(None))
        register_loader_type(loader_cls, cls)
# Register DefaultProvider for modules loaded via SourceFileLoader.
DefaultProvider._register()
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""

    module_path = None

    def __init__(self):
        # Deliberately skip NullProvider.__init__: there is no module.
        pass

    def _get(self, path):
        # NOTE: returns an empty str (not bytes), matching historical behavior
        return ''

    def _listdir(self, path):
        return []

    def _has(self, path):
        return False

    _isdir = _has
# Shared stateless instance used wherever an empty provider suffices.
empty_provider = EmptyProvider()
class ZipManifests(dict):
    """
    zip manifest builder
    """

    @classmethod
    def build(cls, path):
        """
        Build a dictionary similar to the zipimport directory
        caches, except instead of tuples, store ZipInfo objects.

        Use a platform-specific path separator (os.sep) for the path keys
        for compatibility with pypy on Windows.
        """
        with zipfile.ZipFile(path) as zfile:
            manifest = {}
            for name in zfile.namelist():
                manifest[name.replace('/', os.sep)] = zfile.getinfo(name)
        return manifest

    load = build
class MemoizedZipManifests(ZipManifests):
    """
    Memoized zipfile manifests.
    """

    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')

    def load(self, path):
        """
        Load a manifest at path or return a suitable manifest already loaded.
        """
        path = os.path.normpath(path)
        mtime = os.stat(path).st_mtime

        cached = self.get(path)
        if cached is None or cached.mtime != mtime:
            # (re)build when never seen or when the zip changed on disk
            cached = self.manifest_mod(self.build(path), mtime)
            self[path] = cached

        return cached.manifest
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""

    eagers = None
    # shared across instances: manifests keyed by archive path
    _zip_manifests = MemoizedZipManifests()

    def __init__(self, module):
        EggProvider.__init__(self, module)
        self.zip_pre = self.loader.archive + os.sep

    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        fspath = fspath.rstrip(os.sep)
        if fspath == self.loader.archive:
            return ''
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.zip_pre)
        )

    def _parts(self, zip_path):
        # Convert a zipfile subpath into an egg-relative path part list.
        # pseudo-fs path
        fspath = self.zip_pre + zip_path
        if fspath.startswith(self.egg_root + os.sep):
            return fspath[len(self.egg_root) + 1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath, self.egg_root)
        )

    @property
    def zipinfo(self):
        return self._zip_manifests.load(self.loader.archive)

    def get_resource_filename(self, manager, resource_name):
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # an eager resource requires all eager resources extracted first
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)

    @staticmethod
    def _get_date_and_size(zip_stat):
        size = zip_stat.file_size
        # ymdhms+wday, yday, dst
        date_time = zip_stat.date_time + (0, 0, -1)
        # 1980 offset already done
        timestamp = time.mktime(date_time)
        return timestamp, size

    def _extract_resource(self, manager, zip_path):
        # directories extract each child recursively
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            # return the extracted directory name
            return os.path.dirname(last)

        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])

        if not WRITE_SUPPORT:
            raise IOError('"os.rename" and "os.unlink" are not supported '
                          'on this platform')
        try:
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )

            if self._is_current(real_path, zip_path):
                return real_path

            # write to a temp name in the target directory, then rename —
            # keeps concurrent extractions from seeing partial files
            outf, tmpnam = _mkstemp(
                ".$extract",
                dir=os.path.dirname(real_path),
            )
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            utime(tmpnam, (timestamp, timestamp))
            manager.postprocess(tmpnam, real_path)

            try:
                rename(tmpnam, real_path)

            except os.error:
                if os.path.isfile(real_path):
                    if self._is_current(real_path, zip_path):
                        # the file became current since it was checked above,
                        # so proceed.
                        return real_path
                    # Windows, del old file and retry
                    elif os.name == 'nt':
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise

        except os.error:
            # report a user-friendly error
            manager.extraction_error()

        return real_path

    def _is_current(self, file_path, zip_path):
        """
        Return True if the file_path is current for this zip_path
        """
        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
        if not os.path.isfile(file_path):
            return False
        stat = os.stat(file_path)
        if stat.st_size != size or stat.st_mtime != timestamp:
            return False
        # check that the contents match
        zip_contents = self.loader.get_data(zip_path)
        with open(file_path, 'rb') as f:
            file_contents = f.read()
        return zip_contents == file_contents

    def _get_eager_resources(self):
        # lazily read the eager-resource lists from metadata, once
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers

    def _index(self):
        # lazily build (and cache) a parent-dir -> child-names index of
        # every path in the zip manifest
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind

    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()

    def _isdir(self, fspath):
        return self._zipinfo_name(fspath) in self._index()

    def _listdir(self, fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))

    def _eager_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.egg_root, resource_name))

    def _resource_to_zip(self, resource_name):
        return self._zipinfo_name(self._fn(self.module_path, resource_name))
# Zip-imported modules get resource/metadata support via ZipProvider.
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files.

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    This provider rejects all data and metadata requests except for
    PKG-INFO, which is treated as existing, and will be the contents of
    the file at the provided location.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        return name == 'PKG-INFO' and os.path.isfile(self.path)

    def get_metadata(self, name):
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")

        with io.open(self.path, encoding='utf-8', errors="replace") as fp:
            content = fp.read()
        self._warn_on_replacement(content)
        return content

    def _warn_on_replacement(self, metadata):
        # Python 2.7 compat for: replacement_char = '�'
        replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
        if replacement_char in metadata:
            tmpl = "{self.path} could not be properly decoded in UTF-8"
            warnings.warn(tmpl.format(self=self))

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories
    Usage::
        # Development eggs:
        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
        # Unpacked egg directories:
        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """
    def __init__(self, path, egg_info):
        # module_path: directory containing the importable code;
        # egg_info: directory (or file) holding the metadata
        self.module_path = path
        self.egg_info = egg_info
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""
    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        self.zip_pre = importer.archive + os.sep
        self.loader = importer
        # a non-empty prefix means the egg lives inside the archive
        self.module_path = (
            os.path.join(importer.archive, importer.prefix)
            if importer.prefix
            else importer.archive
        )
        self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item. See ``pkg_resources.find_on_path`` for an example."""
    # A later registration for the same importer type replaces the earlier one.
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    # dispatch on the importer type registered for this path item
    importer = get_importer(path_item)
    locate = _find_adapter(_distribution_finders, importer)
    return locate(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
    """
    Find eggs in zip files; possibly multiple nested eggs.
    """
    if importer.archive.endswith('.whl'):
        # wheels are not supported with this finder
        # they don't have PKG-INFO metadata, and won't ever contain eggs
        return
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        # the archive itself is an egg distribution
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        # don't yield nested distros
        return
    for subitem in metadata.resource_listdir('/'):
        if _is_egg_path(subitem):
            # recurse into eggs bundled inside this archive
            subpath = os.path.join(path_item, subitem)
            dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
            for dist in dists:
                yield dist
        elif subitem.lower().endswith('.dist-info'):
            # nested .dist-info directory: expose it as a distribution
            subpath = os.path.join(path_item, subitem)
            submeta = EggMetadata(zipimport.zipimporter(subpath))
            submeta.egg_info = subpath
            yield Distribution.from_location(path_item, subitem, submeta)
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
    """Fallback finder for importer types that never contain distributions."""
    return ()
# the catch-all registration for importer types with no specific finder
register_finder(object, find_nothing)
def _by_version_descending(names):
    """
    Given a list of filenames, return them in descending order
    by version number.
    >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
    >>> _by_version_descending(names)
    ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
    >>> _by_version_descending(names)
    ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
    >>> _by_version_descending(names)
    ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
    """
    def _version_key(name):
        # Split "Name-1.2-py2.7" into its dash-separated pieces plus the
        # extension, parsing each piece as a (possibly legacy) version.
        stem, ext = os.path.splitext(name)
        pieces = itertools.chain(stem.split('-'), [ext])
        return [packaging.version.parse(piece) for piece in pieces]
    return sorted(names, key=_version_key, reverse=True)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)
    if _is_unpacked_egg(path_item):
        # the path item itself is an unpacked egg directory; yield it directly
        yield Distribution.from_filename(
            path_item, metadata=PathMetadata(
                path_item, os.path.join(path_item, 'EGG-INFO')
            )
        )
        return
    entries = safe_listdir(path_item)
    # for performance, before sorting by version,
    # screen entries for only those that will yield
    # distributions
    filtered = (
        entry
        for entry in entries
        if dist_factory(path_item, entry, only)
    )
    # scan for .egg and .egg-info in directory
    path_item_entries = _by_version_descending(filtered)
    for entry in path_item_entries:
        fullpath = os.path.join(path_item, entry)
        factory = dist_factory(path_item, entry, only)
        for dist in factory(fullpath):
            yield dist
def dist_factory(path_item, entry, only):
    """
    Return a dist_factory for a path_item and entry
    """
    lower = entry.lower()
    # metadata directories/files always produce distributions
    if lower.endswith(('.egg-info', '.dist-info')):
        return distributions_from_metadata
    if not only and _is_egg_path(entry):
        return find_distributions
    if not only and lower.endswith('.egg-link'):
        return resolve_egg_link
    # falsy, callable placeholder meaning "nothing to yield here"
    return NoDists()
class NoDists:
    """
    >>> bool(NoDists())
    False
    >>> list(NoDists()('anything'))
    []
    """
    def __bool__(self):
        # falsy so dist_factory results can be used in boolean screens
        return False
    if six.PY2:
        __nonzero__ = __bool__
    def __call__(self, fullpath):
        # behave like a finder that yields nothing
        return iter(())
def safe_listdir(path):
    """
    List the contents of *path*, returning ``()`` instead of raising for
    the common "not listable" cases: missing, not a directory, no access.
    """
    try:
        return os.listdir(path)
    except (PermissionError, NotADirectoryError):
        pass
    except OSError as exc:
        # Ignore the directory if does not exist, not a directory or
        # permission denied
        harmless = (
            exc.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
            # Python 2 on Windows needs to be handled this way :(
            or getattr(exc, "winerror", None) == 267
        )
        if not harmless:
            raise
    return ()
def distributions_from_metadata(path):
    """Yield one develop-precedence Distribution described by the metadata
    directory or file at *path* (nothing for an empty metadata dir)."""
    root = os.path.dirname(path)
    if os.path.isdir(path):
        if not os.listdir(path):
            # empty metadata dir; skip
            return
        provider = PathMetadata(root, path)
    else:
        provider = FileMetadata(path)
    entry_name = os.path.basename(path)
    yield Distribution.from_location(
        root, entry_name, provider, precedence=DEVELOP_DIST,
    )
def non_empty_lines(path):
    """
    Yield non-empty lines from file at path
    """
    with open(path) as handle:
        for raw in handle:
            stripped = raw.strip()
            if stripped:
                yield stripped
def resolve_egg_link(path):
    """
    Given a path to an .egg-link, resolve distributions
    present in the referenced path.
    """
    base = os.path.dirname(path)
    # each non-empty line names a directory, relative to the link's own dir
    resolved = (os.path.join(base, ref) for ref in non_empty_lines(path))
    groups = map(find_distributions, resolved)
    return next(groups, ())
# Classic filesystem path entries are scanned with find_on_path.
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
    register_finder(importlib_machinery.FileFinder, find_on_path)
# Registries for namespace-package handlers and declared namespace packages.
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::
        def namespace_handler(importer, path_entry, moduleName, module):
            # return a path_entry to use for child packages
    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath. For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    # A later registration for the same importer type replaces the earlier one.
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # create a fresh namespace module with an empty __path__
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []
        _set_parent_ns(packageName)
    elif not hasattr(module, '__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer, path_item, packageName, module)
    if subpath is not None:
        # this path item contributes a directory to the namespace package
        path = module.__path__
        path.append(subpath)
        loader.load_module(packageName)
        # keep __path__ sorted in sys.path order
        _rebuild_mod_path(path, packageName, module)
    return subpath
def _rebuild_mod_path(orig_path, package_name, module):
    """
    Rebuild module.__path__ ensuring that all entries are ordered
    corresponding to their sys.path order
    """
    sys_path = [_normalize_cached(p) for p in sys.path]
    def safe_sys_path_index(entry):
        """
        Workaround for #520 and #513.
        """
        try:
            return sys_path.index(entry)
        except ValueError:
            # entries not present on sys.path sort last
            return float('inf')
    def position_in_sys_path(path):
        """
        Return the ordinal of the path based on its position in sys.path
        """
        path_parts = path.split(os.sep)
        module_parts = package_name.count('.') + 1
        # drop the trailing package components to recover the sys.path entry
        parts = path_parts[:-module_parts]
        return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
    if not isinstance(orig_path, list):
        # Is this behavior useful when module.__path__ is not a list?
        return
    orig_path.sort(key=position_in_sys_path)
    # mutate __path__ in place so existing references observe the new order
    module.__path__[:] = [_normalize_cached(p) for p in orig_path]
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    _imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return
        path, parent = sys.path, None
        if '.' in packageName:
            # ensure all ancestor packages are declared as namespaces first
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent, []).append(packageName)
        _namespace_packages.setdefault(packageName, [])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        _imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    _imp.acquire_lock()
    try:
        # extend every declared child namespace of `parent`, recursively
        for package in _namespace_packages.get(parent, ()):
            subpath = _handle_ns(package, path_item)
            if subpath:
                fixup_namespace_packages(subpath, package)
    finally:
        _imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    already_present = any(
        _normalize_cached(entry) == normalized
        for entry in module.__path__
    )
    if not already_present:
        # Only return the path if it's not already there
        return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    """Namespace handler for importer types that contribute no subpath."""
    return None
# catch-all registration for importer types with no specific handler
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    # resolve symlinks first, then apply platform case folding
    resolved = os.path.realpath(filename)
    return os.path.normcase(resolved)
def _normalize_cached(filename, _cache={}):
    """Memoizing wrapper around normalize_path.
    The mutable default is intentional: it is the process-wide cache.
    """
    if filename not in _cache:
        _cache[filename] = normalize_path(filename)
    return _cache[filename]
def _is_egg_path(path):
"""
Determine if given path appears to be an egg.
"""
return path.lower().endswith('.egg')
def _is_unpacked_egg(path):
    """
    Determine if given path appears to be an unpacked egg.
    """
    if not _is_egg_path(path):
        return False
    # an unpacked egg is a directory carrying EGG-INFO/PKG-INFO
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    return os.path.isfile(pkg_info)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a string or sequence"""
    if isinstance(strs, six.string_types):
        for raw in strs.splitlines():
            candidate = raw.strip()
            # skip blank lines/comments
            if candidate and not candidate.startswith('#'):
                yield candidate
    else:
        # a (possibly nested) iterable of strings: flatten recursively
        for element in strs:
            for candidate in yield_lines(element):
                yield candidate
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint(object):
    """Object representing an advertised importable object"""
    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        # dotted attribute path within the module, e.g. ('Class', 'method')
        self.attrs = tuple(attrs)
        # extras that must be required before the entry point may be loaded
        self.extras = tuple(extras)
        self.dist = dist
    def __str__(self):
        # round-trips through EntryPoint.parse
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s
    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)
    def load(self, require=True, *args, **kwargs):
        """
        Require packages for this EntryPoint, then resolve it.
        """
        if not require or args or kwargs:
            warnings.warn(
                "Parameters to load are deprecated. Call .resolve and "
                ".require separately.",
                DeprecationWarning,
                stacklevel=2,
            )
        if require:
            self.require(*args, **kwargs)
        return self.resolve()
    def resolve(self):
        """
        Resolve the entry point from its module and attrs.
        """
        module = __import__(self.module_name, fromlist=['__name__'], level=0)
        try:
            # walk the dotted attribute path down from the module object
            return functools.reduce(getattr, self.attrs, module)
        except AttributeError as exc:
            raise ImportError(str(exc))
    def require(self, env=None, installer=None):
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        # Get the requirements for this entry point with all its extras and
        # then resolve them. We have to pass `extras` along when resolving so
        # that the working set knows what extras we want. Otherwise, for
        # dist-info distributions, the working set will assume that the
        # requirements for that extra are purely optional and skip over them.
        reqs = self.dist.requires(self.extras)
        items = working_set.resolve(reqs, env, installer, extras=self.extras)
        list(map(working_set.add, items))
    # Matches entry point specs of the form "name = module:attrs [extras]".
    pattern = re.compile(
        r'\s*'
        r'(?P<name>.+?)\s*'
        r'=\s*'
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+))?\s*'
        r'(?P<extras>\[.*\])?\s*$'
    )
    @classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`
        Entry point syntax follows the form::
            name = some.module:some.attr [extra1, extra2]
        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        m = cls.pattern.match(src)
        if not m:
            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
            raise ValueError(msg, src)
        res = m.groupdict()
        extras = cls._parse_extras(res['extras'])
        attrs = res['attr'].split('.') if res['attr'] else ()
        return cls(res['name'], res['module'], attrs, extras, dist)
    @classmethod
    def _parse_extras(cls, extras_spec):
        # reuse the requirement parser by prefixing a dummy project name
        if not extras_spec:
            return ()
        req = Requirement.parse('x' + extras_spec)
        if req.specs:
            raise ValueError()
        return req.extras
    @classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name] = ep
        return this
    @classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data, dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
    """
    Given an iterable of lines from a Metadata file, return
    the value of the Version field, if present, or None otherwise.
    """
    version_header = next(
        (ln for ln in lines if ln.lower().startswith('version:')), ''
    )
    _, _, value = version_header.partition(':')
    return safe_version(value.strip()) or None
class Distribution(object):
    """Wrap an actual or potential sys.path entry w/metadata"""
    # name of the metadata file holding Version and other headers
    PKG_INFO = 'PKG-INFO'
    def __init__(
            self, location=None, metadata=None, project_name=None,
            version=None, py_version=PY_MAJOR, platform=None,
            precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        # fall back to a provider with no metadata at all
        self._provider = metadata or empty_provider
    @classmethod
    def from_location(cls, location, basename, metadata=None, **kw):
        # choose the Distribution subclass by metadata extension and parse
        # name/version/pyver/platform out of the egg-style basename
        project_name, version, py_version, platform = [None] * 4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            cls = _distributionImpl[ext.lower()]
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group(
                    'name', 'ver', 'pyver', 'plat'
                )
        return cls(
            location, metadata, project_name=project_name, version=version,
            py_version=py_version, platform=platform, **kw
        )._reload_version()
    def _reload_version(self):
        # hook for subclasses to re-read the version from metadata
        return self
    @property
    def hashcmp(self):
        # tuple used for hashing and all ordering comparisons below
        return (
            self.parsed_version,
            self.precedence,
            self.key,
            _remove_md5_fragment(self.location),
            self.py_version or '',
            self.platform or '',
        )
    def __hash__(self):
        return hash(self.hashcmp)
    def __lt__(self, other):
        return self.hashcmp < other.hashcmp
    def __le__(self, other):
        return self.hashcmp <= other.hashcmp
    def __gt__(self, other):
        return self.hashcmp > other.hashcmp
    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # It's not a Distribution, so they are not equal
            return False
        return self.hashcmp == other.hashcmp
    def __ne__(self, other):
        return not self == other
    # These properties have to be lazy so that we don't have to load any
    # metadata until/unless it's actually needed. (i.e., some distributions
    # may not know their name or version without loading PKG-INFO)
    @property
    def key(self):
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key
    @property
    def parsed_version(self):
        if not hasattr(self, "_parsed_version"):
            self._parsed_version = parse_version(self.version)
        return self._parsed_version
    def _warn_legacy_version(self):
        LV = packaging.version.LegacyVersion
        is_legacy = isinstance(self._parsed_version, LV)
        if not is_legacy:
            return
        # While an empty version is technically a legacy version and
        # is not a valid PEP 440 version, it's also unlikely to
        # actually come from someone and instead it is more likely that
        # it comes from setuptools attempting to parse a filename and
        # including it in the list. So for that we'll gate this warning
        # on if the version is anything at all or not.
        if not self.version:
            return
        tmpl = textwrap.dedent("""
            '{project_name} ({version})' is being parsed as a legacy,
            non PEP 440,
            version. You may find odd behavior and sort order.
            In particular it will be sorted as less than 0.0. It
            is recommended to migrate to PEP 440 compatible
            versions.
            """).strip().replace('\n', ' ')
        warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
    @property
    def version(self):
        try:
            return self._version
        except AttributeError:
            # lazily read the version out of the metadata file
            version = _version_from_file(self._get_metadata(self.PKG_INFO))
            if version is None:
                tmpl = "Missing 'Version:' header and/or %s file"
                raise ValueError(tmpl % self.PKG_INFO, self)
            return version
    @property
    def _dep_map(self):
        """
        A map of extra to its list of (direct) requirements
        for this distribution, including the null extra.
        """
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._filter_extras(self._build_dep_map())
        return self.__dep_map
    @staticmethod
    def _filter_extras(dm):
        """
        Given a mapping of extras to dependencies, strip off
        environment markers and filter out any dependencies
        not matching the markers.
        """
        for extra in list(filter(None, dm)):
            new_extra = extra
            reqs = dm.pop(extra)
            # split "extra:marker" section names into extra and marker parts
            new_extra, _, marker = extra.partition(':')
            fails_marker = marker and (
                invalid_marker(marker)
                or not evaluate_marker(marker)
            )
            if fails_marker:
                reqs = []
            new_extra = safe_extra(new_extra) or None
            dm.setdefault(new_extra, []).extend(reqs)
        return dm
    def _build_dep_map(self):
        # collect requirements per extra/section from egg-style metadata
        dm = {}
        for name in 'requires.txt', 'depends.txt':
            for extra, reqs in split_sections(self._get_metadata(name)):
                dm.setdefault(extra, []).extend(parse_requirements(reqs))
        return dm
    def requires(self, extras=()):
        """List of Requirements needed for this distro if `extras` are used"""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None, ()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra(
                    "%s has no such extra feature %r" % (self, ext)
                )
        return deps
    def _get_metadata(self, name):
        # yield metadata lines, or nothing when the file is absent
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line
    def activate(self, path=None, replace=False):
        """Ensure distribution is importable on `path` (default=sys.path)"""
        if path is None:
            path = sys.path
        self.insert_on(path, replace=replace)
        if path is sys.path:
            fixup_namespace_packages(self.location)
            for pkg in self._get_metadata('namespace_packages.txt'):
                if pkg in sys.modules:
                    declare_namespace(pkg)
    def egg_name(self):
        """Return what this distribution's standard .egg filename should be"""
        filename = "%s-%s-py%s" % (
            to_filename(self.project_name), to_filename(self.version),
            self.py_version or PY_MAJOR
        )
        if self.platform:
            filename += '-' + self.platform
        return filename
    def __repr__(self):
        if self.location:
            return "%s (%s)" % (self, self.location)
        else:
            return str(self)
    def __str__(self):
        try:
            version = getattr(self, 'version', None)
        except ValueError:
            version = None
        version = version or "[unknown version]"
        return "%s %s" % (self.project_name, version)
    def __getattr__(self, attr):
        """Delegate all unrecognized public attributes to .metadata provider"""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)
    @classmethod
    def from_filename(cls, filename, metadata=None, **kw):
        # convenience wrapper: normalize the path, then delegate
        return cls.from_location(
            _normalize_cached(filename), os.path.basename(filename), metadata,
            **kw
        )
    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly"""
        if isinstance(self.parsed_version, packaging.version.Version):
            spec = "%s==%s" % (self.project_name, self.parsed_version)
        else:
            # legacy versions need the arbitrary-equality operator
            spec = "%s===%s" % (self.project_name, self.parsed_version)
        return Requirement.parse(spec)
    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError"""
        ep = self.get_entry_info(group, name)
        if ep is None:
            raise ImportError("Entry point %r not found" % ((group, name),))
        return ep.load()
    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        try:
            ep_map = self._ep_map
        except AttributeError:
            # parse entry_points.txt once and cache the result
            ep_map = self._ep_map = EntryPoint.parse_map(
                self._get_metadata('entry_points.txt'), self
            )
        if group is not None:
            return ep_map.get(group, {})
        return ep_map
    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return self.get_entry_map(group).get(name)
    def insert_on(self, path, loc=None, replace=False):
        """Ensure self.location is on path
        If replace=False (default):
            - If location is already in path anywhere, do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent.
              - Else: add to the end of path.
        If replace=True:
            - If location is already on path anywhere (not eggs)
              or higher priority than its parent (eggs)
              do nothing.
            - Else:
              - If it's an egg and its parent directory is on path,
                insert just ahead of the parent,
                removing any lower-priority entries.
              - Else: add it to the front of path.
        """
        loc = loc or self.location
        if not loc:
            return
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        # npath mirrors path with normalized entries for comparison
        npath = [(p and _normalize_cached(p) or p) for p in path]
        for p, item in enumerate(npath):
            if item == nloc:
                if replace:
                    break
                else:
                    # don't modify path (even removing duplicates) if
                    # found and not replace
                    return
            elif item == bdir and self.precedence == EGG_DIST:
                # if it's an .egg, give it precedence over its directory
                # UNLESS it's already been added to sys.path and replace=False
                if (not replace) and nloc in npath[p:]:
                    return
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            if path is sys.path:
                self.check_version_conflict()
            if replace:
                path.insert(0, loc)
            else:
                path.append(loc)
            return
        # p is the spot where we found or inserted loc; now remove duplicates
        while True:
            try:
                np = npath.index(nloc, p + 1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                # ha!
                p = np
        return
    def check_version_conflict(self):
        # warn when a top-level module of this dist was already imported
        # from somewhere other than this dist's location
        if self.key == 'setuptools':
            # ignore the inevitable setuptools self-conflicts :(
            return
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules or modname in nsp
                    or modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or
                       fn.startswith(self.location)):
                continue
            issue_warning(
                "Module %s was already imported from %s, but %s is being added"
                " to sys.path" % (modname, fn, self.location),
            )
    def has_version(self):
        # True when a usable version is available; warns for unbuilt eggs
        try:
            self.version
        except ValueError:
            issue_warning("Unbuilt egg for " + repr(self))
            return False
        return True
    def clone(self, **kw):
        """Copy this distribution, substituting in any changed keyword args"""
        names = 'project_name version py_version platform location precedence'
        for attr in names.split():
            kw.setdefault(attr, getattr(self, attr, None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)
    @property
    def extras(self):
        # names of all non-null extras this distribution declares
        return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
    def _reload_version(self):
        """
        Prefer the version recorded inside the metadata file over the one
        parsed from the filename. distutils-installed packages (e.g. numpy
        or scipy) used an old ``safe_version`` that mangled version numbers
        when building filenames (1.11.0.dev0+2329eae becomes
        1.11.0.dev0_2329eae), so the filename-derived version may not parse
        properly downstream; reading PKG-INFO sidesteps that.
        """
        metadata_version = _version_from_file(
            self._get_metadata(self.PKG_INFO))
        if metadata_version:
            self._version = metadata_version
        return self
class DistInfoDistribution(Distribution):
    """
    Wrap an actual or potential sys.path entry
    w/metadata, .dist-info style.
    """
    # dist-info stores its headers in METADATA rather than PKG-INFO
    PKG_INFO = 'METADATA'
    # matches version numbers inside parenthesized specifier lists
    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
    @property
    def _parsed_pkg_info(self):
        """Parse and cache metadata"""
        try:
            return self._pkg_info
        except AttributeError:
            # METADATA is RFC 822-style; parse it as an email message
            metadata = self.get_metadata(self.PKG_INFO)
            self._pkg_info = email.parser.Parser().parsestr(metadata)
            return self._pkg_info
    @property
    def _dep_map(self):
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._compute_dependencies()
            return self.__dep_map
    def _compute_dependencies(self):
        """Recompute this distribution's dependencies."""
        dm = self.__dep_map = {None: []}
        reqs = []
        # Including any condition expressions
        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
            reqs.extend(parse_requirements(req))
        def reqs_for_extra(extra):
            # requirements that are unconditional or whose marker matches
            # the given extra
            for req in reqs:
                if not req.marker or req.marker.evaluate({'extra': extra}):
                    yield req
        common = frozenset(reqs_for_extra(None))
        dm[None].extend(common)
        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
            s_extra = safe_extra(extra.strip())
            # per-extra deps, excluding those already unconditional
            dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
        return dm
# Map metadata extensions to the Distribution subclass that handles them.
_distributionImpl = {
    '.egg': Distribution,
    '.egg-info': EggInfoDistribution,
    '.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
    """Emit a warning attributed to the first caller outside this module."""
    level = 1
    g = globals()
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(level).f_globals is g:
            level += 1
    except ValueError:
        pass
    warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
    """Raised when a requirement string cannot be parsed."""
    def __str__(self):
        return ' '.join(self.args)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`
    `strs` must be a string, or a (possibly-nested) iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    for line in lines:
        # Drop comments -- a hash without a space may be in a URL.
        if ' #' in line:
            line = line[:line.find(' #')]
        # If there is a line continuation, drop it, and append the next line.
        if line.endswith('\\'):
            # Strip only the backslash (plus surrounding whitespace). The
            # previous ``line[:-2]`` also removed the character preceding
            # the backslash, corrupting continuations written without a
            # space before the backslash (e.g. "foo\" yielded "fo").
            line = line[:-1].strip()
            try:
                line += next(lines)
            except StopIteration:
                return
        yield Requirement(line)
class Requirement(packaging.requirements.Requirement):
    def __init__(self, requirement_string):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        try:
            super(Requirement, self).__init__(requirement_string)
        except packaging.requirements.InvalidRequirement as e:
            raise RequirementParseError(str(e))
        self.unsafe_name = self.name
        project_name = safe_name(self.name)
        self.project_name, self.key = project_name, project_name.lower()
        # legacy-style list of (operator, version) pairs
        self.specs = [
            (spec.operator, spec.version) for spec in self.specifier]
        self.extras = tuple(map(safe_extra, self.extras))
        # precomputed tuple used for equality and hashing
        self.hashCmp = (
            self.key,
            self.specifier,
            frozenset(self.extras),
            str(self.marker) if self.marker else None,
        )
        self.__hash = hash(self.hashCmp)
    def __eq__(self, other):
        return (
            isinstance(other, Requirement) and
            self.hashCmp == other.hashCmp
        )
    def __ne__(self, other):
        return not self == other
    def __contains__(self, item):
        # a Distribution satisfies the requirement when its project matches
        # and its version meets the specifier
        if isinstance(item, Distribution):
            if item.key != self.key:
                return False
            item = item.version
        # Allow prereleases always in order to match the previous behavior of
        # this method. In the future this should be smarter and follow PEP 440
        # more accurately.
        return self.specifier.contains(item, prereleases=True)
    def __hash__(self):
        return self.__hash
    def __repr__(self):
        return "Requirement.parse(%r)" % str(self)
    @staticmethod
    def parse(s):
        # exactly one requirement is expected; tuple unpacking enforces it
        req, = parse_requirements(s)
        return req
def _always_object(classes):
"""
Ensure object appears in the mro even
for old-style classes.
"""
if object not in classes:
return classes + (object,)
return classes
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`"""
    # walk the object's MRO, most specific first; object is guaranteed last
    mro = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
    for klass in mro:
        if klass in registry:
            return registry[klass]
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists"""
    dirname = os.path.dirname(path)
    # exist_ok makes this a no-op when the directory is already present
    py31compat.makedirs(dirname, exist_ok=True)
def _bypass_ensure_directory(path):
    """Sandbox-bypassing version of ensure_directory()"""
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(path)
    if dirname and filename and not isdir(dirname):
        # recursively create the ancestors first, then this directory
        _bypass_ensure_directory(dirname)
        mkdir(dirname, 0o755)
def split_sections(s):
    """Split a string or iterable thereof into (section, content) pairs
    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines. If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    section = None
    content = []
    for line in yield_lines(s):
        if not line.startswith("["):
            content.append(line)
            continue
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # Flush the previous segment before starting a new section.
        if section or content:
            yield section, content
        section = line[1:-1].strip()
        content = []
    # wrap up last segment
    yield section, content
def _mkstemp(*args, **kw):
    # tempfile.mkstemp calls os.open, which the sandbox may have replaced;
    # swap in the saved low-level os_open for the duration of the call.
    old_open = os.open
    try:
        # temporarily bypass sandboxing
        os.open = os_open
        return tempfile.mkstemp(*args, **kw)
    finally:
        # and then put it back
        os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one. Tools that care about non-PEP 440 versions can re-enable it.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
    # Decorator: invoke ``f`` once immediately with the given args and return
    # it unchanged, so module-level setup runs at definition time.
    f(*args, **kwargs)
    return f
@_call_aside
def _initialize(g=globals()):
    "Set up global resource manager (deliberately not state-saved)"
    manager = ResourceManager()
    g['_manager'] = manager
    # Re-export every public ResourceManager method at module level.
    public = (name for name in dir(manager) if not name.startswith('_'))
    g.update((name, getattr(manager, name)) for name in public)
@_call_aside
def _initialize_master_working_set():
    """
    Prepare the master working set and make the ``require()``
    API available.
    This function has explicit effects on the global state
    of pkg_resources. It is intended to be invoked once at
    the initialization of this module.
    Invocation by other packages is unsupported and done
    at their own risk.
    """
    working_set = WorkingSet._build_master()
    _declare_state('object', working_set=working_set)
    # Bound methods exported as the module-level API (see the final
    # globals().update(locals()) below).
    require = working_set.require
    iter_entry_points = working_set.iter_entry_points
    add_activation_listener = working_set.subscribe
    run_script = working_set.run_script
    # backward compatibility
    run_main = run_script
    # Activate all distributions already on sys.path with replace=False and
    # ensure that all distributions added to the working set in the future
    # (e.g. by calling ``require()``) will get activated as well,
    # with higher priority (replace=True).
    # tuple() is used only to consume the generator for its side effects.
    tuple(
        dist.activate(replace=False)
        for dist in working_set
    )
    add_activation_listener(
        lambda dist: dist.activate(replace=True),
        existing=False,
    )
    working_set.entries = []
    # match order
    list(map(working_set.add_entry, sys.path))
    # Publish every local defined above as a pkg_resources module attribute.
    globals().update(locals())
|
flgiordano/netcash | refs/heads/master | +/google-cloud-sdk/lib/googlecloudsdk/command_lib/config/completers.py | 1 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Argcomplete completers for various config related things."""
from googlecloudsdk.core import named_configs
from googlecloudsdk.core import properties
def PropertiesCompleter(prefix, **unused_kwargs):
  """An argcomplete completer for property and section names."""
  all_sections = properties.VALUES.AllSections()
  options = []
  if '/' in prefix:
    # A section was specified: only complete properties under that section.
    section, _, prop_prefix = prefix.partition('/')
    if section in all_sections:
      props = properties.VALUES.Section(section).AllProperties()
      options += [section + '/' + p for p in props
                  if p.startswith(prop_prefix)]
  else:
    # No section yet: offer matching sections plus properties from the
    # default section.
    options += [s + '/' for s in all_sections if s.startswith(prefix)]
    default_section = properties.VALUES.default_section.name
    props = properties.VALUES.Section(default_section).AllProperties()
    options += [p for p in props if p.startswith(prefix)]
  return options
def NamedConfigCompleter(prefix, **unused_kwargs):
  """An argcomplete completer for existing named configuration names."""
  names = (c.name for c in named_configs.ListNamedConfigs(log_warnings=False))
  return [name for name in names if name.startswith(prefix)]
|
TeamTwisted/external_chromium_org | refs/heads/opti-5.1 | tools/telemetry/telemetry/timeline/counter.py | 33 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.event_container as event_container
# Doesn't inherit from TimelineEvent because it's only a temporary wrapper of a
# counter sample into an event. During stable operation, the samples are stored
# in a dense array of values rather than in the long form used by an Event.
class CounterSample(object):
  """Event-like wrapper around one sample of a Counter.
  Holds only (counter, index); values are read from the counter's dense
  arrays on demand.
  """
  def __init__(self, counter, sample_index):
    self._counter = counter
    # NOTE: Counter.IterEventsInThisContainer mutates _sample_index directly
    # to reuse one instance, so the attribute name is part of the contract.
    self._sample_index = sample_index
  @property
  def name(self):
    # Counter samples are anonymous.
    return None
  @property
  def start(self):
    return self._counter.timestamps[self._sample_index]
  @start.setter
  def start(self, start):
    self._counter.timestamps[self._sample_index] = start
  @property
  def duration(self):
    # Samples are instantaneous.
    return 0
  @property
  def end(self):
    return self.start
  @property
  def thread_start(self):
    return None
  @property
  def thread_duration(self):
    return None
  @property
  def thread_end(self):
    # Counters carry no per-thread timing information.
    return None
class Counter(event_container.TimelineEventContainer):
  """ Stores all the samples for a given counter.
  """
  def __init__(self, parent, category, name):
    super(Counter, self).__init__(name, parent)
    self.category = category
    self.full_name = category + '.' + name
    # samples is stored row-major: sample i of series j lives at
    # samples[i * num_series + j].
    self.samples = []
    self.timestamps = []
    self.series_names = []
    self.totals = []
    self.max_total = 0
  def IterChildContainers(self):
    # Counters have no child containers; the unreachable yield turns this
    # into an (empty) generator.
    return
    yield # pylint: disable=W0101
  def IterEventsInThisContainer(self, event_type_predicate, event_predicate):
    if not event_type_predicate(CounterSample) or not self.timestamps:
      return
    # Pass event_predicate a reused CounterSample instance to avoid
    # creating a ton of garbage for rejected samples.
    test_sample = CounterSample(self, 0)
    for i in xrange(len(self.timestamps)):
      test_sample._sample_index = i
      if event_predicate(test_sample):
        yield CounterSample(self, i)
  @property
  def num_series(self):
    return len(self.series_names)
  @property
  def num_samples(self):
    return len(self.timestamps)
  def FinalizeImport(self):
    # Recomputes totals/max_total from the raw samples; must be called after
    # samples, timestamps and series_names are fully populated.
    if self.num_series * self.num_samples != len(self.samples):
      raise ValueError(
          'Length of samples must be a multiple of length of timestamps.')
    self.totals = []
    self.max_total = 0
    if not len(self.samples):
      return
    max_total = None
    for i in xrange(self.num_samples):
      total = 0
      for j in xrange(self.num_series):
        total += self.samples[i * self.num_series + j]
      self.totals.append(total)
      if max_total is None or total > max_total:
        max_total = total
    self.max_total = max_total
|
40223125/2015cd_midterm | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/xml/sax/_exceptions.py | 625 | """Different kinds of SAX Exceptions"""
#in brython the 4 lines below causes an $globals['Exception'] error
#import sys
#if sys.platform[:4] == "java":
# from java.lang import Exception
#del sys
# ===== SAXEXCEPTION =====
class SAXException(Exception):
    """Base class wrapping an XML error or warning.
    Carries basic error/warning information from either the XML parser or
    the application, plus an optional embedded exception. Subclass it to
    add functionality or localization. Handlers in the ErrorHandler
    interface receive a SAXException but are not required to raise it;
    they may simply read the information it carries."""
    def __init__(self, msg, exception=None):
        """Creates an exception. The message is required, but the exception
        is optional."""
        Exception.__init__(self, msg)
        self._msg = msg
        self._exception = exception
    def getMessage(self):
        "Return a message for this exception."
        return self._msg
    def getException(self):
        "Return the embedded exception, or None if there was none."
        return self._exception
    def __str__(self):
        "Create a string representation of the exception."
        return self._msg
    def __getitem__(self, ix):
        """Avoids weird error messages if someone does exception[ix] by
        mistake, since Exception has __getitem__ defined."""
        raise AttributeError("__getitem__")
# ===== SAXPARSEEXCEPTION =====
class SAXParseException(SAXException):
    """An XML parse error or warning with document-location information.
    The application receives this in the ErrorHandler interface but is not
    required to raise it; it may simply read the location data and act.
    As a SAXException subclass it can also wrap another exception."""
    def __init__(self, msg, exception, locator):
        "Creates the exception. The exception parameter is allowed to be None."
        SAXException.__init__(self, msg, exception)
        self._locator = locator
        # Cache the location data at construction time: the objects the
        # locator reads from may already be deleted when this exception is
        # finally caught.
        self._systemId = locator.getSystemId()
        self._colnum = locator.getColumnNumber()
        self._linenum = locator.getLineNumber()
    def getColumnNumber(self):
        """The column number of the end of the text where the exception
        occurred."""
        return self._colnum
    def getLineNumber(self):
        "The line number of the end of the text where the exception occurred."
        return self._linenum
    def getPublicId(self):
        "Get the public identifier of the entity where the exception occurred."
        return self._locator.getPublicId()
    def getSystemId(self):
        "Get the system identifier of the entity where the exception occurred."
        return self._systemId
    def __str__(self):
        "Create a string representation of the exception."
        sysid = self.getSystemId()
        linenum = self.getLineNumber()
        colnum = self.getColumnNumber()
        return "%s:%s:%s: %s" % (
            "<unknown>" if sysid is None else sysid,
            "?" if linenum is None else linenum,
            "?" if colnum is None else colnum,
            self._msg)
# ===== SAXNOTRECOGNIZEDEXCEPTION =====
# Raised e.g. from XMLReader feature/property accessors for unknown names.
class SAXNotRecognizedException(SAXException):
    """Exception class for an unrecognized identifier.
    An XMLReader will raise this exception when it is confronted with an
    unrecognized feature or property. SAX applications and extensions may
    use this class for similar purposes."""
    pass
# ===== SAXNOTSUPPORTEDEXCEPTION =====
# Identifier is recognized but the requested operation cannot be performed.
class SAXNotSupportedException(SAXException):
    """Exception class for an unsupported operation.
    An XMLReader will raise this exception when a service it cannot
    perform is requested (specifically setting a state or value). SAX
    applications and extensions may use this class for similar
    purposes."""
    pass
# ===== SAXREADERNOTAVAILABLE =====
# Specialization of SAXNotSupportedException for missing parser drivers.
class SAXReaderNotAvailable(SAXNotSupportedException):
    """Exception class for a missing driver.
    An XMLReader module (driver) should raise this exception when it
    is first imported, e.g. when a support module cannot be imported.
    It also may be raised during parsing, e.g. if executing an external
    program is not permitted."""
    pass
|
seriouscamp/frbb | refs/heads/master | web/manage.py | 1 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module (unless already set),
    # then hand off to the standard command-line management utility.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "flipside.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
75651/kbengine_cloud | refs/heads/master | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/html5lib/treewalkers/pulldom.py | 1729 | from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
    """Tree walker that turns an xml.dom.pulldom event stream into
    html5lib tokens."""
    def __iter__(self):
        # Runs one event behind the stream so each event can see its
        # successor (needed to decide whether a void element is empty).
        # ignore_until suppresses events nested inside an emitted EmptyTag
        # until its matching END_ELEMENT node comes by.
        ignore_until = None
        previous = None
        for event in self.tree:
            if previous is not None and \
                    (ignore_until is None or previous[1] is ignore_until):
                if previous[1] is ignore_until:
                    ignore_until = None
                for token in self.tokens(previous, event):
                    yield token
                    if token["type"] == "EmptyTag":
                        ignore_until = previous[1]
            previous = event
        if ignore_until is None or previous[1] is ignore_until:
            for token in self.tokens(previous, None):
                yield token
        elif ignore_until is not None:
            raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
    def tokens(self, event, next):
        # Translate one pulldom event into html5lib tokens. ``next`` is the
        # following event (or None); for void elements it tells whether the
        # element is immediately closed.
        type, node = event
        if type == START_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            attrs = {}
            for attr in list(node.attributes.keys()):
                attr = node.getAttributeNode(attr)
                attrs[(attr.namespaceURI, attr.localName)] = attr.value
            if name in voidElements:
                for token in self.emptyTag(namespace,
                                           name,
                                           attrs,
                                           not next or next[1] is not node):
                    yield token
            else:
                yield self.startTag(namespace, name, attrs)
        elif type == END_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            if name not in voidElements:
                yield self.endTag(namespace, name)
        elif type == COMMENT:
            yield self.comment(node.nodeValue)
        elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
            for token in self.text(node.nodeValue):
                yield token
        else:
            yield self.unknown(type)
|
mcking49/apache-flask | refs/heads/master | Python/Lib/test/test_getargs2.py | 35 | import unittest
from test import test_support
# Skip this test if the _testcapi module isn't available.
test_support.import_module('_testcapi')
from _testcapi import getargs_keywords
import warnings
"""
> How about the following counterproposal. This also changes some of
> the other format codes to be a little more regular.
>
> Code C type Range check
>
> b unsigned char 0..UCHAR_MAX
> h signed short SHRT_MIN..SHRT_MAX
> B unsigned char none **
> H unsigned short none **
> k * unsigned long none
> I * unsigned int 0..UINT_MAX
> i int INT_MIN..INT_MAX
> l long LONG_MIN..LONG_MAX
> K * unsigned long long none
> L long long LLONG_MIN..LLONG_MAX
> Notes:
>
> * New format codes.
>
> ** Changed from previous "range-and-a-half" to "none"; the
> range-and-a-half checking wasn't particularly useful.
Plus a C API or two, e.g. PyInt_AsLongMask() ->
unsigned long and PyInt_AsLongLongMask() -> unsigned
long long (if that exists).
"""
# VERY_LARGE exceeds every C integer type and is used to exercise
# overflow/masking behavior of the format codes below.
LARGE = 0x7FFFFFFF
VERY_LARGE = 0xFF0000121212121212121242L
from _testcapi import UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, INT_MAX, \
    INT_MIN, LONG_MIN, LONG_MAX, PY_SSIZE_T_MIN, PY_SSIZE_T_MAX, \
    SHRT_MIN, SHRT_MAX
# getargs_L/getargs_K only exist when the interpreter was built with
# PY_LONG_LONG support; record availability for the skip decorator below.
try:
    from _testcapi import getargs_L, getargs_K
except ImportError:
    _PY_LONG_LONG_available = False
else:
    _PY_LONG_LONG_available = True
# fake, they are not defined in Python's header files
LLONG_MAX = 2**63-1
LLONG_MIN = -2**63
ULLONG_MAX = 2**64-1
# Old-style class whose __int__ returns a Python 2 long; used to test how
# each format code converts objects via __int__.
class Long:
    def __int__(self):
        return 99L
# Old-style class whose __int__ returns a plain int (cf. Long above).
class Int:
    def __int__(self):
        return 99
class Unsigned_TestCase(unittest.TestCase):
    """Tests for the unsigned PyArg_Parse* format codes (b, B, H, I, k)."""
    def test_b(self):
        from _testcapi import getargs_b
        # b returns 'unsigned char', and does range checking (0 ... UCHAR_MAX)
        self.assertRaises(TypeError, getargs_b, 3.14)
        self.assertEqual(99, getargs_b(Long()))
        self.assertEqual(99, getargs_b(Int()))
        self.assertRaises(OverflowError, getargs_b, -1)
        self.assertEqual(0, getargs_b(0))
        self.assertEqual(UCHAR_MAX, getargs_b(UCHAR_MAX))
        self.assertRaises(OverflowError, getargs_b, UCHAR_MAX + 1)
        self.assertEqual(42, getargs_b(42))
        self.assertEqual(42, getargs_b(42L))
        self.assertRaises(OverflowError, getargs_b, VERY_LARGE)
    def test_B(self):
        from _testcapi import getargs_B
        # B returns 'unsigned char', no range checking
        self.assertRaises(TypeError, getargs_B, 3.14)
        self.assertEqual(99, getargs_B(Long()))
        self.assertEqual(99, getargs_B(Int()))
        self.assertEqual(UCHAR_MAX, getargs_B(-1))
        self.assertEqual(UCHAR_MAX, getargs_B(-1L))
        self.assertEqual(0, getargs_B(0))
        self.assertEqual(UCHAR_MAX, getargs_B(UCHAR_MAX))
        self.assertEqual(0, getargs_B(UCHAR_MAX+1))
        self.assertEqual(42, getargs_B(42))
        self.assertEqual(42, getargs_B(42L))
        self.assertEqual(UCHAR_MAX & VERY_LARGE, getargs_B(VERY_LARGE))
    def test_H(self):
        from _testcapi import getargs_H
        # H returns 'unsigned short', no range checking
        self.assertRaises(TypeError, getargs_H, 3.14)
        self.assertEqual(99, getargs_H(Long()))
        self.assertEqual(99, getargs_H(Int()))
        self.assertEqual(USHRT_MAX, getargs_H(-1))
        self.assertEqual(0, getargs_H(0))
        self.assertEqual(USHRT_MAX, getargs_H(USHRT_MAX))
        self.assertEqual(0, getargs_H(USHRT_MAX+1))
        self.assertEqual(42, getargs_H(42))
        self.assertEqual(42, getargs_H(42L))
        self.assertEqual(VERY_LARGE & USHRT_MAX, getargs_H(VERY_LARGE))
    def test_I(self):
        from _testcapi import getargs_I
        # I returns 'unsigned int', no range checking
        self.assertRaises(TypeError, getargs_I, 3.14)
        self.assertEqual(99, getargs_I(Long()))
        self.assertEqual(99, getargs_I(Int()))
        self.assertEqual(UINT_MAX, getargs_I(-1))
        self.assertEqual(0, getargs_I(0))
        self.assertEqual(UINT_MAX, getargs_I(UINT_MAX))
        self.assertEqual(0, getargs_I(UINT_MAX+1))
        self.assertEqual(42, getargs_I(42))
        self.assertEqual(42, getargs_I(42L))
        self.assertEqual(VERY_LARGE & UINT_MAX, getargs_I(VERY_LARGE))
    def test_k(self):
        from _testcapi import getargs_k
        # k returns 'unsigned long', no range checking
        # it does not accept float, or instances with __int__
        self.assertRaises(TypeError, getargs_k, 3.14)
        self.assertRaises(TypeError, getargs_k, Long())
        self.assertRaises(TypeError, getargs_k, Int())
        self.assertEqual(ULONG_MAX, getargs_k(-1))
        self.assertEqual(0, getargs_k(0))
        self.assertEqual(ULONG_MAX, getargs_k(ULONG_MAX))
        self.assertEqual(0, getargs_k(ULONG_MAX+1))
        self.assertEqual(42, getargs_k(42))
        self.assertEqual(42, getargs_k(42L))
        self.assertEqual(VERY_LARGE & ULONG_MAX, getargs_k(VERY_LARGE))
class Signed_TestCase(unittest.TestCase):
    """Tests for the signed PyArg_Parse* format codes (h, i, l, n)."""
    def test_h(self):
        from _testcapi import getargs_h
        # h returns 'short', and does range checking (SHRT_MIN ... SHRT_MAX)
        self.assertRaises(TypeError, getargs_h, 3.14)
        self.assertEqual(99, getargs_h(Long()))
        self.assertEqual(99, getargs_h(Int()))
        self.assertRaises(OverflowError, getargs_h, SHRT_MIN-1)
        self.assertEqual(SHRT_MIN, getargs_h(SHRT_MIN))
        self.assertEqual(SHRT_MAX, getargs_h(SHRT_MAX))
        self.assertRaises(OverflowError, getargs_h, SHRT_MAX+1)
        self.assertEqual(42, getargs_h(42))
        self.assertEqual(42, getargs_h(42L))
        self.assertRaises(OverflowError, getargs_h, VERY_LARGE)
    def test_i(self):
        from _testcapi import getargs_i
        # i returns 'int', and does range checking (INT_MIN ... INT_MAX)
        self.assertRaises(TypeError, getargs_i, 3.14)
        self.assertEqual(99, getargs_i(Long()))
        self.assertEqual(99, getargs_i(Int()))
        self.assertRaises(OverflowError, getargs_i, INT_MIN-1)
        self.assertEqual(INT_MIN, getargs_i(INT_MIN))
        self.assertEqual(INT_MAX, getargs_i(INT_MAX))
        self.assertRaises(OverflowError, getargs_i, INT_MAX+1)
        self.assertEqual(42, getargs_i(42))
        self.assertEqual(42, getargs_i(42L))
        self.assertRaises(OverflowError, getargs_i, VERY_LARGE)
    def test_l(self):
        from _testcapi import getargs_l
        # l returns 'long', and does range checking (LONG_MIN ... LONG_MAX)
        self.assertRaises(TypeError, getargs_l, 3.14)
        self.assertEqual(99, getargs_l(Long()))
        self.assertEqual(99, getargs_l(Int()))
        self.assertRaises(OverflowError, getargs_l, LONG_MIN-1)
        self.assertEqual(LONG_MIN, getargs_l(LONG_MIN))
        self.assertEqual(LONG_MAX, getargs_l(LONG_MAX))
        self.assertRaises(OverflowError, getargs_l, LONG_MAX+1)
        self.assertEqual(42, getargs_l(42))
        self.assertEqual(42, getargs_l(42L))
        self.assertRaises(OverflowError, getargs_l, VERY_LARGE)
    def test_n(self):
        from _testcapi import getargs_n
        # n returns 'Py_ssize_t', and does range checking
        # (PY_SSIZE_T_MIN ... PY_SSIZE_T_MAX)
        self.assertRaises(TypeError, getargs_n, 3.14)
        self.assertEqual(99, getargs_n(Long()))
        self.assertEqual(99, getargs_n(Int()))
        self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MIN-1)
        self.assertEqual(PY_SSIZE_T_MIN, getargs_n(PY_SSIZE_T_MIN))
        self.assertEqual(PY_SSIZE_T_MAX, getargs_n(PY_SSIZE_T_MAX))
        self.assertRaises(OverflowError, getargs_n, PY_SSIZE_T_MAX+1)
        self.assertEqual(42, getargs_n(42))
        self.assertEqual(42, getargs_n(42L))
        self.assertRaises(OverflowError, getargs_n, VERY_LARGE)
@unittest.skipUnless(_PY_LONG_LONG_available, 'PY_LONG_LONG not available')
class LongLong_TestCase(unittest.TestCase):
    """Tests for the 64-bit PyArg_Parse* format codes (L, K)."""
    def test_L(self):
        from _testcapi import getargs_L
        # L returns 'long long', and does range checking (LLONG_MIN
        # ... LLONG_MAX)
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                category=DeprecationWarning,
                message=".*integer argument expected, got float",
                module=__name__)
            self.assertEqual(3, getargs_L(3.14))
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "error",
                category=DeprecationWarning,
                message=".*integer argument expected, got float",
                module="unittest")
            self.assertRaises(DeprecationWarning, getargs_L, 3.14)
        self.assertRaises(TypeError, getargs_L, "Hello")
        self.assertEqual(99, getargs_L(Long()))
        self.assertEqual(99, getargs_L(Int()))
        self.assertRaises(OverflowError, getargs_L, LLONG_MIN-1)
        self.assertEqual(LLONG_MIN, getargs_L(LLONG_MIN))
        self.assertEqual(LLONG_MAX, getargs_L(LLONG_MAX))
        self.assertRaises(OverflowError, getargs_L, LLONG_MAX+1)
        self.assertEqual(42, getargs_L(42))
        self.assertEqual(42, getargs_L(42L))
        self.assertRaises(OverflowError, getargs_L, VERY_LARGE)
    def test_K(self):
        from _testcapi import getargs_K
        # K return 'unsigned long long', no range checking
        self.assertRaises(TypeError, getargs_K, 3.14)
        self.assertRaises(TypeError, getargs_K, Long())
        self.assertRaises(TypeError, getargs_K, Int())
        self.assertEqual(ULLONG_MAX, getargs_K(ULLONG_MAX))
        self.assertEqual(0, getargs_K(0))
        self.assertEqual(0, getargs_K(ULLONG_MAX+1))
        self.assertEqual(42, getargs_K(42))
        self.assertEqual(42, getargs_K(42L))
        self.assertEqual(VERY_LARGE & ULLONG_MAX, getargs_K(VERY_LARGE))
class Tuple_TestCase(unittest.TestCase):
    """Tests for parsing nested tuple arguments."""
    def test_tuple(self):
        from _testcapi import getargs_tuple
        ret = getargs_tuple(1, (2, 3))
        self.assertEqual(ret, (1,2,3))
        # make sure invalid tuple arguments are handled correctly
        class seq:
            def __len__(self):
                return 2
            def __getitem__(self, n):
                raise ValueError
        self.assertRaises(TypeError, getargs_tuple, 1, seq())
class Keywords_TestCase(unittest.TestCase):
    """Tests for PyArg_ParseTupleAndKeywords via getargs_keywords."""
    def test_positional_args(self):
        # using all positional args
        self.assertEqual(
            getargs_keywords((1,2), 3, (4,(5,6)), (7,8,9), 10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
            )
    def test_mixed_args(self):
        # positional and keyword args
        self.assertEqual(
            getargs_keywords((1,2), 3, (4,(5,6)), arg4=(7,8,9), arg5=10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
            )
    def test_keyword_args(self):
        # all keywords
        self.assertEqual(
            getargs_keywords(arg1=(1,2), arg2=3, arg3=(4,(5,6)), arg4=(7,8,9), arg5=10),
            (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
            )
    def test_optional_args(self):
        # missing optional keyword args, skipping tuples
        self.assertEqual(
            getargs_keywords(arg1=(1,2), arg2=3, arg5=10),
            (1, 2, 3, -1, -1, -1, -1, -1, -1, 10)
            )
    def test_required_args(self):
        # required arg missing
        try:
            getargs_keywords(arg1=(1,2))
        except TypeError, err:
            self.assertEqual(str(err), "Required argument 'arg2' (pos 2) not found")
        else:
            self.fail('TypeError should have been raised')
    def test_too_many_args(self):
        try:
            getargs_keywords((1,2),3,(4,(5,6)),(7,8,9),10,111)
        except TypeError, err:
            self.assertEqual(str(err), "function takes at most 5 arguments (6 given)")
        else:
            self.fail('TypeError should have been raised')
    def test_invalid_keyword(self):
        # extraneous keyword arg
        try:
            getargs_keywords((1,2),3,arg5=10,arg666=666)
        except TypeError, err:
            self.assertEqual(str(err), "'arg666' is an invalid keyword argument for this function")
        else:
            self.fail('TypeError should have been raised')
def test_main():
    # Run every C-API argument-parsing test case through regrtest's helper.
    tests = [Signed_TestCase, Unsigned_TestCase, LongLong_TestCase,
             Tuple_TestCase, Keywords_TestCase]
    test_support.run_unittest(*tests)
if __name__ == "__main__":
    test_main()
|
cainmatt/django | refs/heads/master | django/conf/locale/es_AR/formats.py | 504 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j N Y'
TIME_FORMAT = r'H:i'
DATETIME_FORMAT = r'j N Y H:i'
YEAR_MONTH_FORMAT = r'F Y'
# The backslash-escaped characters render literally: e.g. "5 de enero".
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = r'd/m/Y'
SHORT_DATETIME_FORMAT = r'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # 0: Sunday, 1: Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d/%m/%Y', # '31/12/2009'
    '%d/%m/%y', # '31/12/09'
]
DATETIME_INPUT_FORMATS = [
    '%d/%m/%Y %H:%M:%S',
    '%d/%m/%Y %H:%M:%S.%f',
    '%d/%m/%Y %H:%M',
    '%d/%m/%y %H:%M:%S',
    '%d/%m/%y %H:%M:%S.%f',
    '%d/%m/%y %H:%M',
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# Digits are grouped in threes: 1.234.567,89
NUMBER_GROUPING = 3
|
GiggleLiu/poorman_nn | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
import os
import sys
descr = """Python module for low abstraction neural network."""
DISTNAME = 'poornn'
DESCRIPTION = descr
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
    LONG_DESCRIPTION = f.read()
# Fix: the original had trailing commas here, which made MAINTAINER and
# MAINTAINER_EMAIL 1-tuples ('Giggle Liu',) instead of strings, corrupting
# the generated package metadata.
MAINTAINER = 'Giggle Liu'
MAINTAINER_EMAIL = 'cacate0129@gmail.com'
URL = 'http://github.com/GiggleLiu/poorman_nn'
LICENSE = 'MIT'
DOWNLOAD_URL = URL
PACKAGE_NAME = 'poornn'
EXTRA_INFO = dict(
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        # Fix: 'License :: OSI Approved :: MIT' is not a valid trove
        # classifier; the registered name is 'MIT License'.
        'License :: OSI Approved :: MIT License',
        'Topic :: Scientific/Engineering',
        'Topic :: Software Development',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Operating System :: MacOS',
        'Programming Language :: Fortran',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
    ]
)
try:
    import setuptools  # If you want to enable 'python setup.py develop'
    EXTRA_INFO.update(dict(
        zip_safe=False,  # the package can run out of an .egg file
        include_package_data=True,
    ))
except ImportError:  # fix: was a bare except, which also swallowed ^C etc.
    print('setuptools module not found.')
    print("Install setuptools if you want to enable \
'python setup.py develop'.")
def configuration(parent_package='', top_path=None, package_name=DISTNAME):
    # Build the numpy.distutils Configuration for this distribution.
    # A stale MANIFEST can shadow MANIFEST.in, so remove it first.
    if os.path.exists('MANIFEST'):
        os.remove('MANIFEST')
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    # Avoid non-useful msg: "Ignoring attempt to set 'name' (from ... "
    config.set_options(
        ignore_setup_xxx_py=True,
        assume_default_configuration=True,
        delegate_options_to_subpackages=True,
        quiet=True
    )
    config.add_subpackage(PACKAGE_NAME)
    return config
def get_version():
    """Obtain the version number"""
    import imp
    version_path = os.path.join(PACKAGE_NAME, 'version.py')
    return imp.load_source('version', version_path).__version__
def setup_package():
    # Call the setup function
    metadata = dict(
        name=DISTNAME,
        maintainer=MAINTAINER,
        maintainer_email=MAINTAINER_EMAIL,
        description=DESCRIPTION,
        license=LICENSE,
        url=URL,
        download_url=DOWNLOAD_URL,
        long_description=LONG_DESCRIPTION,
        version=get_version(),
        install_requires=[
            'numpy',
            'scipy',
            'jinja2',
            # 'mpi4py', #recommended
            # 'mkl-service'
            # 'pygraphviz'
        ],
        # test_suite="nose.collector",
        **EXTRA_INFO
    )
    if (len(sys.argv) >= 2 and
            ('--help' in sys.argv[1:] or sys.argv[1]
             in ('--help-commands', 'egg_info', '--version', 'clean'))):
        # For these actions, NumPy is not required.
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup
        metadata['version'] = get_version()
    else:
        # Full build path: let numpy.distutils drive configuration (needed
        # for the Fortran extensions declared in the subpackage).
        metadata['configuration'] = configuration
        from numpy.distutils.core import setup
    setup(**metadata)
if __name__ == "__main__":
    setup_package()
|
juanalfonsopr/odoo | refs/heads/8.0 | addons/account_budget/tests/__init__.py | 327 | import test_theoreticalamount
|
RomanZWang/osf.io | refs/heads/develop | scripts/migration/migrate_profile_dates.py | 64 | """Changes existing profile education and employment dates from
YYYY-MM-DD formatted dates to "Month Year" format
Log:
Run on production by SL on 2014-10-14 at 19:11 EST. 269 users were migrated.
"""
import logging
from website.app import init_app
from website import models
from datetime import datetime
from tests.base import OsfTestCase
from tests.factories import UserFactory
from nose.tools import *
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def main():
    """Initialize the application, run the migration, and report the count."""
    init_app(routes=False)  # Set up storage backends
    migrated = migrate_dates()
    print('{n} dates migrated'.format(n=migrated))
def replace_date(user_field, key, month, year):
    """Replace the date stored at `key` in the dict `user_field` (either a
    'YYYY-MM-DD' string or a datetime) with separate `month`/`year` entries,
    removing `key` in place."""
    raw = user_field[key]
    date = raw if isinstance(raw, datetime) else datetime.strptime(raw, '%Y-%m-%d')
    user_field[month] = date.month
    user_field[year] = date.year
    del user_field[key]
def migrate_dates():
    """Convert job/school start and end dates for every user; return the
    number of users that were changed."""
    count = 0
    for user in models.User.find():
        changed = False
        # Jobs first, then schools, matching the original migration order.
        for entry in list(user.jobs) + list(user.schools):
            if entry.get('start', None):
                replace_date(entry, 'start', 'startMonth', 'startYear')
                changed = True
            if entry.get('end', None):
                replace_date(entry, 'end', 'endMonth', 'endYear')
                changed = True
        if changed:
            logger.info(user)
            count += 1
    logger.info('Process completed. {n} users affected'.format(n=count))
    return count
class TestMigrateDates(OsfTestCase):
    """Verify migrate_dates splits string/datetime dates into month/year."""
    def setUp(self):
        # Build one user with jobs/schools covering both ongoing (end=None)
        # and finished entries; self.user2 has no entries and stays untouched.
        super(TestMigrateDates, self).setUp()
        self.user = UserFactory()
        self.user2 = UserFactory()
        job1 = {
            'position': 'Rockstar',
            'institution': 'Queen',
            'department': 'RocknRoll',
            'location': 'Queens, NY',
            'start': '2014-05-18',
            'end': '2014-09-30',
            'ongoing': False
        }
        job2 = {
            'position': 'Artist',
            'institution': 'Queen',
            'department': 'RocknRoll',
            'location': 'Queens, NY',
            'start': '2014-05-18',
            'end': None,
            'ongoing': True
        }
        school1 = {
            'degree': 'Philosophy',
            'institution': 'Queens University',
            'department': 'Contemplation',
            'location': 'New York, NY',
            'start': '2014-01-01',
            'end': '2014-01-02',
            'ongoing': False
        }
        school2 = {
            'degree': 'Astrophysics',
            'institution': 'Queens University',
            'department': None,
            'location': 'Space',
            'start': '2014-01-01',
            'end': None,
            'ongoing': True
        }
        self.user.jobs.append(job1)
        self.user.jobs.append(job2)
        self.user.schools.append(school1)
        self.user.schools.append(school2)
    def test_migrate_dates(self):
        # After migration every 'start'/'end' key is gone and replaced by
        # month/year pairs (None month/year for ongoing entries).
        migrate_dates()
        assert_equal(self.user.jobs[0].get('startMonth'), 5)
        assert_equal(self.user.jobs[0].get('startYear'), 2014)
        assert_equal(self.user.jobs[0].get('endMonth'), 9)
        assert_equal(self.user.jobs[0].get('endYear'), 2014)
        assert_false(self.user.jobs[0].get('start', None))
        assert_false(self.user.jobs[0].get('end', None))
        assert_equal(self.user.jobs[1].get('startMonth'), 5)
        assert_equal(self.user.jobs[1].get('startYear'), 2014)
        assert_equal(self.user.jobs[1].get('endMonth'), None)
        assert_equal(self.user.jobs[1].get('endYear'), None)
        assert_false(self.user.jobs[1].get('start', None))
        assert_false(self.user.jobs[1].get('end', None))
        assert_equal(self.user.schools[0].get('startMonth'), 1)
        assert_equal(self.user.schools[0].get('startYear'), 2014)
        assert_equal(self.user.schools[0].get('endMonth'), 1)
        assert_equal(self.user.schools[0].get('endYear'), 2014)
        assert_false(self.user.schools[0].get('start', None))
        assert_false(self.user.schools[0].get('end', None))
        assert_equal(self.user.schools[1].get('startMonth'), 1)
        assert_equal(self.user.schools[1].get('startYear'), 2014)
        assert_equal(self.user.schools[1].get('endMonth'), None)
        assert_equal(self.user.schools[1].get('endYear'), None)
        assert_false(self.user.schools[1].get('start', None))
        assert_false(self.user.schools[1].get('end', None))
# Allow the migration to be run as a standalone script.
if __name__ == '__main__':
    main()
|
illicitonion/givabit | refs/heads/master | lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_3/tests/regressiontests/signals_regress/models.py | 92 | from django.db import models
class Author(models.Model):
    """Minimal author model used by the signals regression tests."""
    name = models.CharField(max_length=20)

    def __unicode__(self):
        # Human-readable representation (Python 2 era Django).
        return self.name
class Book(models.Model):
    """Minimal book model with an M2M to Author, used to exercise
    many-to-many signal behavior in the regression tests."""
    name = models.CharField(max_length=20)
    authors = models.ManyToManyField(Author)

    def __unicode__(self):
        # Human-readable representation (Python 2 era Django).
        return self.name
|
weimingtom/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/encodings/cp037.py | 266 | """ Python Character Mapping Codec cp037 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP037.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP037 codec delegating to the C-level charmap codec with
    the tables defined at the bottom of this module."""

    def encode(self,input,errors='strict'):
        # Returns (bytes, length-consumed) as required by codecs.Codec.
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        # Returns (text, length-consumed) as required by codecs.Codec.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is stateless, so each chunk is encoded
        # independently; [0] drops the "characters consumed" count.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Single-byte charset: no partial multi-byte sequences to buffer,
        # so each chunk decodes independently; [0] drops the length count.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Codec supplies encode(); codecs.StreamWriter supplies the stream glue.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Codec supplies decode(); codecs.StreamReader supplies the stream glue.
    pass
### encodings module API

def getregentry():
    """Return the CodecInfo record the codec registry uses for 'cp037'."""
    return codecs.CodecInfo(
        name='cp037',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
# 256-character string: index = CP037 byte value, character = the Unicode
# code point it decodes to. Auto-generated by gencodec.py from the vendor
# mapping file -- do not edit entries by hand.
decoding_table = (
    '\x00'  # 0x00 -> NULL
    '\x01'  # 0x01 -> START OF HEADING
    '\x02'  # 0x02 -> START OF TEXT
    '\x03'  # 0x03 -> END OF TEXT
    '\x9c'  # 0x04 -> CONTROL
    '\t'  # 0x05 -> HORIZONTAL TABULATION
    '\x86'  # 0x06 -> CONTROL
    '\x7f'  # 0x07 -> DELETE
    '\x97'  # 0x08 -> CONTROL
    '\x8d'  # 0x09 -> CONTROL
    '\x8e'  # 0x0A -> CONTROL
    '\x0b'  # 0x0B -> VERTICAL TABULATION
    '\x0c'  # 0x0C -> FORM FEED
    '\r'  # 0x0D -> CARRIAGE RETURN
    '\x0e'  # 0x0E -> SHIFT OUT
    '\x0f'  # 0x0F -> SHIFT IN
    '\x10'  # 0x10 -> DATA LINK ESCAPE
    '\x11'  # 0x11 -> DEVICE CONTROL ONE
    '\x12'  # 0x12 -> DEVICE CONTROL TWO
    '\x13'  # 0x13 -> DEVICE CONTROL THREE
    '\x9d'  # 0x14 -> CONTROL
    '\x85'  # 0x15 -> CONTROL
    '\x08'  # 0x16 -> BACKSPACE
    '\x87'  # 0x17 -> CONTROL
    '\x18'  # 0x18 -> CANCEL
    '\x19'  # 0x19 -> END OF MEDIUM
    '\x92'  # 0x1A -> CONTROL
    '\x8f'  # 0x1B -> CONTROL
    '\x1c'  # 0x1C -> FILE SEPARATOR
    '\x1d'  # 0x1D -> GROUP SEPARATOR
    '\x1e'  # 0x1E -> RECORD SEPARATOR
    '\x1f'  # 0x1F -> UNIT SEPARATOR
    '\x80'  # 0x20 -> CONTROL
    '\x81'  # 0x21 -> CONTROL
    '\x82'  # 0x22 -> CONTROL
    '\x83'  # 0x23 -> CONTROL
    '\x84'  # 0x24 -> CONTROL
    '\n'  # 0x25 -> LINE FEED
    '\x17'  # 0x26 -> END OF TRANSMISSION BLOCK
    '\x1b'  # 0x27 -> ESCAPE
    '\x88'  # 0x28 -> CONTROL
    '\x89'  # 0x29 -> CONTROL
    '\x8a'  # 0x2A -> CONTROL
    '\x8b'  # 0x2B -> CONTROL
    '\x8c'  # 0x2C -> CONTROL
    '\x05'  # 0x2D -> ENQUIRY
    '\x06'  # 0x2E -> ACKNOWLEDGE
    '\x07'  # 0x2F -> BELL
    '\x90'  # 0x30 -> CONTROL
    '\x91'  # 0x31 -> CONTROL
    '\x16'  # 0x32 -> SYNCHRONOUS IDLE
    '\x93'  # 0x33 -> CONTROL
    '\x94'  # 0x34 -> CONTROL
    '\x95'  # 0x35 -> CONTROL
    '\x96'  # 0x36 -> CONTROL
    '\x04'  # 0x37 -> END OF TRANSMISSION
    '\x98'  # 0x38 -> CONTROL
    '\x99'  # 0x39 -> CONTROL
    '\x9a'  # 0x3A -> CONTROL
    '\x9b'  # 0x3B -> CONTROL
    '\x14'  # 0x3C -> DEVICE CONTROL FOUR
    '\x15'  # 0x3D -> NEGATIVE ACKNOWLEDGE
    '\x9e'  # 0x3E -> CONTROL
    '\x1a'  # 0x3F -> SUBSTITUTE
    ' '  # 0x40 -> SPACE
    '\xa0'  # 0x41 -> NO-BREAK SPACE
    '\xe2'  # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    '\xe4'  # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
    '\xe0'  # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
    '\xe1'  # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
    '\xe3'  # 0x46 -> LATIN SMALL LETTER A WITH TILDE
    '\xe5'  # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
    '\xe7'  # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
    '\xf1'  # 0x49 -> LATIN SMALL LETTER N WITH TILDE
    '\xa2'  # 0x4A -> CENT SIGN
    '.'  # 0x4B -> FULL STOP
    '<'  # 0x4C -> LESS-THAN SIGN
    '('  # 0x4D -> LEFT PARENTHESIS
    '+'  # 0x4E -> PLUS SIGN
    '|'  # 0x4F -> VERTICAL LINE
    '&'  # 0x50 -> AMPERSAND
    '\xe9'  # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
    '\xea'  # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    '\xeb'  # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
    '\xe8'  # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
    '\xed'  # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
    '\xee'  # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    '\xef'  # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
    '\xec'  # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
    '\xdf'  # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
    '!'  # 0x5A -> EXCLAMATION MARK
    '$'  # 0x5B -> DOLLAR SIGN
    '*'  # 0x5C -> ASTERISK
    ')'  # 0x5D -> RIGHT PARENTHESIS
    ';'  # 0x5E -> SEMICOLON
    '\xac'  # 0x5F -> NOT SIGN
    '-'  # 0x60 -> HYPHEN-MINUS
    '/'  # 0x61 -> SOLIDUS
    '\xc2'  # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    '\xc4'  # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    '\xc0'  # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
    '\xc1'  # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
    '\xc3'  # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
    '\xc5'  # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
    '\xc7'  # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
    '\xd1'  # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
    '\xa6'  # 0x6A -> BROKEN BAR
    ','  # 0x6B -> COMMA
    '%'  # 0x6C -> PERCENT SIGN
    '_'  # 0x6D -> LOW LINE
    '>'  # 0x6E -> GREATER-THAN SIGN
    '?'  # 0x6F -> QUESTION MARK
    '\xf8'  # 0x70 -> LATIN SMALL LETTER O WITH STROKE
    '\xc9'  # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
    '\xca'  # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    '\xcb'  # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
    '\xc8'  # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
    '\xcd'  # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
    '\xce'  # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    '\xcf'  # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
    '\xcc'  # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
    '`'  # 0x79 -> GRAVE ACCENT
    ':'  # 0x7A -> COLON
    '#'  # 0x7B -> NUMBER SIGN
    '@'  # 0x7C -> COMMERCIAL AT
    "'"  # 0x7D -> APOSTROPHE
    '='  # 0x7E -> EQUALS SIGN
    '"'  # 0x7F -> QUOTATION MARK
    '\xd8'  # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
    'a'  # 0x81 -> LATIN SMALL LETTER A
    'b'  # 0x82 -> LATIN SMALL LETTER B
    'c'  # 0x83 -> LATIN SMALL LETTER C
    'd'  # 0x84 -> LATIN SMALL LETTER D
    'e'  # 0x85 -> LATIN SMALL LETTER E
    'f'  # 0x86 -> LATIN SMALL LETTER F
    'g'  # 0x87 -> LATIN SMALL LETTER G
    'h'  # 0x88 -> LATIN SMALL LETTER H
    'i'  # 0x89 -> LATIN SMALL LETTER I
    '\xab'  # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xbb'  # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xf0'  # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
    '\xfd'  # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
    '\xfe'  # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
    '\xb1'  # 0x8F -> PLUS-MINUS SIGN
    '\xb0'  # 0x90 -> DEGREE SIGN
    'j'  # 0x91 -> LATIN SMALL LETTER J
    'k'  # 0x92 -> LATIN SMALL LETTER K
    'l'  # 0x93 -> LATIN SMALL LETTER L
    'm'  # 0x94 -> LATIN SMALL LETTER M
    'n'  # 0x95 -> LATIN SMALL LETTER N
    'o'  # 0x96 -> LATIN SMALL LETTER O
    'p'  # 0x97 -> LATIN SMALL LETTER P
    'q'  # 0x98 -> LATIN SMALL LETTER Q
    'r'  # 0x99 -> LATIN SMALL LETTER R
    '\xaa'  # 0x9A -> FEMININE ORDINAL INDICATOR
    '\xba'  # 0x9B -> MASCULINE ORDINAL INDICATOR
    '\xe6'  # 0x9C -> LATIN SMALL LIGATURE AE
    '\xb8'  # 0x9D -> CEDILLA
    '\xc6'  # 0x9E -> LATIN CAPITAL LIGATURE AE
    '\xa4'  # 0x9F -> CURRENCY SIGN
    '\xb5'  # 0xA0 -> MICRO SIGN
    '~'  # 0xA1 -> TILDE
    's'  # 0xA2 -> LATIN SMALL LETTER S
    't'  # 0xA3 -> LATIN SMALL LETTER T
    'u'  # 0xA4 -> LATIN SMALL LETTER U
    'v'  # 0xA5 -> LATIN SMALL LETTER V
    'w'  # 0xA6 -> LATIN SMALL LETTER W
    'x'  # 0xA7 -> LATIN SMALL LETTER X
    'y'  # 0xA8 -> LATIN SMALL LETTER Y
    'z'  # 0xA9 -> LATIN SMALL LETTER Z
    '\xa1'  # 0xAA -> INVERTED EXCLAMATION MARK
    '\xbf'  # 0xAB -> INVERTED QUESTION MARK
    '\xd0'  # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
    '\xdd'  # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
    '\xde'  # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
    '\xae'  # 0xAF -> REGISTERED SIGN
    '^'  # 0xB0 -> CIRCUMFLEX ACCENT
    '\xa3'  # 0xB1 -> POUND SIGN
    '\xa5'  # 0xB2 -> YEN SIGN
    '\xb7'  # 0xB3 -> MIDDLE DOT
    '\xa9'  # 0xB4 -> COPYRIGHT SIGN
    '\xa7'  # 0xB5 -> SECTION SIGN
    '\xb6'  # 0xB6 -> PILCROW SIGN
    '\xbc'  # 0xB7 -> VULGAR FRACTION ONE QUARTER
    '\xbd'  # 0xB8 -> VULGAR FRACTION ONE HALF
    '\xbe'  # 0xB9 -> VULGAR FRACTION THREE QUARTERS
    '['  # 0xBA -> LEFT SQUARE BRACKET
    ']'  # 0xBB -> RIGHT SQUARE BRACKET
    '\xaf'  # 0xBC -> MACRON
    '\xa8'  # 0xBD -> DIAERESIS
    '\xb4'  # 0xBE -> ACUTE ACCENT
    '\xd7'  # 0xBF -> MULTIPLICATION SIGN
    '{'  # 0xC0 -> LEFT CURLY BRACKET
    'A'  # 0xC1 -> LATIN CAPITAL LETTER A
    'B'  # 0xC2 -> LATIN CAPITAL LETTER B
    'C'  # 0xC3 -> LATIN CAPITAL LETTER C
    'D'  # 0xC4 -> LATIN CAPITAL LETTER D
    'E'  # 0xC5 -> LATIN CAPITAL LETTER E
    'F'  # 0xC6 -> LATIN CAPITAL LETTER F
    'G'  # 0xC7 -> LATIN CAPITAL LETTER G
    'H'  # 0xC8 -> LATIN CAPITAL LETTER H
    'I'  # 0xC9 -> LATIN CAPITAL LETTER I
    '\xad'  # 0xCA -> SOFT HYPHEN
    '\xf4'  # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    '\xf6'  # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
    '\xf2'  # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
    '\xf3'  # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
    '\xf5'  # 0xCF -> LATIN SMALL LETTER O WITH TILDE
    '}'  # 0xD0 -> RIGHT CURLY BRACKET
    'J'  # 0xD1 -> LATIN CAPITAL LETTER J
    'K'  # 0xD2 -> LATIN CAPITAL LETTER K
    'L'  # 0xD3 -> LATIN CAPITAL LETTER L
    'M'  # 0xD4 -> LATIN CAPITAL LETTER M
    'N'  # 0xD5 -> LATIN CAPITAL LETTER N
    'O'  # 0xD6 -> LATIN CAPITAL LETTER O
    'P'  # 0xD7 -> LATIN CAPITAL LETTER P
    'Q'  # 0xD8 -> LATIN CAPITAL LETTER Q
    'R'  # 0xD9 -> LATIN CAPITAL LETTER R
    '\xb9'  # 0xDA -> SUPERSCRIPT ONE
    '\xfb'  # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    '\xfc'  # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
    '\xf9'  # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
    '\xfa'  # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
    '\xff'  # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
    '\\'  # 0xE0 -> REVERSE SOLIDUS
    '\xf7'  # 0xE1 -> DIVISION SIGN
    'S'  # 0xE2 -> LATIN CAPITAL LETTER S
    'T'  # 0xE3 -> LATIN CAPITAL LETTER T
    'U'  # 0xE4 -> LATIN CAPITAL LETTER U
    'V'  # 0xE5 -> LATIN CAPITAL LETTER V
    'W'  # 0xE6 -> LATIN CAPITAL LETTER W
    'X'  # 0xE7 -> LATIN CAPITAL LETTER X
    'Y'  # 0xE8 -> LATIN CAPITAL LETTER Y
    'Z'  # 0xE9 -> LATIN CAPITAL LETTER Z
    '\xb2'  # 0xEA -> SUPERSCRIPT TWO
    '\xd4'  # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    '\xd6'  # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
    '\xd2'  # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
    '\xd3'  # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
    '\xd5'  # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
    '0'  # 0xF0 -> DIGIT ZERO
    '1'  # 0xF1 -> DIGIT ONE
    '2'  # 0xF2 -> DIGIT TWO
    '3'  # 0xF3 -> DIGIT THREE
    '4'  # 0xF4 -> DIGIT FOUR
    '5'  # 0xF5 -> DIGIT FIVE
    '6'  # 0xF6 -> DIGIT SIX
    '7'  # 0xF7 -> DIGIT SEVEN
    '8'  # 0xF8 -> DIGIT EIGHT
    '9'  # 0xF9 -> DIGIT NINE
    '\xb3'  # 0xFA -> SUPERSCRIPT THREE
    '\xdb'  # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    '\xdc'  # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
    '\xd9'  # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
    '\xda'  # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
    '\x9f'  # 0xFF -> CONTROL
)

### Encoding table
# Inverse mapping (Unicode code point -> CP037 byte), derived from the
# decoding table by the C helper in the codecs module.
encoding_table=codecs.charmap_build(decoding_table)
|
mcollins12321/anita | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/sanitizer.py | 180 | from __future__ import absolute_import, division, unicode_literals
import re
from xml.sax.saxutils import escape, unescape
from six.moves import urllib_parse as urlparse
from .tokenizer import HTMLTokenizer
from .constants import tokenTypes
content_type_rgx = re.compile(r'''
^
# Match a content type <application>/<type>
(?P<content_type>[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+)
# Match any character set and encoding
(?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?)
|(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?)
# Assume the rest is data
,.*
$
''',
re.VERBOSE)
class HTMLSanitizerMixin(object):
    """ sanitization of XHTML+MathML+SVG and of inline style attributes.

    Whitelist-based: anything not explicitly listed in the acceptable_*
    class attributes is escaped or stripped by sanitize_token() below.
    """

    # Whitelisted HTML element names.
    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
        'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
        'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
        'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
        'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
        'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
        'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
        'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
        'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']

    # Whitelisted MathML element names.
    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
        'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
        'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
        'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
        'munderover', 'none']

    # Whitelisted SVG element names.
    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
        'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
        'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
        'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
        'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
        'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']

    # Whitelisted HTML attribute names.
    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
        'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
        'background', 'balance', 'bgcolor', 'bgproperties', 'border',
        'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
        'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
        'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
        'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
        'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
        'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
        'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
        'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
        'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
        'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
        'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
        'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
        'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
        'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
        'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
        'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
        'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
        'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
        'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
        'width', 'wrap', 'xml:lang']

    # Whitelisted MathML attribute names.
    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
        'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
        'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
        'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
        'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
        'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
        'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
        'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
        'xlink:type', 'xmlns', 'xmlns:xlink']

    # Whitelisted SVG attribute names.
    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
        'arabic-form', 'ascent', 'attributeName', 'attributeType',
        'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
        'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
        'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
        'fill-opacity', 'fill-rule', 'font-family', 'font-size',
        'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
        'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
        'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
        'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
        'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
        'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
        'opacity', 'orient', 'origin', 'overline-position',
        'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
        'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
        'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
        'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
        'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
        'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
        'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
        'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
        'transform', 'type', 'u1', 'u2', 'underline-position',
        'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
        'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
        'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
        'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
        'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
        'y1', 'y2', 'zoomAndPan']

    # Attributes whose value is a URI and therefore needs scheme vetting.
    attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster', 'background', 'datasrc',
                       'dynsrc', 'lowsrc', 'ping', 'poster', 'xlink:href', 'xml:base']

    # SVG attributes that may contain url(...) references, which are stripped.
    svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
        'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
        'mask', 'stroke']

    # SVG elements whose non-local xlink:href attributes are removed.
    svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
        'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
        'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
        'set', 'use']

    # Whitelisted CSS property names (for inline style attributes).
    acceptable_css_properties = ['azimuth', 'background-color',
        'border-bottom-color', 'border-collapse', 'border-color',
        'border-left-color', 'border-right-color', 'border-top-color', 'clear',
        'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
        'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
        'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
        'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
        'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
        'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
        'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
        'white-space', 'width']

    # CSS keyword values allowed inside shorthand properties (see sanitize_css).
    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
        'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
        'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
        'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
        'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
        'transparent', 'underline', 'white', 'yellow']

    # SVG presentation properties allowed in style attributes.
    acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
        'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
        'stroke-opacity']

    # URI schemes allowed in attr_val_is_uri attributes.
    acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
        'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
        'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
        'ssh', 'sftp', 'rtsp', 'afs', 'data']

    # Content types allowed inside data: URIs.
    acceptable_content_types = ['image/png', 'image/jpeg', 'image/gif', 'image/webp', 'image/bmp', 'text/plain']

    # subclasses may define their own versions of these constants
    allowed_elements = acceptable_elements + mathml_elements + svg_elements
    allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
    allowed_css_properties = acceptable_css_properties
    allowed_css_keywords = acceptable_css_keywords
    allowed_svg_properties = acceptable_svg_properties
    allowed_protocols = acceptable_protocols
    allowed_content_types = acceptable_content_types

    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
    # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
    # attributes are parsed, and a restricted set, # specified by
    # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
    # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
    # in ALLOWED_PROTOCOLS are allowed.
    #
    #   sanitize_html('<script> do_nasty_stuff() </script>')
    #    => &lt;script> do_nasty_stuff() &lt;/script>
    #   sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
    #    => <a>Click here for $100</a>
    def sanitize_token(self, token):
        """Return a sanitized version of *token*, or None to drop it.

        Tag tokens are routed to allowed_token()/disallowed_token();
        comments are dropped; all other token types pass through unchanged.
        """
        # accommodate filters which use token_type differently
        token_type = token["type"]
        if token_type in list(tokenTypes.keys()):
            token_type = tokenTypes[token_type]
        if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
                          tokenTypes["EmptyTag"]):
            if token["name"] in self.allowed_elements:
                return self.allowed_token(token, token_type)
            else:
                return self.disallowed_token(token, token_type)
        elif token_type == tokenTypes["Comment"]:
            # Comments are dropped entirely (implicit return None).
            pass
        else:
            return token

    def allowed_token(self, token, token_type):
        """Filter the attributes of a whitelisted tag token in place."""
        if "data" in token:
            # Reversed so that the FIRST occurrence of a duplicated
            # attribute wins when building the dict.
            attrs = dict([(name, val) for name, val in
                          token["data"][::-1]
                          if name in self.allowed_attributes])
            for attr in self.attr_val_is_uri:
                if attr not in attrs:
                    continue
                # Strip control/space characters that browsers ignore but
                # that would defeat naive scheme matching (e.g. "java\tscript:").
                val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
                                       unescape(attrs[attr])).lower()
                # remove replacement characters from unescaped characters
                val_unescaped = val_unescaped.replace("\ufffd", "")
                try:
                    uri = urlparse.urlparse(val_unescaped)
                except ValueError:
                    # Unparseable URI: drop the attribute outright.
                    uri = None
                    del attrs[attr]
                if uri and uri.scheme:
                    if uri.scheme not in self.allowed_protocols:
                        del attrs[attr]
                    if uri.scheme == 'data':
                        m = content_type_rgx.match(uri.path)
                        if not m:
                            del attrs[attr]
                        elif m.group('content_type') not in self.allowed_content_types:
                            del attrs[attr]

            # Strip url(...) references from SVG attributes that allow them.
            for attr in self.svg_attr_val_allows_ref:
                if attr in attrs:
                    attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                         ' ',
                                         unescape(attrs[attr]))
            # Remove non-local (not "#fragment") xlink:href on SVG elements
            # that only permit local references.
            if (token["name"] in self.svg_allow_local_href and
                    'xlink:href' in attrs and re.search('^\s*[^#\s].*',
                                                        attrs['xlink:href'])):
                del attrs['xlink:href']
            if 'style' in attrs:
                attrs['style'] = self.sanitize_css(attrs['style'])
            token["data"] = [[name, val] for name, val in list(attrs.items())]
        return token

    def disallowed_token(self, token, token_type):
        """Convert a non-whitelisted tag token into escaped literal text."""
        if token_type == tokenTypes["EndTag"]:
            token["data"] = "</%s>" % token["name"]
        elif token["data"]:
            attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
            token["data"] = "<%s%s>" % (token["name"], attrs)
        else:
            token["data"] = "<%s>" % token["name"]
        if token.get("selfClosing"):
            token["data"] = token["data"][:-1] + "/>"

        # Mirror the caller's token_type convention (string vs enum value) --
        # see the note at the top of sanitize_token().
        if token["type"] in list(tokenTypes.keys()):
            token["type"] = "Characters"
        else:
            token["type"] = tokenTypes["Characters"]

        del token["name"]
        return token

    def sanitize_css(self, style):
        """Return *style* with only whitelisted CSS properties/values kept.

        Returns '' if the style fails the character-level "gauntlet" checks.
        """
        # disallow urls
        style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)

        # gauntlet
        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
            return ''
        if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
            return ''

        clean = []
        for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
            if not value:
                continue
            if prop.lower() in self.allowed_css_properties:
                clean.append(prop + ': ' + value + ';')
            elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
                                                'padding']:
                # Shorthand properties: every keyword must be individually safe.
                for keyword in value.split():
                    if keyword not in self.acceptable_css_keywords and \
                            not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
                        break
                else:
                    clean.append(prop + ': ' + value + ';')
            elif prop.lower() in self.allowed_svg_properties:
                clean.append(prop + ': ' + value + ';')

        return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
    """Tokenizer that sanitizes tokens as it streams them, dropping any
    token that sanitize_token() rejects."""

    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=False, lowercaseAttrName=False, parser=None):
        # Change case matching defaults as we only output lowercase html anyway
        # This solution doesn't seem ideal...
        HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
                               lowercaseElementName, lowercaseAttrName,
                               parser=parser)

    def __iter__(self):
        for raw_token in HTMLTokenizer.__iter__(self):
            clean_token = self.sanitize_token(raw_token)
            if clean_token:
                yield clean_token
|
MortimerGoro/servo | refs/heads/master | tests/wpt/web-platform-tests/service-workers/cache-storage/resources/fetch-status.py | 266 | def main(request, response):
return int(request.GET["status"]), [], ""
|
jelugbo/hebs_repo | refs/heads/master | common/lib/xmodule/xmodule/tests/test_editing_module.py | 27 | """ Tests for editing descriptors"""
import unittest
import os
import logging
from mock import Mock
from pkg_resources import resource_string
from opaque_keys.edx.locations import Location
from xmodule.editing_module import TabsEditingDescriptor
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.tests import get_test_descriptor_system
log = logging.getLogger(__name__)
class TabsEditingDescriptorTestCase(unittest.TestCase):
    """ Testing TabsEditingDescriptor"""

    def setUp(self):
        """Build a TabsEditingDescriptor configured with three test tabs,
        one of which carries scss/css resources loaded from test files."""
        super(TabsEditingDescriptorTestCase, self).setUp()
        system = get_test_descriptor_system()
        # Stub out template rendering; the tests only inspect css/context.
        system.render_template = Mock(return_value="<div>Test Template HTML</div>")
        self.tabs = [
            {
                'name': "Test_css",
                'template': "tabs/codemirror-edit.html",
                'current': True,
                # Both scss and css entries point at the same test fixture
                # content, so test_get_css can compare them to one file.
                'css': {
                    'scss': [resource_string(__name__,
                        '../../test_files/test_tabseditingdescriptor.scss')],
                    'css': [resource_string(__name__,
                        '../../test_files/test_tabseditingdescriptor.css')]
                }
            },
            {
                'name': "Subtitles",
                'template': "video/subtitles.html",
            },
            {
                'name': "Settings",
                'template': "tabs/video-metadata-edit-tab.html"
            }
        ]
        # NOTE(review): this mutates the CLASS attribute, i.e. shared state;
        # acceptable here because every test re-runs setUp first.
        TabsEditingDescriptor.tabs = self.tabs
        self.descriptor = system.construct_xblock_from_class(
            TabsEditingDescriptor,
            scope_ids=ScopeIds(None, None, None, Location('org', 'course', 'run', 'category', 'name', 'revision')),
            field_data=DictFieldData({}),
        )

    def test_get_css(self):
        """test get_css"""
        css = self.descriptor.get_css()
        test_files_dir = os.path.dirname(__file__).replace('xmodule/tests', 'test_files')
        test_css_file = os.path.join(test_files_dir, 'test_tabseditingdescriptor.scss')
        with open(test_css_file) as new_css:
            added_css = new_css.read()
        # The descriptor should expose exactly the fixture contents for
        # both the scss and css resource lists.
        self.assertEqual(css['scss'].pop(), added_css)
        self.assertEqual(css['css'].pop(), added_css)

    def test_get_context(self):
        """"test get_context"""
        rendered_context = self.descriptor.get_context()
        # The editing context should surface the tab configuration verbatim.
        self.assertListEqual(rendered_context['tabs'], self.tabs)
|
TrimBiggs/calico-docker | refs/heads/master | tests/st/no_orchestrator/test_container_to_host.py | 12 | # Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.st.test_base import TestBase
from tests.st.utils.docker_host import DockerHost
class TestContainerToHost(TestBase):
    def test_container_to_host(self):
        """
        Test that a container can ping the host.

        This function is important for Mesos, since the containerized executor
        needs to exchange messages with the Mesos Slave process on the host.

        Note also that we do not use the Docker Network driver for this test.
        The Docker Container Network Model defines a "network" as a group of
        endpoints that can communicate with each other, but are isolated from
        everything else.  Thus, an endpoint of a Docker network should not be
        able to ping the host.
        """
        # dind=False: run directly on this host rather than Docker-in-Docker.
        with DockerHost('host', dind=False) as host:
            host.calicoctl("profile add TEST")

            # Use standard docker bridge networking.
            workload1 = host.create_workload("workload1")

            # Add the nodes to Calico networking.
            host.calicoctl("container add %s 192.168.100.1" % workload1)

            # Now add the profiles.
            host.calicoctl("container %s profile set TEST" % workload1)

            # Check it works.  Note that the profile allows all outgoing
            # traffic by default, and conntrack should allow the reply.
            workload1.assert_can_ping(host.ip, retries=10)
|
vitaly4uk/django | refs/heads/master | django/db/models/sql/constants.py | 633 | """
Constants specific to the SQL storage portion of the ORM.
"""
import re
# Valid query types (a set is used for speedy lookups). These are (currently)
# considered SQL-specific; other storage systems may choose to use different
# lookup types.
QUERY_TERMS = {
    'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
    'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
    'month', 'day', 'week_day', 'hour', 'minute', 'second', 'isnull', 'search',
    'regex', 'iregex',
}

# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100

# Namedtuples for sql.* internal use.

# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
CURSOR = 'cursor'
NO_RESULTS = 'no results'

# Validates user-supplied ordering terms: '?' (random), or an optional
# leading +/- direction followed by a dotted field path.
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')

# Maps an ordering direction to (itself, its inverse) for query reversal.
ORDER_DIR = {
    'ASC': ('ASC', 'DESC'),
    'DESC': ('DESC', 'ASC'),
}

# SQL join types.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
|
tsacha/rss2email | refs/heads/master | rss2email/__init__.py | 5 | # Copyright (C) 2012-2014 W. Trevor King <wking@tremily.us>
#
# This file is part of rss2email.
#
# rss2email is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) version 3 of
# the License.
#
# rss2email is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# rss2email. If not, see <http://www.gnu.org/licenses/>.
"""rss2email: get RSS feeds emailed to you
"""
import logging as _logging
import sys as _sys
# Package metadata, consumed by setup.py and the command-line interface.
__version__ = '3.9'
__url__ = 'https://github.com/wking/rss2email'
__author__ = 'W. Trevor King'
__email__ = 'rss2email@tremily.us'
__copyright__ = '(C) 2004 Aaron Swartz. GNU GPL 2 or 3.'
__contributors__ = [
    'Aaron Swartz (original author)',
    'Brian Lalor',
    'Dean Jackson',
    'Eelis van der Weegen',
    'Erik Hetzner',
    'Etienne Millon',
    'George Saunders',
    'Joey Hess',
    'Lindsey Smith (lindsey@allthingsrss.com)',
    'Marcel Ackermann (http://www.DreamFlasher.de)',
    "Martin 'Joey' Schulze",
    'Matej Cepl',
    'W. Trevor King',
]

# Package-wide logger: errors only by default, to stderr.
LOG = _logging.getLogger('rss2email')
LOG.addHandler(_logging.StreamHandler())
LOG.setLevel(_logging.ERROR)

# Fail fast at import time on unsupported interpreters rather than with a
# confusing SyntaxError/AttributeError later.
if _sys.version_info < (3, 2):
    raise ImportError(
        "rss2email requires Python 3.2, but you're using:\n{}".format(
            _sys.version))
|
remcoboerma/w2p_redirector | refs/heads/master | languages/hu.py | 162 | # coding: utf8
{
'!langcode!': 'hu',
'!langname!': 'Magyar',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%s %%{row} deleted': '%s sorok törlődtek',
'%s %%{row} updated': '%s sorok frissítődtek',
'%s selected': '%s kiválasztott',
'%Y-%m-%d': '%Y.%m.%d.',
'%Y-%m-%d %H:%M:%S': '%Y.%m.%d. %H:%M:%S',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': 'az adminisztrációs felületért kattints ide',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': 'az appadmin a biztonságtalan csatorna miatt letiltva',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Available Databases and Tables': 'Elérhető adatbázisok és táblák',
'Buy this book': 'Buy this book',
'cache': 'gyorsítótár',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Nem lehet üres',
'change password': 'jelszó megváltoztatása',
'Check to delete': 'Törléshez válaszd ki',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'Client IP',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Jelenlegi lekérdezés',
'Current response': 'Jelenlegi válasz',
'Current session': 'Jelenlegi folyamat',
'customize me!': 'változtass meg!',
'data uploaded': 'adat feltöltve',
'Database': 'adatbázis',
'Database %s select': 'adatbázis %s kiválasztás',
'db': 'db',
'DB Model': 'DB Model',
'Delete:': 'Töröl:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Description',
'design': 'design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'kész!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Szerkeszt',
'Edit current record': 'Aktuális bejegyzés szerkesztése',
'edit profile': 'profil szerkesztése',
'Edit This App': 'Alkalmazást szerkeszt',
'Email and SMS': 'Email and SMS',
'Errors': 'Errors',
'export as csv file': 'exportál csv fájlba',
'FAQ': 'FAQ',
'First name': 'First name',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Group ID': 'Group ID',
'Groups': 'Groups',
'Hello World': 'Hello Világ',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Import/Export',
'Index': 'Index',
'insert new': 'új beillesztése',
'insert new %s': 'új beillesztése %s',
'Internal State': 'Internal State',
'Introduction': 'Introduction',
'Invalid email': 'Invalid email',
'Invalid Query': 'Hibás lekérdezés',
'invalid request': 'hibás kérés',
'Key': 'Key',
'Last name': 'Last name',
'Layout': 'Szerkezet',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'login': 'belép',
'logout': 'kilép',
'lost password': 'elveszett jelszó',
'Lost Password': 'Lost Password',
'Main Menu': 'Főmenü',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menü model',
'My Sites': 'My Sites',
'Name': 'Name',
'New Record': 'Új bejegyzés',
'new record inserted': 'új bejegyzés felvéve',
'next 100 rows': 'következő 100 sor',
'No databases in this application': 'Nincs adatbázis ebben az alkalmazásban',
'Online examples': 'online példákért kattints ide',
'or import from csv file': 'vagy betöltés csv fájlból',
'Origin': 'Origin',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Password',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': 'előző 100 sor',
'Python': 'Python',
'Query:': 'Lekérdezés:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'bejegyzés',
'record does not exist': 'bejegyzés nem létezik',
'Record ID': 'Record ID',
'Record id': 'bejegyzés id',
'Register': 'Register',
'register': 'regisztráció',
'Registration key': 'Registration key',
'Reset Password key': 'Reset Password key',
'Role': 'Role',
'Rows in Table': 'Sorok a táblában',
'Rows selected': 'Kiválasztott sorok',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'állapot',
'Statistics': 'Statistics',
'Stylesheet': 'Stylesheet',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': 'Biztos törli ezt az objektumot?',
'Table': 'tábla',
'Table name': 'Table name',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Timestamp',
'Twitter': 'Twitter',
'unable to parse csv file': 'nem lehet a csv fájlt beolvasni',
'Update:': 'Frissít:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User ID': 'User ID',
'Videos': 'Videos',
'View': 'Nézet',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'Isten hozott a web2py-ban',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
|
vitaly-krugl/haigha | refs/heads/master | setup.py | 3 | import haigha
import os

try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is unavailable.
    from distutils.core import setup


def _read(path):
    """Return the contents of *path*, closing the file promptly."""
    with open(path) as fp:
        return fp.read()


# A concrete list of stripped requirement lines (the original lazy
# ``map`` over an unclosed ``open(...)`` both leaked the file handle and,
# on Python 3, could only be iterated once by setuptools).
requirements = [line.strip() for line in _read('requirements.txt').splitlines()]

setup(
    name='haigha',
    version=haigha.__version__,
    author='Vitaly Babiy, Aaron Westendorf',
    author_email="vbabiy@agoragames.com, aaron@agoragames.com",
    packages=['haigha', 'haigha.frames', 'haigha.classes', 'haigha.transports', 'haigha.connections'],
    install_requires=requirements,
    url='https://github.com/agoragames/haigha',
    license="LICENSE.txt",
    description='Synchronous and asynchronous AMQP client library',
    long_description=_read('README.rst'),
    keywords=['python', 'amqp', 'event', 'rabbitmq'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: BSD License',
        "Intended Audience :: Developers",
        "Operating System :: POSIX",
        "Topic :: Communications",
        "Topic :: System :: Distributed Computing",
        "Topic :: Software Development :: Libraries :: Python Modules",
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        #'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries'
    ]
)
|
treycucco/pynet-kvp | refs/heads/master | client.py | 1 | import argparse
import readline
from pynet import Encryptor, Node, MessageState
from pynet.util import send_data, to_bytes, to_address
class Client(object):
  """A client object holds the information needed to send data to a node."""

  def __init__(self, private_key, name, server_address):
    self.private_key = private_key
    self.name = name
    self.server_address = to_address(server_address)

  def send(self, body, *, encrypt=True):
    """Send *body* to the server and return a (success, payload) pair."""
    payload = Node.construct_message(self.encryptor, self.name, body) if encrypt else body
    reply = send_data(self.server_address, payload)
    status, data = Node.deconstruct_message(self.encryptor, reply)
    if status == MessageState.ok:
      _sender, reply_body, _signature = data
      return (True, reply_body)
    if status == MessageState.invalid_data:
      return (False, data)
    message, _signature = data
    return (False, message)

  def build_body(self, *parts, joiner=b" "):
    """Join *parts*, each converted to bytes, with *joiner*."""
    return joiner.join(to_bytes(piece) for piece in parts)

  def ping(self):
    """Return True when the server answers a plain 'ping' with b'pong'."""
    return send_data(self.server_address, "ping") == b"pong"

  def connect(self):
    """Fetch the server identity, set up encryption, then register."""
    server_name, server_public_key = send_data(self.server_address, "identify").decode().split("\n", 1)
    self.encryptor = Encryptor(self.private_key, server_public_key)
    return self.register()

  def register(self):
    """Announce this client's public key to the server."""
    reg_msg = self.build_body(
        "register", self.encryptor.private_key.publickey().exportKey("PEM"),
        joiner=(b"\n"))
    return self.send(reg_msg)
def main():
  """Entry point: dispatch to the sub-command chosen on the command line."""
  args = parse_args()
  if args is None:
    return
  args.func(args)
def parse_args():
  """Build the CLI parser; return parsed args, or None if no sub-command."""
  parser = argparse.ArgumentParser("pynet-kvp client")
  commands = parser.add_subparsers()

  config_cmd = commands.add_parser("config", description="Dump out a sample config")
  config_cmd.set_defaults(func=dump_config)

  run_cmd = commands.add_parser("run", description="Run the client")
  run_cmd.add_argument("pem_filename")
  run_cmd.add_argument("-a", "--address", default="/tmp/pynet-kvp.sock")
  run_cmd.add_argument("-n", "--name", default="a peer")
  run_cmd.set_defaults(func=run_client)

  args = parser.parse_args()
  if not hasattr(args, "func"):
    # No sub-command given: show usage instead of failing later.
    parser.print_help()
    return None
  return args
def dump_config(args):
  """Print a freshly generated private key in PEM form (a sample config)."""
  print(Encryptor.new_key().exportKey("PEM").decode("UTF-8"))
def run_client(args):
  """Create a Client from the PEM key file and run the interactive loop."""
  # The private key is read once; the Client keeps it for the session.
  with open(args.pem_filename, "r") as rf:
    client = Client(rf.read(), args.name, args.address)
  client.connect()
  # Loop until continue_running() reports the user typed "exit".
  while continue_running(client):
    pass
def continue_running(client):
  """Prompt for one command; return False when the user asks to exit."""
  command = input("PyNet-KVP> ")
  if command == "exit":
    return False
  # Anything else is sent to the server and the reply is echoed back.
  print(client.send(command.encode()))
  return True
if __name__ == "__main__":
main()
|
mudithkr/zamboni | refs/heads/master | manage.py | 17 | #!/usr/bin/env python
import logging
import os
import sys
from django.core.management import execute_from_command_line
# Pick the settings module: the test runner uses a dedicated module;
# everything else defaults to the regular marketplace settings.
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
    if len(sys.argv) > 1 and sys.argv[1] == 'test':
        os.environ['DJANGO_SETTINGS_MODULE'] = 'settings_test'
    else:
        os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mkt.settings')

# waffle and mkt form an import cycle because mkt patches waffle and
# waffle loads the user model, so we have to make sure mkt gets
# imported before anything else imports waffle.
import mkt  # noqa

import session_csrf  # noqa
session_csrf.monkeypatch()

# Fix jinja's Markup class to not crash when localizers give us bad format
# strings.  ``mod`` keeps a reference to the original __mod__ so ``new``
# below can delegate to it.
from jinja2 import Markup  # noqa
mod = Markup.__mod__
trans_log = logging.getLogger('z.trans')

# Load this early so that anything else you import will use these log settings.
# Mostly to shut Raven the hell up.
from lib.log_settings_base import log_configure  # noqa
log_configure()


def new(self, arg):
    """Replacement Markup.__mod__ that logs a bad localizer format string
    instead of raising, returning '' on failure."""
    try:
        return mod(self, arg)
    except Exception:
        # NOTE: ``unicode`` means this file still targets Python 2.
        trans_log.error(unicode(self))
        return ''

Markup.__mod__ = new

# Import for side-effect: configures our logging handlers.
# pylint: disable-msg=W0611
from lib.utils import update_csp, validate_modules, validate_settings  # noqa
update_csp()
validate_modules()
validate_settings()

import django.conf  # noqa
newrelic_ini = getattr(django.conf.settings, 'NEWRELIC_INI', None)
load_newrelic = False

# Monkey patches DRF to not use fqdn urls.
from mkt.api.patch import patch  # noqa
patch()

if newrelic_ini:
    import newrelic.agent  # noqa
    try:
        newrelic.agent.initialize(newrelic_ini)
        load_newrelic = True
    except:
        # Best effort: a broken New Relic config must not prevent startup.
        startup_logger = logging.getLogger('z.startup')
        startup_logger.exception('Failed to load new relic config.')

# Alter zamboni to run on a particular port as per the
# marketplace docs, unless overridden.
from django.core.management.commands import runserver  # noqa
runserver.DEFAULT_PORT = 2600

if __name__ == '__main__':
    execute_from_command_line(sys.argv)
|
coreboot/chrome-ec | refs/heads/master | extra/usb_power/convert_power_log_board.py | 2 | #!/usr/bin/env python
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Program to convert sweetberry config to servod config template.
"""
# Note: This is a py2/3 compatible file.
from __future__ import print_function
import json
import os
import sys
from powerlog import Spower
def fetch_records(board_file):
  """Import INA records from a servo board file.

  Board files are JSON documents holding a list of objects, one per INA:
  name, rs, sweetberry (board type), net name and channel.

  Args:
    board_file: path to the .board JSON file.

  Returns:
    The decoded JSON data (a list of INA record dicts).
  """
  # Return directly from the context manager; the dead ``data = None``
  # pre-assignment of the original served no purpose.
  with open(board_file) as f:
    return json.load(f)
def write_to_file(file, sweetberry, inas):
  """Write the records belonging to one |sweetberry| board to config |file|.

  Args:
    file: path of the python servod config file to write.
    sweetberry: which sweetberry board to emit, 'A' or 'B'.
    inas: list of INA record dicts read from the board file.
  """
  with open(file, 'w') as pyfile:
    pyfile.write('inas = [\n')
    for rec in inas:
      # Records for the other sweetberry board are skipped entirely.
      if rec['sweetberry'] != sweetberry:
        continue
      # EX : ('sweetberry', 0x40, 'SB_FW_CAM_2P8', 5.0, 1.000, 3, False),
      # CHMAP maps a board channel number to its (mux channel, i2c addr).
      channel, i2c_addr = Spower.CHMAP[rec['channel']]
      # NOTE(review): the last field is the *string* 'True', not a boolean —
      # verify downstream servod config parsing expects this.
      record = (" ('sweetberry', 0x%02x, '%s', 5.0, %f, %d, 'True')"
                ",\n" % (i2c_addr, rec['name'], rec['rs'], channel))
      pyfile.write(record)
    pyfile.write(']\n')
def main(argv):
  """Convert argv[1] (a .board file) into servod config template file(s)."""
  if len(argv) != 2:
    print("usage:")
    print(" %s input.board" % argv[0])
    return
  inputf = argv[1]
  basename = os.path.splitext(inputf)[0]
  inas = fetch_records(inputf)
  # One output file per distinct sweetberry board in the records.
  boards = set(rec['sweetberry'] for rec in inas)
  if len(boards) == 2:
    print("Converting %s to %s and %s" % (inputf, basename + '_a.py',
                                          basename + '_b.py'))
    write_to_file(basename + '_a.py', 'A', inas)
    write_to_file(basename + '_b.py', 'B', inas)
  else:
    print("Converting %s to %s" % (inputf, basename + '.py'))
    write_to_file(basename + '.py', boards.pop(), inas)
if __name__ == "__main__":
main(sys.argv)
|
pombredanne/pulp_ostree | refs/heads/master | plugins/setup.py | 2 | from setuptools import setup, find_packages
setup(
name='pulp_ostree_plugins',
version='1.1.0a1',
packages=find_packages(),
url='http://www.pulpproject.org',
license='GPLv2+',
author='Pulp Team',
author_email='pulp-list@redhat.com',
description='plugins for ostree support in pulp',
entry_points={
'pulp.importers': [
'importer = pulp_ostree.plugins.importers.web:entry_point',
],
'pulp.distributors': [
'distributor = pulp_ostree.plugins.distributors.web:entry_point'
],
'pulp.unit_models': [
'ostree=pulp_ostree.plugins.db.model:Branch'
]
}
)
|
stuliveshere/PySeis | refs/heads/master | examples/99.0_multistack_viewer.py | 2 | '''
displays multiple stacks
'''
import toolbox
import pylab
import numpy as np

# Stacks to display side by side; commented entries can be re-enabled.
filelist = ["1st_vels_stack.su",
            #"1st_vels_stack_elev.su",
            "fk_stack.su",
            #"model.su",
            "model_filtered.su",
            #"trim_stack.su",
            "trim_stack2.su",
            ]

# Read the first stack to initialise the trace array and SU parameters,
# then append each remaining stack column-wise, tagging every stack with a
# sequential field-record number ('fldr') so they can be told apart.
dataset, params = toolbox.initialise(filelist[0])
dataset['fldr'] = 0
for index, file in enumerate(filelist[1:]):
        data, junk = toolbox.initialise(file)
        data['fldr'] = index + 1
        dataset = np.column_stack([dataset, data])

# Balance amplitudes with a 1000-sample AGC window before display.
params['window'] = 1000
toolbox.agc(dataset, None, **params)

# Sort the display primarily by stack ('fldr'), then by CDP.
params['primary'] = 'fldr'
params['secondary'] = 'cdp'
params['clip'] = 0.02
toolbox.display(dataset, **params)
pylab.show()
|
saulpw/visidata | refs/heads/stable | visidata/editor.py | 1 | import os
import signal
import subprocess
import tempfile
import curses
import visidata
class SuspendCurses:
    'Context manager to leave windowed mode on enter and restore it on exit.'

    def __enter__(self):
        # Restore the terminal to its pre-curses state and re-install the
        # saved Ctrl-Z (SIGTSTP) handler while an external program runs.
        curses.endwin()
        signal.signal(signal.SIGTSTP, visidata.vd.tstp_signal)

    def __exit__(self, exc_type, exc_val, tb):
        # Re-enter curses mode and force a full repaint of the screen.
        curses.reset_prog_mode()
        visidata.vd.scrFull.refresh()
        curses.doupdate()
@visidata.VisiData.global_api
def launchEditor(vd, *args):
    'Launch $EDITOR with *args* as arguments.'
    editor = os.environ.get('EDITOR') or vd.fail('$EDITOR not set')
    cmd = [editor]
    cmd.extend(args)
    # Leave curses mode while the external editor owns the terminal.
    with SuspendCurses():
        return subprocess.call(cmd)
@visidata.VisiData.global_api
def launchExternalEditor(vd, v, linenum=0):
    'Launch $EDITOR to edit string *v* starting on line *linenum*.'
    # ``tempfile`` is already imported at module level; the original had a
    # redundant function-local import here.
    with tempfile.NamedTemporaryFile() as temp:
        # Write *v* to the temporary file by name so the editor can open it.
        with open(temp.name, 'w') as fp:
            fp.write(v)
        return launchExternalEditorPath(visidata.Path(temp.name), linenum)
def launchExternalEditorPath(path, linenum=0):
    'Launch $EDITOR to edit *path* starting on line *linenum*.'
    if linenum:
        # '+N' positions the cursor on line N in vi-compatible editors.
        launchEditor(path, '+%s' % linenum)
    else:
        launchEditor(path)

    # Read the (possibly modified) file back in after the editor exits.
    with open(path, 'r') as fp:
        try:
            return fp.read().rstrip('\n')  # trim inevitable trailing newlines
        except Exception as e:
            # Surface the error inside visidata rather than crashing.
            visidata.vd.exceptionCaught(e)

    return ''
def suspend():
    'Stop this process (as with Ctrl-Z); the parent shell can resume it.'
    # ``signal`` is already imported at module level; the original had a
    # redundant function-local import here.
    with SuspendCurses():
        os.kill(os.getpid(), signal.SIGSTOP)
visidata.globalCommand('^Z', 'suspend', 'suspend()', 'suspend VisiData process')
|
calfonso/ansible | refs/heads/devel | lib/ansible/modules/storage/purestorage/purefa_facts.py | 15 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_facts
version_added: '2.6'
short_description: Collect facts from Pure Storage FlashArray
description:
- Collect facts information from a Pure Storage Flasharray running the
Purity//FA operating system. By default, the module will collect basic
fact information including hosts, host groups, protection
groups and volume counts. Additional fact information can be collected
based on the configured set of arguements.
author:
- Simon Dodsley (@sdodsley)
options:
gather_subset:
description:
- When supplied, this argument will define the facts to be collected.
Possible values for this include all, minimum, config, performance,
capacity, network, subnet, interfaces, hgroups, pgroups, hosts,
volumes and snapshots.
required: false
default: minimum
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: collect default set of facts
purefa_facts:
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: collect configuration and capacity facts
purefa_facts:
gather_subset:
- config
- capacity
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: collect all facts
purefa_facts:
gather_subset:
- all
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
ansible_facts:
description: Returns the facts collected from the FlashArray
returned: always
type: complex
contains:
"capacity": {}
"config": {
"directory_service": {
"array_admin_group": null,
"base_dn": null,
"bind_password": null,
"bind_user": null,
"check_peer": false,
"enabled": false,
"group_base": null,
"readonly_group": null,
"storage_admin_group": null,
"uri": []
},
"dns": {
"domain": "domain.com",
"nameservers": [
"8.8.8.8",
"8.8.4.4"
]
},
"ntp": [
"0.ntp.pool.org",
"1.ntp.pool.org",
"2.ntp.pool.org",
"3.ntp.pool.org"
],
"smtp": [
{
"enabled": true,
"name": "alerts@acme.com"
},
{
"enabled": true,
"name": "user@acme.com"
}
],
"snmp": [
{
"auth_passphrase": null,
"auth_protocol": null,
"community": null,
"host": "localhost",
"name": "localhost",
"privacy_passphrase": null,
"privacy_protocol": null,
"user": null,
"version": "v2c"
}
],
"ssl_certs": {
"country": null,
"email": null,
"issued_by": "",
"issued_to": "",
"key_size": 2048,
"locality": null,
"organization": "Acme Storage, Inc.",
"organizational_unit": "Acme Storage, Inc.",
"state": null,
"status": "self-signed",
"valid_from": "2017-08-11T23:09:06Z",
"valid_to": "2027-08-09T23:09:06Z"
},
"syslog": []
}
"default": {
"array_name": "flasharray1",
"hostgroups": 0,
"hosts": 10,
"protection_groups": 1,
"purity_version": "5.0.4",
"snapshots": 1
}
"hgroups": {}
"hosts": {
"host1": {
"hgroup": null,
"iqn": [
"iqn.1994-05.com.redhat:2f6f5715a533"
],
"wwn": []
},
"host2": {
"hgroup": null,
"iqn": [
"iqn.1994-05.com.redhat:d17fb13fe0b"
],
"wwn": []
},
"host3": {
"hgroup": null,
"iqn": [
"iqn.1994-05.com.redhat:97b1351bfb2"
],
"wwn": []
},
"host4": {
"hgroup": null,
"iqn": [
"iqn.1994-05.com.redhat:dd84e9a7b2cb"
],
"wwn": [
"10000000C96C48D1",
"10000000C96C48D2"
]
}
}
"interfaces": {
"CT0.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
"CT0.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
"CT1.ETH4": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682",
"CT1.ETH5": "iqn.2010-06.com.purestorage:flasharray.2111b767484e4682"
}
"network": {
"ct0.eth0": {
"address": "10.10.10.10",
"gateway": "10.10.10.1",
"hwaddr": "ec:f4:bb:c8:8a:04",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"speed": 1000000000
},
"ct0.eth2": {
"address": "10.10.10.11",
"gateway": null,
"hwaddr": "ec:f4:bb:c8:8a:00",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"replication"
],
"speed": 10000000000
},
"ct0.eth3": {
"address": "10.10.10.12",
"gateway": null,
"hwaddr": "ec:f4:bb:c8:8a:02",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"replication"
],
"speed": 10000000000
},
"ct0.eth4": {
"address": "10.10.10.13",
"gateway": null,
"hwaddr": "90:e2:ba:83:79:0c",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"iscsi"
],
"speed": 10000000000
},
"ct0.eth5": {
"address": "10.10.10.14",
"gateway": null,
"hwaddr": "90:e2:ba:83:79:0d",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"iscsi"
],
"speed": 10000000000
},
"vir0": {
"address": "10.10.10.20",
"gateway": "10.10.10.1",
"hwaddr": "fe:ba:e9:e7:6b:0f",
"mtu": 1500,
"netmask": "255.255.255.0",
"services": [
"management"
],
"speed": 1000000000
}
}
"performance": {
"input_per_sec": 8191,
"output_per_sec": 0,
"queue_depth": 1,
"reads_per_sec": 0,
"san_usec_per_write_op": 15,
"usec_per_read_op": 0,
"usec_per_write_op": 642,
"writes_per_sec": 2
}
"pgroups": {
"consisgroup-07b6b983-986e-46f5-bdc3-deaa3dbb299e-cinder": {
"hgroups": null,
"hosts": null,
"source": "host1",
"targets": null,
"volumes": [
"volume-1"
]
}
}
"snapshots": {
"consisgroup.cgsnapshot": {
"created": "2018-03-28T09:34:02Z",
"size": 13958643712,
"source": "volume-1"
}
}
"subnet": {}
"volumes": {
"ansible_data": {
"hosts": [
[
"host1",
1
]
],
"serial": "43BE47C12334399B000114A6",
"size": 1099511627776
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
# Minimum Purity REST API versions required for optional fact subsets.
AC_REQUIRED_API_VERSION = '1.14'   # pods (ActiveCluster) in the default facts
CAP_REQUIRED_API_VERSION = '1.6'   # capacity/space metrics
SAN_REQUIRED_API_VERSION = '1.10'  # SAN-side latency counters
def generate_default_dict(array):
    """Collect the minimum (default) set of facts from the array."""
    default_facts = {}
    defaults = array.get()
    api_version = array._list_available_rest_versions()
    # Pods only exist on arrays whose REST API supports ActiveCluster.
    if AC_REQUIRED_API_VERSION in api_version:
        pods = array.get_pods()
        default_facts['pods'] = len(pods)
    hosts = array.list_hosts()
    snaps = array.list_volumes(snap=True, pending=True)
    pgroups = array.list_pgroups(pending=True)
    hgroups = array.list_hgroups()
    default_facts['array_name'] = defaults['array_name']
    default_facts['purity_version'] = defaults['version']
    default_facts['hosts'] = len(hosts)
    default_facts['snapshots'] = len(snaps)
    default_facts['protection_groups'] = len(pgroups)
    default_facts['hostgroups'] = len(hgroups)
    return default_facts
def generate_perf_dict(array):
    """Collect performance facts: IOPS, bandwidth, latency and queue depth."""
    perf_facts = {}
    perf_info = array.get(action='monitor')[0]
    # IOPS
    perf_facts['writes_per_sec'] = perf_info['writes_per_sec']
    perf_facts['reads_per_sec'] = perf_info['reads_per_sec']
    # Bandwidth
    perf_facts['input_per_sec'] = perf_info['input_per_sec']
    perf_facts['output_per_sec'] = perf_info['output_per_sec']
    # Latency
    api_version = array._list_available_rest_versions()
    # SAN-side latency counters only exist on newer Purity REST versions.
    if SAN_REQUIRED_API_VERSION in api_version:
        perf_facts['san_usec_per_read_op'] = perf_info['san_usec_per_read_op']
        perf_facts['san_usec_per_write_op'] = perf_info['san_usec_per_write_op']
    perf_facts['usec_per_read_op'] = perf_info['usec_per_read_op']
    perf_facts['usec_per_write_op'] = perf_info['usec_per_write_op']
    perf_facts['queue_depth'] = perf_info['queue_depth']
    return perf_facts
def generate_config_dict(array):
    """Collect configuration facts: DNS, SMTP, SNMP, directory service,
    NTP, syslog and SSL certificate details."""
    config_facts = {}
    # DNS
    config_facts['dns'] = array.get_dns()
    # SMTP
    config_facts['smtp'] = array.list_alert_recipients()
    # SNMP
    config_facts['snmp'] = array.list_snmp_managers()
    # Directory service (e.g. LDAP/AD), merged with its group settings.
    config_facts['directory_service'] = array.get_directory_service()
    config_facts['directory_service'].update(array.get_directory_service(groups=True))
    # NTP
    config_facts['ntp'] = array.get(ntpserver=True)['ntpserver']
    # SYSLOG
    config_facts['syslog'] = array.get(syslogserver=True)['syslogserver']
    # SSL
    config_facts['ssl_certs'] = array.get_certificate()
    return config_facts
def generate_subnet_dict(array):
    """Return facts for each *enabled* subnet, keyed by subnet name.

    Disabled subnets are omitted entirely.
    """
    sub_facts = {}
    # Iterate the records directly instead of the original
    # ``for i in range(0, len(...))`` index loop.
    for subnet in array.list_subnets():
        if not subnet['enabled']:
            continue
        sub_facts[subnet['name']] = {
            'gateway': subnet['gateway'],
            'mtu': subnet['mtu'],
            'vlan': subnet['vlan'],
            'prefix': subnet['prefix'],
            'interfaces': subnet['interfaces'],
            'services': subnet['services'],
        }
    return sub_facts
def generate_network_dict(array):
    """Return facts for each *enabled* network interface, keyed by name.

    Subnet details are attached only for enabled interfaces: the original
    looked up ``net_facts[int_name]`` for the subnet even when the interface
    was disabled and had never been added, raising KeyError.
    """
    net_facts = {}
    for port in array.list_network_interfaces():
        if not port['enabled']:
            continue  # disabled interfaces are omitted entirely
        int_name = port['name']
        net_facts[int_name] = {
            'hwaddr': port['hwaddr'],
            'mtu': port['mtu'],
            'speed': port['speed'],
            'address': port['address'],
            'services': port['services'],
            'gateway': port['gateway'],
            'netmask': port['netmask'],
        }
        if port['subnet']:
            subnet = array.get_subnet(port['subnet'])
            # Only record the subnet when it is itself enabled.
            if subnet['enabled']:
                net_facts[int_name]['subnet'] = {
                    'name': subnet['name'],
                    'prefix': subnet['prefix'],
                    'vlan': subnet['vlan'],
                }
    return net_facts
def generate_capacity_dict(array):
    """Collect capacity facts (empty on arrays with too old a REST API)."""
    capacity_facts = {}
    api_version = array._list_available_rest_versions()
    if CAP_REQUIRED_API_VERSION in api_version:
        # Provisioned space is the sum of all volume sizes, including
        # volumes pending eradication.
        volumes = array.list_volumes(pending=True)
        capacity_facts['provisioned_space'] = sum(item['size'] for item in volumes)
        capacity = array.get(space=True)
        total_capacity = capacity[0]['capacity']
        used_space = capacity[0]["total"]
        capacity_facts['free_space'] = total_capacity - used_space
        capacity_facts['total_capacity'] = total_capacity
        capacity_facts['data_reduction'] = capacity[0]['data_reduction']
        capacity_facts['system_space'] = capacity[0]['system']
        capacity_facts['volume_space'] = capacity[0]['volumes']
        capacity_facts['shared_space'] = capacity[0]['shared_space']
        capacity_facts['snapshot_space'] = capacity[0]['snapshots']
        capacity_facts['thin_provisioning'] = capacity[0]['thin_provisioning']
        capacity_facts['total_reduction'] = capacity[0]['total_reduction']
    return capacity_facts
def generate_snap_dict(array):
    """Return facts for every volume snapshot, keyed by snapshot name."""
    snap_facts = {}
    # Iterate the records directly instead of the original index loop.
    for snap in array.list_volumes(snap=True):
        snap_facts[snap['name']] = {
            'size': snap['size'],
            'source': snap['source'],
            'created': snap['created'],
        }
    return snap_facts
def generate_vol_dict(array):
    """Return facts for every volume, including its host connections."""
    volume_facts = {}
    for vol in array.list_volumes():
        volume_facts[vol['name']] = {
            'size': vol['size'],
            'serial': vol['serial'],
            'hosts': []
        }
    # A second query lists host connections; append [host, lun] pairs to
    # the matching volume entry.
    for conn in array.list_volumes(connect=True):
        volume_facts[conn['name']]['hosts'].append([conn['host'], conn['lun']])
    return volume_facts
def generate_host_dict(array):
    """Return facts for every host: host group membership, IQNs and WWNs."""
    host_facts = {}
    # Iterate the records directly instead of the original index loop.
    for host in array.list_hosts():
        host_facts[host['name']] = {
            'hgroup': host['hgroup'],
            'iqn': host['iqn'],
            'wwn': host['wwn'],
        }
    return host_facts
def generate_pgroups_dict(array):
    """Return facts for every protection group, keyed by group name."""
    pgroups_facts = {}
    # Iterate the records directly instead of the original index loop.
    for pgroup in array.list_pgroups():
        pgroups_facts[pgroup['name']] = {
            'hgroups': pgroup['hgroups'],
            'hosts': pgroup['hosts'],
            'source': pgroup['source'],
            'targets': pgroup['targets'],
            'volumes': pgroup['volumes'],
        }
    return pgroups_facts
def generate_hgroups_dict(array):
    """Return facts for every host group: member hosts, protection groups
    ('pgs') and connected volumes ('vols', as [volume, lun] pairs)."""
    hgroups_facts = {}
    for hgroup in array.list_hgroups():
        hgroups_facts[hgroup['name']] = {
            'hosts': hgroup['hosts'],
            'pgs': [],
            'vols': [],
        }
    # Protection-group membership comes from a second query...
    for entry in array.list_hgroups(protect=True):
        hgroups_facts[entry['name']]['pgs'].append(entry['protection_group'])
    # ...and volume connections from a third.
    for entry in array.list_hgroups(connect=True):
        hgroups_facts[entry['name']]['vols'].append([entry['vol'], entry['lun']])
    return hgroups_facts
def generate_interfaces_dict(array):
    """Map each port name to its WWN or IQN.

    As in the original, when a port reports both, the IQN assignment runs
    last and wins; ports with neither are omitted.
    """
    int_facts = {}
    for port in array.list_ports():
        if port['wwn']:
            int_facts[port['name']] = port['wwn']
        if port['iqn']:
            int_facts[port['name']] = port['iqn']
    return int_facts
def main():
    """Module entry point: validate gather_subset and emit the facts dict."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        gather_subset=dict(default='minimum', type='list',)
    ))
    module = AnsibleModule(argument_spec, supports_check_mode=False)
    array = get_system(module)

    # Normalise the requested subsets to lower case, then validate each one
    # against the supported set before collecting anything.
    subset = [test.lower() for test in module.params['gather_subset']]
    valid_subsets = ('all', 'minimum', 'config', 'performance', 'capacity',
                     'network', 'subnet', 'interfaces', 'hgroups', 'pgroups',
                     'hosts', 'volumes', 'snapshots')
    subset_test = (test in valid_subsets for test in subset)
    if not all(subset_test):
        module.fail_json(msg="value must gather_subset must be one or more of: %s, got: %s"
                             % (",".join(valid_subsets), ",".join(subset)))

    # Each requested subset maps to one generate_*_dict() helper; 'all'
    # selects every subset at once.
    facts = {}
    if 'minimum' in subset or 'all' in subset:
        facts['default'] = generate_default_dict(array)
    if 'performance' in subset or 'all' in subset:
        facts['performance'] = generate_perf_dict(array)
    if 'config' in subset or 'all' in subset:
        facts['config'] = generate_config_dict(array)
    if 'capacity' in subset or 'all' in subset:
        facts['capacity'] = generate_capacity_dict(array)
    if 'network' in subset or 'all' in subset:
        facts['network'] = generate_network_dict(array)
    if 'subnet' in subset or 'all' in subset:
        facts['subnet'] = generate_subnet_dict(array)
    if 'interfaces' in subset or 'all' in subset:
        facts['interfaces'] = generate_interfaces_dict(array)
    if 'hosts' in subset or 'all' in subset:
        facts['hosts'] = generate_host_dict(array)
    if 'volumes' in subset or 'all' in subset:
        facts['volumes'] = generate_vol_dict(array)
    if 'snapshots' in subset or 'all' in subset:
        facts['snapshots'] = generate_snap_dict(array)
    if 'hgroups' in subset or 'all' in subset:
        facts['hgroups'] = generate_hgroups_dict(array)
    if 'pgroups' in subset or 'all' in subset:
        facts['pgroups'] = generate_pgroups_dict(array)

    result = dict(ansible_purefa_facts=facts,)

    module.exit_json(**result)
|
abhishekshanbhag/emotion_based_spotify | refs/heads/master | Emotify_Final_Files/classify_with_openCV.py | 1 | '''
This script performs emotion classification on the photo taken by the user. The
script performs the following steps:
1. Download photo from Amazon S3 storage bucket
2. Covert colored photo to grayscale
3. Loads trained convolutional neural network
4. Performs emotion prediction
'''
# Load required functions and classes
import numpy
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import sys
# Keras with Theano-style image tensors: (channels, rows, cols).
K.set_image_dim_ordering('th')

# Exactly one argument is required: the S3 key name of the photo to classify.
if(len(sys.argv) != 2):
    sys.exit(1)

# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)

# Download image from Amazon s3 bucket.
# rootkey_4.csv is expected to hold the AWS access key id on line 1 and the
# secret key on line 2, each behind a fixed-width label that is sliced off
# below -- TODO confirm the label widths (15 and 13 chars) match the file.
with open('rootkey_4.csv',"r") as infile:
    text = infile.readlines()
    infile.close()  # NOTE(review): redundant -- the with-block already closes the file
AWS_KEY = text[0].strip()
AWS_SECRET = text[1].strip()
AWS_KEY = AWS_KEY[15:]
AWS_SECRET = AWS_SECRET[13:]

predict_file_key = Key()
aws_connection = S3Connection(AWS_KEY, AWS_SECRET)
bucket = aws_connection.get_bucket('ec601imagebucket')
# Find the S3 key whose name matches the requested file.
# NOTE(review): on Python 3, `name.encode('utf-8')` yields bytes, which never
# compares equal to the str in sys.argv[1]; confirm this script runs under
# Python 2 (boto / old-Keras era) or the match will always fail.
for file_key in bucket.list():
    if(file_key.name.encode('utf-8') == sys.argv[1]):
        predict_file_key = file_key
        break
# `Key()` starts with name=None, so name is still None when nothing matched.
if(predict_file_key.name == None):
    #print('No such file found')
    sys.exit(1)
predict_file_key.get_contents_to_filename('test_image.jpg')
# Remove the processed photo from the bucket.
bucket.delete_key(predict_file_key)
# Covert photo to grayscale
def rgb2gray(rgb):
    """Collapse an RGB image array to grayscale with ITU-R BT.601 luma weights.

    Accepts any array whose last axis has at least 3 channels; returns an
    array with that axis reduced away.
    """
    luma_weights = [0.299, 0.587, 0.114]
    return numpy.dot(rgb[..., :3], luma_weights)
def detect(img, cascade):
    """Run a Haar-cascade face detector over a grayscale image.

    Parameters:
        img: grayscale image array handed to OpenCV.
        cascade: a cv2.CascadeClassifier instance.

    Returns detected rectangles as (x1, y1, x2, y2) absolute corners, or an
    empty list when nothing is found.  (The original had a dead initial
    ``rects = []`` assignment and a redundant ``list([])``; both removed.)
    """
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4,
                                     minSize=(30, 30),
                                     flags=cv2.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    # detectMultiScale yields (x, y, w, h); add the origin onto the size
    # columns so callers get bottom-right corners instead of extents.
    rects[:, 2:] += rects[:, :2]
    return rects
# Load the Haar cascade used for face detection.
cascade_fn = "face_detection_files/haarcascades/haarcascade_frontalface_alt.xml"
cascade = cv2.CascadeClassifier(cascade_fn)
#img = mpimg.imread('test_image.jpg')
img = cv2.imread('test_image.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

rects = detect(gray, cascade)
if(len(rects)):
    # Pick the largest detected face rectangle by area.
    i = 0
    biggest_sz = 0
    biggest_index = 0
    for x1, y1, x2, y2 in rects:
        temp_face = gray[y1:y2, x1:x2]  # NOTE(review): unused
        sz1 = (y2-y1)*(x2-x1)
        if sz1 > biggest_sz:
            biggest_sz = sz1
            biggest_index = i
        i = i+1
    x1, y1, x2, y2 = rects[biggest_index]
    roi = gray[y1:y2, x1:x2]
else:
    # No face found: fall back to classifying the whole frame.
    roi = gray
    #print('face not detected. Please click from another angle of lighting.')

# Normalize contrast and scale to the 48x48 input the network was trained on.
roi = cv2.equalizeHist(roi)
roi = cv2.resize(roi, (48, 48))
# X_test = rgb2gray(img)
#X_test = X_test.reshape(1,1,48, 48).astype('float32')
X_test = roi.reshape(1,1,48, 48).astype('float32')
# Scale pixel values to [0, 1].
X_test = X_test / 255
# plt.imshow(gray_out,cmap = plt.get_cmap('gray'))
# plt.show()'''
# y_test = np_utils.to_categorical(y_test)
# Y_test = testset[:,2304]

# Load trained convolutional neural network model (both structure and weights).
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
# Compile is required before predict/evaluate can be called.
loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
label_predict = loaded_model.predict(X_test)
# Emit the index of the most probable emotion class on stdout.
label_predict = numpy.argmax(label_predict)
# image_num = 2;
print(label_predict)
|
codebhendi/scrapy | refs/heads/master | scrapy/pipelines/__init__.py | 142 | """
Item pipeline
See documentation in docs/item-pipeline.rst
"""
from scrapy.middleware import MiddlewareManager
from scrapy.utils.conf import build_component_list
class ItemPipelineManager(MiddlewareManager):
    """Middleware manager that chains the item pipeline components."""

    component_name = 'item pipeline'

    @classmethod
    def _get_mwlist_from_settings(cls, settings):
        """Resolve the ordered list of pipeline classes from the settings,
        accepting (with a deprecation warning) the legacy sequence form."""
        item_pipelines = settings['ITEM_PIPELINES']
        if isinstance(item_pipelines, (tuple, list, set, frozenset)):
            from scrapy.exceptions import ScrapyDeprecationWarning
            import warnings
            warnings.warn('ITEM_PIPELINES defined as a list or a set is deprecated, switch to a dict',
                          category=ScrapyDeprecationWarning, stacklevel=1)
            # Legacy sequence form: assign increasing orders starting at 500.
            item_pipelines = {pipeline: order
                              for order, pipeline in enumerate(item_pipelines, 500)}
        return build_component_list(settings['ITEM_PIPELINES_BASE'], item_pipelines)

    def _add_middleware(self, pipe):
        """Register a pipeline component, hooking up its process_item hook."""
        super(ItemPipelineManager, self)._add_middleware(pipe)
        if hasattr(pipe, 'process_item'):
            self.methods['process_item'].append(pipe.process_item)

    def process_item(self, item, spider):
        """Send *item* through every pipeline's process_item in order."""
        return self._process_chain('process_item', item, spider)
|
tamsky/ansible-upstream | refs/heads/devel | lib/ansible/plugins/action/normal.py | 16 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """The 'normal' action: execute the task's module on the target host."""

    def run(self, tmp=None, task_vars=None):
        """Run the module and return its results, stripping executor-internal
        fields that only the engine may set."""
        task_vars = dict() if task_vars is None else task_vars

        results = super(ActionModule, self).run(tmp, task_vars)
        module_result = self._execute_module(tmp=tmp, task_vars=task_vars)
        results.update(module_result)

        # Remove special fields from the result, which can only be set
        # internally by the executor engine. We do this only here in
        # the 'normal' action, as other action plugins may set this.
        for internal_field in ('ansible_notify',):
            results.pop(internal_field, None)

        return results
|
lengtche/beets | refs/heads/master | beetsplug/permissions.py | 18 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
"""Fixes file permissions after the file gets written on import. Put something
like the following in your config.yaml to configure:
permissions:
file: 644
dir: 755
"""
import os
from beets import config, util
from beets.plugins import BeetsPlugin
from beets.util import ancestry
import six
def convert_perm(perm):
    """Convert a string to an integer, interpreting the text as octal.

    Or, if `perm` is an integer, reinterpret it as an octal number that
    has been "misinterpreted" as decimal.
    """
    as_text = six.text_type(perm) if isinstance(perm, six.integer_types) else perm
    return int(as_text, 8)
def check_permissions(path, permission):
    """Check whether the file's permissions equal the given vector.

    `permission` is an integer bit vector such as 0o644.  Return a boolean.
    """
    # Compare the permission bits directly; the previous oct()/oct()
    # string comparison was an equivalent but roundabout form.
    return (os.stat(path).st_mode & 0o777) == permission
def assert_permissions(path, permission, log):
    """Check whether the file's permissions are as expected, otherwise,
    log a warning message. Return a boolean indicating the match, like
    `check_permissions`.
    """
    matched = check_permissions(util.syspath(path), permission)
    if not matched:
        log.warning(
            u'could not set permissions on {}',
            util.displayable_path(path),
        )
        log.debug(
            u'set permissions to {}, but permissions are now {}',
            permission,
            os.stat(util.syspath(path)).st_mode & 0o777,
        )
    # Fix: the docstring always promised a boolean, but the function
    # previously returned None.  Callers that ignored the return value
    # are unaffected.
    return matched
def dirs_in_library(library, item):
    """Creates a list of ancestor directories in the beets library path."""
    in_library = []
    for ancestor in ancestry(item):
        if ancestor.startswith(library):
            in_library.append(ancestor)
    # Drop the first (outermost) matching directory, i.e. the library root.
    return in_library[1:]
class Permissions(BeetsPlugin):
    """Plugin that chmods imported files (and their library directories)
    to the modes configured under the ``permissions:`` section."""

    def __init__(self):
        super(Permissions, self).__init__()

        # Adding defaults.
        self.config.add({
            u'file': '644',
            u'dir': '755',
        })

        self.register_listener('item_imported', self.fix)
        self.register_listener('album_imported', self.fix)

    def fix(self, lib, item=None, album=None):
        """Fix the permissions for an imported Item or Album."""
        # The configured permissions may be a YAML-quoted string or, for
        # convenience, a bare integer; bare integers must be reinterpreted
        # as octal, not decimal.
        file_perm = convert_perm(config['permissions']['file'].get())
        dir_perm = convert_perm(config['permissions']['dir'].get())

        # Collect the files whose permissions need fixing.
        if item:
            paths = [item.path]
        elif album:
            paths = [album_item.path for album_item in album.items()]
        else:
            paths = []

        # A set of directories to change permissions for.
        dirs = set()

        for path in paths:
            # Change permissions on the destination file, then verify.
            self._log.debug(
                u'setting file permissions on {}',
                util.displayable_path(path),
            )
            os.chmod(util.syspath(path), file_perm)
            assert_permissions(path, file_perm, self._log)

            # Remember this file's ancestor directories inside the library.
            dirs.update(dirs_in_library(lib.directory, path))

        # Change permissions on the collected directories, then verify.
        for path in dirs:
            self._log.debug(
                u'setting directory permissions on {}',
                util.displayable_path(path),
            )
            os.chmod(util.syspath(path), dir_perm)
            assert_permissions(path, dir_perm, self._log)
|
flacjacket/sympy | refs/heads/master | sympy/printing/pretty/pretty.py | 2 | from sympy.core import S, C, Basic, Interval
from sympy.core.function import _coeff_isneg
from sympy.utilities import group
from sympy.core.sympify import SympifyError
from sympy.printing.printer import Printer
from sympy.printing.str import sstr
from stringpict import prettyForm, stringPict
from pretty_symbology import xstr, hobj, vobj, xobj, xsym, pretty_symbol,\
pretty_atom, pretty_use_unicode, pretty_try_use_unicode, greek, U, \
annotated
from sympy.utilities import default_sort_key
# rename for usage from outside
pprint_use_unicode = pretty_use_unicode
pprint_try_use_unicode = pretty_try_use_unicode
class PrettyPrinter(Printer):
"""Printer, which converts an expression into 2D ASCII-art figure."""
printmethod = "_pretty"
_default_settings = {
"order": None,
"full_prec": "auto",
"use_unicode": None,
"wrap_line": True,
"num_columns": None,
}
def __init__(self, settings=None):
    """Initialize the pretty printer with the given settings."""
    Printer.__init__(self, settings)
    # Fallback used when no _print_* method matches the expression.
    self.emptyPrinter = lambda x: prettyForm(xstr(x))
@property
def _use_unicode(self):
    """Whether output should use unicode drawing characters."""
    if self._settings['use_unicode']:
        return True
    return pretty_use_unicode()
def doprint(self, expr):
    """Render *expr* as a 2D text figure using the configured settings."""
    pform = self._print(expr)
    return pform.render(**self._settings)
# empty op so _print(stringPict) returns the same
def _print_stringPict(self, e):
    return e

def _print_basestring(self, e):
    # Plain strings are wrapped as-is into a pretty form.
    return prettyForm(e)
def _print_atan2(self, e):
    """Print atan2(y, x) in plain functional notation."""
    args_form = prettyForm(*self._print_seq(e.args).parens())
    return prettyForm(*args_form.left('atan2'))
def _print_Symbol(self, e):
    """Render a symbol name, using special glyphs when available."""
    return prettyForm(pretty_symbol(e.name))
def _print_Float(self, e):
    """Print a Float via StrPrinter, resolving the "auto" full_prec setting."""
    # we will use StrPrinter's Float printer, but we need to handle the
    # full_prec ourselves, according to the self._print_level
    full_prec = self._settings["full_prec"]
    if full_prec == "auto":
        # Full precision only for a top-level float, not inside expressions.
        full_prec = self._print_level == 1
    return prettyForm(sstr(e, full_prec=full_prec))
def _print_Atom(self, e):
    """Print special named atoms via the unicode/ascii glyph table."""
    try:
        # print atoms like Exp1 or Pi
        return prettyForm(pretty_atom(e.__class__.__name__))
    except KeyError:
        # Not in the glyph table: fall back to plain stringification.
        return self.emptyPrinter(e)

# Infinity inherits from Number, so we have to override _print_XXX order
_print_Infinity = _print_Atom
_print_NegativeInfinity = _print_Atom
_print_EmptySet = _print_Atom
_print_Naturals = _print_Atom
_print_Integers = _print_Atom
_print_Reals = _print_Atom
def _print_factorial(self, e):
    """Print n! -- parenthesized unless the argument is a symbol or a
    nonnegative integer."""
    arg = e.args[0]
    pform = self._print(arg)
    if not ((arg.is_Integer and arg.is_nonnegative) or arg.is_Symbol):
        pform = prettyForm(*pform.parens())
    return prettyForm(*pform.right('!'))
def _print_factorial2(self, e):
    """Print the double factorial n!! -- same parenthesization rule as n!."""
    arg = e.args[0]
    pform = self._print(arg)
    if not ((arg.is_Integer and arg.is_nonnegative) or arg.is_Symbol):
        pform = prettyForm(*pform.parens())
    return prettyForm(*pform.right('!!'))
def _print_binomial(self, e):
    """Stack n over k inside parentheses to draw a binomial coefficient."""
    n, k = e.args
    top = self._print(n)
    bottom = self._print(k)

    # A blank spacer row keeps n and k visually separated inside the parens.
    gap = ' ' * max(top.width(), bottom.width())

    pform = prettyForm(*bottom.above(gap))
    pform = prettyForm(*pform.above(top))
    pform = prettyForm(*pform.parens('(', ')'))

    pform.baseline = (pform.baseline + 1)//2

    return pform
def _print_Relational(self, e):
    """Print `lhs OP rhs` using the relation's pretty operator symbol."""
    rel_op = prettyForm(' ' + xsym(e.rel_op) + ' ')
    lhs = self._print(e.lhs)
    rhs = self._print(e.rhs)
    return prettyForm(*stringPict.next(lhs, rel_op, rhs))
def _print_Not(self, e):
    """Print logical negation with the unicode NOT sign; ascii mode falls
    back to function notation."""
    if not self._use_unicode:
        return self._print_Function(e)

    arg = e.args[0]
    pform = self._print(arg)
    # Parenthesize compound boolean operands so the NOT binds visibly.
    if arg.is_Boolean and not arg.is_Not:
        pform = prettyForm(*pform.parens())
    return prettyForm(*pform.left(u"\u00ac "))
def __print_Boolean(self, e, char, sort=True):
    """Join the boolean operands of *e* with the operator glyph *char*,
    optionally after sorting them canonically."""
    args = sorted(e.args, key=default_sort_key) if sort else e.args

    def printed_operand(a):
        # Parenthesize compound boolean sub-expressions (but not Not).
        form = self._print(a)
        if a.is_Boolean and not a.is_Not:
            form = prettyForm(*form.parens())
        return form

    pform = printed_operand(args[0])
    for a in args[1:]:
        pform = prettyForm(*pform.right(u' %s ' % char))
        pform = prettyForm(*pform.right(printed_operand(a)))
    return pform
# The binary boolean connectives all delegate to __print_Boolean with their
# unicode operator glyph, falling back to function notation in ascii mode.
# Implies keeps its argument order (sort=False); the others sort operands.
def _print_And(self, e):
    if self._use_unicode:
        return self.__print_Boolean(e, u"\u2227")
    else:
        return self._print_Function(e, sort=True)

def _print_Or(self, e):
    if self._use_unicode:
        return self.__print_Boolean(e, u"\u2228")
    else:
        return self._print_Function(e, sort=True)

def _print_Xor(self, e):
    if self._use_unicode:
        return self.__print_Boolean(e, u"\u22bb")
    else:
        return self._print_Function(e, sort=True)

def _print_Nand(self, e):
    if self._use_unicode:
        return self.__print_Boolean(e, u"\u22bc")
    else:
        return self._print_Function(e, sort=True)

def _print_Nor(self, e):
    if self._use_unicode:
        return self.__print_Boolean(e, u"\u22bd")
    else:
        return self._print_Function(e, sort=True)

def _print_Implies(self, e):
    if self._use_unicode:
        return self.__print_Boolean(e, u"\u2192", sort=False)
    else:
        return self._print_Function(e)

def _print_Equivalent(self, e):
    if self._use_unicode:
        return self.__print_Boolean(e, u"\u2261")
    else:
        return self._print_Function(e, sort=True)
def _print_conjugate(self, e):
    """Draw an overbar across the printed argument."""
    pform = self._print(e.args[0])
    bar = hobj('_', pform.width())
    return prettyForm(*pform.above(bar))
def _print_Abs(self, e):
    """Wrap the argument in vertical bars."""
    inner = self._print(e.args[0])
    return prettyForm(*inner.parens('|', '|'))
def _print_floor(self, e):
    # Unicode mode draws proper floor brackets; ascii mode falls back to
    # ordinary function notation floor(...).
    if self._use_unicode:
        pform = self._print(e.args[0])
        pform = prettyForm(*pform.parens('lfloor', 'rfloor'))
        return pform
    else:
        return self._print_Function(e)

def _print_ceiling(self, e):
    # Same scheme as _print_floor, with ceiling brackets.
    if self._use_unicode:
        pform = self._print(e.args[0])
        pform = prettyForm(*pform.parens('lceil', 'rceil'))
        return pform
    else:
        return self._print_Function(e)
def _print_Derivative(self, deriv):
    """Print a derivative in d^n/(dx dy ...) fraction notation."""
    # XXX use U('PARTIAL DIFFERENTIAL') here ?
    syms = list(reversed(deriv.variables))
    x = None

    # Build the denominator: a 'd<sym>' factor per variable, with a power
    # when the same variable repeats consecutively.
    for sym, num in group(syms, multiple=False):
        s = self._print(sym)
        ds = prettyForm(*s.left('d'))

        if num > 1:
            ds = ds**prettyForm(str(num))

        if x is None:
            x = ds
        else:
            x = prettyForm(*x.right(' '))
            x = prettyForm(*x.right(ds))

    f = prettyForm(binding=prettyForm.FUNC, *self._print(deriv.expr).parens())

    # Numerator: 'd' with the total differentiation order as exponent.
    pform = prettyForm('d')
    if len(syms) > 1:
        pform = pform**prettyForm(str(len(syms)))

    pform = prettyForm(*pform.below(stringPict.LINE, x))
    pform.baseline = pform.baseline + 1
    pform = prettyForm(*stringPict.next(pform, f))
    return pform
def _print_PDF(self, pdf):
    """Print a PDF object as PDF(density, (var, lower, upper))."""
    # Inner tuple: (variable, lower bound, upper bound).
    lim = self._print(pdf.pdf.args[0])
    lim = prettyForm(*lim.right(', '))
    lim = prettyForm(*lim.right(self._print(pdf.domain[0])))
    lim = prettyForm(*lim.right(', '))
    lim = prettyForm(*lim.right(self._print(pdf.domain[1])))
    lim = prettyForm(*lim.parens())

    # Argument list: (density expression, limits tuple).
    f = self._print(pdf.pdf.args[1])
    f = prettyForm(*f.right(', '))
    f = prettyForm(*f.right(lim))
    f = prettyForm(*f.parens())

    pform = prettyForm('PDF')
    pform = prettyForm(*pform.right(f))
    return pform
def _print_Integral(self, integral):
    """Draw an integral: nested integral signs (with optional endpoints)
    followed by the integrand and the trailing d<var> factors."""
    f = integral.function

    # Add parentheses if arg involves addition of terms and
    # create a pretty form for the argument
    prettyF = self._print(f)
    # XXX generalize parens
    if f.is_Add:
        prettyF = prettyForm(*prettyF.parens())

    # dx dy dz ...
    arg = prettyF
    for x in integral.limits:
        prettyArg = self._print(x[0])
        # XXX qparens (parens if needs-parens)
        if prettyArg.width() > 1:
            prettyArg = prettyForm(*prettyArg.parens())

        arg = prettyForm(*arg.right(' d', prettyArg))

    # \int \int \int ...
    firstterm = True
    s = None
    for lim in integral.limits:
        x = lim[0]
        # Create bar based on the height of the argument
        h = arg.height()
        H = h + 2

        # XXX hack!
        ascii_mode = not self._use_unicode
        if ascii_mode:
            H += 2

        vint = vobj('int', H)

        # Construct the pretty form with the integral sign and the argument
        pform = prettyForm(vint)
        #pform.baseline = pform.height()//2  # vcenter
        pform.baseline = arg.baseline + (H - h)//2  # covering the whole argument

        if len(lim) > 1:
            # Create pretty forms for endpoints, if definite integral.
            # Do not print empty endpoints.
            if len(lim) == 2:
                prettyA = prettyForm("")
                prettyB = self._print(lim[1])
            if len(lim) == 3:
                prettyA = self._print(lim[1])
                prettyB = self._print(lim[2])

            if ascii_mode:  # XXX hack
                # Add spacing so that endpoint can more easily be
                # identified with the correct integral sign
                spc = max(1, 3 - prettyB.width())
                prettyB = prettyForm(*prettyB.left(' ' * spc))

                spc = max(1, 4 - prettyA.width())
                prettyA = prettyForm(*prettyA.right(' ' * spc))

            pform = prettyForm(*pform.above(prettyB))
            pform = prettyForm(*pform.below(prettyA))

        #if ascii_mode:  # XXX hack
        #    # too much vspace beetween \int and argument
        #    # but I left it as is
        #    pform = prettyForm(*pform.right(' '))

        if not ascii_mode:  # XXX hack
            pform = prettyForm(*pform.right(' '))

        if firstterm:
            s = pform  # first term
            firstterm = False
        else:
            s = prettyForm(*s.left(pform))

    pform = prettyForm(*arg.left(s))
    return pform
def _print_Product(self, expr):
    """Draw a product: a pi-like sign per limit, sized to the term height,
    with the upper bound above and `var = lower` below each sign."""
    func = expr.term
    pretty_func = self._print(func)

    horizontal_chr = xobj('_', 1)
    corner_chr = xobj('_', 1)
    vertical_chr = xobj('|', 1)

    if self._use_unicode:
        # use unicode corners
        horizontal_chr = xobj('-', 1)
        corner_chr = u'\u252c'

    func_height = pretty_func.height()

    first = True
    max_upper = 0
    sign_height = 0

    for lim in expr.limits:
        # Sign width grows with the term height to keep proportions.
        width = (func_height + 2) * 5 // 3 - 2
        sign_lines = []
        sign_lines.append(corner_chr + (horizontal_chr*width) + corner_chr)
        for i in range(func_height + 1):
            sign_lines.append(vertical_chr + (' '*width) + vertical_chr)

        pretty_sign = stringPict('')
        pretty_sign = prettyForm(*pretty_sign.stack(*sign_lines))

        pretty_upper = self._print(lim[2])
        pretty_lower = self._print(C.Equality(lim[0], lim[1]))

        max_upper = max(max_upper, pretty_upper.height())

        if first:
            sign_height = pretty_sign.height()

        pretty_sign = prettyForm(*pretty_sign.above(pretty_upper))
        pretty_sign = prettyForm(*pretty_sign.below(pretty_lower))

        if first:
            pretty_func.baseline = 0
            first = False

        height = pretty_sign.height()
        padding = stringPict('')
        padding = prettyForm(*padding.stack(*[' ']*(height - 1)))
        pretty_sign = prettyForm(*pretty_sign.right(padding))

        pretty_func = prettyForm(*pretty_sign.right(pretty_func))

    #pretty_func.baseline = 0
    pretty_func.baseline = max_upper + sign_height//2
    return pretty_func
def _print_Sum(self, expr):
    """Draw a summation: one sigma per limit, sized to the summand height,
    with the upper bound above and `var = lower` below each sign."""
    ascii_mode = not self._use_unicode

    def asum(hrequired, lower, upper, use_ascii):
        # Build the raw sigma glyph as a list of text lines; returns
        # (d, height, lines, baseline adjustment).
        def adjust(s, wid=None, how='<^>'):
            if not wid or len(s) > wid:
                return s
            need = wid - len(s)
            if how == '<^>' or how == "<" or how not in list('<^>'):
                return s + ' '*need
            half = need//2
            lead = ' '*half
            if how == ">":
                return " "*need + s
            return lead + s + ' '*(need - len(lead))
        h = max(hrequired, 2)
        d = h//2
        # NOTE(review): wrequired is computed but never used below.
        wrequired = max(lower, upper)
        w = d + 1
        more = hrequired % 2
        lines = []
        if use_ascii:
            lines.append("_"*(w) + ' ')
            lines.append("\%s`" % (' '*(w - 1)))
            for i in range(1, d):
                lines.append('%s\\%s' % (' '*i, ' '*(w - i)))
            if more:
                lines.append('%s)%s' % (' '*(d), ' '*(w - d)))
            for i in reversed(range(1, d)):
                lines.append('%s/%s' % (' '*i, ' '*(w - i)))
            lines.append("/" + "_"*(w - 1) + ',')
            return d, h + more, lines, 0
        else:
            w = w + more
            d = d + more
            vsum = vobj('sum', 4)
            lines.append("_"*(w))
            for i in range(0, d):
                lines.append('%s%s%s' % (' '*i, vsum[2], ' '*(w - i - 1)))
            for i in reversed(range(0, d)):
                lines.append('%s%s%s' % (' '*i, vsum[4], ' '*(w - i - 1)))
            lines.append(vsum[8]*(w))
            return d, h + 2*more, lines, more

    f = expr.function

    prettyF = self._print(f)

    if f.is_Add:  # add parens
        prettyF = prettyForm(*prettyF.parens())

    H = prettyF.height() + 2

    # \sum \sum \sum ...
    first = True
    max_upper = 0
    sign_height = 0

    for lim in expr.limits:
        # Limits may be (var,), (var, lower) or (var, lower, upper).
        if len(lim) == 3:
            prettyUpper = self._print(lim[2])
            prettyLower = self._print(C.Equality(lim[0], lim[1]))
        elif len(lim) == 2:
            prettyUpper = self._print("")
            prettyLower = self._print(C.Equality(lim[0], lim[1]))
        elif len(lim) == 1:
            prettyUpper = self._print("")
            prettyLower = self._print(lim[0])

        max_upper = max(max_upper, prettyUpper.height())

        # Create sum sign based on the height of the argument
        d, h, slines, adjustment = asum(H, prettyLower.width(), prettyUpper.width(), ascii_mode)
        prettySign = stringPict('')
        prettySign = prettyForm(*prettySign.stack(*slines))

        if first:
            sign_height = prettySign.height()

        prettySign = prettyForm(*prettySign.above(prettyUpper))
        prettySign = prettyForm(*prettySign.below(prettyLower))

        if first:
            # change F baseline so it centers on the sign
            prettyF.baseline -= d - (prettyF.height()//2 -
                                     prettyF.baseline) - adjustment
            first = False

        # put padding to the right
        pad = stringPict('')
        pad = prettyForm(*pad.stack(*[' ']*h))
        prettySign = prettyForm(*prettySign.right(pad))
        # put the present prettyF to the right
        prettyF = prettyForm(*prettySign.right(prettyF))

    prettyF.baseline = max_upper + sign_height//2
    return prettyF
def _print_Limit(self, l):
    """Print lim_{z -> z0} e; the direction of approach is not rendered."""
    # XXX we do not print dir ...
    # Renamed the unpacked direction to avoid shadowing the builtin `dir`.
    e, z, z0, _direction = l.args

    E = self._print(e)
    Lim = prettyForm('lim')

    LimArg = self._print(z)
    LimArg = prettyForm(*LimArg.right('->'))
    LimArg = prettyForm(*LimArg.right(self._print(z0)))

    Lim = prettyForm(*Lim.below(LimArg))
    Lim = prettyForm(*Lim.right(E))

    return Lim
def _print_matrix_contents(self, e):
    """
    This method factors out what is essentially grid printing.
    """
    M = e  # matrix
    Ms = {}  # i,j -> pretty(M[i,j])
    for i in range(M.rows):
        for j in range(M.cols):
            Ms[i, j] = self._print(M[i, j])

    # h- and v- spacers
    hsep = 2
    vsep = 1

    # max width for columns
    maxw = [-1] * M.cols

    for j in range(M.cols):
        maxw[j] = max([Ms[i, j].width() for i in range(M.rows)] or [0])

    # drawing result
    D = None

    for i in range(M.rows):
        D_row = None
        for j in range(M.cols):
            s = Ms[i, j]

            # reshape s to maxw
            # XXX this should be generalized, and go to stringPict.reshape ?
            assert s.width() <= maxw[j]

            # hcenter it, +0.5 to the right
            # ( it's better to align formula starts for say 0 and r )
            # XXX this is not good in all cases -- maybe introduce vbaseline?
            wdelta = maxw[j] - s.width()
            wleft = wdelta // 2
            wright = wdelta - wleft

            s = prettyForm(*s.right(' '*wright))
            s = prettyForm(*s.left(' '*wleft))

            # we don't need vcenter cells -- this is automatically done in
            # a pretty way because when their baselines are taking into
            # account in .right()

            if D_row is None:
                D_row = s  # first box in a row
                continue

            D_row = prettyForm(*D_row.right(' '*hsep))  # h-spacer
            D_row = prettyForm(*D_row.right(s))

        if D is None:
            D = D_row  # first row in a picture
            continue

        # v-spacer
        for _ in range(vsep):
            D = prettyForm(*D.below(' '))

        D = prettyForm(*D.below(D_row))

    if D is None:
        D = prettyForm('')  # Empty Matrix

    return D
def _print_MatrixBase(self, e):
    """Print a matrix: its grid of entries wrapped in square brackets."""
    contents = self._print_matrix_contents(e)
    return prettyForm(*contents.parens('[', ']'))

_print_ImmutableMatrix = _print_MatrixBase
_print_MutableMatrix = _print_MatrixBase
def _print_Transpose(self, T):
    """Print a matrix transpose as expr' (postfix prime)."""
    inner = self._print(T.arg)
    if T.arg.is_Add or T.arg.is_Mul or T.arg.is_Pow:
        inner = prettyForm(*inner.parens())
    return prettyForm(*inner.right("'"))
def _print_Inverse(self, I):
    """Print a matrix inverse as expr^-1."""
    inner = self._print(I.arg)
    if I.arg.is_Add or I.arg.is_Mul or I.arg.is_Pow:
        inner = prettyForm(*inner.parens())
    return prettyForm(*inner.right("^-1"))
def _print_BlockMatrix(self, B):
    """Print a 1x1 block matrix as its single entry, else as the block grid."""
    if B.mat.shape == (1, 1):
        return self._print(B.mat[0, 0])
    return self._print(B.mat)
def _print_MatMul(self, expr):
    """Print a matrix product, parenthesizing Add factors."""
    factors = list(expr.args)
    for i, factor in enumerate(factors):
        if factor.is_Add and len(factors) > 1:
            factors[i] = prettyForm(*self._print(factor).parens())
        else:
            factors[i] = self._print(factor)

    return prettyForm.__mul__(*factors)
def _print_MatAdd(self, expr):
    # Matrix sum: just the terms joined by ' + '.
    return self._print_seq(expr.args, None, None, ' + ')
def _print_FunctionMatrix(self, X):
    """Print a FunctionMatrix as its entry formula in square brackets."""
    body = self._print(X.lamda.expr)
    return prettyForm(*body.parens('[', ']'))
def _print_Piecewise(self, pexpr):
    """Print a Piecewise as a two-column table of (expression, condition)
    rows behind a single left curly brace."""
    P = {}
    for n, ec in enumerate(pexpr.args):
        P[n, 0] = self._print(ec.expr)
        if ec.cond == True:
            P[n, 1] = prettyForm('otherwise')
        else:
            P[n, 1] = prettyForm(*prettyForm('for ').right(self._print(ec.cond)))
    hsep = 2
    vsep = 1
    len_args = len(pexpr.args)

    # max widths
    maxw = [max([P[i, j].width() for i in xrange(len_args)])
            for j in xrange(2)]

    # FIXME: Refactor this code and matrix into some tabular environment.

    # drawing result
    D = None

    for i in xrange(len_args):
        D_row = None
        for j in xrange(2):
            p = P[i, j]
            assert p.width() <= maxw[j]

            # hcenter each cell within its column.
            wdelta = maxw[j] - p.width()
            wleft = wdelta // 2
            wright = wdelta - wleft

            p = prettyForm(*p.right(' '*wright))
            p = prettyForm(*p.left(' '*wleft))

            if D_row is None:
                D_row = p
                continue

            D_row = prettyForm(*D_row.right(' '*hsep))  # h-spacer
            D_row = prettyForm(*D_row.right(p))

        if D is None:
            D = D_row  # first row in a picture
            continue

        # v-spacer
        for _ in range(vsep):
            D = prettyForm(*D.below(' '))

        D = prettyForm(*D.below(D_row))

    D = prettyForm(*D.parens('{', ''))
    return D
def _hprint_vec(self, v):
    """Join already-printed forms in *v* horizontally, comma-separated;
    an empty vector becomes a single blank cell."""
    joined = None
    for form in v:
        if joined is None:
            joined = form
        else:
            joined = prettyForm(*joined.right(', '))
            joined = prettyForm(*joined.right(form))
    if joined is None:
        joined = stringPict(' ')
    return joined
def _hprint_vseparator(self, p1, p2):
    """Place a full-height vertical bar between pictures p1 and p2."""
    combined = prettyForm(*p1.right(p2))
    bar = stringPict(vobj('|', combined.height()), baseline=combined.baseline)
    return prettyForm(*p1.right(bar, p2))
def _print_hyper(self, e):
    """Draw a hypergeometric function: stacked ap/bq parameter vectors
    beside the argument, prefixed by an annotated F carrying p and q."""
    # FIXME refactor Matrix, Piecewise, and this into a tabular environment
    ap = [self._print(a) for a in e.ap]
    bq = [self._print(b) for b in e.bq]

    P = self._print(e.argument)
    P.baseline = P.height()//2

    # Drawing result - first create the ap, bq vectors
    D = None
    for v in [ap, bq]:
        D_row = self._hprint_vec(v)
        if D is None:
            D = D_row  # first row in a picture
        else:
            D = prettyForm(*D.below(' '))
            D = prettyForm(*D.below(D_row))

    # make sure that the argument `z' is centred vertically
    D.baseline = D.height()//2

    # insert horizontal separator
    P = prettyForm(*P.left(' '))
    D = prettyForm(*D.right(' '))

    # insert separating `|`
    D = self._hprint_vseparator(D, P)

    # add parens
    D = prettyForm(*D.parens('(', ')'))

    # create the F symbol
    above = D.height()//2 - 1
    below = D.height() - above - 1

    # A dead unicode/ascii `pic` tuple was computed here but never used
    # (the annotated('F') glyph below supersedes it); it has been removed.
    sz, t, b, add, img = annotated('F')
    F = prettyForm('\n' * (above - t) + img + '\n' * (below - b),
                   baseline=above + sz)
    add = (sz + 1)//2

    F = prettyForm(*F.left(self._print(len(e.ap))))
    F = prettyForm(*F.right(self._print(len(e.bq))))
    F.baseline = above + add

    D = prettyForm(*F.right(' ', D))

    return D
def _print_meijerg(self, e):
    """Draw a Meijer G-function: a 2x2 table of parameter vectors beside
    the argument, prefixed by an annotated G with its four indices."""
    # FIXME refactor Matrix, Piecewise, and this into a tabular environment
    v = {}
    v[(0, 0)] = [self._print(a) for a in e.an]
    v[(0, 1)] = [self._print(a) for a in e.aother]
    v[(1, 0)] = [self._print(b) for b in e.bm]
    v[(1, 1)] = [self._print(b) for b in e.bother]

    P = self._print(e.argument)
    P.baseline = P.height()//2

    vp = {}
    for idx in v:
        vp[idx] = self._hprint_vec(v[idx])

    # hcenter each vector within its column.
    for i in range(2):
        maxw = max(vp[(0, i)].width(), vp[(1, i)].width())
        for j in range(2):
            s = vp[(j, i)]
            left = (maxw - s.width()) // 2
            right = maxw - left - s.width()
            s = prettyForm(*s.left(' ' * left))
            s = prettyForm(*s.right(' ' * right))
            vp[(j, i)] = s

    D1 = prettyForm(*vp[(0, 0)].right(' ', vp[(0, 1)]))
    D1 = prettyForm(*D1.below(' '))
    D2 = prettyForm(*vp[(1, 0)].right(' ', vp[(1, 1)]))
    D = prettyForm(*D1.below(D2))

    # make sure that the argument `z' is centred vertically
    D.baseline = D.height()//2

    # insert horizontal separator
    P = prettyForm(*P.left(' '))
    D = prettyForm(*D.right(' '))

    # insert separating `|`
    D = self._hprint_vseparator(D, P)

    # add parens
    D = prettyForm(*D.parens('(', ')'))

    # create the G symbol
    above = D.height()//2 - 1
    below = D.height() - above - 1

    sz, t, b, add, img = annotated('G')
    F = prettyForm('\n' * (above - t) + img + '\n' * (below - b),
                   baseline=above + sz)

    pp = self._print(len(e.ap))
    pq = self._print(len(e.bq))
    pm = self._print(len(e.bm))
    pn = self._print(len(e.an))

    # Pad the narrower of each index pair so the columns line up.
    def adjust(p1, p2):
        diff = p1.width() - p2.width()
        if diff == 0:
            return p1, p2
        elif diff > 0:
            return p1, prettyForm(*p2.left(' '*diff))
        else:
            return prettyForm(*p1.left(' '*-diff)), p2
    pp, pm = adjust(pp, pm)
    pq, pn = adjust(pq, pn)
    pu = prettyForm(*pm.right(', ', pn))
    pl = prettyForm(*pp.right(', ', pq))

    ht = F.baseline - above - 2
    if ht > 0:
        pu = prettyForm(*pu.below('\n'*ht))
    p = prettyForm(*pu.below(pl))

    F.baseline = above
    F = prettyForm(*F.right(p))

    F.baseline = above + add

    D = prettyForm(*F.right(' ', D))

    return D
def _print_ExpBase(self, e):
    """Print exp-like functions as the constant e raised to the argument."""
    # TODO should exp_polar be printed differently?
    # what about exp_polar(0), exp_polar(1)?
    e_glyph = prettyForm(pretty_atom('Exp1', 'e'))
    return e_glyph ** self._print(e.args[0])
def _print_Function(self, e, sort=False):
    """Print an applied function as name(args); optionally sort the
    arguments canonically.  (Removed a dead local ``n = len(args)``.)"""
    # XXX works only for applied functions
    args = sorted(e.args, key=default_sort_key) if sort else e.args

    prettyFunc = self._print(C.Symbol(e.func.__name__))
    prettyArgs = prettyForm(*self._print_seq(args).parens())

    pform = prettyForm(binding=prettyForm.FUNC,
                       *stringPict.next(prettyFunc, prettyArgs))

    # store pform parts so it can be reassembled e.g. when powered
    pform.prettyFunc = prettyFunc
    pform.prettyArgs = prettyArgs

    return pform
def _print_GeometryEntity(self, expr):
    # GeometryEntity is based on Tuple but should not print like a Tuple
    return self.emptyPrinter(expr)
def _print_Lambda(self, e):
    """Print Lambda(args, expr) in functional notation."""
    symbols, expr = e.args

    if len(symbols) == 1:
        sym_form = self._print(symbols[0])
    else:
        sym_form = self._print(tuple(symbols))

    prettyFunc = self._print(C.Symbol("Lambda"))
    prettyArgs = prettyForm(*self._print_seq((sym_form, self._print(expr))).parens())

    pform = prettyForm(binding=prettyForm.FUNC,
                       *stringPict.next(prettyFunc, prettyArgs))

    # store pform parts so it can be reassembled e.g. when powered
    pform.prettyFunc = prettyFunc
    pform.prettyArgs = prettyArgs

    return pform
def _print_Order(self, e):
    """Print an Order term as O(expr)."""
    inner = prettyForm(*self._print(e.expr).parens())
    return prettyForm(*inner.left('O'))
# The gamma family prints with the greek gamma glyph in unicode mode
# (capital for gamma/uppergamma, lowercase for lowergamma), and falls back
# to plain function notation in ascii mode.
def _print_gamma(self, e):
    if self._use_unicode:
        pform = self._print(e.args[0])
        pform = prettyForm(*pform.parens())
        pform = prettyForm(*pform.left(greek['gamma'][1]))
        return pform
    else:
        return self._print_Function(e)

def _print_uppergamma(self, e):
    if self._use_unicode:
        pform = self._print(e.args[0])
        pform = prettyForm(*pform.right(', ', self._print(e.args[1])))
        pform = prettyForm(*pform.parens())
        pform = prettyForm(*pform.left(greek['gamma'][1]))
        return pform
    else:
        return self._print_Function(e)

def _print_lowergamma(self, e):
    if self._use_unicode:
        pform = self._print(e.args[0])
        pform = prettyForm(*pform.right(', ', self._print(e.args[1])))
        pform = prettyForm(*pform.parens())
        pform = prettyForm(*pform.left(greek['gamma'][0]))
        return pform
    else:
        return self._print_Function(e)
def _print_expint(self, e):
    """Print expint(n, z) as E_n(z) when n is an integer and unicode is on;
    otherwise as a plain function call."""
    from sympy import Function
    if e.args[0].is_Integer and self._use_unicode:
        subscripted = Function('E_%s' % e.args[0])
        return self._print_Function(subscripted(e.args[1]))
    return self._print_Function(e)
def _print_Chi(self, e):
    """Print the hyperbolic cosine integral Chi by name."""
    # This needs a special case since otherwise it comes out as greek
    # letter chi...
    prettyFunc = prettyForm("Chi")
    prettyArgs = prettyForm(*self._print_seq(e.args).parens())

    pform = prettyForm(binding=prettyForm.FUNC, *stringPict.next(prettyFunc, prettyArgs))

    # store pform parts so it can be reassembled e.g. when powered
    pform.prettyFunc = prettyFunc
    pform.prettyArgs = prettyArgs

    return pform
    def _print_Add(self, expr, order=None):
        """Pretty-print a sum, folding minus signs into negative terms."""
        if self.order == 'none':
            terms = list(expr.args)
        else:
            terms = self._as_ordered_terms(expr, order=order)
        pforms, indices = [], []
        def pretty_negative(pform, index):
            """Prepend a minus sign to a pretty form. """
            if index == 0:
                if pform.height() > 1:
                    pform_neg = '- '
                else:
                    pform_neg = '-'
            else:
                pform_neg = ' - '
            pform = stringPict.next(pform_neg, pform)
            return prettyForm(binding=prettyForm.NEG, *pform)
        for i, term in enumerate(terms):
            if term.is_Mul and _coeff_isneg(term):
                pform = self._print(-term)
                pforms.append(pretty_negative(pform, i))
            elif term.is_Rational and term.q > 1:
                # Non-integer rationals are deferred (None placeholder): how
                # they print depends on whether any neighbouring term is
                # taller than a single line.
                pforms.append(None)
                indices.append(i)
            elif term.is_Number and term < 0:
                pform = self._print(-term)
                pforms.append(pretty_negative(pform, i))
            else:
                pforms.append(self._print(term))
        if indices:
            # Use the stacked p/q fraction form only when some other term
            # already occupies more than one line.
            large = True
            for pform in pforms:
                if pform is not None and pform.height() > 1:
                    break
            else:
                large = False
            for i in indices:
                term, negative = terms[i], False
                if term < 0:
                    term, negative = -term, True
                if large:
                    pform = prettyForm(str(term.p))/prettyForm(str(term.q))
                else:
                    pform = self._print(term)
                if negative:
                    pform = pretty_negative(pform, i)
                pforms[i] = pform
        return prettyForm.__add__(*pforms)
    def _print_Mul(self, product):
        """Pretty-print a product, splitting it into a fraction when any
        factor carries a negative rational exponent or a denominator."""
        a = [] # items in the numerator
        b = [] # items that are in the denominator (if any)
        if self.order not in ('old', 'none'):
            args = product.as_ordered_factors()
        else:
            args = product.args
        # Gather terms for numerator/denominator
        for item in args:
            if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
                # negative rational powers move below the fraction bar
                b.append(C.Pow(item.base, -item.exp))
            elif item.is_Rational and item is not S.Infinity:
                # rational coefficient: p goes on top, q underneath
                if item.p != 1:
                    a.append( C.Rational(item.p) )
                if item.q != 1:
                    b.append( C.Rational(item.q) )
            else:
                a.append(item)
        # Convert to pretty forms. Add parens to Add instances if there
        # is more than one term in the numer/denom
        for i in xrange(0, len(a)):
            if a[i].is_Add and len(a) > 1:
                a[i] = prettyForm(*self._print(a[i]).parens())
            else:
                a[i] = self._print(a[i])
        for i in xrange(0, len(b)):
            if b[i].is_Add and len(b) > 1:
                b[i] = prettyForm(*self._print(b[i]).parens())
            else:
                b[i] = self._print(b[i])
        # Construct a pretty form; an empty numerator prints as 1
        if len(b) == 0:
            return prettyForm.__mul__(*a)
        else:
            if len(a) == 0:
                a.append( self._print(S.One) )
            return prettyForm.__mul__(*a)/prettyForm.__mul__(*b)
    # A helper function for _print_Pow to print x**(1/n)
    def _print_nth_root (self, base, expt):
        """Draw base under an ASCII-art radical sign, with the root index
        stacked above the sign (omitted for square roots)."""
        bpretty = self._print(base)
        # Construct root sign, start with the \/ shape
        _zZ = xobj('/',1)
        rootsign = xobj('\\',1) + _zZ
        # Make exponent number to put above it
        if isinstance(expt, C.Rational):
            exp = str(expt.q)
            if exp == '2':
                # square roots carry no explicit index
                exp = ''
        else:
            exp = str(expt.args[0])
        exp = exp.ljust(2)
        if len(exp) > 2:
            rootsign = ' '*(len(exp) - 2) + rootsign
        # Stack the exponent
        rootsign = stringPict(exp + '\n' + rootsign)
        rootsign.baseline = 0
        # Diagonal: length is one less than height of base
        linelength = bpretty.height() - 1
        diagonal = stringPict('\n'.join(
            ' '*(linelength - i - 1) + _zZ + ' '*i
            for i in range(linelength)
            ))
        # Put baseline just below lowest line: next to exp
        diagonal.baseline = linelength - 1
        # Make the root symbol
        rootsign = prettyForm(*rootsign.right(diagonal))
        # Set the baseline to match contents to fix the height
        # but if the height of bpretty is one, the rootsign must be one higher
        rootsign.baseline = max(1, bpretty.baseline)
        #build result: overbar on top, radical sign to the left
        s = prettyForm(hobj('_', 2 + bpretty.width()))
        s = prettyForm(*bpretty.above(s))
        s = prettyForm(*s.left(rootsign))
        return s
    def _print_Pow(self, power):
        """Pretty-print a power, special-casing 1/x, radicals for x**(1/n),
        and reciprocals for negative rational exponents."""
        from sympy import fraction
        b, e = power.as_base_exp()
        if power.is_commutative:
            if e is S.NegativeOne:
                return prettyForm("1")/self._print(b)
            n, d = fraction(e)
            if n is S.One and d.is_Atom and not e.is_Integer:
                # x**(1/d) renders as a radical sign
                return self._print_nth_root(b, e)
            if e.is_Rational and e < 0:
                # negative exponent -> reciprocal of the positive power
                return prettyForm("1")/self._print(b)**self._print(-e)
        # None of the above special forms, do a standard power
        return self._print(b)**self._print(e)
    def __print_numer_denom(self, p, q):
        """Shared helper for Rational/Fraction printing.

        Returns a pretty form for p/q, or None when the caller should fall
        back to the default printer (single-digit fractions).
        """
        if q == 1:
            # integer: no fraction bar needed
            if p < 0:
                return prettyForm(str(p),binding=prettyForm.NEG)
            else:
                return prettyForm(str(p))
        elif abs(p) >= 10 and abs(q) >= 10:
            # If more than one digit in numer and denom, print larger fraction
            if p < 0:
                pform = prettyForm(str(-p))/prettyForm(str(q))
                return prettyForm(binding=prettyForm.NEG, *pform.left('- '))
            else:
                return prettyForm(str(p))/prettyForm(str(q))
        else:
            # small fractions: let the default printer handle them
            return None
def _print_Rational(self, expr):
result = self.__print_numer_denom(expr.p, expr.q)
if result is not None:
return result
else:
return self.emptyPrinter(expr)
def _print_Fraction(self, expr):
result = self.__print_numer_denom(expr.numerator, expr.denominator)
if result is not None:
return result
else:
return self.emptyPrinter(expr)
def _print_ProductSet(self, p):
if len(set(p.sets)) == 1 and len(p.sets) > 1:
from sympy import Pow
return self._print(Pow(p.sets[0], len(p.sets), evaluate=False))
else:
prod_char = u'\xd7'
return self._print_seq(p.sets, None, None, ' %s '%prod_char,
parenthesize = lambda set:set.is_Union or set.is_Intersection)
def _print_FiniteSet(self, s):
items = sorted(s.args, key=default_sort_key)
return self._print_seq(items, '{', '}', ', ' )
    def _print_Range(self, s):
        """Print a Range; long ranges are elided as {a, b, ..., last}."""
        if self._use_unicode:
            dots = u"\u2026"
        else:
            dots = '...'
        if len(s) > 4:
            # show first two elements, an ellipsis, and the last element
            # (Python 2 iterator protocol: .next())
            it = iter(s)
            printset = it.next(), it.next(), dots, s._last_element
        else:
            printset = tuple(s)
        return self._print_seq(printset, '{', '}', ', ' )
def _print_Interval(self, i):
if i.start == i.end:
return self._print_seq(i.args[:1], '{', '}')
else:
if i.left_open:
left = '('
else:
left = '['
if i.right_open:
right = ')'
else:
right = ']'
return self._print_seq(i.args[:2], left, right)
def _print_Intersection(self, u):
delimiter = ' %s ' % pretty_atom('Intersection')
return self._print_seq(u.args, None, None, delimiter,
parenthesize = lambda set:set.is_ProductSet or set.is_Union)
def _print_Union(self, u):
union_delimiter = ' %s ' % pretty_atom('Union')
return self._print_seq(u.args, None, None, union_delimiter,
parenthesize = lambda set:set.is_ProductSet or set.is_Intersection)
def _print_TransformationSet(self, ts):
if self._use_unicode:
inn = u"\u220a"
else:
inn = 'in'
variables = self._print_seq(ts.lamda.variables)
expr = self._print(ts.lamda.expr)
bar = self._print("|")
base = self._print(ts.base_set)
return self._print_seq((expr, bar, variables, inn, base), "{", "}", ' ')
    def _print_seq(self, seq, left=None, right=None, delimiter=', ',
            parenthesize=lambda x: False):
        """Print the items of *seq* joined by *delimiter* and wrapped in the
        *left*/*right* bracket characters.

        *parenthesize* is a predicate deciding per item whether it needs its
        own parentheses.
        """
        s = None
        for item in seq:
            pform = self._print(item)
            if parenthesize(item):
                pform = prettyForm(*pform.parens())
            if s is None:
                # first element
                s = pform
            else:
                s = prettyForm(*stringPict.next(s, delimiter))
                s = prettyForm(*stringPict.next(s, pform))
        if s is None:
            # empty sequence: just the brackets
            s = stringPict('')
        s = prettyForm(*s.parens(left, right, ifascii_nougly=True))
        return s
def join(self, delimiter, args):
pform = None
for arg in args:
if pform is None:
pform = arg
else:
pform = prettyForm(*pform.right(delimiter))
pform = prettyForm(*pform.right(arg))
if pform is None:
return prettyForm("")
else:
return pform
def _print_list(self, l):
return self._print_seq(l, '[', ']')
def _print_tuple(self, t):
if len(t) == 1:
ptuple = prettyForm(*stringPict.next(self._print(t[0]), ','))
return prettyForm(*ptuple.parens('(', ')', ifascii_nougly=True))
else:
return self._print_seq(t, '(', ')')
    def _print_Tuple(self, expr):
        # SymPy Tuple objects print exactly like builtin tuples.
        return self._print_tuple(expr)
def _print_dict(self, d):
keys = sorted(d.keys(), key=default_sort_key)
items = []
for k in keys:
K = self._print(k)
V = self._print(d[k])
s = prettyForm(*stringPict.next(K, ': ', V))
items.append(s)
return self._print_seq(items, '{', '}')
    def _print_Dict(self, d):
        # SymPy Dict objects print exactly like builtin dicts.
        return self._print_dict(d)
def _print_set(self, s):
items = sorted(s, key=default_sort_key)
pretty = self._print_seq(items, '[', ']')
pretty = prettyForm(*pretty.parens('(', ')', ifascii_nougly=True))
pretty = prettyForm(*stringPict.next(type(s).__name__, pretty))
return pretty
_print_frozenset = _print_set
def _print_AlgebraicNumber(self, expr):
if expr.is_aliased:
return self._print(expr.as_poly().as_expr())
else:
return self._print(expr.as_expr())
def _print_RootOf(self, expr):
args = [self._print_Add(expr.expr, order='lex'), expr.index]
pform = prettyForm(*self._print_seq(args).parens())
pform = prettyForm(*pform.left('RootOf'))
return pform
def _print_RootSum(self, expr):
args = [self._print_Add(expr.expr, order='lex')]
if expr.fun is not S.IdentityFunction:
args.append(self._print(expr.fun))
pform = prettyForm(*self._print_seq(args).parens())
pform = prettyForm(*pform.left('RootSum'))
return pform
def _print_FiniteField(self, expr):
if self._use_unicode:
form = u'\u2124_%d'
else:
form = 'GF(%d)'
return prettyForm(pretty_symbol(form % expr.mod))
def _print_IntegerRing(self, expr):
if self._use_unicode:
return prettyForm(u'\u2124')
else:
return prettyForm('ZZ')
def _print_RationalField(self, expr):
if self._use_unicode:
return prettyForm(u'\u211A')
else:
return prettyForm('QQ')
def _print_RealDomain(self, expr):
if self._use_unicode:
return prettyForm(u'\u211D')
else:
return prettyForm('RR')
def _print_ComplexDomain(self, expr):
if self._use_unicode:
return prettyForm(u'\u2102')
else:
return prettyForm('CC')
def _print_PolynomialRingBase(self, expr):
g = expr.gens
if str(expr.order) != str(expr.default_order):
g = g + ("order=" + str(expr.order),)
pform = self._print_seq(g, '[', ']')
pform = prettyForm(*pform.left(self._print(expr.dom)))
return pform
def _print_FractionField(self, expr):
pform = self._print_seq(expr.gens, '(', ')')
pform = prettyForm(*pform.left(self._print(expr.dom)))
return pform
def _print_GroebnerBasis(self, basis):
cls = basis.__class__.__name__
exprs = [ self._print_Add(arg, order=basis.order) for arg in basis.exprs ]
exprs = prettyForm(*self.join(", ", exprs).parens(left="[", right="]"))
gens = [ self._print(gen) for gen in basis.gens ]
domain = prettyForm(*prettyForm("domain=").right(self._print(basis.domain)))
order = prettyForm(*prettyForm("order=").right(self._print(basis.order)))
pform = self.join(", ", [exprs] + gens + [domain, order])
pform = prettyForm(*pform.parens())
pform = prettyForm(*pform.left(basis.__class__.__name__))
return pform
    def _print_Subs(self, e):
        """Print a Subs object as (expr)| with var=point pairs attached at
        the bottom of a vertical bar."""
        pform = self._print(e.expr)
        pform = prettyForm(*pform.parens())
        # vertical bar at least two rows tall, matching the expression
        h = pform.height() if pform.height() > 1 else 2
        rvert = stringPict(vobj('|', h), baseline=pform.baseline)
        pform = prettyForm(*pform.right(rvert))
        # temporarily drop the baseline so the substitutions sit at the foot
        # of the bar, then restore it
        b = pform.baseline
        pform.baseline = pform.height() - 1
        pform = prettyForm(*pform.right(self._print_seq([
            self._print_seq((self._print(v[0]), xsym('=='), self._print(v[1])),
                delimiter='') for v in zip(e.variables, e.point) ])))
        pform.baseline = b
        return pform
def _print_euler(self, e):
pform = prettyForm("E")
arg = self._print(e.args[0])
pform_arg = prettyForm(" "*arg.width())
pform_arg = prettyForm(*pform_arg.below(arg))
pform = prettyForm(*pform.right(pform_arg))
return pform
def _print_catalan(self, e):
pform = prettyForm("C")
arg = self._print(e.args[0])
pform_arg = prettyForm(" "*arg.width())
pform_arg = prettyForm(*pform_arg.below(arg))
pform = prettyForm(*pform.right(pform_arg))
return pform
    def _print_KroneckerDelta(self, e):
        """Print delta with both indices typeset as a subscript."""
        pform = self._print(e.args[0])
        pform = prettyForm(*pform.right((prettyForm(','))))
        pform = prettyForm(*pform.right((self._print(e.args[1]))))
        if self._use_unicode:
            a = stringPict(pretty_symbol('delta'))
        else:
            a = stringPict('d')
        b = pform
        # pad both rows to equal width so the stack lines up
        top = stringPict(*b.left(' '*a.width()))
        bot = stringPict(*a.right(' '*b.width()))
        return prettyForm(binding=prettyForm.POW, *bot.below(top))
def _print_atan2(self, e):
pform = prettyForm(*self._print_seq(e.args).parens())
pform = prettyForm(*pform.left('atan2'))
return pform
def _print_RandomDomain(self, d):
try:
pform = self._print('Domain: ')
pform = prettyForm(*pform.right(self._print(d.as_boolean())))
return pform
except:
try:
pform = self._print('Domain: ')
pform = prettyForm(*pform.right(self._print(d.symbols)))
pform = prettyForm(*pform.right(self._print(' in ')))
pform = prettyForm(*pform.right(self._print(d.set)))
return pform
except:
return self._print(None)
    def _print_DMP(self, p):
        """Print a dense polynomial via its attached ring when possible,
        otherwise fall back to repr()."""
        try:
            if p.ring is not None:
                # TODO incorporate order
                return self._print(p.ring.to_sympy(p))
        except SympifyError:
            pass
        return self._print(repr(p))
    def _print_DMF(self, p):
        # Dense rational functions share the DMP printing path.
        return self._print_DMP(p)
    def _print_Object(self, object):
        # Category-theory objects print as their (prettified) name.
        return self._print(pretty_symbol(object.name))
def _print_Morphism(self, morphism):
arrow = xsym("-->")
domain = self._print(morphism.domain)
codomain = self._print(morphism.codomain)
tail = domain.right(arrow, codomain)[0]
return prettyForm(tail)
def _print_NamedMorphism(self, morphism):
pretty_name = self._print(pretty_symbol(morphism.name))
pretty_morphism = self._print_Morphism(morphism)
return prettyForm(pretty_name.right(":", pretty_morphism)[0])
    def _print_IdentityMorphism(self, morphism):
        # Identity morphisms print as a named morphism called "id".
        from sympy.categories import NamedMorphism
        return self._print_NamedMorphism(
            NamedMorphism(morphism.domain, morphism.codomain, "id"))
def _print_CompositeMorphism(self, morphism):
from sympy.categories import NamedMorphism
circle = xsym(".")
# All components of the morphism have names and it is thus
# possible to build the name of the composite.
component_names_list = [pretty_symbol(component.name) for \
component in morphism.components]
component_names_list.reverse()
component_names = circle.join(component_names_list) + ":"
pretty_name = self._print(component_names)
pretty_morphism = self._print_Morphism(morphism)
return prettyForm(pretty_name.right(pretty_morphism)[0])
    def _print_Category(self, category):
        # Categories print as their (prettified) name.
        return self._print(pretty_symbol(category.name))
    def _print_Diagram(self, diagram):
        """Print a diagram as "premises ==> conclusions" (conclusions are
        optional); an empty diagram prints as the empty set."""
        if not diagram.premises:
            # This is an empty diagram.
            return self._print(S.EmptySet)
        pretty_result = self._print(diagram.premises)
        if diagram.conclusions:
            results_arrow = " %s " % xsym("==>")
            pretty_conclusions = self._print(diagram.conclusions)[0]
            pretty_result = pretty_result.right(results_arrow, pretty_conclusions)
        return prettyForm(pretty_result[0])
    def _print_DiagramGrid(self, grid):
        """Lay out a DiagramGrid as a matrix; empty cells become a blank
        Symbol so they still occupy a grid slot."""
        from sympy.matrices import Matrix
        from sympy import Symbol
        matrix = Matrix([[grid[i, j] if grid[i, j] else Symbol(" ")
                          for j in xrange(grid.width)]
                         for i in xrange(grid.height)])
        return self._print_matrix_contents(matrix)
    def _print_FreeModuleElement(self, m):
        # Print as row vector for convenience, for now.
        return self._print_seq(m, '[', ']')
    def _print_SubModule(self, M):
        # Submodules print as their generators in angle brackets.
        return self._print_seq(M.gens, '<', '>')
    def _print_FreeModule(self, M):
        # Free modules print as ring**rank.
        return self._print(M.ring)**self._print(M.rank)
    def _print_ModuleImplementedIdeal(self, M):
        # Each generator is a 1-element module row; unwrap before printing.
        return self._print_seq([x for [x] in M._module.gens], '<', '>')
    def _print_QuotientRing(self, R):
        # Quotient rings print as ring/ideal (a stacked fraction).
        return self._print(R.ring) / self._print(R.base_ideal)
    def _print_QuotientRingElement(self, R):
        # Cosets print as representative + ideal.
        return self._print(R.data) + self._print(R.ring.base_ideal)
    def _print_QuotientModuleElement(self, m):
        # Cosets print as representative + killed submodule.
        return self._print(m.data) + self._print(m.module.killed_module)
    def _print_QuotientModule(self, M):
        # Quotient modules print as base/killed (a stacked fraction).
        return self._print(M.base) / self._print(M.killed_module)
    def _print_MatrixHomomorphism(self, h):
        """Print as "matrix : domain --> codomain", with the matrix centered
        vertically on the arrow line."""
        matrix = self._print(h._sympy_matrix())
        matrix.baseline = matrix.height() // 2
        pform = prettyForm(*matrix.right(' : ', self._print(h.domain),
            ' %s> ' % hobj('-', 2), self._print(h.codomain)))
        return pform
    def _print_BaseScalarField(self, field):
        # Coordinate scalar fields print as the coordinate's name.
        string = field._coord_sys._names[field._index]
        return self._print(pretty_symbol(string))
    def _print_BaseVectorField(self, field):
        # Base vector fields print as a partial-derivative sign subscripted
        # by the coordinate name.
        s = U('PARTIAL DIFFERENTIAL')+'_'+field._coord_sys._names[field._index]
        return self._print(pretty_symbol(s))
    def _print_Differential(self, diff):
        """Print a differential with the "d" operator glyph; coordinate
        differentials get the bare name, everything else is parenthesized."""
        field = diff._form_field
        if hasattr(field, '_coord_sys'):
            string = field._coord_sys._names[field._index]
            return self._print(u'\u2146 '+pretty_symbol(string))
        else:
            pform = self._print(field)
            pform = prettyForm(*pform.parens())
            return prettyForm(*pform.left(u"\u2146"))
def _print_Tr(self, p):
#TODO: Handle indices
pform = self._print(p.args[0])
pform = prettyForm(*pform.left('%s(' % (p.__class__.__name__)))
pform = prettyForm(*pform.right(')'))
return pform
def pretty(expr, **settings):
    """Returns a string containing the prettified form of expr.

    For information on keyword arguments see pretty_print function.
    """
    pp = PrettyPrinter(settings)
    # XXX: this is an ugly hack, but at least it works
    # (toggle the module-global unicode flag around the print and restore it)
    use_unicode = pp._settings['use_unicode']
    uflag = pretty_use_unicode(use_unicode)
    try:
        return pp.doprint(expr)
    finally:
        pretty_use_unicode(uflag)
def pretty_print(expr, **settings):
    """Prints expr in pretty form.

    pprint is just a shortcut for this function.

    Parameters
    ==========
    expr : expression
        the expression to print
    wrap_line : bool, optional
        line wrapping enabled/disabled, defaults to True
    num_columns : bool, optional
        number of columns before line breaking (default to None which reads
        the terminal width), useful when using SymPy without terminal.
    use_unicode : bool or None, optional
        use unicode characters, such as the Greek letter pi instead of
        the string pi.
    full_prec : bool or string, optional
        use full precision. Default to "auto"
    order : bool or string, optional
        set to 'none' for long expressions if slow; default is None
    """
    # Parenthesized form behaves identically under the Python 2 print
    # statement and the Python 3 print function.
    print(pretty(expr, **settings))
pprint = pretty_print
def pager_print(expr, **settings):
    """Prints expr using the pager, in pretty form.

    This invokes a pager command using pydoc. Lines are not wrapped
    automatically. This routine is meant to be used with a pager that allows
    sideways scrolling, like ``less -S``.

    Parameters are the same as for ``pretty_print``. If you wish to wrap lines,
    pass ``num_columns=None`` to auto-detect the width of the terminal.
    """
    from pydoc import pager
    from locale import getpreferredencoding
    if 'num_columns' not in settings:
        settings['num_columns'] = 500000 # disable line wrap
    # pydoc.pager expects bytes in the user's locale encoding
    pager(pretty(expr, **settings).encode(getpreferredencoding()))
|
mdesco/dipy | refs/heads/master | dipy/sims/tests/test_voxel.py | 3 | import numpy as np
from nose.tools import (assert_true, assert_false, assert_equal,
assert_almost_equal)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_)
from dipy.sims.voxel import (SingleTensor, MultiTensor, multi_tensor_odf, all_tensor_evecs,
add_noise, single_tensor, sticks_and_ball)
from dipy.core.geometry import vec2vec_rotmat
from dipy.data import get_data, get_sphere
from dipy.core.gradients import gradient_table
from dipy.io.gradients import read_bvals_bvecs
# Module-level fixture: a small 64-direction diffusion gradient scheme,
# loaded once at import time and shared by the tests below.
fimg, fbvals, fbvecs = get_data('small_64D')
bvals = np.load(fbvals)
bvecs = np.load(fbvecs)
gtab = gradient_table(bvals, bvecs)
def diff2eigenvectors(dx, dy, dz):
    """Rotate an eigenvector basis so its first axis aligns with (dx, dy, dz).

    Returns the 3x3 eigenvector matrix (columns are eigenvectors) and the
    rotation matrix used.
    """
    # NOTE(review): ``basis`` is not defined anywhere in this module as
    # visible here — calling this helper would raise NameError.  Confirm
    # where ``basis`` is meant to come from (or pass it as a parameter).
    u = np.array([dx, dy, dz])
    u = u / np.linalg.norm(u)
    R = vec2vec_rotmat(basis[:, 0], u)
    eig0 = u
    eig1 = np.dot(R, basis[:, 1])
    eig2 = np.dot(R, basis[:, 2])
    eigs = np.zeros((3, 3))
    eigs[:, 0] = eig0
    eigs[:, 1] = eig1
    eigs[:, 2] = eig2
    return eigs, R
def test_sticks_and_ball():
    """A single 100%-fraction stick along z must match the equivalent
    single-tensor signal with one non-zero eigenvalue."""
    d = 0.0015
    S, sticks = sticks_and_ball(gtab, d=d, S0=1, angles=[(0, 0), ],
                                fractions=[100], snr=None)
    assert_array_equal(sticks, [[0, 0, 1]])
    S_st = SingleTensor(gtab, 1, evals=[d, 0, 0], evecs=[[0, 0, 0],
                                                         [0, 0, 0],
                                                         [1, 0, 0]])
    assert_array_almost_equal(S, S_st)
def test_single_tensor():
    """Noise-free single-tensor signal: b0 volumes keep S0, diffusion-weighted
    volumes attenuate, and the fitted FA matches the prolate tensor."""
    evals = np.array([1.4, .35, .35]) * 10 ** (-3)
    evecs = np.eye(3)
    S = SingleTensor(gtab, 100, evals, evecs, snr=None)
    assert_array_almost_equal(S[gtab.b0s_mask], 100)
    assert_(np.mean(S[~gtab.b0s_mask]) < 100)
    from dipy.reconst.dti import TensorModel
    m = TensorModel(gtab)
    t = m.fit(S)
    # FA for evals (1.4, .35, .35)e-3 is ~0.707
    assert_array_almost_equal(t.fa, 0.707, decimal=3)
def test_multi_tensor():
    """A 50/50 two-tensor mixture must equal the average of the two
    corresponding single-tensor signals."""
    sphere = get_sphere('symmetric724')
    vertices = sphere.vertices
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    e0 = np.array([np.sqrt(2) / 2., np.sqrt(2) / 2., 0])
    e1 = np.array([0, np.sqrt(2) / 2., np.sqrt(2) / 2.])
    mevecs = [all_tensor_evecs(e0), all_tensor_evecs(e1)]
    # odf = multi_tensor_odf(vertices, [0.5, 0.5], mevals, mevecs)
    # assert_(odf.shape == (len(vertices),))
    # assert_(np.all(odf <= 1) & np.all(odf >= 0))
    fimg, fbvals, fbvecs = get_data('small_101D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    s1 = single_tensor(gtab, 100, mevals[0], mevecs[0].T, snr=None)
    s2 = single_tensor(gtab, 100, mevals[1], mevecs[1].T, snr=None)
    Ssingle = 0.5 * s1 + 0.5 * s2
    S, sticks = MultiTensor(gtab, mevals, S0=100, angles=[(90, 45), (45, 90)],
                            fractions=[50, 50], snr=None)
    assert_array_almost_equal(S, Ssingle)
def test_snr():
    """Rician noise at SNR s should give var(signal) ~= (1/s)**2."""
    np.random.seed(1978)
    s = single_tensor(gtab)
    # For reasonably large SNR, var(signal) ~= sigma**2, where sigma = 1/SNR
    for snr in [5, 10, 20]:
        sigma = 1.0 / snr
        for j in range(1000):
            s_noise = add_noise(s, snr, 1, noise_type='rician')
        assert_array_almost_equal(np.var(s_noise - s), sigma ** 2, decimal=2)
def test_all_tensor_evecs():
    """all_tensor_evecs must build an orthonormal frame whose first row is
    the supplied principal direction."""
    e0 = np.array([1/np.sqrt(2), 1/np.sqrt(2), 0])
    desired = np.array([[1/np.sqrt(2), 1/np.sqrt(2), 0],
                        [-1/np.sqrt(2), 1/np.sqrt(2), 0],
                        [0, 0, 1]])
    assert_array_almost_equal(all_tensor_evecs(e0), desired)
if __name__ == "__main__":
test_multi_tensor()
|
GitAngel/django | refs/heads/master | django/contrib/gis/db/models/sql/__init__.py | 476 | from django.contrib.gis.db.models.sql.conversion import (
AreaField, DistanceField, GeomField, GMLField,
)
__all__ = [
'AreaField', 'DistanceField', 'GeomField', 'GMLField'
]
|
Qalthos/ansible | refs/heads/devel | lib/ansible/module_utils/common/text/formatters.py | 15 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
from ansible.module_utils.six import iteritems
# Byte multipliers for unit prefixes (binary: powers of 1024), keyed by the
# prefix letter, largest first.
SIZE_RANGES = {
    'Y': 1 << 80,
    'Z': 1 << 70,
    'E': 1 << 60,
    'P': 1 << 50,
    'T': 1 << 40,
    'G': 1 << 30,
    'M': 1 << 20,
    'K': 1 << 10,
    'B': 1,
}
def lenient_lowercase(lst):
    """Return a copy of *lst* with every string element lowercased.

    Elements without a ``.lower()`` method are passed through untouched.
    """
    result = []
    for item in lst:
        try:
            result.append(item.lower())
        except AttributeError:
            # not a string-like value: keep it as-is
            result.append(item)
    return result
def human_to_bytes(number, default_unit=None, isbits=False):
    """Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument.

    example: human_to_bytes('10M') <=> human_to_bytes(10, 'M')

    ``isbits=True`` validates the suffix against bit units ('Kb') instead of
    byte units ('KB').  Raises ValueError for unparseable input or a suffix
    that does not match a known size prefix.
    """
    # split the input into a numeric part and an optional unit suffix
    m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
    if m is None:
        raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
    try:
        num = float(m.group(1))
    except Exception:
        raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
    unit = m.group(2)
    if unit is None:
        unit = default_unit
    if unit is None:
        ''' No unit given, returning raw number '''
        return int(round(num))
    range_key = unit[0].upper()
    try:
        limit = SIZE_RANGES[range_key]
    except Exception:
        raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
    # default value
    unit_class = 'B'
    unit_class_name = 'byte'
    # handling bits case
    if isbits:
        unit_class = 'b'
        unit_class_name = 'bit'
    # check unit value if more than one character (KB, MB)
    if len(unit) > 1:
        expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
        if range_key == 'B':
            expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
        if unit_class_name in unit.lower():
            # full spelling such as "kilobyte"/"bit" is accepted
            pass
        elif unit[1] != unit_class:
            raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
    return int(round(num * limit))
def bytes_to_human(size, isbits=False, unit=None):
    """Render a byte (or bit, with ``isbits``) count as a human-readable
    string, e.g. '1.50 KB'.  ``unit`` forces a specific prefix letter."""
    base = 'bits' if isbits else 'Bytes'
    suffix = ''
    # Walk the ranges from largest to smallest until one fits, or until the
    # caller-requested unit letter matches.
    for suffix, limit in sorted(SIZE_RANGES.items(), key=lambda item: -item[1]):
        if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
            break
    if limit != 1:
        suffix += base[0]
    else:
        suffix = base
    return '%.2f %s' % (size / limit, suffix)
|
zzzeek/sqlalchemy | refs/heads/master | test/orm/inheritance/test_abc_inheritance.py | 3 | from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm import mapper
from sqlalchemy.orm import polymorphic_union
from sqlalchemy.orm import relationship
from sqlalchemy.orm.interfaces import MANYTOONE
from sqlalchemy.orm.interfaces import ONETOMANY
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
def _combinations():
    """Yield (test_name, parent, child, direction) for every pairing of the
    hierarchy levels a/b/c with both relationship directions."""
    levels = ["a", "b", "c"]
    for parent in levels:
        for child in levels:
            for direction in (ONETOMANY, MANYTOONE):
                tag = "O2M" if direction is ONETOMANY else "M2O"
                yield ("Test%sTo%s%s" % (parent, child, tag),
                       parent, child, direction)
@testing.combinations(
    *list(_combinations()), argnames="name,parent,child,direction", id_="saaa"
)
class ABCTest(fixtures.MappedTest):
    """Round-trip a relationship between every pair of levels of a
    three-level joined-table inheritance hierarchy (a <- b <- c), in both
    one-to-many and many-to-one directions.  The class-level ``parent``,
    ``child`` and ``direction`` attributes are injected per variant by the
    ``testing.combinations`` decorator above.
    """
    @classmethod
    def define_tables(cls, metadata):
        # Each table gets an FK column only for the parent/child role it
        # plays in this particular combination; use_alter breaks the
        # potential FK cycle between the inheritance and relationship joins.
        parent, child, direction = cls.parent, cls.child, cls.direction
        ta = ["a", metadata]
        ta.append(
            Column(
                "id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            )
        ),
        ta.append(Column("a_data", String(30)))
        if "a" == parent and direction == MANYTOONE:
            ta.append(
                Column(
                    "child_id",
                    Integer,
                    ForeignKey("%s.id" % child, use_alter=True, name="foo"),
                )
            )
        elif "a" == child and direction == ONETOMANY:
            ta.append(
                Column(
                    "parent_id",
                    Integer,
                    ForeignKey("%s.id" % parent, use_alter=True, name="foo"),
                )
            )
        ta = Table(*ta)
        tb = ["b", metadata]
        tb.append(Column("id", Integer, ForeignKey("a.id"), primary_key=True))
        tb.append(Column("b_data", String(30)))
        if "b" == parent and direction == MANYTOONE:
            tb.append(
                Column(
                    "child_id",
                    Integer,
                    ForeignKey("%s.id" % child, use_alter=True, name="foo"),
                )
            )
        elif "b" == child and direction == ONETOMANY:
            tb.append(
                Column(
                    "parent_id",
                    Integer,
                    ForeignKey("%s.id" % parent, use_alter=True, name="foo"),
                )
            )
        tb = Table(*tb)
        tc = ["c", metadata]
        tc.append(Column("id", Integer, ForeignKey("b.id"), primary_key=True))
        tc.append(Column("c_data", String(30)))
        if "c" == parent and direction == MANYTOONE:
            tc.append(
                Column(
                    "child_id",
                    Integer,
                    ForeignKey("%s.id" % child, use_alter=True, name="foo"),
                )
            )
        elif "c" == child and direction == ONETOMANY:
            tc.append(
                Column(
                    "parent_id",
                    Integer,
                    ForeignKey("%s.id" % parent, use_alter=True, name="foo"),
                )
            )
        tc = Table(*tc)
    @classmethod
    def setup_mappers(cls):
        # Build polymorphic-union selectables for A and B, map the three
        # classes, then attach the parent->child "collection" relationship
        # for this combination.
        parent, child, direction = cls.parent, cls.child, cls.direction
        ta, tb, tc = cls.tables("a", "b", "c")
        parent_table = {"a": ta, "b": tb, "c": tc}[parent]
        child_table = {"a": ta, "b": tb, "c": tc}[child]
        remote_side = None
        if direction == MANYTOONE:
            foreign_keys = [parent_table.c.child_id]
        elif direction == ONETOMANY:
            foreign_keys = [child_table.c.parent_id]
        atob = ta.c.id == tb.c.id
        btoc = tc.c.id == tb.c.id
        if direction == ONETOMANY:
            relationshipjoin = parent_table.c.id == child_table.c.parent_id
        elif direction == MANYTOONE:
            relationshipjoin = parent_table.c.child_id == child_table.c.id
        if parent is child:
            # self-referential relationship needs an explicit remote side
            remote_side = [child_table.c.id]
        abcjoin = polymorphic_union(
            {
                "a": ta.select()
                .where(tb.c.id == None) # noqa
                .select_from(ta.outerjoin(tb, onclause=atob))
                .subquery(),
                "b": ta.join(tb, onclause=atob)
                .outerjoin(tc, onclause=btoc)
                .select()
                .where(tc.c.id == None)
                .reduce_columns()
                .subquery(), # noqa
                "c": tc.join(tb, onclause=btoc).join(ta, onclause=atob),
            },
            "type",
            "abcjoin",
        )
        bcjoin = polymorphic_union(
            {
                "b": ta.join(tb, onclause=atob)
                .outerjoin(tc, onclause=btoc)
                .select()
                .where(tc.c.id == None)
                .reduce_columns()
                .subquery(), # noqa
                "c": tc.join(tb, onclause=btoc).join(ta, onclause=atob),
            },
            "type",
            "bcjoin",
        )
        class A(cls.Comparable):
            def __init__(self, name):
                self.a_data = name
        class B(A):
            pass
        class C(B):
            pass
        mapper(
            A,
            ta,
            polymorphic_on=abcjoin.c.type,
            with_polymorphic=("*", abcjoin),
            polymorphic_identity="a",
        )
        mapper(
            B,
            tb,
            polymorphic_on=bcjoin.c.type,
            with_polymorphic=("*", bcjoin),
            polymorphic_identity="b",
            inherits=A,
            inherit_condition=atob,
        )
        mapper(
            C,
            tc,
            polymorphic_identity="c",
            with_polymorphic=("*", tc.join(tb, btoc).join(ta, atob)),
            inherits=B,
            inherit_condition=btoc,
        )
        parent_mapper = class_mapper({ta: A, tb: B, tc: C}[parent_table])
        child_mapper = class_mapper({ta: A, tb: B, tc: C}[child_table])
        parent_mapper.add_property(
            "collection",
            relationship(
                child_mapper,
                primaryjoin=relationshipjoin,
                foreign_keys=foreign_keys,
                order_by=child_mapper.c.id,
                remote_side=remote_side,
                uselist=True,
            ),
        )
    def test_roundtrip(self):
        # Persist a parent/child pair (plus unrelated decoys at each level),
        # then verify the collection both via identity-map get() and via a
        # polymorphic query against the base class.
        parent, child, direction = self.parent, self.child, self.direction
        A, B, C = self.classes("A", "B", "C")
        parent_class = {"a": A, "b": B, "c": C}[parent]
        child_class = {"a": A, "b": B, "c": C}[child]
        sess = fixture_session(autoflush=False, expire_on_commit=False)
        parent_obj = parent_class("parent1")
        child_obj = child_class("child1")
        somea = A("somea")
        someb = B("someb")
        somec = C("somec")
        # print "APPENDING", parent.__class__.__name__ , "TO",
        # child.__class__.__name__
        sess.add(parent_obj)
        parent_obj.collection.append(child_obj)
        if direction == ONETOMANY:
            child2 = child_class("child2")
            parent_obj.collection.append(child2)
            sess.add(child2)
        elif direction == MANYTOONE:
            parent2 = parent_class("parent2")
            parent2.collection.append(child_obj)
            sess.add(parent2)
        sess.add(somea)
        sess.add(someb)
        sess.add(somec)
        sess.commit()
        sess.close()
        # assert result via direct get() of parent object
        result = sess.get(parent_class, parent_obj.id)
        assert result.id == parent_obj.id
        assert result.collection[0].id == child_obj.id
        if direction == ONETOMANY:
            assert result.collection[1].id == child2.id
        elif direction == MANYTOONE:
            result2 = sess.get(parent_class, parent2.id)
            assert result2.id == parent2.id
            assert result2.collection[0].id == child_obj.id
        sess.expunge_all()
        # assert result via polymorphic load of parent object
        result = sess.query(A).filter_by(id=parent_obj.id).one()
        assert result.id == parent_obj.id
        assert result.collection[0].id == child_obj.id
        if direction == ONETOMANY:
            assert result.collection[1].id == child2.id
        elif direction == MANYTOONE:
            result2 = sess.query(A).filter_by(id=parent2.id).one()
            assert result2.id == parent2.id
            assert result2.collection[0].id == child_obj.id
|
feigames/Odoo | refs/heads/master | addons/account/report/account_partner_balance.py | 95 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.report import report_sxw
from common_report_header import common_report_header
class partner_balance(report_sxw.rml_parse, common_report_header):
    def __init__(self, cr, uid, name, context=None):
        """Register the helper callables the RML template can invoke."""
        super(partner_balance, self).__init__(cr, uid, name, context=context)
        self.account_ids = []
        # expose report helpers to the template rendering context
        self.localcontext.update( {
            'time': time,
            'lines': self.lines,
            'sum_debit': self._sum_debit,
            'sum_credit': self._sum_credit,
            'sum_litige': self._sum_litige,
            'get_fiscalyear': self._get_fiscalyear,
            'get_journal': self._get_journal,
            'get_filter': self._get_filter,
            'get_account': self._get_account,
            'get_start_date':self._get_start_date,
            'get_end_date':self._get_end_date,
            'get_start_period': self.get_start_period,
            'get_end_period': self.get_end_period,
            'get_partners':self._get_partners,
            'get_target_move': self._get_target_move,
        })
def set_context(self, objects, data, ids, report_type=None):
self.display_partner = data['form'].get('display_partner', 'non-zero_balance')
obj_move = self.pool.get('account.move.line')
self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
self.result_selection = data['form'].get('result_selection')
self.target_move = data['form'].get('target_move', 'all')
if (self.result_selection == 'customer' ):
self.ACCOUNT_TYPE = ('receivable',)
elif (self.result_selection == 'supplier'):
self.ACCOUNT_TYPE = ('payable',)
else:
self.ACCOUNT_TYPE = ('payable', 'receivable')
self.cr.execute("SELECT a.id " \
"FROM account_account a " \
"LEFT JOIN account_account_type t " \
"ON (a.type = t.code) " \
"WHERE a.type IN %s " \
"AND a.active", (self.ACCOUNT_TYPE,))
self.account_ids = [a for (a,) in self.cr.fetchall()]
return super(partner_balance, self).set_context(objects, data, ids, report_type=report_type)
def lines(self):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
full_account = []
self.cr.execute(
"SELECT p.ref,l.account_id,ac.name AS account_name,ac.code AS code,p.name, sum(debit) AS debit, sum(credit) AS credit, " \
"CASE WHEN sum(debit) > sum(credit) " \
"THEN sum(debit) - sum(credit) " \
"ELSE 0 " \
"END AS sdebit, " \
"CASE WHEN sum(debit) < sum(credit) " \
"THEN sum(credit) - sum(debit) " \
"ELSE 0 " \
"END AS scredit, " \
"(SELECT sum(debit-credit) " \
"FROM account_move_line l " \
"WHERE partner_id = p.id " \
"AND " + self.query + " " \
"AND blocked = TRUE " \
") AS enlitige " \
"FROM account_move_line l LEFT JOIN res_partner p ON (l.partner_id=p.id) " \
"JOIN account_account ac ON (l.account_id = ac.id)" \
"JOIN account_move am ON (am.id = l.move_id)" \
"WHERE ac.type IN %s " \
"AND am.state IN %s " \
"AND " + self.query + "" \
"GROUP BY p.id, p.ref, p.name,l.account_id,ac.name,ac.code " \
"ORDER BY l.account_id,p.name",
(self.ACCOUNT_TYPE, tuple(move_state)))
res = self.cr.dictfetchall()
if self.display_partner == 'non-zero_balance':
full_account = [r for r in res if r['sdebit'] > 0 or r['scredit'] > 0]
else:
full_account = [r for r in res]
for rec in full_account:
if not rec.get('name', False):
rec.update({'name': _('Unknown Partner')})
## We will now compute Total
subtotal_row = self._add_subtotal(full_account)
return subtotal_row
def _add_subtotal(self, cleanarray):
i = 0
completearray = []
tot_debit = 0.0
tot_credit = 0.0
tot_scredit = 0.0
tot_sdebit = 0.0
tot_enlitige = 0.0
for r in cleanarray:
# For the first element we always add the line
# type = 1 is the line is the first of the account
# type = 2 is an other line of the account
if i==0:
# We add the first as the header
#
##
new_header = {}
new_header['ref'] = ''
new_header['name'] = r['account_name']
new_header['code'] = r['code']
new_header['debit'] = r['debit']
new_header['credit'] = r['credit']
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = r['debit'] - r['credit']
new_header['type'] = 3
##
completearray.append(new_header)
#
r['type'] = 1
r['balance'] = float(r['sdebit']) - float(r['scredit'])
completearray.append(r)
#
tot_debit = r['debit']
tot_credit = r['credit']
tot_scredit = r['scredit']
tot_sdebit = r['sdebit']
tot_enlitige = (r['enlitige'] or 0.0)
#
else:
if cleanarray[i]['account_id'] <> cleanarray[i-1]['account_id']:
new_header['debit'] = tot_debit
new_header['credit'] = tot_credit
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
new_header['type'] = 3
# we reset the counter
tot_debit = r['debit']
tot_credit = r['credit']
tot_scredit = r['scredit']
tot_sdebit = r['sdebit']
tot_enlitige = (r['enlitige'] or 0.0)
#
##
new_header = {}
new_header['ref'] = ''
new_header['name'] = r['account_name']
new_header['code'] = r['code']
new_header['debit'] = tot_debit
new_header['credit'] = tot_credit
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
new_header['type'] = 3
##get_fiscalyear
##
completearray.append(new_header)
##
#
r['type'] = 1
#
r['balance'] = float(r['sdebit']) - float(r['scredit'])
completearray.append(r)
if cleanarray[i]['account_id'] == cleanarray[i-1]['account_id']:
# we reset the counter
new_header['debit'] = tot_debit
new_header['credit'] = tot_credit
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
new_header['type'] = 3
tot_debit = tot_debit + r['debit']
tot_credit = tot_credit + r['credit']
tot_scredit = tot_scredit + r['scredit']
tot_sdebit = tot_sdebit + r['sdebit']
tot_enlitige = tot_enlitige + (r['enlitige'] or 0.0)
new_header['debit'] = tot_debit
new_header['credit'] = tot_credit
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
#
r['type'] = 2
#
r['balance'] = float(r['sdebit']) - float(r['scredit'])
#
completearray.append(r)
i = i + 1
return completearray
def _sum_debit(self):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
if not self.ids:
return 0.0
self.cr.execute(
"SELECT sum(debit) " \
"FROM account_move_line AS l " \
"JOIN account_move am ON (am.id = l.move_id)" \
"WHERE l.account_id IN %s" \
"AND am.state IN %s" \
"AND " + self.query + "",
(tuple(self.account_ids), tuple(move_state)))
temp_res = float(self.cr.fetchone()[0] or 0.0)
return temp_res
def _sum_credit(self):
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
if not self.ids:
return 0.0
self.cr.execute(
"SELECT sum(credit) " \
"FROM account_move_line AS l " \
"JOIN account_move am ON (am.id = l.move_id)" \
"WHERE l.account_id IN %s" \
"AND am.state IN %s" \
"AND " + self.query + "",
(tuple(self.account_ids), tuple(move_state)))
temp_res = float(self.cr.fetchone()[0] or 0.0)
return temp_res
def _sum_litige(self):
#gives the total of move lines with blocked boolean set to TRUE for the report selection
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
if not self.ids:
return 0.0
self.cr.execute(
"SELECT sum(debit-credit) " \
"FROM account_move_line AS l " \
"JOIN account_move am ON (am.id = l.move_id)" \
"WHERE l.account_id IN %s" \
"AND am.state IN %s" \
"AND " + self.query + " " \
"AND l.blocked=TRUE ",
(tuple(self.account_ids), tuple(move_state), ))
temp_res = float(self.cr.fetchone()[0] or 0.0)
return temp_res
def _get_partners(self):
if self.result_selection == 'customer':
return _('Receivable Accounts')
elif self.result_selection == 'supplier':
return _('Payable Accounts')
elif self.result_selection == 'customer_supplier':
return _('Receivable and Payable Accounts')
return ''
class report_partnerbalance(osv.AbstractModel):
    # QWeb report model wrapping the legacy ``partner_balance`` RML parser
    # above (see ``_wrapped_report_class``).
    _name = 'report.account.report_partnerbalance'
    _inherit = 'report.abstract_report'
    _template = 'account.report_partnerbalance'
    _wrapped_report_class = partner_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
repotvsupertuga/tvsupertuga.repository | refs/heads/master | instal/script.module.requests/lib/requests/packages/chardet/latin1prober.py | 1777 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
FREQ_CAT_NUM = 4  # number of transition-frequency buckets tallied by the prober

# Character classes assigned to each Latin-1 byte value.
UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes

# Maps each byte value 0x00-0xFF to one of the classes above.
Latin1_CharToClass = (
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 00 - 07
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 08 - 0F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 10 - 17
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 18 - 1F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 20 - 27
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 28 - 2F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 30 - 37
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 38 - 3F
    OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 40 - 47
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 48 - 4F
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 50 - 57
    ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH,   # 58 - 5F
    OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 60 - 67
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 68 - 6F
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 70 - 77
    ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH,   # 78 - 7F
    OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH,   # 80 - 87
    OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF,   # 88 - 8F
    UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 90 - 97
    OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO,   # 98 - 9F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A0 - A7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A8 - AF
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B0 - B7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B8 - BF
    ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO,   # C0 - C7
    ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV,   # C8 - CF
    ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH,   # D0 - D7
    ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO,   # D8 - DF
    ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO,   # E0 - E7
    ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV,   # E8 - EF
    ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH,   # F0 - F7
    ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO,   # F8 - FF
)

# Likelihood of each (previous class, current class) pair, indexed by
# previous * CLASS_NUM + current:
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
    # UDF OTH ASC ASS ACV ACO ASV ASO
    0, 0, 0, 0, 0, 0, 0, 0,  # UDF
    0, 3, 3, 3, 3, 3, 3, 3,  # OTH
    0, 3, 3, 3, 3, 3, 3, 3,  # ASC
    0, 3, 3, 3, 1, 1, 3, 3,  # ASS
    0, 3, 3, 3, 1, 2, 1, 2,  # ACV
    0, 3, 3, 3, 3, 3, 3, 3,  # ACO
    0, 3, 1, 3, 1, 1, 1, 3,  # ASV
    0, 3, 1, 3, 1, 1, 3, 3,  # ASO
)
class Latin1Prober(CharSetProber):
    """Prober that scores a byte stream as Latin-1 / windows-1252 text.

    Walks successive character-class pairs through ``Latin1ClassModel``,
    tallying how likely each transition is; a transition marked illegal
    rules the charset out entirely.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def reset(self):
        # Start from the neutral "other" class with empty frequency buckets.
        self._mLastCharClass = OTH
        self._mFreqCounter = [0 for _ in range(FREQ_CAT_NUM)]
        CharSetProber.reset(self)

    def get_charset_name(self):
        return "windows-1252"

    def feed(self, aBuf):
        aBuf = self.filter_with_english_letters(aBuf)
        for cur in aBuf:
            cur_class = Latin1_CharToClass[wrap_ord(cur)]
            transition = (self._mLastCharClass * CLASS_NUM) + cur_class
            freq = Latin1ClassModel[transition]
            if not freq:
                # Illegal class pair: this cannot be Latin-1 text.
                self._mState = eNotMe
                break
            self._mFreqCounter[freq] += 1
            self._mLastCharClass = cur_class
        return self.get_state()

    def get_confidence(self):
        if self.get_state() == eNotMe:
            return 0.01

        total = sum(self._mFreqCounter)
        if total < 0.01:
            confidence = 0.0
        else:
            confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
                          / total)
            if confidence < 0.0:
                confidence = 0.0
        # Lower the confidence of latin-1 so that other, more accurate
        # detectors can take priority.
        return confidence * 0.73
|
laurenrevere/osf.io | refs/heads/develop | api/actions/urls.py | 3 | from django.conf.urls import url
from . import views
app_name = 'osf'

# Routes for review actions:
#   reviews/               -> list / create review actions
#   <action_id>/           -> retrieve one action by its word-character id
urlpatterns = [
    url(r'^reviews/$', views.ReviewActionListCreate.as_view(), name=views.ReviewActionListCreate.view_name),
    url(r'^(?P<action_id>\w+)/$', views.ActionDetail.as_view(), name=views.ActionDetail.view_name),
]
|
pigate/mongo-python-driver | refs/heads/master | gridfs/errors.py | 61 | # Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions raised by the :mod:`gridfs` package"""
from pymongo.errors import PyMongoError
class GridFSError(PyMongoError):
    """Base class for all GridFS exceptions.

    Subclass of :class:`~pymongo.errors.PyMongoError`.
    """
class CorruptGridFile(GridFSError):
    """Raised when a file in :class:`~gridfs.GridFS` is malformed."""
class NoFile(GridFSError):
    """Raised when trying to read from a non-existent file."""
class FileExists(GridFSError):
    """Raised when trying to create a file that already exists."""
|
thanhacun/odoo | refs/heads/8.0 | openerp/tools/appdirs.py | 376 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 3, 0)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
# True when running under Python 3.
PY3 = sys.version_info[0] == 3

if PY3:
    # Python 3 has no `unicode` builtin; alias it so the pywin32 helper
    # below can call unicode() under either major version.
    unicode = str
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the full path of the user-specific data dir for this application.

    "appname"   application name; if None the bare system directory is returned.
    "appauthor" owning company/author name (only required and used on Windows);
                falls back to appname.
    "version"   optional version path element, appended only when appname is given.
    "roaming"   on Windows, use the roaming appdata directory so the data is
                synced on login for users on a roaming-profile network; see
                <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>.

    Typical locations:
        Mac OS X:  ~/Library/Application Support/<AppName>
        Unix:      $XDG_DATA_HOME/<AppName>, default ~/.local/share/<AppName>
        Windows:   C:\Users\<username>\AppData\{Roaming,Local}\<AppAuthor>\<AppName>
    """
    if sys.platform == "win32":
        if appauthor is None:
            appauthor = appname
        csidl = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(_get_win_folder(csidl))
        if appname:
            path = os.path.join(path, appauthor, appname)
    elif sys.platform == 'darwin':
        path = os.path.expanduser('~/Library/Application Support/')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: honour $XDG_DATA_HOME, defaulting to ~/.local/share.
        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return the full path of the site-wide (shared) data dir for this app.

    "appname"   application name; if None the bare system directory is returned.
    "appauthor" owning company/author name (only required and used on Windows);
                falls back to appname.
    "version"   optional version path element, appended only when appname is given.
    "multipath" (*nix only) return the entire os.pathsep-joined list of data
                dirs instead of just the first entry.

    Typical locations:
        Mac OS X:  /Library/Application Support/<AppName>
        Unix:      $XDG_DATA_DIRS[0]/<AppName>, default /usr/local/share/<AppName>
        Windows:   C:\ProgramData\<AppAuthor>\<AppName>

    WARNING: on Vista+ "C:\ProgramData" is a hidden *system* directory;
    avoid using this on Windows.
    """
    if sys.platform == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            path = os.path.join(path, appauthor, appname)
    elif sys.platform == 'darwin':
        path = os.path.expanduser('/Library/Application Support')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: use $XDG_DATA_DIRS (all entries if multipath, else first).
        raw = os.getenv('XDG_DATA_DIRS',
                        os.pathsep.join(['/usr/local/share', '/usr/share']))
        dirs = [os.path.expanduser(d.rstrip(os.sep)) for d in raw.split(os.pathsep)]
        if appname:
            suffix = os.path.join(appname, version) if version else appname
            dirs = [os.sep.join([d, suffix]) for d in dirs]
        if multipath:
            return os.pathsep.join(dirs)
        return dirs[0]
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the full path of the user-specific config dir for this app.

    "appname"   application name; if None the bare system directory is returned.
    "appauthor" owning company/author name (only required and used on Windows);
                falls back to appname.
    "version"   optional version path element, appended only when appname is given.
    "roaming"   on Windows, use the roaming appdata directory (synced on login).

    Typical locations:
        Mac OS X / Windows: same as user_data_dir
        Unix: $XDG_CONFIG_HOME/<AppName>, default ~/.config/<AppName>
    """
    if sys.platform in ("win32", "darwin"):
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        # XDG spec: honour $XDG_CONFIG_HOME, defaulting to ~/.config.
        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return the full path of the site-wide (shared) config dir for this app.

    "appname"   application name; if None the bare system directory is returned.
    "appauthor" owning company/author name (only required and used on Windows);
                falls back to appname.
    "version"   optional version path element, appended only when appname is given.
    "multipath" (*nix only) return the entire os.pathsep-joined list of config
                dirs instead of just the first entry.

    Typical locations:
        Mac OS X / Windows: same as site_data_dir
        Unix: $XDG_CONFIG_DIRS[0]/<AppName>, default /etc/xdg/<AppName>

    WARNING: on Vista+ "C:\ProgramData" is a hidden *system* directory;
    avoid using this on Windows.
    """
    if sys.platform in ("win32", "darwin"):
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
        return path
    # XDG spec: use $XDG_CONFIG_DIRS (all entries if multipath, else first).
    raw = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
    dirs = [os.path.expanduser(d.rstrip(os.sep)) for d in raw.split(os.pathsep)]
    if appname:
        suffix = os.path.join(appname, version) if version else appname
        dirs = [os.sep.join([d, suffix]) for d in dirs]
    if multipath:
        return os.pathsep.join(dirs)
    return dirs[0]
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return the full path of the user-specific cache dir for this app.

    "appname"   application name; if None the bare system directory is returned.
    "appauthor" owning company/author name (only required and used on Windows);
                falls back to appname.
    "version"   optional version path element, appended only when appname is given.
    "opinion"   (Windows only) set False to skip appending "Cache" to the base
                app-data dir — MSDN only says local settings go in
                CSIDL_LOCAL_APPDATA, so the "Cache" suffix is opinionated.

    Typical locations:
        Mac OS X:  ~/Library/Caches/<AppName>
        Unix:      $XDG_CACHE_HOME/<AppName>, default ~/.cache/<AppName>
        Windows:   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
    """
    if sys.platform == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            path = os.path.join(path, appauthor, appname)
            if opinion:
                path = os.path.join(path, "Cache")
    elif sys.platform == 'darwin':
        path = os.path.expanduser('~/Library/Caches')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: honour $XDG_CACHE_HOME, defaulting to ~/.cache.
        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return the full path of the user-specific log dir for this app.

    "appname"   application name; if None the bare system directory is returned.
    "appauthor" owning company/author name (only required and used on Windows);
                falls back to appname.
    "version"   optional version path element, appended only when appname is given.
    "opinion"   set False to skip appending "Logs" (Windows) / "log" (Unix)
                to the base directory — those suffixes are opinionated, since
                MSDN only says local settings go in CSIDL_LOCAL_APPDATA.

    Typical locations:
        Mac OS X:  ~/Library/Logs/<AppName>
        Unix:      $XDG_CACHE_HOME/<AppName>/log, default ~/.cache/<AppName>/log
        Windows:   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
    """
    if sys.platform == "darwin":
        path = os.path.join(
            os.path.expanduser('~/Library/Logs'),
            appname)
        # Only on darwin does the version element still get appended here;
        # the other branches consume `version` inside the delegated call.
        if appname and version:
            path = os.path.join(path, version)
    elif sys.platform == "win32":
        path = user_data_dir(appname, appauthor, version)
        if opinion:
            path = os.path.join(path, "Logs")
    else:
        path = user_cache_dir(appname, appauthor, version)
        if opinion:
            path = os.path.join(path, "log")
    return path
class AppDirs(object):
    """Convenience wrapper for getting application dirs.

    Bundles appname/appauthor/version and the roaming/multipath flags once,
    then exposes each directory kind as a read-only property delegating to
    the corresponding module-level function.
    """
    def __init__(self, appname, appauthor=None, version=None,
            roaming=False, multipath=False):
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming
        self.multipath = multipath

    @property
    def user_data_dir(self):
        return user_data_dir(self.appname, self.appauthor,
                             version=self.version, roaming=self.roaming)

    @property
    def site_data_dir(self):
        return site_data_dir(self.appname, self.appauthor,
                             version=self.version, multipath=self.multipath)

    @property
    def user_config_dir(self):
        return user_config_dir(self.appname, self.appauthor,
                               version=self.version, roaming=self.roaming)

    @property
    def site_config_dir(self):
        # BUG FIX: previously this delegated to site_data_dir(), so the
        # property returned data dirs (e.g. /usr/local/share) instead of
        # config dirs (e.g. /etc/xdg). Now calls site_config_dir().
        return site_config_dir(self.appname, self.appauthor,
                               version=self.version, multipath=self.multipath)

    @property
    def user_cache_dir(self):
        return user_cache_dir(self.appname, self.appauthor,
                              version=self.version)

    @property
    def user_log_dir(self):
        return user_log_dir(self.appname, self.appauthor,
                            version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
    """Resolve a CSIDL_* folder name by reading Explorer's "Shell Folders"
    registry key.

    This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    import _winreg

    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
    # Renamed from `dir`/`type` to avoid shadowing the builtins.
    directory, _value_type = _winreg.QueryValueEx(key, shell_folder_name)
    return directory
def _get_win_folder_with_pywin32(csidl_name):
    """Resolve a CSIDL_* folder via pywin32's SHGetFolderPath."""
    from win32com.shell import shellcon, shell
    # Renamed from `dir` to avoid shadowing the builtin.
    directory = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        directory = unicode(directory)

        # Downgrade to short path name if it has high-bit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        if any(ord(c) > 255 for c in directory):
            try:
                import win32api
                directory = win32api.GetShortPathName(directory)
            except ImportError:
                pass
    except UnicodeError:
        pass
    return directory
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL_* folder via ctypes and SHGetFolderPathW."""
    import ctypes

    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if it has high-bit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(c) > 255 for c in buf):
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value
# On Windows, select the best available backend for resolving CSIDL
# folders: pywin32 first, then ctypes, then the registry fallback.
if sys.platform == "win32":
    try:
        import win32com.shell
        _get_win_folder = _get_win_folder_with_pywin32
    except ImportError:
        try:
            import ctypes
            _get_win_folder = _get_win_folder_with_ctypes
        except ImportError:
            _get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir", "site_data_dir",
"user_config_dir", "site_config_dir",
"user_cache_dir", "user_log_dir")
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
|
guorendong/iridium-browser-ubuntu | refs/heads/ubuntu/precise | third_party/cython/src/pyximport/pyxbuild.py | 95 | """Build a Pyrex file from .pyx source to .so loadable module using
the installed distutils infrastructure. Call:
out_fname = pyx_to_dll("foo.pyx")
"""
import os
import sys
from distutils.dist import Distribution
from distutils.errors import DistutilsArgError, DistutilsError, CCompilerError
from distutils.extension import Extension
from distutils.util import grok_environment_error
# Cython is optional: without it we can still build from pre-generated
# .c files (see the extension handling below).
try:
    from Cython.Distutils import build_ext
    HAS_CYTHON = True
except ImportError:
    HAS_CYTHON = False

DEBUG = 0

# NOTE(review): presumably tracks modules already built, for the
# reload_support path of pyx_to_dll — confirm against that function.
_reloads={}
def pyx_to_dll(filename, ext=None, force_rebuild=0,
               build_in_temp=False, pyxbuild_dir=None, setup_args=None,
               reload_support=False, inplace=False):
    """Compile a PYX file to a DLL and return the name of the generated .so
    or .dll .

    :param filename: path of the .pyx or .py source file to compile
    :param ext: optional pre-built ``Extension`` to use instead of deriving
        one from *filename*
    :param force_rebuild: pass ``--force`` to build_ext
    :param build_in_temp: generate the intermediate C file in a temporary
        directory (requires Cython)
    :param pyxbuild_dir: base directory for build artifacts; defaults to a
        ``_pyxbld`` directory next to the source file
    :param setup_args: optional extra keyword arguments for distutils'
        ``Distribution`` (e.g. ``script_args``)
    :param reload_support: copy each build result to a fresh ``.reloadN``
        file so a previously imported module image is never overwritten in
        place
    :param inplace: build the extension next to the source file
    """
    # A mutable default argument would be shared between calls, so the
    # empty dict is created per call instead.
    if setup_args is None:
        setup_args = {}

    assert os.path.exists(filename), "Could not find %s" % os.path.abspath(filename)

    path, name = os.path.split(os.path.abspath(filename))

    if not ext:
        modname, extension = os.path.splitext(name)
        assert extension in (".pyx", ".py"), extension
        if not HAS_CYTHON:
            # Without Cython, fall back to compiling an already-generated
            # C file next to the source.
            filename = filename[:-len(extension)] + '.c'
        ext = Extension(name=modname, sources=[filename])

    if not pyxbuild_dir:
        pyxbuild_dir = os.path.join(path, "_pyxbld")

    # Walk the package part of the dotted module name backwards to find the
    # directory that "--build-lib" should point at for inplace builds.
    package_base_dir = path
    for package_name in ext.name.split('.')[-2::-1]:
        package_base_dir, pname = os.path.split(package_base_dir)
        if pname != package_name:
            # something is wrong - package path doesn't match file path
            package_base_dir = None
            break

    script_args = setup_args.get("script_args", [])
    if DEBUG or "--verbose" in script_args:
        quiet = "--verbose"
    else:
        quiet = "--quiet"
    args = [quiet, "build_ext"]
    if force_rebuild:
        args.append("--force")
    if inplace and package_base_dir:
        args.extend(['--build-lib', package_base_dir])
    if ext.name == '__init__' or ext.name.endswith('.__init__'):
        # package => provide __path__ early
        if not hasattr(ext, 'cython_directives'):
            ext.cython_directives = {'set_initial_path': 'SOURCEFILE'}
        elif 'set_initial_path' not in ext.cython_directives:
            ext.cython_directives['set_initial_path'] = 'SOURCEFILE'

    if HAS_CYTHON and build_in_temp:
        args.append("--pyrex-c-in-temp")
    sargs = setup_args.copy()
    sargs.update(
        {"script_name": None,
         "script_args": args + script_args})
    dist = Distribution(sargs)
    if not dist.ext_modules:
        dist.ext_modules = []
    dist.ext_modules.append(ext)
    if HAS_CYTHON:
        dist.cmdclass = {'build_ext': build_ext}
    build = dist.get_command_obj('build')
    build.build_base = pyxbuild_dir

    # Parse distutils config files, skipping any setup.cfg in the current
    # directory.  (The original code repeated this exact block twice in a
    # row; doing it once is sufficient.)
    config_files = dist.find_config_files()
    try:
        config_files.remove('setup.cfg')
    except ValueError:
        pass
    dist.parse_config_files(config_files)

    # parse_command_line() raises DistutilsArgError itself on bad arguments;
    # the former "except DistutilsArgError: raise" wrapper was a no-op.
    ok = dist.parse_command_line()
    if DEBUG:
        print("options (after parsing command line):")
        dist.dump_option_dicts()
    assert ok

    try:
        obj_build_ext = dist.get_command_obj("build_ext")
        dist.run_commands()
        so_path = obj_build_ext.get_outputs()[0]
        if obj_build_ext.inplace:
            # Python distutils get_outputs() returns a wrong so_path
            # when --inplace ; see http://bugs.python.org/issue5977
            # workaround:
            so_path = os.path.join(os.path.dirname(filename),
                                   os.path.basename(so_path))
        if reload_support:
            org_path = so_path
            timestamp = os.path.getmtime(org_path)
            global _reloads
            last_timestamp, last_path, count = _reloads.get(org_path, (None, None, 0))
            if last_timestamp == timestamp:
                # Source unchanged since the last build; hand out the same
                # reload copy again.
                so_path = last_path
            else:
                basename = os.path.basename(org_path)
                while count < 100:
                    count += 1
                    r_path = os.path.join(obj_build_ext.build_lib,
                                          basename + '.reload%s' % count)
                    try:
                        import shutil  # late import / reload_support is: debugging
                        try:
                            # Try to unlink first --- if the .so file
                            # is mmapped by another process,
                            # overwriting its contents corrupts the
                            # loaded image (on Linux) and crashes the
                            # other process. On Windows, unlinking an
                            # open file just fails.
                            if os.path.isfile(r_path):
                                os.unlink(r_path)
                        except OSError:
                            continue
                        shutil.copy2(org_path, r_path)
                        so_path = r_path
                    except IOError:
                        continue
                    break
                else:
                    # used up all 100 slots
                    raise ImportError("reload count for %s reached maximum" % org_path)
                _reloads[org_path] = (timestamp, so_path, count)
        return so_path
    except KeyboardInterrupt:
        sys.exit(1)
    except (IOError, os.error):
        exc = sys.exc_info()[1]
        error = grok_environment_error(exc)
        if DEBUG:
            sys.stderr.write(error + "\n")
        raise
# Smoke test: compile a local "dummy.pyx" and import the build result.
if __name__=="__main__":
    pyx_to_dll("dummy.pyx")
    import test
|
opendatakosovo/undp-gsc-api | refs/heads/master | ugca/views/groupedanswers.py | 1 | from flask import Response
from flask.views import View
from bson import json_util
from ugca import utils
from ugca import mongo
class GroupedAnswers(View):
    """Flask view that aggregates survey answers for one question, grouped
    by a surveyee attribute and optionally disaggregated by a second one."""

    def dispatch_request(self, qid, group, disaggregate=None):
        '''Get the answers to a question grouped by a given interviewee parameter

        :param qid: the question id, i.e. the number of the question.
        :param group: which surveyee parameter to group by:
            income, gender, municipality, maritalstatus, age, education,
            region, ethnicity, employment.position, employment.institution,
            and employment.level
        :param disaggregate: which surveyee parameter to disaggregate.
            e.g.: /question/1/group/age/disaggregate/gender
            e.g.: /question/1/group/employment.level/disaggregate/maritalstatus
        '''
        result_json = {}

        # Figure out the number of possible answers for the given question.
        number_of_answers = utils.get_number_of_answers(qid)

        if number_of_answers > 0:
            # Build the $group stage: the group key plus one $sum
            # accumulator per possible answer.
            group_json = {}
            if disaggregate is not None:
                group_json["_id"] = {
                    "group": "$surveyee." + group,
                    "disaggregate": "$surveyee." + disaggregate
                }
            else:
                group_json["_id"] = "$surveyee." + group

            for answer_index in range(1, number_of_answers + 1):
                question_key = "q" + str(qid) + "a" + str(answer_index)
                group_json[question_key] = {
                    "$sum": "$q" + str(qid) + ".answers.a" + str(answer_index) + ".value"
                }

            # Build the $project stage, renaming each accumulator back to a
            # plain answer key ("a1", "a2", ...).
            project_json = {}
            for answer_index in range(1, number_of_answers + 1):
                answer_key = "a" + str(answer_index)
                project_json[answer_key] = "$q" + str(qid) + "a" + str(answer_index)

            # Sort groups by their _id for a stable, predictable order.
            sort_json = {"_id": 1}

            # Assemble and execute the aggregation pipeline.
            aggregate_json = [
                {
                    "$group": group_json
                },
                {
                    "$project": project_json
                },
                {
                    "$sort": sort_json
                }
            ]
            response_json = mongo.db.gsc.aggregate(aggregate_json)
            answers_json = response_json['result']

            result_json = {
                "count": {
                    "answers": number_of_answers,
                    "group": len(answers_json)
                },
                "answers": answers_json
            }

        # Build and return the JSON response.
        resp = Response(
            response=json_util.dumps(result_json),
            mimetype='application/json')
        return resp
|
lmazuel/azure-sdk-for-python | refs/heads/master | azure-mgmt-rdbms/azure/mgmt/rdbms/mysql/models/firewall_rule_py3.py | 2 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource import ProxyResource
class FirewallRule(ProxyResource):
    """Represents a server firewall rule.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource ID
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param start_ip_address: Required. The start IP address of the server
     firewall rule. Must be IPv4 format.
    :type start_ip_address: str
    :param end_ip_address: Required. The end IP address of the server firewall
     rule. Must be IPv4 format.
    :type end_ip_address: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'start_ip_address': {'required': True, 'pattern': r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$'},
        'end_ip_address': {'required': True, 'pattern': r'^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$'},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'start_ip_address': {'key': 'properties.startIpAddress', 'type': 'str'},
        'end_ip_address': {'key': 'properties.endIpAddress', 'type': 'str'},
    }

    def __init__(self, *, start_ip_address: str, end_ip_address: str, **kwargs) -> None:
        # Fixed a syntax error in the generated code: the super().__init__
        # call contained a stray leading comma before **kwargs.
        super(FirewallRule, self).__init__(**kwargs)
        self.start_ip_address = start_ip_address
        self.end_ip_address = end_ip_address
|
Microvellum/Fluid-Designer | refs/heads/master | win64-vc/2.78/python/lib/encodings/hex_codec.py | 202 | """Python 'hex_codec' Codec - 2-digit hex content transfer encoding.
This codec de/encodes from bytes to bytes.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import binascii
### Codec APIs
def hex_encode(input, errors='strict'):
    """Encode *input* bytes as their 2-digit-hex representation.

    Returns a ``(output, length_consumed)`` tuple, as the codec API
    requires.  Only the 'strict' error mode is supported.
    """
    assert errors == 'strict'
    output = binascii.b2a_hex(input)
    return (output, len(input))
def hex_decode(input, errors='strict'):
    """Decode 2-digit-hex *input* back into raw bytes.

    Returns a ``(output, length_consumed)`` tuple, as the codec API
    requires; the consumed length is that of the hex input.  Only the
    'strict' error mode is supported.
    """
    assert errors == 'strict'
    raw = binascii.a2b_hex(input)
    return (raw, len(input))
class Codec(codecs.Codec):
    """Stateless codec delegating to the module-level hex functions."""

    def encode(self, input, errors='strict'):
        """Hex-encode *input*; returns (output bytes, length consumed)."""
        output, consumed = hex_encode(input, errors)
        return (output, consumed)

    def decode(self, input, errors='strict'):
        """Hex-decode *input*; returns (output bytes, length consumed)."""
        output, consumed = hex_decode(input, errors)
        return (output, consumed)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental hex encoder.

    Hex encoding is stateless, so each chunk is converted independently and
    the *final* flag is irrelevant.
    """

    def encode(self, input, final=False):
        assert self.errors == 'strict'
        hexed = binascii.b2a_hex(input)
        return hexed
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental hex decoder.

    Hex decoding is stateless, so each chunk is converted independently and
    the *final* flag is irrelevant.
    """

    def decode(self, input, final=False):
        assert self.errors == 'strict'
        raw = binascii.a2b_hex(input)
        return raw
class StreamWriter(Codec, codecs.StreamWriter):
    # This is a bytes-to-bytes codec, so the stream buffer holds bytes.
    charbuffertype = bytes
class StreamReader(Codec, codecs.StreamReader):
    # This is a bytes-to-bytes codec, so the stream buffer holds bytes.
    charbuffertype = bytes
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this codec with the codecs
    machinery."""
    return codecs.CodecInfo(
        name='hex',
        encode=hex_encode,
        decode=hex_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
        # bytes-to-bytes codec: must not be reachable via str.encode() /
        # bytes.decode(), only via codecs.encode()/decode().
        _is_text_encoding=False,
    )
adfinis-sygroup/pyaptly | refs/heads/master | pyaptly/version.py | 1 | """Version module to be read from various places"""
__version__ = "1.2.0" # pragma: no cover
|
phenoxim/nova | refs/heads/master | nova/tests/functional/api_sample_tests/test_extension_info.py | 15 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api_sample_tests import api_sample_base
class ExtensionInfoAllSamplesJsonTest(api_sample_base.ApiSampleTestBaseV21):
    """API-sample test for listing all extensions."""

    sample_dir = "extension-info"

    def test_list_extensions(self):
        response = self._do_get('extensions')
        # The full extension list is one of the places that things are
        # different between the API versions and the legacy vs. new
        # stack. We default to the v2.1 case.
        if self.api_major_version == 'v2':
            template = 'extensions-list-resp-v21-compatible'
        else:
            template = 'extensions-list-resp'
        self._verify_response(template, {}, response, 200)
class ExtensionInfoSamplesJsonTest(api_sample_base.ApiSampleTestBaseV21):
    """API-sample test for fetching a single extension."""

    sample_dir = "extension-info"

    def test_get_extensions(self):
        # Fetch one extension and compare against the stored sample.
        resp = self._do_get('extensions/os-agents')
        self._verify_response('extensions-get-resp', {}, resp, 200)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.