repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable)
|---|---|---|---|---|
navycrow/Sick-Beard
|
refs/heads/development
|
lib/hachoir_parser/misc/msoffice.py
|
90
|
"""
Parsers for the different streams and fragments found in an OLE2 file.
Documents:
- goffice source code
Author: Robert Xiao, Victor Stinner
Creation: 2006-04-23
"""
from lib.hachoir_parser import HachoirParser
from lib.hachoir_core.field import FieldSet, RootSeekableFieldSet, RawBytes
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.stream import StringInputStream
from lib.hachoir_parser.misc.msoffice_summary import SummaryFieldSet, CompObj
from lib.hachoir_parser.misc.word_doc import WordDocumentFieldSet
PROPERTY_NAME = {
u"\5DocumentSummaryInformation": "doc_summary",
u"\5SummaryInformation": "summary",
u"WordDocument": "word_doc",
}
class OfficeRootEntry(HachoirParser, RootSeekableFieldSet):
PARSER_TAGS = {
"description": "Microsoft Office document subfragments",
}
endian = LITTLE_ENDIAN
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
return True
def createFields(self):
for index, property in enumerate(self.ole2.properties):
if index == 0:
continue
try:
name = PROPERTY_NAME[property["name"].value]
except LookupError:
name = property.name+"content"
for field in self.parseProperty(index, property, name):
yield field
def parseProperty(self, property_index, property, name_prefix):
ole2 = self.ole2
if not property["size"].value:
return
if property["size"].value >= ole2["header/threshold"].value:
return
name = "%s[]" % name_prefix
first = None
previous = None
size = 0
start = property["start"].value
chain = ole2.getChain(start, True)
blocksize = ole2.ss_size
desc_format = "Small blocks %s..%s (%s)"
while True:
try:
block = chain.next()
contiguous = False
if not first:
first = block
contiguous = True
if previous and block == (previous+1):
contiguous = True
if contiguous:
previous = block
size += blocksize
continue
except StopIteration:
block = None
self.seekSBlock(first)
desc = desc_format % (first, previous, previous-first+1)
size = min(size, property["size"].value*8)
if name_prefix in ("summary", "doc_summary"):
yield SummaryFieldSet(self, name, desc, size=size)
elif name_prefix == "word_doc":
yield WordDocumentFieldSet(self, name, desc, size=size)
elif property_index == 1:
yield CompObj(self, "comp_obj", desc, size=size)
else:
yield RawBytes(self, name, size//8, desc)
if block is None:
break
first = block
previous = block
size = ole2.sector_size
def seekSBlock(self, block):
self.seekBit(block * self.ole2.ss_size)
class FragmentGroup:
def __init__(self, parser):
self.items = []
self.parser = parser
def add(self, item):
self.items.append(item)
def createInputStream(self):
# FIXME: Use lazy stream creation
data = []
for item in self.items:
data.append( item["rawdata"].value )
data = "".join(data)
# FIXME: Use smarter code to send arguments
args = {"ole2": self.items[0].root}
tags = {"class": self.parser, "args": args}
tags = tags.iteritems()
return StringInputStream(data, "<fragment group>", tags=tags)
class CustomFragment(FieldSet):
def __init__(self, parent, name, size, parser, description=None, group=None):
FieldSet.__init__(self, parent, name, description, size=size)
if not group:
group = FragmentGroup(parser)
self.group = group
self.group.add(self)
def createFields(self):
yield RawBytes(self, "rawdata", self.size//8)
def _createInputStream(self, **args):
return self.group.createInputStream()
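
# Hedged usage sketch (not part of the original module): a parent parser is
# assumed to create several CustomFragment fields sharing one FragmentGroup,
# so the scattered pieces can later be reassembled into a single sub-stream:
#
#     group = None
#     for frag_size in fragment_sizes:              # hypothetical bit sizes
#         field = CustomFragment(parent, "frag[]", frag_size,
#                                WordDocumentFieldSet, group=group)
#         group = field.group                       # reuse the same group
#         yield field
#
# FragmentGroup.createInputStream() then concatenates every "rawdata" value
# and tags the resulting StringInputStream with the target parser class.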
|
plotly/python-api
|
refs/heads/master
|
packages/python/plotly/plotly/validators/scatter3d/error_y/__init__.py
|
4
|
import sys
if sys.version_info < (3, 7):
from ._width import WidthValidator
from ._visible import VisibleValidator
from ._valueminus import ValueminusValidator
from ._value import ValueValidator
from ._type import TypeValidator
from ._tracerefminus import TracerefminusValidator
from ._traceref import TracerefValidator
from ._thickness import ThicknessValidator
from ._symmetric import SymmetricValidator
from ._copy_zstyle import Copy_ZstyleValidator
from ._color import ColorValidator
from ._arraysrc import ArraysrcValidator
from ._arrayminussrc import ArrayminussrcValidator
from ._arrayminus import ArrayminusValidator
from ._array import ArrayValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._width.WidthValidator",
"._visible.VisibleValidator",
"._valueminus.ValueminusValidator",
"._value.ValueValidator",
"._type.TypeValidator",
"._tracerefminus.TracerefminusValidator",
"._traceref.TracerefValidator",
"._thickness.ThicknessValidator",
"._symmetric.SymmetricValidator",
"._copy_zstyle.Copy_ZstyleValidator",
"._color.ColorValidator",
"._arraysrc.ArraysrcValidator",
"._arrayminussrc.ArrayminussrcValidator",
"._arrayminus.ArrayminusValidator",
"._array.ArrayValidator",
],
)
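
# Hedged note (not emitted by the code generator): on Python >= 3.7 the
# relative_import helper is assumed to provide PEP 562 lazy loading, i.e. a
# module-level __getattr__ roughly equivalent to:
#
#     import importlib
#     def __getattr__(name):                        # illustrative sketch only
#         if name == "WidthValidator":
#             return importlib.import_module("._width", __name__).WidthValidator
#         raise AttributeError(name)
#
# so a validator class is only imported the first time it is accessed.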
|
sbellem/django
|
refs/heads/master
|
django/contrib/gis/utils/wkt.py
|
589
|
"""
Utilities for manipulating Geometry WKT.
"""
from django.utils import six
def precision_wkt(geom, prec):
"""
    Returns WKT text of the geometry according to the given precision (an
    integer or a string). If the precision is an integer, the coordinates in
    the WKT will be truncated to that many decimal places:
>>> from django.contrib.gis.geos import Point
>>> pnt = Point(5, 23)
>>> pnt.wkt
'POINT (5.0000000000000000 23.0000000000000000)'
>>> precision_wkt(pnt, 1)
'POINT (5.0 23.0)'
If the precision is a string, it must be valid Python format string
(e.g., '%20.7f') -- thus, you should know what you're doing.
"""
if isinstance(prec, int):
num_fmt = '%%.%df' % prec
elif isinstance(prec, six.string_types):
num_fmt = prec
else:
raise TypeError
# TODO: Support 3D geometries.
coord_fmt = ' '.join([num_fmt, num_fmt])
def formatted_coords(coords):
return ','.join(coord_fmt % c[:2] for c in coords)
def formatted_poly(poly):
return ','.join('(%s)' % formatted_coords(r) for r in poly)
def formatted_geom(g):
gtype = str(g.geom_type).upper()
yield '%s(' % gtype
if gtype == 'POINT':
yield formatted_coords((g.coords,))
elif gtype in ('LINESTRING', 'LINEARRING'):
yield formatted_coords(g.coords)
elif gtype in ('POLYGON', 'MULTILINESTRING'):
yield formatted_poly(g)
elif gtype == 'MULTIPOINT':
yield formatted_coords(g.coords)
elif gtype == 'MULTIPOLYGON':
yield ','.join('(%s)' % formatted_poly(p) for p in g)
elif gtype == 'GEOMETRYCOLLECTION':
yield ','.join(''.join(wkt for wkt in formatted_geom(child)) for child in g)
else:
raise TypeError
yield ')'
return ''.join(wkt for wkt in formatted_geom(geom))
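
# Hedged usage note (illustrative, not part of the original module): the
# string form of ``prec`` is passed straight through as a printf-style
# format for each coordinate, e.g.
#
#     precision_wkt(pnt, '%.3f')   # -> every coordinate rendered with '%.3f'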
|
krisb78/py-couchdb
|
refs/heads/master
|
pycouchdb/__init__.py
|
3
|
# -*- coding: utf-8 -*-
__author__ = "Andrey Antukh"
__license__ = "BSD"
__version__ = "1.12"
__maintainer__ = "Rinat Sabitov"
__email__ = "rinat.sabitov@gmail.com"
__status__ = "Development"
from .client import Server
|
tlatzko/spmcluster
|
refs/heads/master
|
.tox/2.6-cover/lib/python2.6/site-packages/nose/sphinx/__init__.py
|
1307
|
pass
|
MarkWh1te/xueqiu_predict
|
refs/heads/master
|
python3_env/lib/python3.4/site-packages/sqlalchemy/orm/scoping.py
|
55
|
# orm/scoping.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import exc as sa_exc
from ..util import ScopedRegistry, ThreadLocalRegistry, warn
from . import class_mapper, exc as orm_exc
from .session import Session
__all__ = ['scoped_session']
class scoped_session(object):
"""Provides scoped management of :class:`.Session` objects.
See :ref:`unitofwork_contextual` for a tutorial.
"""
session_factory = None
"""The `session_factory` provided to `__init__` is stored in this
attribute and may be accessed at a later time. This can be useful when
a new non-scoped :class:`.Session` or :class:`.Connection` to the
database is needed."""
def __init__(self, session_factory, scopefunc=None):
"""Construct a new :class:`.scoped_session`.
:param session_factory: a factory to create new :class:`.Session`
instances. This is usually, but not necessarily, an instance
of :class:`.sessionmaker`.
:param scopefunc: optional function which defines
the current scope. If not passed, the :class:`.scoped_session`
object assumes "thread-local" scope, and will use
a Python ``threading.local()`` in order to maintain the current
:class:`.Session`. If passed, the function should return
a hashable token; this token will be used as the key in a
dictionary in order to store and retrieve the current
:class:`.Session`.
"""
self.session_factory = session_factory
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
self.registry = ThreadLocalRegistry(session_factory)
def __call__(self, **kw):
"""Return the current :class:`.Session`, creating it
using the :attr:`.scoped_session.session_factory` if not present.
:param \**kw: Keyword arguments will be passed to the
:attr:`.scoped_session.session_factory` callable, if an existing
:class:`.Session` is not present. If the :class:`.Session` is present
and keyword arguments have been passed,
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if kw:
scope = kw.pop('scope', False)
if scope is not None:
if self.registry.has():
raise sa_exc.InvalidRequestError(
"Scoped session is already present; "
"no new arguments may be specified.")
else:
sess = self.session_factory(**kw)
self.registry.set(sess)
return sess
else:
return self.session_factory(**kw)
else:
return self.registry()
def remove(self):
"""Dispose of the current :class:`.Session`, if present.
This will first call :meth:`.Session.close` method
on the current :class:`.Session`, which releases any existing
transactional/connection resources still being held; transactions
specifically are rolled back. The :class:`.Session` is then
discarded. Upon next usage within the same scope,
the :class:`.scoped_session` will produce a new
:class:`.Session` object.
"""
if self.registry.has():
self.registry().close()
self.registry.clear()
def configure(self, **kwargs):
"""reconfigure the :class:`.sessionmaker` used by this
:class:`.scoped_session`.
See :meth:`.sessionmaker.configure`.
"""
if self.registry.has():
warn('At least one scoped session is already present. '
' configure() can not affect sessions that have '
'already been created.')
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
"""return a class property which produces a :class:`.Query` object
against the class and the current :class:`.Session` when called.
e.g.::
Session = scoped_session(sessionmaker())
class MyClass(object):
query = Session.query_property()
# after mappers are defined
result = MyClass.query.filter(MyClass.name=='foo').all()
Produces instances of the session's configured query class by
default. To override and use a custom implementation, provide
a ``query_cls`` callable. The callable will be invoked with
the class's mapper as a positional argument and a session
keyword argument.
There is no limit to the number of query properties placed on
a class.
"""
class query(object):
def __get__(s, instance, owner):
try:
mapper = class_mapper(owner)
if mapper:
if query_cls:
# custom query class
return query_cls(mapper, session=self.registry())
else:
# session's configured query class
return self.registry().query(mapper)
except orm_exc.UnmappedClassError:
return None
return query()
ScopedSession = scoped_session
"""Old name for backwards compatibility."""
def instrument(name):
def do(self, *args, **kwargs):
return getattr(self.registry(), name)(*args, **kwargs)
return do
for meth in Session.public_methods:
setattr(scoped_session, meth, instrument(meth))
def makeprop(name):
def set(self, attr):
setattr(self.registry(), name, attr)
def get(self):
return getattr(self.registry(), name)
return property(get, set)
for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
'is_active', 'autoflush', 'no_autoflush', 'info'):
setattr(scoped_session, prop, makeprop(prop))
def clslevel(name):
def do(cls, *args, **kwargs):
return getattr(Session, name)(*args, **kwargs)
return classmethod(do)
for prop in ('close_all', 'object_session', 'identity_key'):
setattr(scoped_session, prop, clslevel(prop))
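
# Hedged usage sketch (standard scoped_session pattern, not part of this
# module; engine URL is hypothetical):
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker, scoped_session
#
#     engine = create_engine("sqlite://")
#     Session = scoped_session(sessionmaker(bind=engine))
#
#     session = Session()   # same Session within one thread (default scope)
#     ...                   # use it directly, or via the proxied methods
#     Session.remove()      # close and discard it at the end of the scope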
|
daodaoliang/python-phonenumbers
|
refs/heads/dev
|
python/phonenumbers/shortdata/region_CX.py
|
11
|
"""Auto-generated file, do not edit by hand. CX metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_CX = PhoneMetadata(id='CX', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[01]\\d{2}', possible_number_pattern='\\d{3}'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='000|112', possible_number_pattern='\\d{3}', example_number='112'),
short_code=PhoneNumberDesc(national_number_pattern='000|112', possible_number_pattern='\\d{3}', example_number='112'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
short_data=True)
|
0Knowledge/googletest
|
refs/heads/master
|
test/gtest_xml_output_unittest.py
|
1815
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"
SUPPORTS_STACK_TRACES = False
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
self.assertTrue(
match,
'XML datetime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is available only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
|
geekboxzone/lollipop_external_chromium_org_testing_gtest
|
refs/heads/geekbox
|
test/gtest_xml_output_unittest.py
|
1815
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"
SUPPORTS_STACK_TRACES = False
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
self.assertTrue(
match,
'XML datetime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is available only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
|
Xaxetrov/OSCAR
|
refs/heads/master
|
oscar/agent/learning_agent.py
|
1
|
from oscar.agent.learning_structure import LearningStructure
from oscar.agent.custom_agent import CustomAgent
class LearningAgent(LearningStructure, CustomAgent):
"""
An abstract class for building learning agents.
Subclasses must implement the following LearningStructure methods:
- _step
- _format_observation
- _transform_action
(see LearningStructure code for more information on them)
"""
def __init__(self, train_mode=False, shared_memory=None):
"""
Constructor of the abstract class LearningAgent.
Subclasses must keep the same calling signature to work with the hierarchy factory.
self.observation_space and self.action_space must be set before calling this constructor.
:param train_mode: whether the agent should train or just play (default: False -> play)
:param shared_memory: the memory used during training to communicate with the environment
(unused when playing, required when training)
"""
LearningStructure.__init__(self, train_mode, shared_memory)
self.failed_meta_action_counter = 0
self.episode_steps = 0
def step(self, obs, locked_choice=None):
self.episode_steps += 1
return super().step(obs, locked_choice)
def _learning_step(self, obs):
result = super()._learning_step(obs)
result["failure_callback"] = self.failure_callback
return result
def failure_callback(self):
self.failed_meta_action_counter += 1
def reset(self):
super().reset()
print("Failed meta action :", self.failed_meta_action_counter)
self.failed_meta_action_counter = 0
self.episode_steps = 0
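
# Hedged sketch of a concrete subclass (the method signatures below are
# assumed from the docstring; see LearningStructure for the real ones):
#
#     class RandomAgent(LearningAgent):
#         def __init__(self, train_mode=False, shared_memory=None):
#             self.observation_space = ...   # set before calling super().__init__
#             self.action_space = ...
#             super().__init__(train_mode, shared_memory)
#
#         def _step(self, obs):
#             return self.action_space.sample()
#
#         def _format_observation(self, obs):
#             return obs
#
#         def _transform_action(self, action):
#             return action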
|
xsynergy510x/android_external_chromium_org
|
refs/heads/cm-12.1
|
chrome/test/chromeos/autotest/files/client/deps/chrome_test/common.py
|
187
|
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os, sys
dirname = os.path.dirname(sys.modules[__name__].__file__)
client_dir = os.path.abspath(os.path.join(dirname, "../../"))
sys.path.insert(0, client_dir)
import setup_modules
sys.path.pop(0)
setup_modules.setup(base_path=client_dir,
root_module_name="autotest_lib.client")
|
ReganBell/QReview
|
refs/heads/master
|
examples/graph/atlas.py
|
54
|
#!/usr/bin/env python
"""
Atlas of all graphs of 6 nodes or less.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.generators.atlas import *
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
import random
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
Attempt to check for isomorphisms and remove.
"""
Atlas=graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U=nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree=[n for n in G if G.degree(n)==0]
for n in zerodegree:
G.remove_node(n)
U=nx.disjoint_union(U,G)
# list of graphs of all connected components
C=nx.connected_component_subgraphs(U)
UU=nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist=[] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G,nlist):
nlist.append(G)
UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1,G2):
return True
return False
if __name__ == '__main__':
import networkx as nx
G=atlas6()
print("graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
print(nx.number_connected_components(G),"connected components")
try:
from networkx import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
import matplotlib.pyplot as plt
plt.figure(1,figsize=(8,8))
# layout graphs with positions using graphviz neato
pos=nx.graphviz_layout(G,prog="neato")
# color nodes the same in each connected subgraph
C=nx.connected_component_subgraphs(G)
for g in C:
c=[random.random()]*nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.savefig("atlas.png",dpi=75)
|
xiaonanln/myleetcode-python
|
refs/heads/master
|
src/684. Redundant Connection.py
|
1
|
class UF(object):
def __init__(self, N):
self.list = [i for i in xrange(N+1)] # every V points to self
def connect(self, u, v):
id1 = self.id(u)
id2 = self.id(v)
self.list[id2] = id1
def id(self, v):
pv = self.list[v]
if pv == v:
return v
res = self.list[v] = self.id(pv)
return res
class Solution(object):
def findRedundantConnection(self, edges):
"""
:type edges: List[List[int]]
:rtype: List[int]
"""
N = max(v for u, v in edges)
# print 'N', N
uf = UF(N)
for u, v in edges:
if uf.id(u) != uf.id(v):
uf.connect(u, v)
else:
return [u, v]
print Solution().findRedundantConnection([[1,2], [2,3], [3,4], [1,4], [1,5]])
print Solution().findRedundantConnection([[1,2], [1,3], [2,3]])
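# Expected output of the two sample calls above: [1, 4] and [2, 3] -- in each
# case the first edge whose endpoints already share a root when it is
# processed is the redundant one.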
|
christianurich/VIBe2UrbanSim
|
refs/heads/master
|
3rdparty/opus/src/biocomplexity/land_cover/dmu.py
|
2
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from numpy import greater_equal, less_equal
from opus_core.variables.variable import Variable
from biocomplexity.land_cover.variable_functions import my_attribute_label
class dmu(Variable):
"""Urbansim development type mixed-use, where(devtype==[9|10|11|12|13|14|15|16])"""
development_type_id = 'devt'
def dependencies(self):
return [my_attribute_label(self.development_type_id)]
def compute(self, dataset_pool):
devt = self.get_dataset().get_attribute(name=self.development_type_id)
return greater_equal(devt, 9) & less_equal(devt, 16)
from opus_core.tests import opus_unittest
from biocomplexity.variable_test_toolbox import VariableTestToolbox
from numpy import array
from numpy import ma
from biocomplexity.tests.expected_data_test import ExpectedDataTest
#class Tests(opus_unittest.OpusTestCase):
class Tests(ExpectedDataTest):
variable_name = "biocomplexity.land_cover.dmu"
def test_my_inputs(self):
values = VariableTestToolbox().compute_variable(self.variable_name,
{"land_cover":{
"devt": array([1, 10, 21, 12, 16, 9, 8, 17, 15])}},
dataset = "land_cover")
should_be = array([0, 1, 0, 1, 1, 1, 0, 0, 1])
self.assert_(ma.allequal(values, should_be),
msg = "Error in " + self.variable_name)
def test_tree(self):
values = VariableTestToolbox().compute_variable(self.variable_name,
{"land_cover":{
"lct":array([1, 2, 3]),
"devgrid_id":array([1, 1, 2])},
"gridcell":{
"grid_id": array([1, 2, 3]),
"development_type_id": array([10, 5, 3])}},
dataset = "land_cover")
should_be = array([1, 1, 0])
self.assert_(ma.allequal(values, should_be),
msg = "Error in " + self.variable_name)
if __name__ == "__main__":
opus_unittest.main()
|
CLOSER-Cohorts/us2caddies
|
refs/heads/master
|
us2caddies/caddies/objects/response_domain_text.py
|
1
|
__author__ = 'pwidqssg'
from base import CaddiesObject
class ResponseDomainText(CaddiesObject):
def __init__(self, id, maxlen = None, label = ''):
self.id = id
self.maxlen = maxlen
self.label = label
|
WilliamMayor/nics
|
refs/heads/master
|
tests/test_parser.py
|
1
|
from datetime import date
from datetime import datetime
from datetime import timedelta
from datetime import time
import pytz
from nics import parse
def test_can_parse_simple_lines():
assert ['first', 'second'] == list(parse.text_into_lines('first\r\nsecond'))
def test_can_parse_folded_lines_with_space():
assert ['first second'] == list(parse.text_into_lines('first\r\n second'))
def test_can_parse_folded_lines_with_tab():
assert ['first second'] == list(parse.text_into_lines('first\r\n\t second'))
def test_can_parse_multiple_folded_lines():
assert ['first second third'] == list(parse.text_into_lines('first\r\n second\r\n third'))
def test_replace_single_quoted():
assert ('A{1}C', ['B']) == parse.replace_quoted('A"B"C')
def test_replace_multiple_quoted():
assert ('A{1}C{2}', ['B', 'D']) == parse.replace_quoted('A"B"C"D"')
def test_can_use_replaced_quotes():
text, quoted = parse.replace_quoted('A"B"C"D"')
assert 'CD' == 'C{1}'.format(*quoted)
def test_can_parse_simple_content():
assert [('NAME', {}, 'VALUE')] == list(parse.lines_into_content(['NAME:VALUE']))
def test_can_parse_content_with_single_param():
assert [('NAME', {'FOO': ['BAR']}, 'VALUE')] == list(parse.lines_into_content(['NAME;FOO=BAR:VALUE']))
def test_can_parse_content_with_single_param_multiple_values():
assert [('NAME', {'FOO': ['BAR', 'BAR2']}, 'VALUE')] == list(parse.lines_into_content(['NAME;FOO=BAR,BAR2:VALUE']))
def test_can_parse_content_with_multiple_params():
assert [('NAME', {'FOO': ['BAR'], 'FOO2': ['BAR2']}, 'VALUE')] == list(parse.lines_into_content(['NAME;FOO=BAR;FOO2=BAR2:VALUE']))
def test_can_parse_content_with_quoted_param():
assert [('NAME', {'FOO': ['BAR;FOO2=BAR2']}, 'VALUE')] == list(parse.lines_into_content(['NAME;FOO="BAR;FOO2=BAR2":VALUE']))
def test_parse_boolean_values():
assert [('NAME', {'VALUE': ['BOOLEAN']}, [True])] == list(parse.lines_into_content(['NAME;VALUE=BOOLEAN:TRUE']))
assert [('NAME', {'VALUE': ['BOOLEAN']}, [False])] == list(parse.lines_into_content(['NAME;VALUE=BOOLEAN:FALSE']))
assert [('NAME', {'VALUE': ['BOOLEAN']}, [True])] == list(parse.lines_into_content(['NAME;VALUE=BOOLEAN:true']))
assert [('NAME', {'VALUE': ['BOOLEAN']}, [False])] == list(parse.lines_into_content(['NAME;VALUE=BOOLEAN:false']))
def test_parse_date_values():
assert [('NAME', {'VALUE': ['DATE']}, [date(2015, 1, 1)])] == list(parse.lines_into_content(['NAME;VALUE=DATE:20150101']))
assert [('NAME', {'VALUE': ['DATE']}, [date(2015, 1, 1), date(2015, 1, 2)])] == list(parse.lines_into_content(['NAME;VALUE=DATE:20150101,20150102']))
def test_parse_datetime_values():
assert [('NAME', {'VALUE': ['DATE-TIME']}, [datetime(2015, 1, 1, 10, 0, 0)])] == list(parse.lines_into_content(['NAME;VALUE=DATE-TIME:20150101T100000']))
assert [('NAME', {'VALUE': ['DATE-TIME']}, [datetime(2015, 1, 1, 10, 0, 0), datetime(2015, 1, 2, 10, 0, 0)])] == list(parse.lines_into_content(['NAME;VALUE=DATE-TIME:20150101T100000,20150102T100000']))
def test_parse_datetime_value_in_utc():
assert [('NAME', {'VALUE': ['DATE-TIME']}, [datetime(2015, 1, 1, 10, 0, 0, tzinfo=pytz.utc)])] == list(parse.lines_into_content(['NAME;VALUE=DATE-TIME:20150101T100000Z']))
def test_parse_duration_values():
assert [('NAME', {'VALUE': ['DURATION']}, [timedelta(days=1)])] == list(parse.lines_into_content(['NAME;VALUE=DURATION:P1D']))
assert [('NAME', {'VALUE': ['DURATION']}, [timedelta(days=1), timedelta(days=2)])] == list(parse.lines_into_content(['NAME;VALUE=DURATION:P1D,P2D']))
def test_parse_duration_negative():
assert [('NAME', {'VALUE': ['DURATION']}, [timedelta(days=-1)])] == list(parse.lines_into_content(['NAME;VALUE=DURATION:-P1D']))
def test_parse_duration_positive():
assert [('NAME', {'VALUE': ['DURATION']}, [timedelta(days=1)])] == list(parse.lines_into_content(['NAME;VALUE=DURATION:+P1D']))
def test_parse_duration_day_and_time():
assert [('NAME', {'VALUE': ['DURATION']}, [timedelta(days=1, hours=2, minutes=3, seconds=4)])] == list(parse.lines_into_content(['NAME;VALUE=DURATION:P1DT2H3M4S']))
def test_parse_duration_time():
assert [('NAME', {'VALUE': ['DURATION']}, [timedelta(hours=2, minutes=3, seconds=4)])] == list(parse.lines_into_content(['NAME;VALUE=DURATION:PT2H3M4S']))
def test_parse_duration_weeks():
assert [('NAME', {'VALUE': ['DURATION']}, [timedelta(days=14)])] == list(parse.lines_into_content(['NAME;VALUE=DURATION:P2W']))
def test_parse_float_values():
assert [('NAME', {'VALUE': ['FLOAT']}, [0.1])] == list(parse.lines_into_content(['NAME;VALUE=FLOAT:0.1']))
assert [('NAME', {'VALUE': ['FLOAT']}, [0.1])] == list(parse.lines_into_content(['NAME;VALUE=FLOAT:+0.1']))
assert [('NAME', {'VALUE': ['FLOAT']}, [-0.1])] == list(parse.lines_into_content(['NAME;VALUE=FLOAT:-0.1']))
assert [('NAME', {'VALUE': ['FLOAT']}, [1.0])] == list(parse.lines_into_content(['NAME;VALUE=FLOAT:1']))
assert [('NAME', {'VALUE': ['FLOAT']}, [0.1, 0.2])] == list(parse.lines_into_content(['NAME;VALUE=FLOAT:0.1,0.2']))
def test_parse_integer_values():
assert [('NAME', {'VALUE': ['INTEGER']}, [1])] == list(parse.lines_into_content(['NAME;VALUE=INTEGER:1']))
assert [('NAME', {'VALUE': ['INTEGER']}, [1])] == list(parse.lines_into_content(['NAME;VALUE=INTEGER:+1']))
assert [('NAME', {'VALUE': ['INTEGER']}, [-1])] == list(parse.lines_into_content(['NAME;VALUE=INTEGER:-1']))
assert [('NAME', {'VALUE': ['INTEGER']}, [1, 2])] == list(parse.lines_into_content(['NAME;VALUE=INTEGER:1,2']))
def test_parse_text_values():
assert [('NAME', {'VALUE': ['TEXT']}, ['foo'])] == list(parse.lines_into_content(['NAME;VALUE=TEXT:foo']))
assert [('NAME', {'VALUE': ['TEXT']}, ['foo', 'bar'])] == list(parse.lines_into_content(['NAME;VALUE=TEXT:foo,bar']))
def test_parse_text_with_newlines():
assert [('NAME', {'VALUE': ['TEXT']}, ['foo\nbar'])] == list(parse.lines_into_content(['NAME;VALUE=TEXT:foo\\nbar']))
def test_parse_text_with_commas():
assert [('NAME', {'VALUE': ['TEXT']}, ['foo, bar'])] == list(parse.lines_into_content(['NAME;VALUE=TEXT:foo\\, bar']))
def test_parse_text_with_backslashes():
assert [('NAME', {'VALUE': ['TEXT']}, ['foo\\bar'])] == list(parse.lines_into_content(['NAME;VALUE=TEXT:foo\\\\bar']))
def test_parse_text_with_semi_colons():
assert [('NAME', {'VALUE': ['TEXT']}, ['foo;bar'])] == list(parse.lines_into_content(['NAME;VALUE=TEXT:foo\\;bar']))
def test_parse_time_values():
assert [('NAME', {'VALUE': ['TIME']}, [time(10, 0, 0)])] == list(parse.lines_into_content(['NAME;VALUE=TIME:100000']))
assert [('NAME', {'VALUE': ['TIME']}, [time(10, 0, 0), time(10, 0, 0)])] == list(parse.lines_into_content(['NAME;VALUE=TIME:100000,100000']))
def test_parse_time_value_in_utc():
assert [('NAME', {'VALUE': ['TIME']}, [time(10, 0, 0, tzinfo=pytz.utc)])] == list(parse.lines_into_content(['NAME;VALUE=TIME:100000Z']))
def test_parse_utc_offset():
assert [('NAME', {'VALUE': ['UTC-OFFSET']}, [pytz.FixedOffset(60)])] == list(parse.lines_into_content(['NAME;VALUE=UTC-OFFSET:0100']))
assert [('NAME', {'VALUE': ['UTC-OFFSET']}, [pytz.FixedOffset(60)])] == list(parse.lines_into_content(['NAME;VALUE=UTC-OFFSET:+0100']))
assert [('NAME', {'VALUE': ['UTC-OFFSET']}, [pytz.FixedOffset(-60)])] == list(parse.lines_into_content(['NAME;VALUE=UTC-OFFSET:-0100']))
assert [('NAME', {'VALUE': ['UTC-OFFSET']}, [pytz.FixedOffset(90)])] == list(parse.lines_into_content(['NAME;VALUE=UTC-OFFSET:0130']))
|
x303597316/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/queries/tests.py
|
44
|
from __future__ import absolute_import,unicode_literals
import datetime
from operator import attrgetter
import pickle
import sys
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import DatabaseError, connection, connections, DEFAULT_DB_ALIAS
from django.db.models import Count, F, Q
from django.db.models.sql.where import WhereNode, EverythingNode, NothingNode
from django.db.models.sql.datastructures import EmptyResultSet
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import str_prefix
from django.utils import six
from django.utils import unittest
from django.utils.datastructures import SortedDict
from .models import (
Annotation, Article, Author, Celebrity, Child, Cover, Detail, DumbCategory,
ExtraInfo, Fan, Item, LeafA, Join, LeafB, LoopX, LoopZ, ManagedModel,
Member, NamedCategory, Note, Number, Plaything, PointerA, Ranking, Related,
Report, ReservedName, Tag, TvChef, Valid, X, Food, Eaten, Node, ObjectA,
ObjectB, ObjectC, CategoryItem, SimpleCategory, SpecialCategory,
OneToOneCategory, NullableName, ProxyCategory, SingleObject, RelatedObject,
ModelA, ModelB, ModelC, ModelD, Responsibility, Job, JobResponsibilities,
BaseA, FK1, Identifier, Program, Channel, Page, Paragraph, Chapter, Book,
MyObject, Order, OrderItem, Task, Staff, StaffUser, Ticket21203Parent,
Ticket21203Child, Classroom, School, Student)
class BaseQuerysetTest(TestCase):
def assertValueQuerysetEqual(self, qs, values):
return self.assertQuerysetEqual(qs, values, transform=lambda x: x)
class Queries1Tests(BaseQuerysetTest):
def setUp(self):
generic = NamedCategory.objects.create(name="Generic")
self.t1 = Tag.objects.create(name='t1', category=generic)
self.t2 = Tag.objects.create(name='t2', parent=self.t1, category=generic)
self.t3 = Tag.objects.create(name='t3', parent=self.t1)
t4 = Tag.objects.create(name='t4', parent=self.t3)
self.t5 = Tag.objects.create(name='t5', parent=self.t3)
self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
self.n3 = Note.objects.create(note='n3', misc='foo', id=3)
ann1 = Annotation.objects.create(name='a1', tag=self.t1)
ann1.notes.add(self.n1)
ann2 = Annotation.objects.create(name='a2', tag=t4)
ann2.notes.add(n2, self.n3)
# Create these out of order so that sorting by 'id' will be different to sorting
# by 'info'. Helps detect some problems later.
self.e2 = ExtraInfo.objects.create(info='e2', note=n2, value=41)
e1 = ExtraInfo.objects.create(info='e1', note=self.n1, value=42)
self.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
self.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=self.e2)
self.a4 = Author.objects.create(name='a4', num=4004, extra=self.e2)
self.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
self.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
self.i1 = Item.objects.create(name='one', created=self.time1, modified=self.time1, creator=self.a1, note=self.n3)
self.i1.tags = [self.t1, self.t2]
self.i2 = Item.objects.create(name='two', created=self.time2, creator=self.a2, note=n2)
self.i2.tags = [self.t1, self.t3]
self.i3 = Item.objects.create(name='three', created=time3, creator=self.a2, note=self.n3)
i4 = Item.objects.create(name='four', created=time4, creator=self.a4, note=self.n3)
i4.tags = [t4]
self.r1 = Report.objects.create(name='r1', creator=self.a1)
Report.objects.create(name='r2', creator=a3)
Report.objects.create(name='r3')
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
# will be rank3, rank2, rank1.
self.rank1 = Ranking.objects.create(rank=2, author=self.a2)
Cover.objects.create(title="first", item=i4)
Cover.objects.create(title="second", item=self.i2)
def test_ticket1050(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=True),
['<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__id__isnull=True),
['<Item: three>']
)
def test_ticket1801(self):
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i3),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
['<Author: a2>']
)
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertTrue(query.LOUTER not in [x[2] for x in query.alias_map.values()])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred')|Q(tags=self.t2)),
['<Item: one>']
)
# Each filter call is processed "at once" against a single table, so this is
# different from the previous example as it tries to find tags that are two
# things at once (rather than two tags).
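        # (A rough sketch of the difference, as an assumption about the
        # generated SQL rather than something this test asserts: chaining
        # .filter(tags=self.t1).filter(tags=self.t2) joins the tag table once
        # per filter() call, so a single Item can satisfy each condition via a
        # different tag, while Q(tags=self.t1) & Q(tags=self.t2) uses a single
        # join and would need one tag row to match both values at once.)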
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
[]
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred')|Q(tags=self.t2)),
[]
)
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertQuerysetEqual(list(qs), ['<Author: a2>'])
        self.assertEqual(qs.query.count_active_tables(), 2)
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
def test_ticket4464(self):
self.assertQuerysetEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
['<Item: two>']
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
['<Item: one>', '<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
['<Item: one>', '<Item: two>']
)
def test_tickets_2080_3592(self):
self.assertQuerysetEqual(
Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='one') | Q(name='a3')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(name='a3') | Q(item__name='one')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
['<Author: a2>']
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertQuerysetEqual(
Author.objects.filter(Q(id__in=[])|Q(id__in=[])),
[]
)
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values('creator').distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name='two').values('creator', 'name').distinct().count(),
4
)
self.assertEqual(
Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name', 'foo').distinct().count(),
4
)
self.assertEqual(
Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name').distinct().count(),
4
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values('creator', 'name').count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
self.assertQuerysetEqual(
q1,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(q2, ['<Item: one>'])
self.assertQuerysetEqual(
(q1 | q2).order_by('name'),
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>'])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertQuerysetEqual(
((q1 & q2) | q3).order_by('name'),
['<Item: four>', '<Item: one>']
)
def test_order_by_tables(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
combined_query = (q1 & q2).order_by('name').query
self.assertEqual(len([
t for t in combined_query.tables if combined_query.alias_refcount[t]
]), 1)
def test_order_by_join_unref(self):
"""
This test is related to the above one, testing that there aren't
old JOINs in the query.
"""
qs = Celebrity.objects.order_by('greatest_fan__fan_of')
self.assertIn('OUTER JOIN', str(qs.query))
qs = qs.order_by('id')
self.assertNotIn('OUTER JOIN', str(qs.query))
def test_tickets_4088_4306(self):
self.assertQuerysetEqual(
Report.objects.filter(creator=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__num=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), [])
self.assertQuerysetEqual(
Report.objects.filter(creator__id=self.a1.id),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__name='a1'),
['<Report: r1>']
)
def test_ticket4510(self):
self.assertQuerysetEqual(
Author.objects.filter(report__name='r1'),
['<Author: a1>']
)
def test_ticket7378(self):
self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>'])
def test_tickets_5324_6704(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__name='t4'),
['<Item: four>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct(),
['<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(),
['<Item: two>', '<Item: three>', '<Item: one>']
)
self.assertQuerysetEqual(
Author.objects.exclude(item__name='one').distinct().order_by('name'),
['<Author: a2>', '<Author: a3>', '<Author: a4>']
)
# Excluding across a m2m relation when there is more than one related
# object associated was problematic.
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'),
['<Item: three>']
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertTrue(query.LOUTER not in [x[2] for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
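        # (In other words -- an assumption about the generated SQL beyond the
        # single count asserted below -- the Author -> ExtraInfo join should
        # remain an INNER JOIN, and only the Author -> Item join should be
        # promoted to LEFT OUTER.)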
qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1)|Q(item__note=self.n3))
self.assertEqual(
len([x[2] for x in qs.query.alias_map.values() if x[2] == query.LOUTER and qs.query.alias_refcount[x[1]]]),
1
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertQuerysetEqual(
Tag.objects.filter(parent__isnull=True).order_by('name'),
['<Tag: t1>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__isnull=True).order_by('name'),
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
def test_ticket2091(self):
t = Tag.objects.get(name='t4')
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[t]),
['<Item: four>']
)
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
self.assertRaisesMessage(
AssertionError,
'Cannot combine queries on two different base models.',
lambda: Author.objects.all() & Tag.objects.all()
)
self.assertRaisesMessage(
AssertionError,
'Cannot combine queries on two different base models.',
lambda: Author.objects.all() | Tag.objects.all()
)
def test_ticket3141(self):
self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4)
self.assertEqual(
Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(),
4
)
def test_ticket2400(self):
self.assertQuerysetEqual(
Author.objects.filter(item__isnull=True),
['<Author: a3>']
)
self.assertQuerysetEqual(
Tag.objects.filter(item__isnull=True),
['<Tag: t5>']
)
def test_ticket2496(self):
self.assertQuerysetEqual(
Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1],
['<Item: four>']
)
def test_tickets_2076_7256(self):
# Ordering on related tables should be possible, even if the table is
# not otherwise involved.
self.assertQuerysetEqual(
Item.objects.order_by('note__note', 'name'),
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# Ordering on a related field should use the remote model's default
# ordering as a final step.
self.assertQuerysetEqual(
Author.objects.order_by('extra', '-name'),
['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>']
)
# Using remote model default ordering can span multiple models (in this
# case, Cover is ordered by Item's default, which uses Note's default).
self.assertQuerysetEqual(
Cover.objects.all(),
['<Cover: first>', '<Cover: second>']
)
# If the remote model does not have a default ordering, we order by its 'id'
# field.
self.assertQuerysetEqual(
Item.objects.order_by('creator', 'name'),
['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>']
)
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That
# isn't Django's problem. Garbage in, garbage out.
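        # (The duplicates in the expected list below arise because ordering on
        # the m2m field joins the tag table, so each Item row is repeated once
        # per matching tag.)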
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=False).order_by('tags', 'id'),
['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>']
)
# If we replace the default ordering, Django adjusts the required
# tables automatically. Item normally requires a join with Note to do
# the default ordering, but that isn't needed here.
qs = Item.objects.order_by('name')
self.assertQuerysetEqual(
qs,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertEqual(len(qs.query.tables), 1)
def test_tickets_2874_3002(self):
qs = Item.objects.select_related().order_by('note__note', 'name')
self.assertQuerysetEqual(
qs,
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# This is also a good select_related() test because there are multiple
# Note entries in the SQL. The two Note items should be different.
        self.assertEqual(repr(qs[0].note), '<Note: n2>')
self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>')
def test_ticket3037(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(creator__name='a3', name='two')|Q(creator__name='a4', name='four')),
['<Item: four>']
)
def test_tickets_5321_7070(self):
# Ordering columns must be included in the output columns. Note that
# this means results that might otherwise be distinct are not (if there
# are multiple values in the ordering cols), as in this example. This
# isn't a bug; it's a warning to be careful with the selection of
# ordering columns.
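        # (That is, DISTINCT applies to the full selected row, which here also
        # includes the 'note' ordering column, so rows that differ only in
        # 'note' are all kept -- hence 'foo' appearing twice below.)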
self.assertValueQuerysetEqual(
Note.objects.values('misc').distinct().order_by('note', '-misc'),
[{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}]
)
def test_ticket4358(self):
# If you don't pass any fields to values(), relation fields are
# returned as "foo_id" keys, not "foo". For consistency, you should be
# able to pass "foo_id" in the fields list and have it work, too. We
# actually allow both "foo" and "foo_id".
# The *_id version is returned by default.
self.assertTrue('note_id' in ExtraInfo.objects.values()[0])
# You can also pass it in explicitly.
self.assertValueQuerysetEqual(
ExtraInfo.objects.values('note_id'),
[{'note_id': 1}, {'note_id': 2}]
)
# ...or use the field name.
self.assertValueQuerysetEqual(
ExtraInfo.objects.values('note'),
[{'note': 1}, {'note': 2}]
)
def test_ticket2902(self):
# Parameters can be given to extra_select, *if* you use a SortedDict.
# (First we need to know which order the keys fall in "naturally" on
# your system, so we can put things in the wrong way around from
# normal. A normal dict would thus fail.)
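        # (With a plain dict, the order of the select aliases could differ from
        # the order of select_params, so 'one' and 'two' could end up bound to
        # the wrong aliases; SortedDict keeps the pairing explicit.)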
s = [('a', '%s'), ('b', '%s')]
params = ['one', 'two']
        if list({'a': 1, 'b': 2}.keys()) == ['a', 'b']:
s.reverse()
params.reverse()
# This slightly odd comparison works around the fact that PostgreSQL will
# return 'one' and 'two' as strings, not Unicode objects. It's a side-effect of
# using constants here and not a real concern.
d = Item.objects.extra(select=SortedDict(s), select_params=params).values('a', 'b')[0]
self.assertEqual(d, {'a': 'one', 'b': 'two'})
# Order by the number of tags attached to an item.
l = Item.objects.extra(select={'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'}).order_by('-count')
self.assertEqual([o.count for o in l], [2, 2, 1, 0])
def test_ticket6154(self):
# Multiple filter statements are joined using "AND" all the time.
self.assertQuerysetEqual(
Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1)|Q(item__note=self.n3)),
['<Author: a1>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(extra__note=self.n1)|Q(item__note=self.n3)).filter(id=self.a1.id),
['<Author: a1>']
)
def test_ticket6981(self):
self.assertQuerysetEqual(
Tag.objects.select_related('parent').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_ticket9926(self):
self.assertQuerysetEqual(
Tag.objects.select_related("parent", "category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.select_related('parent', "parent__category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_tickets_6180_6203(self):
# Dates with limits and/or counts
self.assertEqual(Item.objects.count(), 4)
self.assertEqual(Item.objects.datetimes('created', 'month').count(), 1)
self.assertEqual(Item.objects.datetimes('created', 'day').count(), 2)
self.assertEqual(len(Item.objects.datetimes('created', 'day')), 2)
self.assertEqual(Item.objects.datetimes('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0))
def test_tickets_7087_12242(self):
# Dates with extra select columns
self.assertQuerysetEqual(
Item.objects.datetimes('created', 'day').extra(select={'a': 1}),
['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
)
self.assertQuerysetEqual(
Item.objects.extra(select={'a': 1}).datetimes('created', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
)
name="one"
self.assertQuerysetEqual(
Item.objects.datetimes('created', 'day').extra(where=['name=%s'], params=[name]),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
self.assertQuerysetEqual(
Item.objects.extra(where=['name=%s'], params=[name]).datetimes('created', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
def test_ticket7155(self):
# Nullable dates
self.assertQuerysetEqual(
Item.objects.datetimes('modified', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
def test_ticket7098(self):
# Make sure semi-deprecated ordering by related models syntax still
# works.
self.assertValueQuerysetEqual(
Item.objects.values('note__note').order_by('queries_note.note', 'id'),
[{'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, {'note__note': 'n3'}]
)
def test_ticket7096(self):
# Make sure exclude() with multiple conditions continues to work.
self.assertQuerysetEqual(
Tag.objects.filter(parent=self.t1, name='t3').order_by('name'),
['<Tag: t3>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(),
['<Item: four>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
# More twisted cases, involving nested negations.
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t1', name='one')),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'),
['<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'),
['<Item: four>', '<Item: one>', '<Item: three>']
)
def test_tickets_7204_7506(self):
# Make sure querysets with related fields can be pickled. If this
# doesn't crash, it's a Good Thing.
pickle.dumps(Item.objects.all())
def test_ticket7813(self):
# We should also be able to pickle things that use select_related().
# The only tricky thing here is to ensure that we do the related
# selections properly after unpickling.
qs = Item.objects.select_related()
query = qs.query.get_compiler(qs.db).as_sql()[0]
query2 = pickle.loads(pickle.dumps(qs.query))
self.assertEqual(
query2.get_compiler(qs.db).as_sql()[0],
query
)
def test_deferred_load_qs_pickling(self):
# Check pickling of deferred-loading querysets
qs = Item.objects.defer('name', 'creator')
q2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(list(qs), list(q2))
q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
self.assertEqual(list(qs), list(q3))
def test_ticket7277(self):
self.assertQuerysetEqual(
self.n1.annotation_set.filter(Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5)),
['<Annotation: a1>']
)
def test_tickets_7448_7707(self):
# Complex objects should be converted to strings before being used in
# lookups.
self.assertQuerysetEqual(
Item.objects.filter(created__in=[self.time1, self.time2]),
['<Item: one>', '<Item: two>']
)
def test_ticket7235(self):
# An EmptyQuerySet should not raise exceptions if it is filtered.
Eaten.objects.create(meal='m')
q = Eaten.objects.none()
with self.assertNumQueries(0):
self.assertQuerysetEqual(q.all(), [])
self.assertQuerysetEqual(q.filter(meal='m'), [])
self.assertQuerysetEqual(q.exclude(meal='m'), [])
self.assertQuerysetEqual(q.complex_filter({'pk': 1}), [])
self.assertQuerysetEqual(q.select_related('food'), [])
self.assertQuerysetEqual(q.annotate(Count('food')), [])
self.assertQuerysetEqual(q.order_by('meal', 'food'), [])
self.assertQuerysetEqual(q.distinct(), [])
self.assertQuerysetEqual(
q.extra(select={'foo': "1"}),
[]
)
q.query.low_mark = 1
self.assertRaisesMessage(
AssertionError,
'Cannot change a query once a slice has been taken',
q.extra, select={'foo': "1"}
)
self.assertQuerysetEqual(q.reverse(), [])
self.assertQuerysetEqual(q.defer('meal'), [])
self.assertQuerysetEqual(q.only('meal'), [])
def test_ticket7791(self):
# There were "issues" when ordering and distinct-ing on fields related
# via ForeignKeys.
self.assertEqual(
len(Note.objects.order_by('extrainfo__info').distinct()),
3
)
# Pickling of DateQuerySets used to fail
qs = Item.objects.datetimes('created', 'month')
_ = pickle.loads(pickle.dumps(qs))
def test_ticket9997(self):
# If a ValuesList or Values queryset is passed as an inner query, we
# make sure it's only requesting a single value and use that as the
# thing to select.
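        # (Illustrative shape of the resulting SQL, assumed rather than
        # asserted here:
        #   ... WHERE name IN (SELECT name FROM queries_tag WHERE parent_id = %s))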
self.assertQuerysetEqual(
Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')),
['<Tag: t2>', '<Tag: t3>']
)
# Multi-valued values() and values_list() querysets should raise errors.
self.assertRaisesMessage(
TypeError,
'Cannot use a multi-field ValuesQuerySet as a filter value.',
lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id'))
)
self.assertRaisesMessage(
TypeError,
'Cannot use a multi-field ValuesListQuerySet as a filter value.',
lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id'))
)
def test_ticket9985(self):
# qs.values_list(...).values(...) combinations should work.
self.assertValueQuerysetEqual(
Note.objects.values_list("note", flat=True).values("id").order_by("id"),
[{'id': 1}, {'id': 2}, {'id': 3}]
)
self.assertQuerysetEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')),
['<Annotation: a1>']
)
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
def test_ticket10432(self):
# Testing an empty "__in" filter with a generator as the value.
def f():
return iter([])
n_obj = Note.objects.all()[0]
def g():
for i in [n_obj.pk]:
yield i
self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
def test_ticket10742(self):
        # Querysets used in an __in clause aren't evaluated separately; they
        # are embedded as subqueries in the outer query's SQL.
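        # (A rough sketch of the expected SQL shape, as an assumption rather
        # than something asserted below:
        #   SELECT ... FROM queries_author
        #   WHERE id IN (SELECT id FROM queries_author WHERE num < %s))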
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.filter(pk__in=subq)
self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>'])
# The subquery result cache should not be populated
self.assertTrue(subq._result_cache is None)
subq = Author.objects.filter(num__lt=3000)
qs = Author.objects.exclude(pk__in=subq)
self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>'])
# The subquery result cache should not be populated
self.assertTrue(subq._result_cache is None)
subq = Author.objects.filter(num__lt=3000)
self.assertQuerysetEqual(
Author.objects.filter(Q(pk__in=subq) & Q(name='a1')),
['<Author: a1>']
)
# The subquery result cache should not be populated
self.assertTrue(subq._result_cache is None)
def test_ticket7076(self):
# Excluding shouldn't eliminate NULL entries.
self.assertQuerysetEqual(
Item.objects.exclude(modified=self.time1).order_by('name'),
['<Item: four>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__name=self.t1.name),
['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
)
def test_ticket7181(self):
        # Ordering by related tables should accommodate nullable fields (this
        # test is a little tricky, since NULL ordering is database-dependent.
        # Instead, we just count the number of results).
self.assertEqual(len(Tag.objects.order_by('parent__name')), 5)
# Empty querysets can be merged with others.
self.assertQuerysetEqual(
Note.objects.none() | Note.objects.all(),
['<Note: n1>', '<Note: n2>', '<Note: n3>']
)
self.assertQuerysetEqual(
Note.objects.all() | Note.objects.none(),
['<Note: n1>', '<Note: n2>', '<Note: n3>']
)
self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), [])
self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), [])
def test_ticket9411(self):
# Make sure bump_prefix() (an internal Query method) doesn't (re-)break. It's
# sufficient that this query runs without error.
qs = Tag.objects.values_list('id', flat=True).order_by('id')
qs.query.bump_prefix()
first = qs[0]
self.assertEqual(list(qs), list(range(first, first+5)))
def test_ticket8439(self):
# Complex combinations of conjunctions, disjunctions and nullable
# relations.
self.assertQuerysetEqual(
Author.objects.filter(Q(item__note__extrainfo=self.e2)|Q(report=self.r1, name='xyz')),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(report=self.r1, name='xyz')|Q(item__note__extrainfo=self.e2)),
['<Author: a2>']
)
self.assertQuerysetEqual(
Annotation.objects.filter(Q(tag__parent=self.t1)|Q(notes__note='n1', name='a1')),
['<Annotation: a1>']
)
xx = ExtraInfo.objects.create(info='xx', note=self.n3)
self.assertQuerysetEqual(
Note.objects.filter(Q(extrainfo__author=self.a1)|Q(extrainfo=xx)),
['<Note: n1>', '<Note: n3>']
)
q = Note.objects.filter(Q(extrainfo__author=self.a1)|Q(extrainfo=xx)).query
self.assertEqual(
len([x[2] for x in q.alias_map.values() if x[2] == q.LOUTER and q.alias_refcount[x[1]]]),
1
)
def test_ticket17429(self):
"""
Ensure that Meta.ordering=None works the same as Meta.ordering=[]
"""
original_ordering = Tag._meta.ordering
Tag._meta.ordering = None
try:
self.assertQuerysetEqual(
Tag.objects.all(),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
ordered=False
)
finally:
Tag._meta.ordering = original_ordering
def test_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4'),
[repr(i) for i in Item.objects.filter(~Q(tags__name='t4'))])
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name='t4')|Q(tags__name='t3')),
[repr(i) for i in Item.objects.filter(~(Q(tags__name='t4')|Q(tags__name='t3')))])
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name='t4')|~Q(tags__name='t3')),
[repr(i) for i in Item.objects.filter(~(Q(tags__name='t4')|~Q(tags__name='t3')))])
def test_nested_exclude(self):
self.assertQuerysetEqual(
Item.objects.exclude(~Q(tags__name='t4')),
[repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])
def test_double_exclude(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name='t4')),
[repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name='t4')),
[repr(i) for i in Item.objects.filter(~Q(~Q(tags__name='t4')))])
def test_exclude_in(self):
self.assertQuerysetEqual(
Item.objects.exclude(Q(tags__name__in=['t4', 't3'])),
[repr(i) for i in Item.objects.filter(~Q(tags__name__in=['t4', 't3']))])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags__name__in=['t4', 't3'])),
[repr(i) for i in Item.objects.filter(~~Q(tags__name__in=['t4', 't3']))])
def test_ticket_10790_1(self):
# Querying direct fields with isnull should trim the left outer join.
        # It also should not create an INNER JOIN.
q = Tag.objects.filter(parent__isnull=True)
self.assertQuerysetEqual(q, ['<Tag: t1>'])
self.assertTrue('JOIN' not in str(q.query))
q = Tag.objects.filter(parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
)
self.assertTrue('JOIN' not in str(q.query))
q = Tag.objects.exclude(parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
)
self.assertTrue('JOIN' not in str(q.query))
q = Tag.objects.exclude(parent__isnull=False)
self.assertQuerysetEqual(q, ['<Tag: t1>'])
self.assertTrue('JOIN' not in str(q.query))
q = Tag.objects.exclude(parent__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'],
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 1)
self.assertTrue('INNER JOIN' not in str(q.query))
def test_ticket_10790_2(self):
        # Querying across several tables should strip only the last outer join,
        # while preserving the preceding inner joins.
q = Tag.objects.filter(parent__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Tag: t4>', '<Tag: t5>'],
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 0)
self.assertTrue(str(q.query).count('INNER JOIN') == 1)
# Querying without isnull should not convert anything to left outer join.
q = Tag.objects.filter(parent__parent=self.t1)
self.assertQuerysetEqual(
q,
['<Tag: t4>', '<Tag: t5>'],
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 0)
self.assertTrue(str(q.query).count('INNER JOIN') == 1)
def test_ticket_10790_3(self):
# Querying via indirect fields should populate the left outer join
q = NamedCategory.objects.filter(tag__isnull=True)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 1)
# join to dumbcategory ptr_id
self.assertTrue(str(q.query).count('INNER JOIN') == 1)
self.assertQuerysetEqual(q, [])
# Querying across several tables should strip only the last join, while
# preserving the preceding left outer joins.
q = NamedCategory.objects.filter(tag__parent__isnull=True)
self.assertTrue(str(q.query).count('INNER JOIN') == 1)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 1)
self.assertQuerysetEqual(q, ['<NamedCategory: Generic>'])
def test_ticket_10790_4(self):
# Querying across m2m field should not strip the m2m table from join.
q = Author.objects.filter(item__tags__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a2>', '<Author: a3>'],
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 2)
self.assertTrue('INNER JOIN' not in str(q.query))
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'],
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 3)
self.assertTrue('INNER JOIN' not in str(q.query))
def test_ticket_10790_5(self):
# Querying with isnull=False across m2m field should not create outer joins
q = Author.objects.filter(item__tags__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 0)
self.assertTrue(str(q.query).count('INNER JOIN') == 2)
q = Author.objects.filter(item__tags__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a4>']
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 0)
self.assertTrue(str(q.query).count('INNER JOIN') == 3)
q = Author.objects.filter(item__tags__parent__parent__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a4>']
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 0)
self.assertTrue(str(q.query).count('INNER JOIN') == 4)
def test_ticket_10790_6(self):
# Querying with isnull=True across m2m field should not create inner joins
# and strip last outer join
q = Author.objects.filter(item__tags__parent__parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>',
'<Author: a2>', '<Author: a3>']
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 4)
self.assertTrue(str(q.query).count('INNER JOIN') == 0)
q = Author.objects.filter(item__tags__parent__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>']
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 3)
self.assertTrue(str(q.query).count('INNER JOIN') == 0)
def test_ticket_10790_7(self):
# Reverse querying with isnull should not strip the join
q = Author.objects.filter(item__isnull=True)
self.assertQuerysetEqual(
q,
['<Author: a3>']
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 1)
self.assertTrue(str(q.query).count('INNER JOIN') == 0)
q = Author.objects.filter(item__isnull=False)
self.assertQuerysetEqual(
q,
['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 0)
self.assertTrue(str(q.query).count('INNER JOIN') == 1)
def test_ticket_10790_8(self):
# Querying with combined q-objects should also strip the left outer join
q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
self.assertQuerysetEqual(
q,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertTrue(str(q.query).count('LEFT OUTER JOIN') == 0)
self.assertTrue(str(q.query).count('INNER JOIN') == 0)
def test_ticket_10790_combine(self):
# Combining queries should not re-populate the left outer join
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__isnull=False)
q3 = q1 | q2
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
)
self.assertTrue(str(q3.query).count('LEFT OUTER JOIN') == 0)
self.assertTrue(str(q3.query).count('INNER JOIN') == 0)
q3 = q1 & q2
self.assertQuerysetEqual(q3, [])
self.assertTrue(str(q3.query).count('LEFT OUTER JOIN') == 0)
self.assertTrue(str(q3.query).count('INNER JOIN') == 0)
q2 = Tag.objects.filter(parent=self.t1)
q3 = q1 | q2
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertTrue(str(q3.query).count('LEFT OUTER JOIN') == 0)
self.assertTrue(str(q3.query).count('INNER JOIN') == 0)
q3 = q2 | q1
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertTrue(str(q3.query).count('LEFT OUTER JOIN') == 0)
self.assertTrue(str(q3.query).count('INNER JOIN') == 0)
q1 = Tag.objects.filter(parent__isnull=True)
q2 = Tag.objects.filter(parent__parent__isnull=True)
q3 = q1 | q2
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertTrue(str(q3.query).count('LEFT OUTER JOIN') == 1)
self.assertTrue(str(q3.query).count('INNER JOIN') == 0)
q3 = q2 | q1
self.assertQuerysetEqual(
q3,
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
)
self.assertTrue(str(q3.query).count('LEFT OUTER JOIN') == 1)
self.assertTrue(str(q3.query).count('INNER JOIN') == 0)
def test_ticket19672(self):
self.assertQuerysetEqual(
Report.objects.filter(Q(creator__isnull=False) &
~Q(creator__extra__value=41)),
['<Report: r1>']
)
def test_ticket_20250(self):
# A negated Q along with an annotated queryset failed in Django 1.4
qs = Author.objects.annotate(Count('item'))
qs = qs.filter(~Q(extra__value=0))
self.assertTrue('SELECT' in str(qs.query))
self.assertQuerysetEqual(
qs,
['<Author: a1>', '<Author: a2>', '<Author: a3>', '<Author: a4>']
)
class Queries2Tests(TestCase):
def setUp(self):
Number.objects.create(num=4)
Number.objects.create(num=8)
Number.objects.create(num=12)
def test_ticket4289(self):
        # A slight variation on restricting the filtering choices by the
        # lookup constraints.
self.assertQuerysetEqual(Number.objects.filter(num__lt=4), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
self.assertQuerysetEqual(
Number.objects.filter(num__gt=8, num__lt=13),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
['<Number: 8>']
)
def test_ticket12239(self):
# Float was being rounded to integer on gte queries on integer field. Tests
# show that gt, lt, gte, and lte work as desired. Note that the fix changes
# get_prep_lookup for gte and lt queries only.
self.assertQuerysetEqual(
Number.objects.filter(num__gt=11.9),
['<Number: 12>']
)
self.assertQuerysetEqual(Number.objects.filter(num__gt=12), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), [])
self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), [])
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12),
['<Number: 4>', '<Number: 8>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12.0),
['<Number: 4>', '<Number: 8>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lt=12.1),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=11.9),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=12),
['<Number: 12>']
)
self.assertQuerysetEqual(
Number.objects.filter(num__gte=12.0),
['<Number: 12>']
)
self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), [])
self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), [])
self.assertQuerysetEqual(
Number.objects.filter(num__lte=11.9),
['<Number: 4>', '<Number: 8>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.0),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.1),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
self.assertQuerysetEqual(
Number.objects.filter(num__lte=12.9),
['<Number: 4>', '<Number: 8>', '<Number: 12>'],
ordered=False
)
def test_ticket7759(self):
# Count should work with a partially read result set.
count = Number.objects.count()
qs = Number.objects.all()
def run():
for obj in qs:
return qs.count() == count
self.assertTrue(run())
class Queries3Tests(BaseQuerysetTest):
def test_ticket7107(self):
# This shouldn't create an infinite loop.
self.assertQuerysetEqual(Valid.objects.all(), [])
def test_ticket8683(self):
# Raise proper error when a DateQuerySet gets passed a wrong type of
# field
self.assertRaisesMessage(
AssertionError,
"'name' isn't a DateTimeField.",
Item.objects.datetimes, 'name', 'month'
)
class Queries4Tests(BaseQuerysetTest):
def setUp(self):
generic = NamedCategory.objects.create(name="Generic")
self.t1 = Tag.objects.create(name='t1', category=generic)
n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
e1 = ExtraInfo.objects.create(info='e1', note=n1)
e2 = ExtraInfo.objects.create(info='e2', note=n2)
self.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
self.a3 = Author.objects.create(name='a3', num=3003, extra=e2)
self.r1 = Report.objects.create(name='r1', creator=self.a1)
self.r2 = Report.objects.create(name='r2', creator=self.a3)
self.r3 = Report.objects.create(name='r3')
Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=self.a1)
Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=self.a3)
def test_ticket11811(self):
unsaved_category = NamedCategory(name="Other")
with six.assertRaisesRegex(self, ValueError,
'Unsaved model instance <NamedCategory: Other> '
'cannot be used in an ORM query.'):
Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category)
def test_ticket14876(self):
# Note: when combining the query we need to have information available
# about the join type of the trimmed "creator__isnull" join. If we
        # don't have that information, then the join is created as an INNER JOIN
# and results will be incorrect.
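        # (The assertions below check this indirectly by comparing the SQL of
        # the single combined filter against the SQL of the two querysets ORed
        # together, which should be identical.)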
q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1'))
q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1'))
self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
self.assertEqual(str(q1.query), str(q2.query))
q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True))
q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True))
self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by()
q2 = Item.objects.filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1')).order_by()
self.assertQuerysetEqual(q1, ["<Item: i1>"])
self.assertEqual(str(q1.query), str(q2.query))
q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by()
q2 = Item.objects.filter(Q(creator__report__name='e1')).order_by() | Item.objects.filter(Q(creator=self.a1)).order_by()
self.assertQuerysetEqual(q1, ["<Item: i1>"])
self.assertEqual(str(q1.query), str(q2.query))
def test_combine_join_reuse(self):
# Test that we correctly recreate joins having identical connections
        # in the rhs query, in case the querysets are ORed together. Related to
# ticket #18748
Report.objects.create(name='r4', creator=self.a1)
q1 = Author.objects.filter(report__name='r5')
q2 = Author.objects.filter(report__name='r4').filter(report__name='r1')
combined = q1|q2
self.assertEqual(str(combined.query).count('JOIN'), 2)
self.assertEqual(len(combined), 1)
self.assertEqual(combined[0].name, 'a1')
def test_ticket7095(self):
# Updates that are filtered on the model being updated are somewhat
# tricky in MySQL. This exercises that case.
ManagedModel.objects.create(data='mm1', tag=self.t1, public=True)
self.assertEqual(ManagedModel.objects.update(data='mm'), 1)
# A values() or values_list() query across joined models must use outer
# joins appropriately.
# Note: In Oracle, we expect a null CharField to return '' instead of
# None.
if connection.features.interprets_empty_strings_as_nulls:
expected_null_charfield_repr = ''
else:
expected_null_charfield_repr = None
self.assertValueQuerysetEqual(
Report.objects.values_list("creator__extra__info", flat=True).order_by("name"),
['e1', 'e2', expected_null_charfield_repr],
)
# Similarly for select_related(), joins beyond an initial nullable join
# must use outer joins so that all results are included.
self.assertQuerysetEqual(
Report.objects.select_related("creator", "creator__extra").order_by("name"),
['<Report: r1>', '<Report: r2>', '<Report: r3>']
)
# When there are multiple paths to a table from another table, we have
# to be careful not to accidentally reuse an inappropriate join when
# using select_related(). We used to return the parent's Detail record
# here by mistake.
d1 = Detail.objects.create(data="d1")
d2 = Detail.objects.create(data="d2")
m1 = Member.objects.create(name="m1", details=d1)
m2 = Member.objects.create(name="m2", details=d2)
Child.objects.create(person=m2, parent=m1)
obj = m1.children.select_related("person__details")[0]
self.assertEqual(obj.person.details.data, 'd2')
def test_order_by_resetting(self):
# Calling order_by() with no parameters removes any existing ordering on the
# model. But it should still be possible to add new ordering after that.
qs = Author.objects.order_by().order_by('name')
self.assertTrue('ORDER BY' in qs.query.get_compiler(qs.db).as_sql()[0])
def test_ticket10181(self):
# Avoid raising an EmptyResultSet if an inner query is probably
# empty (and hence, not executed).
self.assertQuerysetEqual(
Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])),
[]
)
def test_ticket15316_filter_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1",
special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2",
special_name="special2")
ci1 = CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 2)
self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False)
def test_ticket15316_exclude_false(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1",
special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2",
special_name="special2")
ci1 = CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk)
def test_ticket15316_filter_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1",
special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2",
special_name="special2")
ci1 = CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk)
def test_ticket15316_exclude_true(self):
c1 = SimpleCategory.objects.create(name="category1")
c2 = SpecialCategory.objects.create(name="named category1",
special_name="special1")
c3 = SpecialCategory.objects.create(name="named category2",
special_name="special2")
ci1 = CategoryItem.objects.create(category=c1)
ci2 = CategoryItem.objects.create(category=c2)
ci3 = CategoryItem.objects.create(category=c3)
qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
self.assertEqual(qs.count(), 2)
self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False)
def test_ticket15316_one2one_filter_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
        c2 = OneToOneCategory.objects.create(category=c1, new_name="new1")
        c3 = OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False)
self.assertEqual(qs.count(), 2)
self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False)
def test_ticket15316_one2one_exclude_false(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
        c2 = OneToOneCategory.objects.create(category=c1, new_name="new1")
        c3 = OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
self.assertEqual(qs.count(), 1)
self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk)
def test_ticket15316_one2one_filter_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
        c2 = OneToOneCategory.objects.create(category=c1, new_name="new1")
        c3 = OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
self.assertEqual(qs.count(), 1)
self.assertQuerysetEqual(qs, [ci1.pk], lambda x: x.pk)
def test_ticket15316_one2one_exclude_true(self):
c = SimpleCategory.objects.create(name="cat")
c0 = SimpleCategory.objects.create(name="cat0")
c1 = SimpleCategory.objects.create(name="category1")
        c2 = OneToOneCategory.objects.create(category=c1, new_name="new1")
        c3 = OneToOneCategory.objects.create(category=c0, new_name="new2")
ci1 = CategoryItem.objects.create(category=c)
ci2 = CategoryItem.objects.create(category=c0)
ci3 = CategoryItem.objects.create(category=c1)
qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True)
self.assertEqual(qs.count(), 2)
self.assertQuerysetEqual(qs, [ci2.pk, ci3.pk], lambda x: x.pk, False)
class Queries5Tests(TestCase):
def setUp(self):
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the
# Meta.ordering will be rank3, rank2, rank1.
n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
e1 = ExtraInfo.objects.create(info='e1', note=n1)
e2 = ExtraInfo.objects.create(info='e2', note=n2)
a1 = Author.objects.create(name='a1', num=1001, extra=e1)
a2 = Author.objects.create(name='a2', num=2002, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=e2)
self.rank1 = Ranking.objects.create(rank=2, author=a2)
Ranking.objects.create(rank=1, author=a3)
Ranking.objects.create(rank=3, author=a1)
def test_ordering(self):
# Cross model ordering is possible in Meta, too.
self.assertQuerysetEqual(
Ranking.objects.all(),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
self.assertQuerysetEqual(
Ranking.objects.all().order_by('rank'),
['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
)
        # Ordering of extra() pieces is possible, too, and you can mix extra
        # fields and model fields in the ordering.
self.assertQuerysetEqual(
Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']),
['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
)
qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
self.assertEqual(
[o.good for o in qs.extra(order_by=('-good',))],
[True, False, False]
)
self.assertQuerysetEqual(
qs.extra(order_by=('-good', 'id')),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
# Despite having some extra aliases in the query, we can still omit
# them in a values() query.
dicts = qs.values('id', 'rank').order_by('id')
self.assertEqual(
[d['rank'] for d in dicts],
[2, 1, 3]
)
def test_ticket7256(self):
# An empty values() call includes all aliases, including those from an
# extra()
qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
dicts = qs.values().order_by('id')
        for d in dicts:
            del d['id']
            del d['author_id']
self.assertEqual(
[sorted(d.items()) for d in dicts],
[[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]]
)
def test_ticket7045(self):
# Extra tables used to crash SQL construction on the second use.
qs = Ranking.objects.extra(tables=['django_site'])
qs.query.get_compiler(qs.db).as_sql()
# test passes if this doesn't raise an exception.
qs.query.get_compiler(qs.db).as_sql()
def test_ticket9848(self):
# Make sure that updates which only filter on sub-tables don't
# inadvertently update the wrong records (bug #9848).
# Make sure that the IDs from different tables don't happen to match.
self.assertQuerysetEqual(
Ranking.objects.filter(author__name='a1'),
['<Ranking: 3: a1>']
)
self.assertEqual(
Ranking.objects.filter(author__name='a1').update(rank='4'),
1
)
r = Ranking.objects.filter(author__name='a1')[0]
self.assertNotEqual(r.id, r.author.id)
self.assertEqual(r.rank, 4)
r.rank = 3
r.save()
self.assertQuerysetEqual(
Ranking.objects.all(),
['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
)
def test_ticket5261(self):
# Test different empty excludes.
self.assertQuerysetEqual(
Note.objects.exclude(Q()),
['<Note: n1>', '<Note: n2>']
)
self.assertQuerysetEqual(
Note.objects.filter(~Q()),
['<Note: n1>', '<Note: n2>']
)
self.assertQuerysetEqual(
Note.objects.filter(~Q()|~Q()),
['<Note: n1>', '<Note: n2>']
)
self.assertQuerysetEqual(
Note.objects.exclude(~Q()&~Q()),
['<Note: n1>', '<Note: n2>']
)
class SelectRelatedTests(TestCase):
def test_tickets_3045_3288(self):
# Once upon a time, select_related() with circular relations would loop
# infinitely if you forgot to specify "depth". Now we set an arbitrary
# default upper bound.
self.assertQuerysetEqual(X.objects.all(), [])
self.assertQuerysetEqual(X.objects.select_related(), [])
class SubclassFKTests(TestCase):
def test_ticket7778(self):
# Model subclasses could not be deleted if a nullable foreign key
# relates to a model that relates back.
num_celebs = Celebrity.objects.count()
tvc = TvChef.objects.create(name="Huey")
self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
Fan.objects.create(fan_of=tvc)
Fan.objects.create(fan_of=tvc)
tvc.delete()
# The parent object should have been deleted as well.
self.assertEqual(Celebrity.objects.count(), num_celebs)
class CustomPkTests(TestCase):
def test_ticket7371(self):
self.assertQuerysetEqual(Related.objects.order_by('custom'), [])
class NullableRelOrderingTests(TestCase):
def test_ticket10028(self):
# Ordering by model related to nullable relations(!) should use outer
# joins, so that all results are included.
Plaything.objects.create(name="p1")
self.assertQuerysetEqual(
Plaything.objects.all(),
['<Plaything: p1>']
)
def test_join_already_in_query(self):
# Ordering by model related to nullable relations should not change
# the join type of already existing joins.
Plaything.objects.create(name="p1")
s = SingleObject.objects.create(name='s')
r = RelatedObject.objects.create(single=s, f=1)
Plaything.objects.create(name="p2", others=r)
qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk')
self.assertTrue('JOIN' not in str(qs.query))
qs = Plaything.objects.all().filter(others__f__isnull=False).order_by('pk')
self.assertTrue('INNER' in str(qs.query))
qs = qs.order_by('others__single__name')
        # The ordering by others__single__name will add one new join (to single)
        # and that join must be a LEFT join. The already existing join to related
        # objects must be kept INNER. So, we have both an INNER and a LEFT join
        # in the query.
self.assertEqual(str(qs.query).count('LEFT'), 1)
self.assertEqual(str(qs.query).count('INNER'), 1)
self.assertQuerysetEqual(
qs,
['<Plaything: p2>']
)
class DisjunctiveFilterTests(TestCase):
def setUp(self):
self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
ExtraInfo.objects.create(info='e1', note=self.n1)
def test_ticket7872(self):
# Another variation on the disjunctive filtering theme.
# For the purposes of this regression test, it's important that there is no
        # Join object related to the LeafA we create.
LeafA.objects.create(data='first')
self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>'])
self.assertQuerysetEqual(
LeafA.objects.filter(Q(data='first')|Q(join__b__data='second')),
['<LeafA: first>']
)
def test_ticket8283(self):
# Checking that applying filters after a disjunction works correctly.
self.assertQuerysetEqual(
(ExtraInfo.objects.filter(note=self.n1)|ExtraInfo.objects.filter(info='e2')).filter(note=self.n1),
['<ExtraInfo: e1>']
)
self.assertQuerysetEqual(
(ExtraInfo.objects.filter(info='e2')|ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1),
['<ExtraInfo: e1>']
)
class Queries6Tests(TestCase):
def setUp(self):
generic = NamedCategory.objects.create(name="Generic")
t1 = Tag.objects.create(name='t1', category=generic)
t2 = Tag.objects.create(name='t2', parent=t1, category=generic)
t3 = Tag.objects.create(name='t3', parent=t1)
t4 = Tag.objects.create(name='t4', parent=t3)
t5 = Tag.objects.create(name='t5', parent=t3)
n1 = Note.objects.create(note='n1', misc='foo', id=1)
ann1 = Annotation.objects.create(name='a1', tag=t1)
ann1.notes.add(n1)
ann2 = Annotation.objects.create(name='a2', tag=t4)
def test_parallel_iterators(self):
# Test that parallel iterators work.
qs = Tag.objects.all()
i1, i2 = iter(qs), iter(qs)
self.assertEqual(repr(next(i1)), '<Tag: t1>')
self.assertEqual(repr(next(i1)), '<Tag: t2>')
self.assertEqual(repr(next(i2)), '<Tag: t1>')
self.assertEqual(repr(next(i2)), '<Tag: t2>')
self.assertEqual(repr(next(i2)), '<Tag: t3>')
self.assertEqual(repr(next(i1)), '<Tag: t3>')
qs = X.objects.all()
self.assertEqual(bool(qs), False)
self.assertEqual(bool(qs), False)
def test_nested_queries_sql(self):
# Nested queries should not evaluate the inner query as part of constructing the
# SQL (so we should see a nested query here, indicated by two "SELECT" calls).
qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
self.assertEqual(
qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'),
2
)
def test_tickets_8921_9188(self):
# Incorrect SQL was being generated for certain types of exclude()
# queries that crossed multi-valued relations (#8921, #9188 and some
# pre-emptively discovered cases).
self.assertQuerysetEqual(
PointerA.objects.filter(connection__pointerb__id=1),
[]
)
self.assertQuerysetEqual(
PointerA.objects.exclude(connection__pointerb__id=1),
[]
)
self.assertQuerysetEqual(
Tag.objects.exclude(children=None),
['<Tag: t1>', '<Tag: t3>']
)
# This example is tricky because the parent could be NULL, so only checking
# parents with annotations omits some results (tag t1, in this case).
self.assertQuerysetEqual(
Tag.objects.exclude(parent__annotation__name="a1"),
['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
)
        # The annotation->tag link is single-valued and the tag->children link is
        # multi-valued. So we have to split the exclude filter in the middle
        # and then optimize the inner query without losing results.
self.assertQuerysetEqual(
Annotation.objects.exclude(tag__children__name="t2"),
['<Annotation: a2>']
)
        # Nested queries are possible (although they should be used with care, since
        # they have performance problems on backends like MySQL).
self.assertQuerysetEqual(
Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
['<Annotation: a1>']
)
def test_ticket3739(self):
# The all() method on querysets returns a copy of the queryset.
q1 = Tag.objects.order_by('name')
self.assertIsNot(q1, q1.all())
class RawQueriesTests(TestCase):
def setUp(self):
n1 = Note.objects.create(note='n1', misc='foo', id=1)
def test_ticket14729(self):
        # Test representation of a raw query with one or more parameters passed as a list
query = "SELECT * FROM queries_note WHERE note = %s"
params = ['n1']
qs = Note.objects.raw(query, params=params)
self.assertEqual(repr(qs), str_prefix("<RawQuerySet: %(_)s'SELECT * FROM queries_note WHERE note = n1'>"))
query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
params = ['n1', 'foo']
qs = Note.objects.raw(query, params=params)
self.assertEqual(repr(qs), str_prefix("<RawQuerySet: %(_)s'SELECT * FROM queries_note WHERE note = n1 and misc = foo'>"))
class GeneratorExpressionTests(TestCase):
def test_ticket10432(self):
# Using an empty generator expression as the rvalue for an "__in"
# lookup is legal.
self.assertQuerysetEqual(
Note.objects.filter(pk__in=(x for x in ())),
[]
)
class ComparisonTests(TestCase):
def setUp(self):
self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
e1 = ExtraInfo.objects.create(info='e1', note=self.n1)
self.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
def test_ticket8597(self):
# Regression tests for case-insensitive comparisons
_ = Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
_ = Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
self.assertQuerysetEqual(
Item.objects.filter(name__iexact="A_b"),
['<Item: a_b>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__iexact="x%Y"),
['<Item: x%y>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__istartswith="A_b"),
['<Item: a_b>']
)
self.assertQuerysetEqual(
Item.objects.filter(name__iendswith="A_b"),
['<Item: a_b>']
)
class ExistsSql(TestCase):
def setUp(self):
settings.DEBUG = True
def test_exists(self):
self.assertFalse(Tag.objects.exists())
        # Ok - so the exists query worked - but did it include too many columns?
self.assertTrue("id" not in connection.queries[-1]['sql'] and "name" not in connection.queries[-1]['sql'])
def tearDown(self):
settings.DEBUG = False
class QuerysetOrderedTests(unittest.TestCase):
"""
Tests for the Queryset.ordered attribute.
"""
def test_no_default_or_explicit_ordering(self):
self.assertEqual(Annotation.objects.all().ordered, False)
def test_cleared_default_ordering(self):
self.assertEqual(Tag.objects.all().ordered, True)
self.assertEqual(Tag.objects.all().order_by().ordered, False)
def test_explicit_ordering(self):
self.assertEqual(Annotation.objects.all().order_by('id').ordered, True)
def test_order_by_extra(self):
self.assertEqual(Annotation.objects.all().extra(order_by=['id']).ordered, True)
def test_annotated_ordering(self):
qs = Annotation.objects.annotate(num_notes=Count('notes'))
self.assertEqual(qs.ordered, False)
self.assertEqual(qs.order_by('num_notes').ordered, True)
class SubqueryTests(TestCase):
def setUp(self):
DumbCategory.objects.create(id=1)
DumbCategory.objects.create(id=2)
DumbCategory.objects.create(id=3)
DumbCategory.objects.create(id=4)
def test_ordered_subselect(self):
"Subselects honor any manual ordering"
try:
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
self.assertEqual(set(query.values_list('id', flat=True)), set([3,4]))
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2])
self.assertEqual(set(query.values_list('id', flat=True)), set([3,4]))
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2])
self.assertEqual(set(query.values_list('id', flat=True)), set([3]))
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])
self.assertEqual(set(query.values_list('id', flat=True)), set([1,2]))
except DatabaseError as e:
# Oracle and MySQL both have problems with sliced subselects.
# This prevents us from even evaluating this test case at all.
# Refs #10099
self.assertFalse(connections[DEFAULT_DB_ALIAS].features.allow_sliced_subqueries, str(e))
def test_slice_subquery_and_query(self):
"""
Slice a query that has a sliced subquery
"""
try:
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])[0:2]
self.assertEqual(set([x.id for x in query]), set([3,4]))
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:3])[1:3]
self.assertEqual(set([x.id for x in query]), set([3]))
query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])[1:]
self.assertEqual(set([x.id for x in query]), set([2]))
except DatabaseError as e:
# Oracle and MySQL both have problems with sliced subselects.
# This prevents us from even evaluating this test case at all.
# Refs #10099
self.assertFalse(connections[DEFAULT_DB_ALIAS].features.allow_sliced_subqueries, str(e))
def test_sliced_delete(self):
"Delete queries can safely contain sliced subqueries"
try:
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), set([1,2,3]))
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]).delete()
self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), set([1,3]))
DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:]).delete()
self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), set([3]))
except DatabaseError as e:
# Oracle and MySQL both have problems with sliced subselects.
# This prevents us from even evaluating this test case at all.
# Refs #10099
self.assertFalse(connections[DEFAULT_DB_ALIAS].features.allow_sliced_subqueries, str(e))
class CloneTests(TestCase):
def test_evaluated_queryset_as_argument(self):
"#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
n = Note(note='Test1', misc='misc')
n.save()
e = ExtraInfo(info='good', note=n)
e.save()
n_list = Note.objects.all()
# Evaluate the Note queryset, populating the query cache
list(n_list)
        # Use the note queryset in a query, and evaluate
# that query in a way that involves cloning.
self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')
def test_no_model_options_cloning(self):
"""
Test that cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta)
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model options shouldn't be cloned.")
try:
Note.objects.filter(pk__lte=F('pk') + 1).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
def test_no_fields_cloning(self):
"""
Test that cloning a queryset does not get out of hand. While complete
testing is impossible, this is a sanity check against invalid use of
deepcopy. refs #16759.
"""
opts_class = type(Note._meta.get_field_by_name("misc")[0])
note_deepcopy = getattr(opts_class, "__deepcopy__", None)
opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model fields shouldn't be cloned")
try:
Note.objects.filter(note=F('misc')).all()
finally:
if note_deepcopy is None:
delattr(opts_class, "__deepcopy__")
else:
opts_class.__deepcopy__ = note_deepcopy
class EmptyQuerySetTests(TestCase):
def test_emptyqueryset_values(self):
# #14366 -- Calling .values() on an empty QuerySet and then cloning
# that should not cause an error
self.assertQuerysetEqual(
Number.objects.none().values('num').order_by('num'), []
)
def test_values_subquery(self):
self.assertQuerysetEqual(
Number.objects.filter(pk__in=Number.objects.none().values("pk")),
[]
)
self.assertQuerysetEqual(
Number.objects.filter(pk__in=Number.objects.none().values_list("pk")),
[]
)
def test_ticket_19151(self):
# #19151 -- Calling .values() or .values_list() on an empty QuerySet
# should return an empty QuerySet and not cause an error.
q = Author.objects.none()
self.assertQuerysetEqual(q.values(), [])
self.assertQuerysetEqual(q.values_list(), [])
class ValuesQuerysetTests(BaseQuerysetTest):
def setUp(self):
Number.objects.create(num=72)
self.identity = lambda x: x
def test_flat_values_list(self):
qs = Number.objects.values_list("num")
qs = qs.values_list("num", flat=True)
self.assertValueQuerysetEqual(qs, [72])
def test_extra_values(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select=SortedDict([('value_plus_x', 'num+%s'),
('value_minus_x', 'num-%s')]),
select_params=(1, 2))
qs = qs.order_by('value_minus_x')
qs = qs.values('num')
self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)
def test_extra_values_order_twice(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'})
qs = qs.order_by('value_minus_one').order_by('value_plus_one')
qs = qs.values('num')
self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)
def test_extra_values_order_multiple(self):
# Postgres doesn't allow constants in order by, so check for that.
qs = Number.objects.extra(select={
'value_plus_one': 'num+1',
'value_minus_one': 'num-1',
'constant_value': '1'
})
qs = qs.order_by('value_plus_one', 'value_minus_one', 'constant_value')
qs = qs.values('num')
self.assertQuerysetEqual(qs, [{'num': 72}], self.identity)
def test_extra_values_order_in_extra(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(
select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'},
order_by=['value_minus_one'])
qs = qs.values('num')
def test_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
qs = qs.order_by('value_plus_one')
qs = qs.values_list('num')
self.assertQuerysetEqual(qs, [(72,)], self.identity)
def test_flat_extra_values_list(self):
# testing for ticket 14930 issues
qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
qs = qs.order_by('value_plus_one')
qs = qs.values_list('num', flat=True)
self.assertQuerysetEqual(qs, [72], self.identity)
class WeirdQuerysetSlicingTests(BaseQuerysetTest):
def setUp(self):
Number.objects.create(num=1)
Number.objects.create(num=2)
Article.objects.create(name='one', created=datetime.datetime.now())
Article.objects.create(name='two', created=datetime.datetime.now())
Article.objects.create(name='three', created=datetime.datetime.now())
Article.objects.create(name='four', created=datetime.datetime.now())
def test_tickets_7698_10202(self):
# People like to slice with '0' as the high-water mark.
self.assertQuerysetEqual(Article.objects.all()[0:0], [])
self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
self.assertEqual(Article.objects.all()[:0].count(), 0)
self.assertRaisesMessage(
AssertionError,
'Cannot change a query once a slice has been taken.',
Article.objects.all()[:0].latest, 'created'
)
def test_empty_resultset_sql(self):
# ticket #12192
self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
class EscapingTests(TestCase):
def test_ticket_7302(self):
# Reserved names are appropriately escaped
_ = ReservedName.objects.create(name='a', order=42)
ReservedName.objects.create(name='b', order=37)
self.assertQuerysetEqual(
ReservedName.objects.all().order_by('order'),
['<ReservedName: b>', '<ReservedName: a>']
)
self.assertQuerysetEqual(
ReservedName.objects.extra(select={'stuff':'name'}, order_by=('order','stuff')),
['<ReservedName: b>', '<ReservedName: a>']
)
class ToFieldTests(TestCase):
def test_in_query(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food__in=[apple, pear])),
set([lunch, dinner]),
)
def test_reverse_in(self):
apple = Food.objects.create(name="apple")
pear = Food.objects.create(name="pear")
lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
lunch_pear = Eaten.objects.create(food=pear, meal="dinner")
self.assertEqual(
set(Food.objects.filter(eaten__in=[lunch_apple, lunch_pear])),
set([apple, pear])
)
def test_single_object(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
dinner = Eaten.objects.create(food=apple, meal="dinner")
self.assertEqual(
set(Eaten.objects.filter(food=apple)),
set([lunch, dinner])
)
def test_single_object_reverse(self):
apple = Food.objects.create(name="apple")
lunch = Eaten.objects.create(food=apple, meal="lunch")
self.assertEqual(
set(Food.objects.filter(eaten=lunch)),
set([apple])
)
def test_recursive_fk(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(
list(Node.objects.filter(parent=node1)),
[node2]
)
def test_recursive_fk_reverse(self):
node1 = Node.objects.create(num=42)
node2 = Node.objects.create(num=1, parent=node1)
self.assertEqual(
list(Node.objects.filter(node=node2)),
[node1]
)
class ConditionalTests(BaseQuerysetTest):
"""Tests whose execution depend on different environment conditions like
Python version or DB backend features"""
def setUp(self):
generic = NamedCategory.objects.create(name="Generic")
t1 = Tag.objects.create(name='t1', category=generic)
t2 = Tag.objects.create(name='t2', parent=t1, category=generic)
t3 = Tag.objects.create(name='t3', parent=t1)
t4 = Tag.objects.create(name='t4', parent=t3)
t5 = Tag.objects.create(name='t5', parent=t3)
# In Python 2.6 beta releases, exceptions raised in __len__ are swallowed
# (Python issue 1242657), so these cases return an empty list, rather than
# raising an exception. Not a lot we can do about that, unfortunately, due to
# the way Python handles list() calls internally. Thus, we skip the tests for
# Python 2.6.
@unittest.skipIf(sys.version_info[:2] == (2, 6), "Python version is 2.6")
def test_infinite_loop(self):
# If you're not careful, it's possible to introduce infinite loops via
# default ordering on foreign keys in a cycle. We detect that.
self.assertRaisesMessage(
FieldError,
'Infinite loop caused by ordering.',
lambda: list(LoopX.objects.all()) # Force queryset evaluation with list()
)
self.assertRaisesMessage(
FieldError,
'Infinite loop caused by ordering.',
lambda: list(LoopZ.objects.all()) # Force queryset evaluation with list()
)
# Note that this doesn't cause an infinite loop, since the default
# ordering on the Tag model is empty (and thus defaults to using "id"
# for the related field).
self.assertEqual(len(Tag.objects.order_by('parent')), 5)
# ... but you can still order in a non-recursive fashion amongst linked
# fields (the previous test failed because the default ordering was
# recursive).
self.assertQuerysetEqual(
LoopX.objects.all().order_by('y__x__y__x__id'),
[]
)
# When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
# portion in MySQL to prevent unnecessary sorting.
@skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping')
def test_null_ordering_added(self):
query = Tag.objects.values_list('parent_id', flat=True).order_by().query
query.group_by = ['parent_id']
sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
fragment = "ORDER BY "
pos = sql.find(fragment)
self.assertEqual(sql.find(fragment, pos + 1), -1)
self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))
# Sqlite 3 does not support passing in more than 1000 parameters except by
# changing a parameter at compilation time.
@skipUnlessDBFeature('supports_1000_query_parameters')
def test_ticket14244(self):
# Test that the "in" lookup works with lists of 1000 items or more.
# The numbers amount is picked to force three different IN batches
# for Oracle, yet to be less than 2100 parameter limit for MSSQL.
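        # (Illustration: assuming Oracle's usual 1000-item limit per IN clause,
        # the 2050 values below split roughly as 1000 + 1000 + 50, i.e. three
        # batches; the exact batching is backend-specific.)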
numbers = range(2050)
Number.objects.all().delete()
Number.objects.bulk_create(Number(num=num) for num in numbers)
self.assertEqual(
Number.objects.filter(num__in=numbers[:1000]).count(),
1000
)
self.assertEqual(
Number.objects.filter(num__in=numbers[:1001]).count(),
1001
)
self.assertEqual(
Number.objects.filter(num__in=numbers[:2000]).count(),
2000
)
self.assertEqual(
Number.objects.filter(num__in=numbers).count(),
len(numbers)
)
class UnionTests(unittest.TestCase):
"""
Tests for the union of two querysets. Bug #12252.
"""
def setUp(self):
objectas = []
objectbs = []
objectcs = []
a_info = ['one', 'two', 'three']
for name in a_info:
o = ObjectA(name=name)
o.save()
objectas.append(o)
b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])]
for name, number, objecta in b_info:
o = ObjectB(name=name, num=number, objecta=objecta)
o.save()
objectbs.append(o)
c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])]
for name, objecta, objectb in c_info:
o = ObjectC(name=name, objecta=objecta, objectb=objectb)
o.save()
objectcs.append(o)
def check_union(self, model, Q1, Q2):
filter = model.objects.filter
self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2)))
self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2)))
def test_A_AB(self):
Q1 = Q(name='two')
Q2 = Q(objectb__name='deux')
self.check_union(ObjectA, Q1, Q2)
def test_A_AB2(self):
Q1 = Q(name='two')
Q2 = Q(objectb__name='deux', objectb__num=2)
self.check_union(ObjectA, Q1, Q2)
def test_AB_ACB(self):
Q1 = Q(objectb__name='deux')
Q2 = Q(objectc__objectb__name='deux')
self.check_union(ObjectA, Q1, Q2)
def test_BAB_BAC(self):
Q1 = Q(objecta__objectb__name='deux')
Q2 = Q(objecta__objectc__name='ein')
self.check_union(ObjectB, Q1, Q2)
def test_BAB_BACB(self):
Q1 = Q(objecta__objectb__name='deux')
Q2 = Q(objecta__objectc__objectb__name='trois')
self.check_union(ObjectB, Q1, Q2)
def test_BA_BCA__BAB_BAC_BCA(self):
Q1 = Q(objecta__name='one', objectc__objecta__name='two')
Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois')
self.check_union(ObjectB, Q1, Q2)
class DefaultValuesInsertTest(TestCase):
def test_no_extra_params(self):
# Ticket #17056 -- affects Oracle
try:
DumbCategory.objects.create()
except TypeError:
self.fail("Creation of an instance of a model with only the PK field shouldn't error out after bulk insert refactoring (#17056)")
class ExcludeTest(TestCase):
def setUp(self):
f1 = Food.objects.create(name='apples')
Food.objects.create(name='oranges')
Eaten.objects.create(food=f1, meal='dinner')
j1 = Job.objects.create(name='Manager')
r1 = Responsibility.objects.create(description='Playing golf')
j2 = Job.objects.create(name='Programmer')
r2 = Responsibility.objects.create(description='Programming')
JobResponsibilities.objects.create(job=j1, responsibility=r1)
JobResponsibilities.objects.create(job=j2, responsibility=r2)
def test_to_field(self):
self.assertQuerysetEqual(
Food.objects.exclude(eaten__meal='dinner'),
['<Food: oranges>'])
self.assertQuerysetEqual(
Job.objects.exclude(responsibilities__description='Playing golf'),
['<Job: Programmer>'])
self.assertQuerysetEqual(
Responsibility.objects.exclude(jobs__name='Manager'),
['<Responsibility: Programming>'])
class ExcludeTest17600(TestCase):
"""
    Some regression tests for ticket #17600. Some of these likely duplicate
other existing tests.
"""
def setUp(self):
# Create a few Orders.
self.o1 = Order.objects.create(pk=1)
self.o2 = Order.objects.create(pk=2)
self.o3 = Order.objects.create(pk=3)
# Create some OrderItems for the first order with homogeneous
# status_id values
self.oi1 = OrderItem.objects.create(order=self.o1, status=1)
self.oi2 = OrderItem.objects.create(order=self.o1, status=1)
self.oi3 = OrderItem.objects.create(order=self.o1, status=1)
# Create some OrderItems for the second order with heterogeneous
# status_id values
self.oi4 = OrderItem.objects.create(order=self.o2, status=1)
self.oi5 = OrderItem.objects.create(order=self.o2, status=2)
self.oi6 = OrderItem.objects.create(order=self.o2, status=3)
        # Create some OrderItems for the third order with heterogeneous
# status_id values
self.oi7 = OrderItem.objects.create(order=self.o3, status=2)
self.oi8 = OrderItem.objects.create(order=self.o3, status=3)
self.oi9 = OrderItem.objects.create(order=self.o3, status=4)
def test_exclude_plain(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(items__status=1),
['<Order: 3>'])
def test_exclude_plain_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(items__status=1).distinct(),
['<Order: 3>'])
def test_exclude_with_q_object_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(Q(items__status=1)).distinct(),
['<Order: 3>'])
def test_exclude_with_q_object_no_distinct(self):
"""
This should exclude Orders which have some items with status 1
"""
self.assertQuerysetEqual(
Order.objects.exclude(Q(items__status=1)),
['<Order: 3>'])
def test_exclude_with_q_is_equal_to_plain_exclude(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1).distinct()),
list(Order.objects.exclude(Q(items__status=1)).distinct()))
def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
"""
Using exclude(condition) and exclude(Q(condition)) should
yield the same QuerySet
"""
self.assertEqual(
list(Order.objects.exclude(items__status=1)),
list(Order.objects.exclude(Q(items__status=1)).distinct()))
@unittest.expectedFailure
def test_only_orders_with_all_items_having_status_1(self):
"""
        This should only return orders having ALL items set to status 1, or
        orders that have no items at all. The correct way to write
        this query in SQL seems to be using two nested subqueries.
"""
self.assertQuerysetEqual(
Order.objects.exclude(~Q(items__status=1)).distinct(),
['<Order: 1>'])
class NullInExcludeTest(TestCase):
def setUp(self):
NullableName.objects.create(name='i1')
NullableName.objects.create()
def test_null_in_exclude_qs(self):
none_val = '' if connection.features.interprets_empty_strings_as_nulls else None
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[]),
['i1', none_val], attrgetter('name'))
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=['i1']),
[none_val], attrgetter('name'))
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=['i3']),
['i1', none_val], attrgetter('name'))
inner_qs = NullableName.objects.filter(name='i1').values_list('name')
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=inner_qs),
[none_val], attrgetter('name'))
        # Check that the inner queryset wasn't executed - it should have been
        # turned into a subquery above
self.assertIs(inner_qs._result_cache, None)
@unittest.expectedFailure
def test_col_not_in_list_containing_null(self):
"""
The following case is not handled properly because
SQL's COL NOT IN (list containing null) handling is too weird to
abstract away.
"""
self.assertQuerysetEqual(
NullableName.objects.exclude(name__in=[None]),
['i1'], attrgetter('name'))
def test_double_exclude(self):
self.assertEqual(
list(NullableName.objects.filter(~~Q(name='i1'))),
list(NullableName.objects.filter(Q(name='i1'))))
self.assertNotIn(
'IS NOT NULL',
str(NullableName.objects.filter(~~Q(name='i1')).query))
class EmptyStringsAsNullTest(TestCase):
"""
Test that filtering on non-null character fields works as expected.
The reason for these tests is that Oracle treats '' as NULL, and this
can cause problems in query construction. Refs #17957.
"""
def setUp(self):
self.nc = NamedCategory.objects.create(name='')
def test_direct_exclude(self):
self.assertQuerysetEqual(
NamedCategory.objects.exclude(name__in=['nonexisting']),
[self.nc.pk], attrgetter('pk')
)
def test_joined_exclude(self):
self.assertQuerysetEqual(
DumbCategory.objects.exclude(namedcategory__name__in=['nonexisting']),
[self.nc.pk], attrgetter('pk')
)
class ProxyQueryCleanupTest(TestCase):
def test_evaluated_proxy_count(self):
"""
Test that generating the query string doesn't alter the query's state
in irreversible ways. Refs #18248.
"""
ProxyCategory.objects.create()
qs = ProxyCategory.objects.all()
self.assertEqual(qs.count(), 1)
str(qs.query)
self.assertEqual(qs.count(), 1)
class WhereNodeTest(TestCase):
class DummyNode(object):
def as_sql(self, qn, connection):
return 'dummy', []
def test_empty_full_handling_conjunction(self):
qn = connection.ops.quote_name
w = WhereNode(children=[EverythingNode()])
self.assertEqual(w.as_sql(qn, connection), ('', []))
w.negate()
self.assertRaises(EmptyResultSet, w.as_sql, qn, connection)
w = WhereNode(children=[NothingNode()])
self.assertRaises(EmptyResultSet, w.as_sql, qn, connection)
w.negate()
self.assertEqual(w.as_sql(qn, connection), ('', []))
w = WhereNode(children=[EverythingNode(), EverythingNode()])
self.assertEqual(w.as_sql(qn, connection), ('', []))
w.negate()
self.assertRaises(EmptyResultSet, w.as_sql, qn, connection)
w = WhereNode(children=[EverythingNode(), self.DummyNode()])
self.assertEqual(w.as_sql(qn, connection), ('dummy', []))
w = WhereNode(children=[self.DummyNode(), self.DummyNode()])
self.assertEqual(w.as_sql(qn, connection), ('(dummy AND dummy)', []))
w.negate()
self.assertEqual(w.as_sql(qn, connection), ('NOT (dummy AND dummy)', []))
w = WhereNode(children=[NothingNode(), self.DummyNode()])
self.assertRaises(EmptyResultSet, w.as_sql, qn, connection)
w.negate()
self.assertEqual(w.as_sql(qn, connection), ('', []))
def test_empty_full_handling_disjunction(self):
qn = connection.ops.quote_name
w = WhereNode(children=[EverythingNode()], connector='OR')
self.assertEqual(w.as_sql(qn, connection), ('', []))
w.negate()
self.assertRaises(EmptyResultSet, w.as_sql, qn, connection)
w = WhereNode(children=[NothingNode()], connector='OR')
self.assertRaises(EmptyResultSet, w.as_sql, qn, connection)
w.negate()
self.assertEqual(w.as_sql(qn, connection), ('', []))
w = WhereNode(children=[EverythingNode(), EverythingNode()], connector='OR')
self.assertEqual(w.as_sql(qn, connection), ('', []))
w.negate()
self.assertRaises(EmptyResultSet, w.as_sql, qn, connection)
w = WhereNode(children=[EverythingNode(), self.DummyNode()], connector='OR')
self.assertEqual(w.as_sql(qn, connection), ('', []))
w.negate()
self.assertRaises(EmptyResultSet, w.as_sql, qn, connection)
w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR')
self.assertEqual(w.as_sql(qn, connection), ('(dummy OR dummy)', []))
w.negate()
self.assertEqual(w.as_sql(qn, connection), ('NOT (dummy OR dummy)', []))
w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR')
self.assertEqual(w.as_sql(qn, connection), ('dummy', []))
w.negate()
self.assertEqual(w.as_sql(qn, connection), ('NOT (dummy)', []))
def test_empty_nodes(self):
qn = connection.ops.quote_name
empty_w = WhereNode()
w = WhereNode(children=[empty_w, empty_w])
self.assertEqual(w.as_sql(qn, connection), (None, []))
w.negate()
self.assertEqual(w.as_sql(qn, connection), (None, []))
w.connector = 'OR'
self.assertEqual(w.as_sql(qn, connection), (None, []))
w.negate()
self.assertEqual(w.as_sql(qn, connection), (None, []))
w = WhereNode(children=[empty_w, NothingNode()], connector='OR')
self.assertRaises(EmptyResultSet, w.as_sql, qn, connection)
class IteratorExceptionsTest(TestCase):
def test_iter_exceptions(self):
qs = ExtraInfo.objects.only('author')
with self.assertRaises(AttributeError):
list(qs)
def test_invalid_qs_list(self):
# Test for #19895 - second iteration over invalid queryset
# raises errors.
qs = Article.objects.order_by('invalid_column')
self.assertRaises(FieldError, list, qs)
self.assertRaises(FieldError, list, qs)
class NullJoinPromotionOrTest(TestCase):
def setUp(self):
self.d1 = ModelD.objects.create(name='foo')
d2 = ModelD.objects.create(name='bar')
self.a1 = ModelA.objects.create(name='a1', d=self.d1)
c = ModelC.objects.create(name='c')
b = ModelB.objects.create(name='b', c=c)
self.a2 = ModelA.objects.create(name='a2', b=b, d=d2)
def test_ticket_17886(self):
        # The first Q-object generates the match; the rest of the filters
        # should not remove the match even if they do not match anything. The
        # problem here was that b__name generates a LOUTER JOIN, then
        # b__c__name generates a join to c, which the ORM tried to promote but
        # failed as that join isn't nullable.
q_obj = (
Q(d__name='foo')|
Q(b__name='foo')|
Q(b__c__name='foo')
)
qset = ModelA.objects.filter(q_obj)
self.assertEqual(list(qset), [self.a1])
# We generate one INNER JOIN to D. The join is direct and not nullable
# so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
# for the b->c join, as a->b is nullable.
self.assertEqual(str(qset.query).count('INNER JOIN'), 1)
def test_isnull_filter_promotion(self):
qs = ModelA.objects.filter(Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(list(qs), [self.a2])
qs = ModelA.objects.filter(~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
self.assertEqual(list(qs), [self.a1])
qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(list(qs), [self.a2])
def test_ticket_21748(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
i3 = Identifier.objects.create(name='i3')
Program.objects.create(identifier=i1)
Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
self.assertQuerysetEqual(
Identifier.objects.filter(program=None, channel=None),
[i3], lambda x: x)
self.assertQuerysetEqual(
Identifier.objects.exclude(program=None, channel=None).order_by('name'),
[i1, i2], lambda x: x)
def test_ticket_21748_double_negated_and(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
Identifier.objects.create(name='i3')
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
Program.objects.create(identifier=i2)
        # Check that ~~Q() (or equivalently .exclude(~Q)) works like Q() for
        # join promotion.
qs1_doubleneg = Identifier.objects.exclude(~Q(program__id=p1.id, channel__id=c1.id)).order_by('pk')
qs1_filter = Identifier.objects.filter(program__id=p1.id, channel__id=c1.id).order_by('pk')
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(str(qs1_filter.query).count('JOIN'),
str(qs1_doubleneg.query).count('JOIN'))
self.assertEqual(2, str(qs1_doubleneg.query).count('INNER JOIN'))
self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
str(qs1_doubleneg.query).count('INNER JOIN'))
def test_ticket_21748_double_negated_or(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
Identifier.objects.create(name='i3')
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
        # Test OR + double negation. The expected result is that channel is LOUTER
        # joined and program INNER joined.
qs1_filter = Identifier.objects.filter(
Q(program__id=p2.id, channel__id=c1.id)
| Q(program__id=p1.id)
).order_by('pk')
qs1_doubleneg = Identifier.objects.exclude(
~Q(Q(program__id=p2.id, channel__id=c1.id)
| Q(program__id=p1.id))
).order_by('pk')
self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
self.assertEqual(str(qs1_filter.query).count('JOIN'),
str(qs1_doubleneg.query).count('JOIN'))
self.assertEqual(1, str(qs1_doubleneg.query).count('INNER JOIN'))
self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
str(qs1_doubleneg.query).count('INNER JOIN'))
def test_ticket_21748_complex_filter(self):
i1 = Identifier.objects.create(name='i1')
i2 = Identifier.objects.create(name='i2')
Identifier.objects.create(name='i3')
p1 = Program.objects.create(identifier=i1)
c1 = Channel.objects.create(identifier=i1)
p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case: one query where each NOT is pushed to
        # the lowest level of the boolean tree, and another query where this
        # isn't done.
qs1 = Identifier.objects.filter(
~Q(~Q(program__id=p2.id, channel__id=c1.id)
& Q(program__id=p1.id))).order_by('pk')
qs2 = Identifier.objects.filter(
Q(Q(program__id=p2.id, channel__id=c1.id)
| ~Q(program__id=p1.id))).order_by('pk')
self.assertQuerysetEqual(qs1, qs2, lambda x: x)
self.assertEqual(str(qs1.query).count('JOIN'),
str(qs2.query).count('JOIN'))
self.assertEqual(0, str(qs1.query).count('INNER JOIN'))
self.assertEqual(str(qs1.query).count('INNER JOIN'),
str(qs2.query).count('INNER JOIN'))
class ReverseJoinTrimmingTest(TestCase):
def test_reverse_trimming(self):
# Check that we don't accidentally trim reverse joins - we can't know
# if there is anything on the other side of the join, so trimming
# reverse joins can't be done, ever.
t = Tag.objects.create()
qs = Tag.objects.filter(annotation__tag=t.pk)
self.assertIn('INNER JOIN', str(qs.query))
self.assertEqual(list(qs), [])
class JoinReuseTest(TestCase):
"""
Test that the queries reuse joins sensibly (for example, direct joins
are always reused).
"""
def test_fk_reuse(self):
qs = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_select_related(self):
qs = Annotation.objects.filter(tag__name='foo').select_related('tag')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_annotation(self):
qs = Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_disjunction(self):
qs = Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_fk_reuse_order_by(self):
qs = Annotation.objects.filter(tag__name='foo').order_by('tag__name')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_revo2o_reuse(self):
qs = Detail.objects.filter(member__name='foo').filter(member__name='foo')
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_revfk_noreuse(self):
qs = Author.objects.filter(report__name='r4').filter(report__name='r1')
self.assertEqual(str(qs.query).count('JOIN'), 2)
class DisjunctionPromotionTests(TestCase):
    def test_disjunction_promotion_select_related(self):
fk1 = FK1.objects.create(f1='f1', f2='f2')
basea = BaseA.objects.create(a=fk1)
qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2))
self.assertEqual(str(qs.query).count(' JOIN '), 0)
qs = qs.select_related('a', 'b')
self.assertEqual(str(qs.query).count(' INNER JOIN '), 0)
self.assertEqual(str(qs.query).count(' LEFT OUTER JOIN '), 2)
with self.assertNumQueries(1):
self.assertQuerysetEqual(qs, [basea], lambda x: x)
self.assertEqual(qs[0].a, fk1)
self.assertIs(qs[0].b, None)
def test_disjunction_promotion1(self):
# Pre-existing join, add two ORed filters to the same join,
# all joins can be INNER JOINS.
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(Q(b__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
# Reverse the order of AND and OR filters.
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
def test_disjunction_promotion2(self):
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # Now we have two different joins in an ORed condition; these
        # must be OUTER joins. The pre-existing join should remain INNER.
qs = qs.filter(Q(b__f1='foo') | Q(c__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
# Reverse case.
qs = BaseA.objects.filter(Q(b__f1='foo') | Q(c__f2='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
def test_disjunction_promotion3(self):
qs = BaseA.objects.filter(a__f2='bar')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # The ANDed a__f2 filter allows us to keep using INNER JOIN
        # even inside the ORed case. If the join to a returns nothing,
        # the ANDed filter for a__f2 can't be true.
qs = qs.filter(Q(a__f1='foo') | Q(b__f2='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
@unittest.expectedFailure
def test_disjunction_promotion3_failing(self):
        # Now the ORed filter creates a LOUTER join, but we do not have
        # logic to unpromote it for the AND filter after it. The query
        # results will be correct, but we currently have one LOUTER JOIN
        # too many.
qs = BaseA.objects.filter(
Q(a__f1='foo') | Q(b__f2='foo')).filter(a__f2='bar')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
@unittest.expectedFailure
def test_disjunction_promotion4_failing(self):
# Failure because no join repromotion
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = qs.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion4(self):
qs = BaseA.objects.filter(a__f1='foo')
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
@unittest.expectedFailure
def test_disjunction_promotion5_failing(self):
# Failure because no join repromotion logic.
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
# Note that the above filters on a force the join to an
# inner join even if it is trimmed.
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = qs.filter(Q(a__f1='foo') | Q(b__f1='foo'))
# So, now the a__f1 join doesn't need promotion.
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
qs = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo'))
# Now the join to a is created as LOUTER
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
        qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
def test_disjunction_promotion6(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
qs = qs.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
def test_disjunction_promotion7(self):
qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
self.assertEqual(str(qs.query).count('JOIN'), 0)
qs = BaseA.objects.filter(Q(a__f1='foo') | (Q(b__f1='foo') & Q(a__f1='bar')))
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
qs = BaseA.objects.filter(
(Q(a__f1='foo') | Q(b__f1='foo')) & (Q(a__f1='bar') | Q(c__f1='foo'))
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
qs = BaseA.objects.filter(
(Q(a__f1='foo') | (Q(a__f1='bar')) & (Q(b__f1='bar') | Q(c__f1='foo')))
)
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
def test_disjunction_promotion_fexpression(self):
qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | Q(b__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(a__f2=F('b__f2')) | Q(c__f1='foo'))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | (Q(pk=1) & Q(pk=2)))
self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
class ManyToManyExcludeTest(TestCase):
def test_exclude_many_to_many(self):
Identifier.objects.create(name='extra')
program = Program.objects.create(identifier=Identifier.objects.create(name='program'))
channel = Channel.objects.create(identifier=Identifier.objects.create(name='channel'))
channel.programs.add(program)
        # channel contains 'program', so all Identifiers except that one
        # should be returned
self.assertQuerysetEqual(
Identifier.objects.exclude(program__channel=channel).order_by('name'),
['<Identifier: channel>', '<Identifier: extra>']
)
self.assertQuerysetEqual(
Identifier.objects.exclude(program__channel=None).order_by('name'),
['<Identifier: program>']
)
def test_ticket_12823(self):
pg3 = Page.objects.create(text='pg3')
pg2 = Page.objects.create(text='pg2')
pg1 = Page.objects.create(text='pg1')
pa1 = Paragraph.objects.create(text='pa1')
pa1.page = [pg1, pg2]
pa2 = Paragraph.objects.create(text='pa2')
pa2.page = [pg2, pg3]
pa3 = Paragraph.objects.create(text='pa3')
ch1 = Chapter.objects.create(title='ch1', paragraph=pa1)
ch2 = Chapter.objects.create(title='ch2', paragraph=pa2)
ch3 = Chapter.objects.create(title='ch3', paragraph=pa3)
b1 = Book.objects.create(title='b1', chapter=ch1)
b2 = Book.objects.create(title='b2', chapter=ch2)
b3 = Book.objects.create(title='b3', chapter=ch3)
q = Book.objects.exclude(chapter__paragraph__page__text='pg1')
self.assertNotIn('IS NOT NULL', str(q.query))
self.assertEqual(len(q), 2)
self.assertNotIn(b1, q)
self.assertIn(b2, q)
self.assertIn(b3, q)
class RelabelCloneTest(TestCase):
def test_ticket_19964(self):
my1 = MyObject.objects.create(data='foo')
my1.parent = my1
my1.save()
my2 = MyObject.objects.create(data='bar', parent=my1)
parents = MyObject.objects.filter(parent=F('id'))
children = MyObject.objects.filter(parent__in=parents).exclude(parent=F('id'))
self.assertEqual(list(parents), [my1])
# Evaluating the children query (which has parents as part of it) does
# not change results for the parents query.
self.assertEqual(list(children), [my2])
self.assertEqual(list(parents), [my1])
class Ticket20101Tests(TestCase):
def test_ticket_20101(self):
"""
Tests QuerySet ORed combining in exclude subquery case.
"""
t = Tag.objects.create(name='foo')
a1 = Annotation.objects.create(tag=t, name='a1')
a2 = Annotation.objects.create(tag=t, name='a2')
a3 = Annotation.objects.create(tag=t, name='a3')
n = Note.objects.create(note='foo', misc='bar')
qs1 = Note.objects.exclude(annotation__in=[a1, a2])
qs2 = Note.objects.filter(annotation__in=[a3])
self.assertTrue(n in qs1)
self.assertFalse(n in qs2)
self.assertTrue(n in (qs1 | qs2))
class EmptyStringPromotionTests(TestCase):
def test_empty_string_promotion(self):
qs = RelatedObject.objects.filter(single__name='')
if connection.features.interprets_empty_strings_as_nulls:
self.assertIn('LEFT OUTER JOIN', str(qs.query))
else:
self.assertNotIn('LEFT OUTER JOIN', str(qs.query))
class ValuesSubqueryTests(TestCase):
def test_values_in_subquery(self):
# Check that if a values() queryset is used, then the given values
# will be used instead of forcing use of the relation's field.
o1 = Order.objects.create(id=-2)
o2 = Order.objects.create(id=-1)
oi1 = OrderItem.objects.create(order=o1, status=0)
oi1.status = oi1.pk
oi1.save()
OrderItem.objects.create(order=o2, status=0)
# The query below should match o1 as it has related order_item
# with id == status.
self.assertQuerysetEqual(
Order.objects.filter(items__in=OrderItem.objects.values_list('status')),
[o1.pk], lambda x: x.pk)
class DoubleInSubqueryTests(TestCase):
def test_double_subquery_in(self):
lfa1 = LeafA.objects.create(data='foo')
lfa2 = LeafA.objects.create(data='bar')
lfb1 = LeafB.objects.create(data='lfb1')
lfb2 = LeafB.objects.create(data='lfb2')
Join.objects.create(a=lfa1, b=lfb1)
Join.objects.create(a=lfa2, b=lfb2)
leaf_as = LeafA.objects.filter(data='foo').values_list('pk', flat=True)
joins = Join.objects.filter(a__in=leaf_as).values_list('b__id', flat=True)
qs = LeafB.objects.filter(pk__in=joins)
self.assertQuerysetEqual(
qs, [lfb1], lambda x: x)
class Ticket18785Tests(unittest.TestCase):
def test_ticket_18785(self):
        # Test join trimming from ticket #18785
qs = Item.objects.exclude(
note__isnull=False
).filter(
name='something', creator__extra__isnull=True
).order_by()
self.assertEqual(1, str(qs.query).count('INNER JOIN'))
self.assertEqual(0, str(qs.query).count('OUTER JOIN'))
class RelatedLookupTypeTests(TestCase):
def test_wrong_type_lookup(self):
oa = ObjectA.objects.create(name="oa")
wrong_type = Order.objects.create(id=oa.pk)
ob = ObjectB.objects.create(name="ob", objecta=oa, num=1)
        # Currently Django doesn't care if the object is of the correct
        # type; it will just use the objecta's related field attribute
        # (id) for the model lookup. Making things more restrictive could
        # be a good idea...
self.assertQuerysetEqual(
ObjectB.objects.filter(objecta=wrong_type),
[ob], lambda x: x)
self.assertQuerysetEqual(
ObjectB.objects.filter(objecta__in=[wrong_type]),
[ob], lambda x: x)
class Ticket20955Tests(TestCase):
def test_ticket_20955(self):
jack = Staff.objects.create(name='jackstaff')
jackstaff = StaffUser.objects.create(staff=jack)
jill = Staff.objects.create(name='jillstaff')
jillstaff = StaffUser.objects.create(staff=jill)
task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
task_get = Task.objects.get(pk=task.pk)
# Load data so that assertNumQueries doesn't complain about the get
# version's queries.
task_get.creator.staffuser.staff
task_get.owner.staffuser.staff
task_select_related = Task.objects.select_related(
'creator__staffuser__staff', 'owner__staffuser__staff').get(pk=task.pk)
with self.assertNumQueries(0):
self.assertEqual(task_select_related.creator.staffuser.staff,
task_get.creator.staffuser.staff)
self.assertEqual(task_select_related.owner.staffuser.staff,
task_get.owner.staffuser.staff)
class Ticket21203Tests(TestCase):
def test_ticket_21203(self):
p = Ticket21203Parent.objects.create(parent_bool=True)
c = Ticket21203Child.objects.create(parent=p)
qs = Ticket21203Child.objects.select_related('parent').defer('parent__created')
self.assertQuerysetEqual(qs, [c], lambda x: x)
self.assertIs(qs[0].parent.parent_bool, True)
class ForeignKeyToBaseExcludeTests(TestCase):
def test_ticket_21787(self):
sc1 = SpecialCategory.objects.create(special_name='sc1', name='sc1')
sc2 = SpecialCategory.objects.create(special_name='sc2', name='sc2')
sc3 = SpecialCategory.objects.create(special_name='sc3', name='sc3')
c1 = CategoryItem.objects.create(category=sc1)
CategoryItem.objects.create(category=sc2)
self.assertQuerysetEqual(
SpecialCategory.objects.exclude(
categoryitem__id=c1.pk).order_by('name'),
[sc2, sc3], lambda x: x
)
self.assertQuerysetEqual(
SpecialCategory.objects.filter(categoryitem__id=c1.pk),
[sc1], lambda x: x
)
class Ticket22429Tests(TestCase):
def test_ticket_22429(self):
sc1 = School.objects.create()
st1 = Student.objects.create(school=sc1)
sc2 = School.objects.create()
st2 = Student.objects.create(school=sc2)
cr = Classroom.objects.create(school=sc1)
cr.students.add(st1)
queryset = Student.objects.filter(~Q(classroom__school=F('school')))
self.assertQuerysetEqual(queryset, [st2], lambda x: x)
|
TRex22/Sick-Beard
|
refs/heads/master
|
lib/hachoir_parser/misc/torrent.py
|
90
|
"""
.torrent metainfo file parser
http://wiki.theory.org/BitTorrentSpecification#Metainfo_File_Structure
Status: To statufy
Author: Christophe Gisquet <christophe.gisquet@free.fr>
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
String, RawBytes)
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.tools import makePrintable, timestampUNIX, humanFilesize
# Maximum number of bytes for string length
MAX_STRING_LENGTH = 6 # length in 0..999999
# Maximum number of bytes for integer value
MAX_INTEGER_SIZE = 21 # 21 decimal digits (or "-" sign and 20 digits)
class Integer(FieldSet):
# i<integer encoded in base ten ASCII>e
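    # e.g. "i3e" encodes the integer 3 and "i-20e" encodes -20 (illustrative values)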
def createFields(self):
yield String(self, "start", 1, "Integer start delimiter (i)", charset="ASCII")
# Find integer end
addr = self.absolute_address+self.current_size
len = self.stream.searchBytesLength('e', False, addr, addr+(MAX_INTEGER_SIZE+1)*8)
if len is None:
raise ParserError("Torrent: Unable to find integer end delimiter (e)!")
if not len:
raise ParserError("Torrent: error, empty integer!")
yield String(self, "value", len, "Integer value", charset="ASCII")
yield String(self, "end", 1, "Integer end delimiter")
def createValue(self):
"""Read integer value (may raise ValueError)"""
return int(self["value"].value)
class TorrentString(FieldSet):
# <string length encoded in base ten ASCII>:<string data>
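    # e.g. "4:spam" encodes the string "spam" (illustrative value)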
def createFields(self):
addr = self.absolute_address
len = self.stream.searchBytesLength(':', False, addr, addr+(MAX_STRING_LENGTH+1)*8)
if len is None:
raise ParserError("Torrent: unable to find string separator (':')")
if not len:
raise ParserError("Torrent: error: no string length!")
val = String(self, "length", len, "String length")
yield val
try:
len = int(val.value)
except ValueError:
len = -1
if len < 0:
raise ParserError("Invalid string length (%s)" % makePrintable(val.value, "ASCII", to_unicode=True))
yield String(self, "separator", 1, "String length/value separator")
if not len:
self.info("Empty string: len=%i" % len)
return
if len<512:
yield String(self, "value", len, "String value", charset="ISO-8859-1")
else:
# Probably raw data
yield RawBytes(self, "value", len, "Raw data")
def createValue(self):
if "value" in self:
field = self["value"]
if field.__class__ != RawBytes:
return field.value
else:
return None
else:
return None
class Dictionary(FieldSet):
# d<bencoded string><bencoded element>e
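    # e.g. "d3:cow3:mooe" encodes the mapping {"cow": "moo"} (example from the BitTorrent spec)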
def createFields(self):
yield String(self, "start", 1, "Dictionary start delimiter (d)", charset="ASCII")
while self.stream.readBytes(self.absolute_address+self.current_size, 1) != "e":
yield DictionaryItem(self, "item[]")
yield String(self, "end", 1, "Dictionary end delimiter")
class List(FieldSet):
# l<bencoded values>e
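    # e.g. "l4:spam4:eggse" encodes the list ["spam", "eggs"] (example from the BitTorrent spec)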
def createFields(self):
yield String(self, "start", 1, "List start delimiter")
while self.stream.readBytes(self.absolute_address+self.current_size, 1) != "e":
yield Entry(self, "item[]")
yield String(self, "end", 1, "List end delimiter")
class DictionaryItem(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
# TODO: Remove this because it's not lazy?
key = self["key"]
if not key.hasValue():
return
key = key.value
self._name = str(key).replace(" ", "_")
def createDisplay(self):
if not self["value"].hasValue():
return None
if self._name in ("length", "piece_length"):
return humanFilesize(self.value)
return FieldSet.createDisplay(self)
def createValue(self):
if not self["value"].hasValue():
return None
if self._name == "creation_date":
return self.createTimestampValue()
else:
return self["value"].value
def createFields(self):
yield Entry(self, "key")
yield Entry(self, "value")
def createTimestampValue(self):
return timestampUNIX(self["value"].value)
# Map first chunk byte => type
TAGS = {'d': Dictionary, 'i': Integer, 'l': List}
for index in xrange(1, 9+1):
TAGS[str(index)] = TorrentString
# Create an entry
def Entry(parent, name):
addr = parent.absolute_address + parent.current_size
tag = parent.stream.readBytes(addr, 1)
if tag not in TAGS:
raise ParserError("Torrent: Entry of type %r not handled" % type)
cls = TAGS[tag]
return cls(parent, name)
class TorrentFile(Parser):
endian = LITTLE_ENDIAN
MAGIC = "d8:announce"
PARSER_TAGS = {
"id": "torrent",
"category": "misc",
"file_ext": ("torrent",),
"min_size": 50*8,
"mime": (u"application/x-bittorrent",),
"magic": ((MAGIC, 0),),
"description": "Torrent metainfo file"
}
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "Invalid magic"
return True
def createFields(self):
yield Dictionary(self, "root", size=self.size)
|
queria/my-tempest
|
refs/heads/juno
|
tempest/scenario/test_volume_boot_pattern.py
|
2
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = log.getLogger(__name__)
class TestVolumeBootPattern(manager.ScenarioTest):
"""
This test case attempts to reproduce the following steps:
    * Create a bootable volume in Cinder from a Glance image
    * Boot an instance from the bootable volume
    * Write content to the volume
    * Delete the instance and boot a new instance from the volume
    * Check the written content in the instance
    * Create a volume snapshot while the instance is running
    * Boot an additional instance from the new snapshot-based volume
    * Check the written content in the instance booted from the snapshot
"""
@classmethod
def resource_setup(cls):
super(TestVolumeBootPattern, cls).resource_setup()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
def _create_volume_from_image(self):
img_uuid = CONF.compute.image_ref
vol_name = data_utils.rand_name('volume-origin')
return self.create_volume(name=vol_name, imageRef=img_uuid)
def _boot_instance_from_volume(self, vol_id, keypair):
# NOTE(gfidente): the syntax for block_device_mapping is
# dev_name=id:type:size:delete_on_terminate
# where type needs to be "snap" if the server is booted
# from a snapshot, size instead can be safely left empty
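        # (Illustrative only: in the legacy string syntax described above this
        # mapping would read roughly "vda=<volume-id>:::0"; the dict form below
        # is what is actually sent.)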
bd_map = [{
'device_name': 'vda',
'volume_id': vol_id,
'delete_on_termination': '0'}]
self.security_group = self._create_security_group()
security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'block_device_mapping': bd_map,
'key_name': keypair['name'],
'security_groups': security_groups
}
return self.create_server(image='', create_kwargs=create_kwargs)
def _create_snapshot_from_volume(self, vol_id):
snap_name = data_utils.rand_name('snapshot')
_, snap = self.snapshots_client.create_snapshot(
volume_id=vol_id,
force=True,
display_name=snap_name)
self.addCleanup_with_wait(
waiter_callable=self.snapshots_client.wait_for_resource_deletion,
thing_id=snap['id'], thing_id_param='id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[self.snapshots_client.delete_snapshot, snap['id']])
self.snapshots_client.wait_for_snapshot_status(snap['id'], 'available')
self.assertEqual(snap_name, snap['display_name'])
return snap
def _create_volume_from_snapshot(self, snap_id):
vol_name = data_utils.rand_name('volume')
return self.create_volume(name=vol_name, snapshot_id=snap_id)
def _stop_instances(self, instances):
# NOTE(gfidente): two loops so we do not wait for the status twice
for i in instances:
self.servers_client.stop(i['id'])
for i in instances:
self.servers_client.wait_for_server_status(i['id'], 'SHUTOFF')
def _detach_volumes(self, volumes):
# NOTE(gfidente): two loops so we do not wait for the status twice
for v in volumes:
self.volumes_client.detach_volume(v['id'])
for v in volumes:
self.volumes_client.wait_for_volume_status(v['id'], 'available')
def _ssh_to_server(self, server, keypair):
if CONF.compute.use_floatingip_for_ssh:
_, floating_ip = self.floating_ips_client.create_floating_ip()
self.addCleanup(self.delete_wrapper,
self.floating_ips_client.delete_floating_ip,
floating_ip['id'])
self.floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], server['id'])
ip = floating_ip['ip']
else:
network_name_for_ssh = CONF.compute.network_for_ssh
ip = server.networks[network_name_for_ssh][0]
try:
return self.get_remote_client(
ip,
private_key=keypair['private_key'])
except Exception:
LOG.exception('ssh to server failed')
self._log_console_output(servers=[server])
raise
def _get_content(self, ssh_client):
return ssh_client.exec_command('cat /tmp/text')
def _write_text(self, ssh_client):
text = data_utils.rand_name('text-')
ssh_client.exec_command('echo "%s" > /tmp/text; sync' % (text))
return self._get_content(ssh_client)
def _delete_server(self, server):
self.servers_client.delete_server(server['id'])
self.servers_client.wait_for_server_termination(server['id'])
def _check_content_of_written_file(self, ssh_client, expected):
actual = self._get_content(ssh_client)
self.assertEqual(expected, actual)
@test.services('compute', 'volume', 'image')
def test_volume_boot_pattern(self):
keypair = self.create_keypair()
self.security_group = self._create_security_group()
# create an instance from volume
volume_origin = self._create_volume_from_image()
instance_1st = self._boot_instance_from_volume(volume_origin['id'],
keypair)
# write content to volume on instance
ssh_client_for_instance_1st = self._ssh_to_server(instance_1st,
keypair)
text = self._write_text(ssh_client_for_instance_1st)
# delete instance
self._delete_server(instance_1st)
# create a 2nd instance from volume
instance_2nd = self._boot_instance_from_volume(volume_origin['id'],
keypair)
# check the content of written file
ssh_client_for_instance_2nd = self._ssh_to_server(instance_2nd,
keypair)
self._check_content_of_written_file(ssh_client_for_instance_2nd, text)
# snapshot a volume
snapshot = self._create_snapshot_from_volume(volume_origin['id'])
# create a 3rd instance from snapshot
volume = self._create_volume_from_snapshot(snapshot['id'])
instance_from_snapshot = self._boot_instance_from_volume(volume['id'],
keypair)
# check the content of written file
ssh_client = self._ssh_to_server(instance_from_snapshot, keypair)
self._check_content_of_written_file(ssh_client, text)
# NOTE(gfidente): ensure resources are in clean state for
# deletion operations to succeed
self._stop_instances([instance_2nd, instance_from_snapshot])
self._detach_volumes([volume_origin, volume])
class TestVolumeBootPatternV2(TestVolumeBootPattern):
def _boot_instance_from_volume(self, vol_id, keypair):
bdms = [{'uuid': vol_id, 'source_type': 'volume',
'destination_type': 'volume', 'boot_index': 0,
'delete_on_termination': False}]
self.security_group = self._create_security_group()
security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'block_device_mapping_v2': bdms,
'key_name': keypair['name'],
'security_groups': security_groups
}
return self.create_server(image='', create_kwargs=create_kwargs)
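# Hedged note (not part of the original tests): the two classes above differ
# only in how the boot volume is described to Nova. TestVolumeBootPattern uses
# the legacy block_device_mapping, whose entries correspond to the string
# syntax mentioned in the NOTE above (roughly 'vda=<volume-id>:::0'), while
# TestVolumeBootPatternV2 passes block_device_mapping_v2 dictionaries
# (uuid/source_type/destination_type/boot_index/delete_on_termination).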
|
yetship/blog_codes
|
refs/heads/master
|
python/basic/forbitpyc/flask-main.py
|
1
|
#!/usr/bin/env python
# encoding: utf-8
from flask import Flask
from a import A
app = Flask(__name__)
@app.route('/')
def index():
return A().a()
if __name__ == "__main__":
app.run()
|
thanhphat11/kernel-cm12.1-910
|
refs/heads/master
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
Kozea/Pynuts
|
refs/heads/master
|
tests/test_git.py
|
1
|
""" Test suite of the Git module. """
import os.path
import unittest
import shutil
import tempfile
import jinja2
from pynuts.git import (Git, ObjectTypeError, NotFoundError,
ConflictError)
from dulwich.repo import Repo
class TestGit(unittest.TestCase):
"""Test suite for the Git module"""
hello1_content = 'Hello, World!'
hello2_content = ('{% from "sub/name.jinja" import name %}'
'Hello {{ name() }}!')
name_content = '{% macro name() %}from Pynuts{% endmacro %}'
def setUp(self):
"""Create a temporary directory."""
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
"""Delete the temporary directory with its content."""
shutil.rmtree(self.tempdir)
def test_git(self):
"""Test the behaviour of the Git object."""
repo = Repo.init_bare(self.tempdir)
git = Git(repo, branch='master')
git2 = Git(repo, branch='master')
def get_hello():
"""Return the hello.jinja template."""
env = jinja2.Environment(loader=git.jinja_loader('templates'))
return env.get_template('hello.jinja')
self.assertRaises(jinja2.TemplateNotFound, get_hello)
self.assertRaises(ValueError, git.write, '', 'foo') # empty path
git.write('templates/hello.jinja', b'Hello, World!')
self.assertRaises(ObjectTypeError, git.write, 'templates', b'foo')
self.assertRaises(ObjectTypeError, git.write,
'templates/hello.jinja/foo', b'foo')
assert list(git.history()) == []
git.commit('Alice', 'alice@pynuts.org', 'First commit')
commit_1 = git.head.id
assert list(git.history()) == [commit_1]
self.assertRaises(ConflictError, git2.commit,
'Alice', 'alice@pynuts.org', '(not) First commit')
git.write('templates/hello.jinja',
b'{% from "sub/name.jinja" import name %}Hello {{ name() }}!')
git.write('templates/sub/name.jinja',
b'{% macro name() %}from Pynuts{% endmacro %}')
git.commit('Bob', 'bob@pynuts.org', 'Second commit')
commit_2 = git.head.id
assert commit_2 != commit_1
assert git.head.parents == [commit_1]
assert git.repository.refs[b'refs/heads/master'] == commit_2
assert list(git.history()) == [commit_2, commit_1]
# Make sure we read from the filesystem
git = Git(repo, branch='master', commit=commit_1)
self.assertRaises(ConflictError, git.commit,
'Bob', 'bob@pynuts.org', '(not) Second commit')
self.assertRaises(ValueError, git.read, '')
self.assertRaises(ValueError, git.read, '/')
self.assertRaises(ObjectTypeError, git.read, 'templates')
self.assertRaises(ObjectTypeError, git.read,
'templates/hello.jinja/foo')
self.assertRaises(NotFoundError, git.read, 'foo')
self.assertRaises(NotFoundError, git.read, 'foo/bar')
self.assertRaises(NotFoundError, git.read, 'templates/bar')
assert git.read('templates/hello.jinja') == b'Hello, World!'
assert git.read('/templates//hello.jinja') == b'Hello, World!'
template = get_hello()
assert template.filename.endswith(
'/<git commit %s>/templates/hello.jinja' % commit_1)
assert template.render() == 'Hello, World!'
git = Git(repo, branch='master')
assert git.head.id == commit_2
template = get_hello()
assert template.filename.endswith(
'/<git commit %s>/templates/hello.jinja' % commit_2)
assert template.render() == 'Hello from Pynuts!'
git = Git(repo, branch='inexistent')
git.tree = git.store_directory(os.path.join(self.tempdir, 'refs'))
assert git.read('heads/master').strip() == commit_2
|
tsufiev/horizon
|
refs/heads/master
|
openstack_dashboard/test/integration_tests/basewebobject.py
|
2
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import selenium.common.exceptions as Exceptions
import selenium.webdriver.support.ui as Support
from selenium.webdriver.support import wait
class BaseWebObject(object):
"""Base class for all web objects."""
def __init__(self, driver, conf):
self.driver = driver
self.conf = conf
self.explicit_wait = self.conf.selenium.explicit_wait
def _is_element_present(self, *locator):
try:
self._get_element(*locator)
return True
except Exceptions.NoSuchElementException:
return False
def _is_element_visible(self, *locator):
try:
return self._get_element(*locator).is_displayed()
except (Exceptions.NoSuchElementException,
Exceptions.ElementNotVisibleException):
return False
def _is_element_displayed(self, element):
try:
return element.is_displayed()
except Exception:
return False
def _is_text_visible(self, element, text):
try:
return element.text == text
except Exception:
return False
def _get_element(self, *locator):
return self.driver.find_element(*locator)
def _get_elements(self, *locator):
return self.driver.find_elements(*locator)
def _fill_field_element(self, data, field_element):
field_element.clear()
field_element.send_keys(data)
return field_element
def _select_dropdown(self, value, element):
select = Support.Select(element)
select.select_by_visible_text(value)
def _select_dropdown_by_value(self, value, element):
select = Support.Select(element)
select.select_by_value(value)
def _turn_off_implicit_wait(self):
self.driver.implicitly_wait(0)
def _turn_on_implicit_wait(self):
self.driver.implicitly_wait(self.conf.selenium.page_timeout)
def _wait_till_text_present_in_element(self, element, text, timeout=None):
if not timeout:
timeout = self.explicit_wait
wait.WebDriverWait(self.driver, timeout).until(
lambda x: self._is_text_visible(element, text))
def _wait_till_element_visible(self, element, timeout=None):
if not timeout:
timeout = self.explicit_wait
wait.WebDriverWait(self.driver, timeout).until(
lambda x: self._is_element_displayed(element))
def _wait_till_element_disappears(self, element, timeout=None):
if not timeout:
timeout = self.explicit_wait
wait.WebDriverWait(self.driver, timeout).until_not(
lambda x: self._is_element_displayed(element))
|
SiCKRAGETV/SickRage
|
refs/heads/master
|
sickrage/notifiers/emby.py
|
2
|
# Author: echel0n <echel0n@sickrage.ca>
# URL: https://sickrage.ca
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import json
import urllib
import urllib2
import sickrage
from sickrage.notifiers import Notifiers
class EMBYNotifier(Notifiers):
def __init__(self):
super(EMBYNotifier, self).__init__()
self.name = 'emby'
def notify_snatch(self, ep_name):
if sickrage.app.config.emby_notify_onsnatch:
self._notify_emby(self.notifyStrings[self.NOTIFY_SNATCH] + ': ' + ep_name)
def notify_download(self, ep_name):
if sickrage.app.config.emby_notify_ondownload:
self._notify_emby(self.notifyStrings[self.NOTIFY_DOWNLOAD] + ': ' + ep_name)
def notify_subtitle_download(self, ep_name, lang):
if sickrage.app.config.emby_notify_onsubtitledownload:
self._notify_emby(self.notifyStrings[self.NOTIFY_SUBTITLE_DOWNLOAD] + ' ' + ep_name + ": " + lang)
def notify_version_update(self, new_version="??"):
if sickrage.app.config.use_emby:
update_text = self.notifyStrings[self.NOTIFY_GIT_UPDATE_TEXT]
title = self.notifyStrings[self.NOTIFY_GIT_UPDATE]
self._notify_emby(title + " - " + update_text + new_version)
def _notify_emby(self, message, host=None, emby_apikey=None):
"""Handles notifying Emby host via HTTP API
Returns:
Returns True for no issue or False if there was an error
"""
# fill in omitted parameters
if not host:
host = sickrage.app.config.emby_host
if not emby_apikey:
emby_apikey = sickrage.app.config.emby_apikey
url = 'http://%s/emby/Notifications/Admin' % (host)
values = {'Name': 'SiCKRAGE', 'Description': message,
'ImageUrl': 'https://www.sickrage.ca/favicon.ico'}
data = json.dumps(values)
try:
req = urllib2.Request(url, data)
req.add_header('X-MediaBrowser-Token', emby_apikey)
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req)
result = response.read()
response.close()
sickrage.app.log.debug('EMBY: HTTP response: ' + result.replace('\n', ''))
return True
except (urllib2.URLError, IOError) as e:
            sickrage.app.log.warning('EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + str(e))
return False
def test_notify(self, host, emby_apikey):
return self._notify_emby('This is a test notification from SiCKRAGE', host, emby_apikey)
def mass_notify_login(self, ipaddress=""):
if sickrage.app.config.use_emby:
update_text = self.notifyStrings[self.NOTIFY_LOGIN_TEXT]
title = self.notifyStrings[self.NOTIFY_LOGIN]
self._notify_emby(title + " - " + update_text.format(ipaddress))
def update_library(self, show=None):
"""Handles updating the Emby Media Server host via HTTP API
Returns: True for no issue or False if there was an error
"""
if sickrage.app.config.use_emby:
if not sickrage.app.config.emby_host:
sickrage.app.log.debug('EMBY: No host specified, check your settings')
return False
if show:
if show.indexer == 1:
provider = 'tvdb'
elif show.indexer == 2:
sickrage.app.log.warning('EMBY: TVRage Provider no longer valid')
return False
else:
sickrage.app.log.warning('EMBY: Provider unknown')
return False
query = '?%sid=%s' % (provider, show.indexerid)
else:
query = ''
url = 'http://%s/emby/Library/Series/Updated%s' % (sickrage.app.config.emby_host, query)
values = {}
data = urllib.urlencode(values)
try:
req = urllib2.Request(url, data)
req.add_header('X-MediaBrowser-Token', sickrage.app.config.emby_apikey)
response = urllib2.urlopen(req)
result = response.read()
response.close()
sickrage.app.log.debug('EMBY: HTTP response: ' + result.replace('\n', ''))
return True
except (urllib2.URLError, IOError) as e:
                sickrage.app.log.warning('EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + str(e))
return False
|
buchuki/pyjaco
|
refs/heads/devel
|
tests/functions/uplus.py
|
5
|
x = +7623
print x
|
kapilrastogi/Impala
|
refs/heads/cdh5-trunk
|
thirdparty/thrift-0.9.0/lib/py/src/transport/TTransport.py
|
105
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from cStringIO import StringIO
from struct import pack, unpack
from thrift.Thrift import TException
class TTransportException(TException):
"""Custom Transport Exception class"""
UNKNOWN = 0
NOT_OPEN = 1
ALREADY_OPEN = 2
TIMED_OUT = 3
END_OF_FILE = 4
def __init__(self, type=UNKNOWN, message=None):
TException.__init__(self, message)
self.type = type
class TTransportBase:
"""Base class for Thrift transport layer."""
def isOpen(self):
pass
def open(self):
pass
def close(self):
pass
def read(self, sz):
pass
def readAll(self, sz):
buff = ''
have = 0
while (have < sz):
chunk = self.read(sz - have)
have += len(chunk)
buff += chunk
if len(chunk) == 0:
raise EOFError()
return buff
def write(self, buf):
pass
def flush(self):
pass
# This class should be thought of as an interface.
class CReadableTransport:
"""base class for transports that are readable from C"""
# TODO(dreiss): Think about changing this interface to allow us to use
# a (Python, not c) StringIO instead, because it allows
# you to write after reading.
# NOTE: This is a classic class, so properties will NOT work
# correctly for setting.
@property
def cstringio_buf(self):
"""A cStringIO buffer that contains the current chunk we are reading."""
pass
def cstringio_refill(self, partialread, reqlen):
"""Refills cstringio_buf.
Returns the currently used buffer (which can but need not be the same as
the old cstringio_buf). partialread is what the C code has read from the
buffer, and should be inserted into the buffer before any more reads. The
return value must be a new, not borrowed reference. Something along the
lines of self._buf should be fine.
If reqlen bytes can't be read, throw EOFError.
"""
pass
class TServerTransportBase:
"""Base class for Thrift server transports."""
def listen(self):
pass
def accept(self):
pass
def close(self):
pass
class TTransportFactoryBase:
"""Base class for a Transport Factory"""
def getTransport(self, trans):
return trans
class TBufferedTransportFactory:
"""Factory transport that builds buffered transports"""
def getTransport(self, trans):
buffered = TBufferedTransport(trans)
return buffered
class TBufferedTransport(TTransportBase, CReadableTransport):
"""Class that wraps another transport and buffers its I/O.
The implementation uses a (configurable) fixed-size read buffer
but buffers all writes until a flush is performed.
"""
DEFAULT_BUFFER = 4096
def __init__(self, trans, rbuf_size=DEFAULT_BUFFER):
self.__trans = trans
self.__wbuf = StringIO()
self.__rbuf = StringIO("")
self.__rbuf_size = rbuf_size
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.__rbuf = StringIO(self.__trans.read(max(sz, self.__rbuf_size)))
return self.__rbuf.read(sz)
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
out = self.__wbuf.getvalue()
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = StringIO()
self.__trans.write(out)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, partialread, reqlen):
retstring = partialread
if reqlen < self.__rbuf_size:
# try to make a read of as much as we can.
retstring += self.__trans.read(self.__rbuf_size)
# but make sure we do read reqlen bytes.
if len(retstring) < reqlen:
retstring += self.__trans.readAll(reqlen - len(retstring))
self.__rbuf = StringIO(retstring)
return self.__rbuf
class TMemoryBuffer(TTransportBase, CReadableTransport):
"""Wraps a cStringIO object as a TTransport.
NOTE: Unlike the C++ version of this class, you cannot write to it
then immediately read from it. If you want to read from a
        TMemoryBuffer, you must pass a string to the constructor.
TODO(dreiss): Make this work like the C++ version.
"""
def __init__(self, value=None):
"""value -- a value to read from for stringio
If value is set, this will be a transport for reading,
otherwise, it is for writing"""
if value is not None:
self._buffer = StringIO(value)
else:
self._buffer = StringIO()
def isOpen(self):
return not self._buffer.closed
def open(self):
pass
def close(self):
self._buffer.close()
def read(self, sz):
return self._buffer.read(sz)
def write(self, buf):
self._buffer.write(buf)
def flush(self):
pass
def getvalue(self):
return self._buffer.getvalue()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self._buffer
def cstringio_refill(self, partialread, reqlen):
# only one shot at reading...
raise EOFError()
class TFramedTransportFactory:
"""Factory transport that builds framed transports"""
def getTransport(self, trans):
framed = TFramedTransport(trans)
return framed
class TFramedTransport(TTransportBase, CReadableTransport):
"""Class that wraps another transport and frames its I/O when writing."""
def __init__(self, trans,):
self.__trans = trans
self.__rbuf = StringIO()
self.__wbuf = StringIO()
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.readFrame()
return self.__rbuf.read(sz)
def readFrame(self):
buff = self.__trans.readAll(4)
sz, = unpack('!i', buff)
self.__rbuf = StringIO(self.__trans.readAll(sz))
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
wout = self.__wbuf.getvalue()
wsz = len(wout)
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = StringIO()
# N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive copies
buf = pack("!i", wsz) + wout
self.__trans.write(buf)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, prefix, reqlen):
# self.__rbuf will already be empty here because fastbinary doesn't
# ask for a refill until the previous buffer is empty. Therefore,
# we can start reading new frames immediately.
while len(prefix) < reqlen:
self.readFrame()
prefix += self.__rbuf.getvalue()
self.__rbuf = StringIO(prefix)
return self.__rbuf
class TFileObjectTransport(TTransportBase):
"""Wraps a file-like object to make it work as a Thrift transport."""
def __init__(self, fileobj):
self.fileobj = fileobj
def isOpen(self):
return True
def close(self):
self.fileobj.close()
def read(self, sz):
return self.fileobj.read(sz)
def write(self, buf):
self.fileobj.write(buf)
def flush(self):
self.fileobj.flush()
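# Hedged usage sketch (not part of the original module): TFramedTransport
# prefixes each flushed write with a 4-byte big-endian length, which can be
# seen by wrapping a TMemoryBuffer:
#
#   inner = TMemoryBuffer()
#   framed = TFramedTransport(inner)
#   framed.write("hello")
#   framed.flush()           # writes pack("!i", 5) + "hello" to inner
#   inner.getvalue()         # -> '\x00\x00\x00\x05hello'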
|
Titulacion-Sistemas/PythonTitulacion-EV
|
refs/heads/master
|
Lib/site-packages/pylint/test/functional/invalid_slice_index.py
|
17
|
"""Errors for invalid slice indices"""
# pylint: disable=too-few-public-methods, no-self-use
TESTLIST = [1, 2, 3]
# Invalid indices
def function1():
"""functions used as indices"""
return TESTLIST[id:id:] # [invalid-slice-index,invalid-slice-index]
def function2():
"""strings used as indices"""
return TESTLIST['0':'1':] # [invalid-slice-index,invalid-slice-index]
def function3():
"""class without __index__ used as index"""
class NoIndexTest(object):
"""Class with no __index__ method"""
pass
return TESTLIST[NoIndexTest()::] # [invalid-slice-index]
# Valid indices
def function4():
"""integers used as indices"""
return TESTLIST[0:0:0] # no error
def function5():
"""None used as indices"""
return TESTLIST[None:None:None] # no error
def function6():
"""class with __index__ used as index"""
class IndexTest(object):
"""Class with __index__ method"""
def __index__(self):
"""Allow objects of this class to be used as slice indices"""
return 0
return TESTLIST[IndexTest():None:None] # no error
def function7():
"""class with __index__ in superclass used as index"""
class IndexType(object):
"""Class with __index__ method"""
def __index__(self):
"""Allow objects of this class to be used as slice indices"""
return 0
class IndexSubType(IndexType):
"""Class with __index__ in parent"""
pass
return TESTLIST[IndexSubType():None:None] # no error
def function8():
"""slice object used as index"""
return TESTLIST[slice(1, 2, 3)] # no error
|
elgambitero/FreeCAD_sf_master
|
refs/heads/master
|
src/Tools/fcbt.py
|
26
|
#!python
# FreeCAD Build Tool
# (c) 2004 Juergen Riegel
import os,sys,string
help1 = """
FreeCAD Build Tool
Usage:
fcbt <command name> [command parameter]
possible commands are:
- DistSrc (DS) Build a source Distr. of the current source tree
- DistBin (DB) Build a binary Distr. of the current source tree
- DistSetup (DI) Build a Setup Distr. of the current source tree
 - DistUserSetup (DUI) Build a User Setup Distr. of the current source tree
- DistAll (DA) Run all three above modules
- NextBuildNumber (NBN) Increase the Build Number of this Version
- CreateModule (CM) Insert a new FreeCAD Module in the module directory
For help on the modules type:
fcbt <command name> ?
"""
if(len(sys.argv) < 2):
sys.stdout.write(help1)
sys.stdout.write("Insert command: ")
CmdRaw = sys.stdin.readline()[:-1]
else:
CmdRaw = sys.argv[1]
Cmd = string.lower(CmdRaw)
if Cmd == "distsrc" or Cmd == "ds" :
import fcbt.DistSrc
elif Cmd == "distbin" or Cmd == "db":
import fcbt.DistBin
elif Cmd == "distsetup" or Cmd == "di":
import fcbt.DistSetup
elif Cmd == "distsetup" or Cmd == "dui":
import fcbt.DistUserSetup
elif Cmd == "distall" or Cmd == "da":
import fcbt.DistSrc
import fcbt.DistBin
import fcbt.DistSetup
elif Cmd == "nextbuildnumber" or Cmd == "nbn":
import fcbt.NextBuildNumber
elif Cmd == "createmodule" or Cmd == "cm":
import fcbt.CreateModule
elif Cmd == "?" or Cmd == "help" or Cmd == "/h" or Cmd == "/?" or Cmd == "-h" or Cmd == "-help":
sys.stdout.write(help1)
else:
print CmdRaw + " is an unknown command!\n"
sys.exit(1)
|
Honry/crosswalk-test-suite
|
refs/heads/master
|
webapi/tct-csp-w3c-tests/csp-py/csp_ro_frame-src_self_allowed_int-manual.py
|
30
|
def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "frame-src 'self'"
response.headers.set("Content-Security-Policy-Report-Only", _CSP)
response.headers.set("X-Content-Security-Policy-Report-Only", _CSP)
response.headers.set("X-WebKit-CSP-Report-Only", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_ro_frame-src_self_allowed_int</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#frame-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="frame-src 'self'"/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is a filled blue square.</p>
<iframe src="support/blue-100x100.png"/>
</body>
</html> """
|
heytcass/homeassistant-config
|
refs/heads/master
|
deps/cherrypy/test/checkerdemo.py
|
29
|
"""Demonstration app for cherrypy.checker.
This application is intentionally broken and badly designed.
To demonstrate the output of the CherryPy Checker, simply execute
this module.
"""
import os
import cherrypy
thisdir = os.path.dirname(os.path.abspath(__file__))
class Root:
pass
if __name__ == '__main__':
conf = {'/base': {'tools.staticdir.root': thisdir,
# Obsolete key.
'throw_errors': True,
},
# This entry should be OK.
'/base/static': {'tools.staticdir.on': True,
'tools.staticdir.dir': 'static'},
# Warn on missing folder.
'/base/js': {'tools.staticdir.on': True,
'tools.staticdir.dir': 'js'},
# Warn on dir with an abs path even though we provide root.
'/base/static2': {'tools.staticdir.on': True,
'tools.staticdir.dir': '/static'},
# Warn on dir with a relative path with no root.
'/static3': {'tools.staticdir.on': True,
'tools.staticdir.dir': 'static'},
# Warn on unknown namespace
'/unknown': {'toobles.gzip.on': True},
# Warn special on cherrypy.<known ns>.*
'/cpknown': {'cherrypy.tools.encode.on': True},
# Warn on mismatched types
'/conftype': {'request.show_tracebacks': 14},
# Warn on unknown tool.
'/web': {'tools.unknown.on': True},
# Warn on server.* in app config.
'/app1': {'server.socket_host': '0.0.0.0'},
# Warn on 'localhost'
'global': {'server.socket_host': 'localhost'},
# Warn on '[name]'
'[/extra_brackets]': {},
}
cherrypy.quickstart(Root(), config=conf)
|
Ronak6892/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/html5lib/html5lib/treewalkers/__init__.py
|
1229
|
"""A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to do
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
import sys
from ..utils import default_etree
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - The xml.dom.minidom DOM implementation
"pulldom" - The xml.dom.pulldom event stream
"etree" - A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"lxml" - Optimized walker for lxml.etree
"genshi" - a Genshi stream
implementation - (Currently applies to the "etree" tree type only). A module
implementing the tree type e.g. xml.etree.ElementTree or
cElementTree."""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType in ("dom", "pulldom"):
name = "%s.%s" % (__name__, treeType)
__import__(name)
mod = sys.modules[name]
treeWalkerCache[treeType] = mod.TreeWalker
elif treeType == "genshi":
from . import genshistream
treeWalkerCache[treeType] = genshistream.TreeWalker
elif treeType == "lxml":
from . import lxmletree
treeWalkerCache[treeType] = lxmletree.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
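# Hedged usage sketch (not part of the original module): getTreeWalker()
# returns a TreeWalker class; instantiating it with a parsed tree and
# iterating yields serializer tokens, e.g.:
#
#   TreeWalker = getTreeWalker("etree")
#   for token in TreeWalker(tree):   # 'tree' is an ElementTree document
#       print(token["type"])         # e.g. "StartTag", "Characters", "EndTag"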
|
humrochagf/tapioca-discourse
|
refs/heads/master
|
tapioca_discourse/resource_mapping/upload.py
|
1
|
# -*- coding: utf-8 -*-
UPLOAD_MAPPING = {
'upload_file': {
'resource': 'uploads.json',
'docs': ('http://docs.discourse.org/#tag/'
'Upload%2Fpaths%2F~1uploads.json%2Fpost'),
'methods': ['POST'],
},
}
|
hopeall/odoo
|
refs/heads/8.0
|
addons/hw_scale/__openerp__.py
|
220
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Weighing Scale Hardware Driver',
'version': '1.0',
'category': 'Hardware Drivers',
'sequence': 6,
    'summary': 'Hardware Driver for Weighing Scales',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Weighing Scale Hardware Driver
================================
This module allows the point of sale to connect to a scale using a USB HSM Serial Scale Interface,
such as the Mettler Toledo Ariva.
""",
'author': 'OpenERP SA',
'depends': ['hw_proxy'],
'external_dependencies': {'python': ['serial']},
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
xin3liang/platform_external_chromium_org_tools_gyp
|
refs/heads/master
|
test/win/gyptest-link-update-manifest.py
|
226
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure binary is relinked when manifest settings are changed.
"""
import TestGyp
import os
import sys
if sys.platform == 'win32':
import pywintypes
import win32api
import winerror
RT_MANIFEST = 24
class LoadLibrary(object):
"""Context manager for loading and releasing binaries in Windows.
Yields the handle of the binary loaded."""
def __init__(self, path):
self._path = path
self._handle = None
def __enter__(self):
self._handle = win32api.LoadLibrary(self._path)
return self._handle
def __exit__(self, type, value, traceback):
win32api.FreeLibrary(self._handle)
def extract_manifest(path, resource_name):
"""Reads manifest from |path| and returns it as a string.
  Returns None if there is no such manifest."""
with LoadLibrary(path) as handle:
try:
return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
except pywintypes.error as error:
if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
return None
else:
raise
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
gyp_template = '''
{
'targets': [
{
'target_name': 'test_update_manifest',
'type': 'executable',
'sources': ['hello.cc'],
'msvs_settings': {
'VCLinkerTool': {
'EnableUAC': 'true',
'UACExecutionLevel': '%(uac_execution_level)d',
},
'VCManifestTool': {
'EmbedManifest': 'true',
'AdditionalManifestFiles': '%(additional_manifest_files)s',
},
},
},
],
}
'''
gypfile = 'update-manifest.gyp'
def WriteAndUpdate(uac_execution_level, additional_manifest_files, do_build):
with open(os.path.join(CHDIR, gypfile), 'wb') as f:
f.write(gyp_template % {
'uac_execution_level': uac_execution_level,
'additional_manifest_files': additional_manifest_files,
})
test.run_gyp(gypfile, chdir=CHDIR)
if do_build:
test.build(gypfile, chdir=CHDIR)
exe_file = test.built_file_path('test_update_manifest.exe', chdir=CHDIR)
return extract_manifest(exe_file, 1)
manifest = WriteAndUpdate(0, '', True)
test.fail_test('asInvoker' not in manifest)
test.fail_test('35138b9a-5d96-4fbd-8e2d-a2440225f93a' in manifest)
# Make sure that updating .gyp and regenerating doesn't cause a rebuild.
WriteAndUpdate(0, '', False)
test.up_to_date(gypfile, test.ALL, chdir=CHDIR)
# But make sure that changing a manifest property does cause a relink.
manifest = WriteAndUpdate(2, '', True)
test.fail_test('requireAdministrator' not in manifest)
# Adding a manifest causes a rebuild.
manifest = WriteAndUpdate(2, 'extra.manifest', True)
test.fail_test('35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in manifest)
|
PopCap/GameIdea
|
refs/heads/master
|
Engine/Source/ThirdParty/HTML5/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/win32/test/test_win32inet.py
|
17
|
from win32inet import *
from win32inetcon import *
import winerror
from pywin32_testutil import str2bytes # py3k-friendly helper
import unittest
class CookieTests(unittest.TestCase):
def testCookies(self):
data = "TestData=Test"
InternetSetCookie("http://www.python.org", None, data)
got = InternetGetCookie("http://www.python.org", None)
self.assertEqual(got, data)
def testCookiesEmpty(self):
try:
InternetGetCookie("http://site-with-no-cookie.python.org", None)
self.fail("expected win32 exception")
except error, exc:
self.failUnlessEqual(exc.winerror, winerror.ERROR_NO_MORE_ITEMS)
class UrlTests(unittest.TestCase):
def testSimpleCanonicalize(self):
ret = InternetCanonicalizeUrl("foo bar")
self.assertEqual(ret, "foo%20bar")
def testLongCanonicalize(self):
        # a 4k URL causes the underlying API to request a bigger buffer
big = "x" * 2048
ret = InternetCanonicalizeUrl(big + " " + big)
self.assertEqual(ret, big + "%20" + big)
class TestNetwork(unittest.TestCase):
def setUp(self):
self.hi = InternetOpen("test", INTERNET_OPEN_TYPE_DIRECT, None, None, 0)
def tearDown(self):
self.hi.Close()
def testPythonDotOrg(self):
hdl = InternetOpenUrl(self.hi, "http://www.python.org", None,
INTERNET_FLAG_EXISTING_CONNECT)
chunks = []
while 1:
chunk = InternetReadFile(hdl, 1024)
if not chunk:
break
chunks.append(chunk)
data = str2bytes('').join(chunks)
assert data.find(str2bytes("Python"))>0, repr(data) # This must appear somewhere on the main page!
def testFtpCommand(self):
# ftp.python.org doesn't exist. ftp.gnu.org is what Python's urllib
# test code uses.
hcon = InternetConnect(self.hi, "ftp.gnu.org", INTERNET_INVALID_PORT_NUMBER,
None, None, # username/password
INTERNET_SERVICE_FTP, 0, 0)
try:
try:
hftp = FtpCommand(hcon, True, FTP_TRANSFER_TYPE_ASCII, 'NLST', 0)
except error:
print "Error info is", InternetGetLastResponseInfo()
InternetReadFile(hftp, 2048)
hftp.Close()
finally:
hcon.Close()
if __name__=='__main__':
unittest.main()
|
mkraemer67/pylearn2
|
refs/heads/master
|
pylearn2/utils/utlc.py
|
49
|
"""Several utilities for experimenting upon utlc datasets"""
# Standard library imports
import logging
import os
import inspect
import zipfile
from tempfile import TemporaryFile
# Third-party imports
import numpy
import theano
from pylearn2.datasets.utlc import load_ndarray_dataset, load_sparse_dataset
from pylearn2.utils import subdict, sharedX
logger = logging.getLogger(__name__)
##################################################
# Shortcuts and auxiliary functions
##################################################
def getboth(dict1, dict2, key, default=None):
"""
Try to retrieve key from dict1 if exists, otherwise try with dict2.
If the key is not found in any of them, raise an exception.
Parameters
----------
dict1 : dict
WRITEME
dict2 : dict
WRITEME
key : WRITEME
default : WRITEME
Returns
-------
WRITEME
"""
try:
return dict1[key]
except KeyError:
if default is None:
return dict2[key]
else:
return dict2.get(key, default)
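# Illustrative examples for getboth, derived from the code above (not part of
# the original module):
#   getboth({'a': 1}, {'a': 9, 'b': 2}, 'a')        # -> 1, dict1 wins
#   getboth({'a': 1}, {'b': 2}, 'b')                # -> 2, falls back to dict2
#   getboth({'a': 1}, {'b': 2}, 'c', default=0)     # -> 0, from dict2.get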
##################################################
# Datasets loading and contest facilities
##################################################
def load_data(conf):
"""
Loads a specified dataset according to the parameters in the dictionary
Parameters
----------
conf : WRITEME
Returns
-------
WRITEME
"""
logger.info('... loading dataset')
# Special case for sparse format
if conf.get('sparse', False):
expected = inspect.getargspec(load_sparse_dataset)[0][1:]
data = load_sparse_dataset(conf['dataset'], **subdict(conf, expected))
valid, test = data[1:3]
# Sparse TERRY data on LISA servers contains an extra null first row in
# valid and test subsets.
if conf['dataset'] == 'terry':
valid = valid[1:]
test = test[1:]
assert valid.shape[0] == test.shape[0] == 4096, \
'Sparse TERRY data loaded has wrong number of examples'
if len(data) == 3:
return [data[0], valid, test]
else:
return [data[0], valid, test, data[3]]
# Load as the usual ndarray
expected = inspect.getargspec(load_ndarray_dataset)[0][1:]
data = load_ndarray_dataset(conf['dataset'], **subdict(conf, expected))
# Special case for on-the-fly normalization
if conf.get('normalize_on_the_fly', False):
return data
# Allocate shared variables
def shared_dataset(data_x):
"""Function that loads the dataset into shared variables"""
if conf.get('normalize', True):
return sharedX(data_x, borrow=True)
else:
return theano.shared(theano._asarray(data_x), borrow=True)
return map(shared_dataset, data)
def save_submission(conf, valid_repr, test_repr):
"""
Create a submission file given a configuration dictionary and a
representation for valid and test.
Parameters
----------
conf : WRITEME
valid_repr : WRITEME
test_repr : WRITEME
"""
logger.info('... creating zipfile')
# Ensure the given directory is correct
submit_dir = conf['savedir']
if not os.path.exists(submit_dir):
os.makedirs(submit_dir)
elif not os.path.isdir(submit_dir):
raise IOError('savedir %s is not a directory' % submit_dir)
basename = os.path.join(submit_dir, conf['dataset'] + '_' + conf['expname'])
    # If there are too many features, output kernel matrices
if (valid_repr.shape[1] > valid_repr.shape[0]):
valid_repr = numpy.dot(valid_repr, valid_repr.T)
test_repr = numpy.dot(test_repr, test_repr.T)
    # Quantize data
valid_repr = numpy.floor((valid_repr / valid_repr.max())*999)
test_repr = numpy.floor((test_repr / test_repr.max())*999)
# Store the representations in two temporary files
valid_file = TemporaryFile()
test_file = TemporaryFile()
numpy.savetxt(valid_file, valid_repr, fmt="%.3f")
numpy.savetxt(test_file, test_repr, fmt="%.3f")
# Reread those files and put them together in a .zip
valid_file.seek(0)
test_file.seek(0)
submission = zipfile.ZipFile(basename + ".zip", "w",
compression=zipfile.ZIP_DEFLATED)
submission.writestr(basename + '_valid.prepro', valid_file.read())
submission.writestr(basename + '_final.prepro', test_file.read())
submission.close()
valid_file.close()
test_file.close()
def create_submission(conf, transform_valid, transform_test=None, features=None):
"""
Create a submission file given a configuration dictionary and a
computation function.
    Note that it always reloads the datasets to ensure valid & test
are not permuted.
Parameters
----------
conf : WRITEME
transform_valid : WRITEME
transform_test : WRITEME
features : WRITEME
"""
if transform_test is None:
transform_test = transform_valid
# Load the dataset, without permuting valid and test
kwargs = subdict(conf, ['dataset', 'normalize', 'normalize_on_the_fly', 'sparse'])
kwargs.update(randomize_valid=False, randomize_test=False)
valid_set, test_set = load_data(kwargs)[1:3]
# Sparse datasets are not stored as Theano shared vars.
if not conf.get('sparse', False):
valid_set = valid_set.get_value(borrow=True)
test_set = test_set.get_value(borrow=True)
# Prefilter features, if needed.
if features is not None:
valid_set = valid_set[:, features]
test_set = test_set[:, features]
# Valid and test representations
valid_repr = transform_valid(valid_set)
test_repr = transform_test(test_set)
# Convert into text info
save_submission(conf, valid_repr, test_repr)
##################################################
# Proxies for representation evaluations
##################################################
def compute_alc(valid_repr, test_repr):
"""
Returns the ALC of the valid set VS test set
Note: This proxy won't work in the case of transductive learning
(This is an assumption) but it seems to be a good proxy in the
normal case (i.e only train on training set)
Parameters
----------
valid_repr : WRITEME
test_repr : WRITEME
Returns
-------
WRITEME
"""
# Concatenate the sets, and give different one hot labels for valid and test
n_valid = valid_repr.shape[0]
n_test = test_repr.shape[0]
_labvalid = numpy.hstack((numpy.ones((n_valid, 1)),
numpy.zeros((n_valid, 1))))
_labtest = numpy.hstack((numpy.zeros((n_test, 1)),
numpy.ones((n_test, 1))))
dataset = numpy.vstack((valid_repr, test_repr))
label = numpy.vstack((_labvalid, _labtest))
logger.info('... computing the ALC')
raise NotImplementedError("This got broken by embed no longer being "
"where it used to be (if it even still exists, I haven't "
"looked for it)")
# return embed.score(dataset, label)
def lookup_alc(data, transform):
"""
.. todo::
WRITEME
"""
valid_repr = transform(data[1].get_value(borrow=True))
test_repr = transform(data[2].get_value(borrow=True))
return compute_alc(valid_repr, test_repr)
|
highweb-project/highweb-webcl-html5spec
|
refs/heads/highweb-20160310
|
tools/bisect_test.py
|
166
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
bisect_builds = __import__('bisect-builds')
class BisectTest(unittest.TestCase):
patched = []
max_rev = 10000
def monkey_patch(self, obj, name, new):
self.patched.append((obj, name, getattr(obj, name)))
setattr(obj, name, new)
def clear_patching(self):
for obj, name, old in self.patched:
setattr(obj, name, old)
self.patched = []
def setUp(self):
self.monkey_patch(bisect_builds.DownloadJob, 'Start', lambda *args: None)
self.monkey_patch(bisect_builds.DownloadJob, 'Stop', lambda *args: None)
self.monkey_patch(bisect_builds.DownloadJob, 'WaitFor', lambda *args: None)
self.monkey_patch(bisect_builds, 'RunRevision', lambda *args: (0, "", ""))
self.monkey_patch(bisect_builds.PathContext, 'ParseDirectoryIndex',
lambda *args: range(self.max_rev))
def tearDown(self):
self.clear_patching()
def bisect(self, good_rev, bad_rev, evaluate):
return bisect_builds.Bisect(good_rev=good_rev,
bad_rev=bad_rev,
evaluate=evaluate,
num_runs=1,
official_builds=False,
platform='linux',
profile=None,
try_args=())
def testBisectConsistentAnswer(self):
self.assertEqual(self.bisect(1000, 100, lambda *args: 'g'), (100, 101))
self.assertEqual(self.bisect(100, 1000, lambda *args: 'b'), (100, 101))
self.assertEqual(self.bisect(2000, 200, lambda *args: 'b'), (1999, 2000))
self.assertEqual(self.bisect(200, 2000, lambda *args: 'g'), (1999, 2000))
if __name__ == '__main__':
unittest.main()
|
hcsturix74/django
|
refs/heads/master
|
django/contrib/auth/migrations/0002_alter_permission_name_max_length.py
|
586
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='permission',
name='name',
field=models.CharField(max_length=255, verbose_name='name'),
),
]
|
alexproca/askbot-devel
|
refs/heads/master
|
askbot/utils/console.py
|
9
|
"""functions that directly handle user input
"""
import sys
import time
import logging
from askbot.utils import path
def start_printing_db_queries():
"""starts logging database queries into console,
should be used for debugging only"""
logger = logging.getLogger('django.db.backends')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
def choice_dialog(prompt_phrase, choices = None, invalid_phrase = None):
"""prints a prompt, accepts keyboard input
    and makes sure that the user response is one of the options given
    in the choices argument, which is required
and must be a list
invalid_phrase must be a string with %(opt_string)s
placeholder
"""
assert(hasattr(choices, '__iter__'))
assert(not isinstance(choices, basestring))
while 1:
response = raw_input(
'\n%s\ntype %s: ' % (prompt_phrase, '/'.join(choices))
)
if response in choices:
return response
elif invalid_phrase != None:
opt_string = ','.join(choices)
print invalid_phrase % {'opt_string': opt_string}
time.sleep(1)
def numeric_choice_dialog(prompt_phrase, choices):
"""Prints a list of choices with numeric options and requires the
user to select a single choice from the list.
:param prompt_phrase: (str) Prompt to give the user asking them to
choose from the list.
:param choices: (list) List of string choices for the user to choose
from. The numeric value they will use to select from the list is the
list index of the choice.
:returns: (int) index number of the choice selected by the user
"""
assert(hasattr(choices, '__iter__'))
assert(not isinstance(choices, basestring))
choice_menu = "\n".join(["%d - %s" % (i,x) for i, x in enumerate(choices)])
while True:
response = raw_input('\n%s\n%s> ' % (choice_menu, prompt_phrase))
try:
index = int(response)
except ValueError:
index = False
if index is False or index < 0 or index >= len(choices):
print "\n*** Please enter a number between 0 and %d ***" % (len(choices)-1)
else:
return index
def numeric_multiple_choice_dialog(prompt_phrase, choices, all_option=False):
"""Prints a list of choices with numeric options and requires the
user to select zero or more choices from the list.
:param prompt_phrase: (str) Prompt to give the user asking them to
choose from the list.
:param choices: (list) List of string choices for the user to choose
from. The numeric value they will use to select from the list is the
list index of the choice.
:param all_option: (bool) Optional. If True, the first choice will be a
    fake option to choose all options. This is a convenience to avoid requiring
    the user to provide a lot of input when there are many options
:returns: (list) list of index numbers of the choices selected by
the user
"""
assert(hasattr(choices, '__iter__'))
assert(not isinstance(choices, basestring))
if all_option:
choices.insert(0, 'ALL')
choice_menu = "\n".join(["%d - %s" % (i,x) for i, x in enumerate(choices)])
choice_indexes = []
index = False
while True:
response = raw_input('\n%s\n%s> ' % (choice_menu, prompt_phrase))
selections = response.split()
print "selections: %s" % selections
for c in selections:
try:
index = int(c)
except ValueError:
index = False
if index < 0 or index >= len(choices):
index = False
print "\n*** Please enter only numbers between 0 and " +\
"%d separated by spaces ***" % (len(choices)-1)
break
else:
choice_indexes.append(index)
        if index is not False:
if all_option and 0 in choice_indexes and len(choice_indexes) > 1:
print "\n*** You cannot include other choices with the ALL " +\
"option ***"
else:
return choice_indexes
def simple_dialog(prompt_phrase, required=False):
"""asks user to enter a string, if `required` is True,
will repeat question until non-empty input is given
"""
while 1:
if required:
prompt_phrase += ' (required)'
response = raw_input(prompt_phrase + '\n> ').strip()
if response or required is False:
return response
time.sleep(1)
def get_yes_or_no(prompt_phrase, default=None):
"""Prompts user for a yes or no response with an optional default
value which will be inferred if the user just hits enter
:param prompt_phrase: (str) Question to prompt the user with
:param default: (str) Either 'yes' or 'no'. If a valid option is
provided, the user can simply press enter to accept the default.
If an invalid option is passed in, a `ValueError` is raised.
:returns: (str) 'yes' or 'no'
"""
while True:
prompt_phrase += ' (yes/no)'
if default:
prompt_phrase += '\n[%s] >' % default
else:
prompt_phrase += '\n >'
response = raw_input(prompt_phrase).strip()
if not response and default:
return default
if response in ('yes', 'no'):
return response
def open_new_file(prompt_phrase, extension = '', hint = None):
"""will ask for a file name to be typed
by user into the console path to the file can be
either relative or absolute. Extension will be appended
to the given file name.
Return value is the file object.
"""
if extension != '':
if extension[0] != '.':
extension = '.' + extension
else:
extension = ''
file_object = None
if hint:
file_path = path.extend_file_name(hint, extension)
file_object = path.create_file_if_does_not_exist(file_path, print_warning = True)
while file_object == None:
file_path = raw_input(prompt_phrase)
file_path = path.extend_file_name(file_path, extension)
file_object = path.create_file_if_does_not_exist(file_path, print_warning = True)
return file_object
def print_action(action_text, nowipe = False):
"""print the string to the standard output
then wipe it out to clear space
"""
#for some reason sys.stdout.write does not work here
#when action text is unicode
print action_text,
sys.stdout.flush()
if nowipe == False:
#return to the beginning of the word
sys.stdout.write('\b' * len(action_text))
#white out the printed text
sys.stdout.write(' ' * len(action_text))
#return again
sys.stdout.write('\b' * len(action_text))
else:
sys.stdout.write('\n')
def print_progress(elapsed, total, nowipe = False):
"""print dynamic output of progress of some
operation, in percent, to the console and clear the output with
a backspace character to have the number increment
in-place"""
output = '%6.2f%%' % (100 * float(elapsed)/float(total))
print_action(output, nowipe)
class ProgressBar(object):
"""A wrapper for an iterator, that prints
a progress bar along the way of iteration
"""
def __init__(self, iterable, length, message = ''):
self.iterable = iterable
self.length = length
self.counter = float(0)
self.max_barlen = 60
self.curr_barlen = 0
self.progress = ''
if message and length > 0:
print message
def __iter__(self):
return self
def print_progress_bar(self):
"""prints the progress bar"""
self.backspace_progress_percent()
tics_to_write = 0
if self.length < self.max_barlen:
tics_to_write = self.max_barlen/self.length
elif int(self.counter) % (self.length/self.max_barlen) == 0:
tics_to_write = 1
if self.curr_barlen + tics_to_write <= self.max_barlen:
sys.stdout.write('-' * tics_to_write)
self.curr_barlen += tics_to_write
self.print_progress_percent()
def backspace_progress_percent(self):
sys.stdout.write('\b'*len(self.progress))
def print_progress_percent(self):
"""prints percent of achieved progress"""
self.progress = ' %.2f%%' % (100 * (self.counter/self.length))
sys.stdout.write(self.progress)
sys.stdout.flush()
def finish_progress_bar(self):
"""brint the last bars, to make all bars equal length"""
self.backspace_progress_percent()
sys.stdout.write('-' * (self.max_barlen - self.curr_barlen))
def next(self):
try:
result = self.iterable.next()
except StopIteration:
if self.length > 0:
self.finish_progress_bar()
self.print_progress_percent()
sys.stdout.write('\n')
raise
self.print_progress_bar()
self.counter += 1
return result
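# Usage sketch for ProgressBar (illustrative, not part of the original
# module). The wrapped object must be an iterator with a next() method and
# the total length must be known in advance:
#
#     items = range(1000)
#     for item in ProgressBar(iter(items), len(items), message='Processing'):
#         handle(item)    # `handle` is a hypothetical per-item callback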
|
feliperfranca/django-nonrel-example
|
refs/heads/master
|
django/contrib/localflavor/pe/forms.py
|
309
|
# -*- coding: utf-8 -*-
"""
PE-specific Form helpers.
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, CharField, Select
from django.utils.translation import ugettext_lazy as _
class PERegionSelect(Select):
"""
A Select widget that uses a list of Peruvian Regions as its choices.
"""
def __init__(self, attrs=None):
from pe_region import REGION_CHOICES
super(PERegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class PEDNIField(CharField):
"""
    A field that validates `Documento Nacional de Identidad` (DNI) numbers.
"""
default_error_messages = {
'invalid': _("This field requires only numbers."),
'max_digits': _("This field requires 8 digits."),
}
def __init__(self, *args, **kwargs):
super(PEDNIField, self).__init__(max_length=8, min_length=8, *args,
**kwargs)
def clean(self, value):
"""
        Value must be a string in the XXXXXXXX format.
"""
value = super(PEDNIField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not value.isdigit():
raise ValidationError(self.error_messages['invalid'])
if len(value) != 8:
raise ValidationError(self.error_messages['max_digits'])
return value
class PERUCField(RegexField):
"""
This field validates a RUC (Registro Unico de Contribuyentes). A RUC is of
the form XXXXXXXXXXX.
"""
default_error_messages = {
'invalid': _("This field requires only numbers."),
'max_digits': _("This field requires 11 digits."),
}
def __init__(self, *args, **kwargs):
super(PERUCField, self).__init__(max_length=11, min_length=11, *args,
**kwargs)
def clean(self, value):
"""
Value must be an 11-digit number.
"""
value = super(PERUCField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not value.isdigit():
raise ValidationError(self.error_messages['invalid'])
if len(value) != 11:
raise ValidationError(self.error_messages['max_digits'])
return value
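# Usage sketch (illustrative): cleaning behaves like any other Django form
# field -- a valid value is returned, an invalid one raises ValidationError.
#
#     field = PEDNIField()
#     field.clean(u'12345678')    # -> u'12345678'
#     field.clean(u'1234567A')    # raises ValidationError (non-numeric)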
|
3cky/kubernetes
|
refs/heads/master
|
cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py
|
202
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main hook file is called by Juju.
"""
import contextlib
import os
import socket
import subprocess
import sys
from charmhelpers.core import hookenv, host
from kubernetes_installer import KubernetesInstaller
from path import path
hooks = hookenv.Hooks()
@contextlib.contextmanager
def check_sentinel(filepath):
"""
A context manager method to write a file while the code block is doing
something and remove the file when done.
"""
fail = False
try:
yield filepath.exists()
except:
fail = True
filepath.touch()
raise
finally:
if fail is False and filepath.exists():
filepath.remove()
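# Usage sketch for check_sentinel (illustrative): the sentinel file marks an
# operation in progress; `last_failed` is True when a previous run left the
# file behind, and the file is kept only if this block raises.
#
#     sentinel = path(hookenv.charm_dir()) / '.broken_build'
#     with check_sentinel(sentinel) as last_failed:
#         if last_failed:
#             hookenv.log('previous build failed, rebuilding')
#         run_build()    # hypothetical build step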
@hooks.hook('config-changed')
def config_changed():
"""
On the execution of the juju event 'config-changed' this function
determines the appropriate architecture and the configured version to
create kubernetes binary files.
"""
hookenv.log('Starting config-changed')
charm_dir = path(hookenv.charm_dir())
config = hookenv.config()
# Get the version of kubernetes to install.
version = config['version']
if version == 'master':
        # The 'master' branch of kubernetes is used when master is configured.
branch = 'master'
elif version == 'local':
# Check for kubernetes binaries in the local files/output directory.
branch = None
else:
# Create a branch to a tag to get the release version.
branch = 'tags/{0}'.format(version)
# Get the package architecture, rather than arch from the kernel (uname -m).
arch = subprocess.check_output(['dpkg', '--print-architecture']).strip()
if not branch:
output_path = charm_dir / 'files/output'
installer = KubernetesInstaller(arch, version, output_path)
else:
        # Build the kubernetes binaries from source on the units.
kubernetes_dir = path('/opt/kubernetes')
# Construct the path to the binaries using the arch.
output_path = kubernetes_dir / '_output/local/bin/linux' / arch
installer = KubernetesInstaller(arch, version, output_path)
if not kubernetes_dir.exists():
print('The source directory {0} does not exist'.format(kubernetes_dir))
print('Was the kubernetes code cloned during install?')
exit(1)
# Change to the kubernetes directory (git repository).
with kubernetes_dir:
# Create a command to get the current branch.
git_branch = 'git branch | grep "\*" | cut -d" " -f2'
current_branch = subprocess.check_output(git_branch, shell=True).strip()
print('Current branch: ', current_branch)
# Create the path to a file to indicate if the build was broken.
broken_build = charm_dir / '.broken_build'
# write out the .broken_build file while this block is executing.
with check_sentinel(broken_build) as last_build_failed:
print('Last build failed: ', last_build_failed)
# Rebuild if current version is different or last build failed.
if current_branch != version or last_build_failed:
installer.build(branch)
if not output_path.isdir():
broken_build.touch()
    # Create the symbolic links to the right directories.
installer.install()
relation_changed()
hookenv.log('The config-changed hook completed successfully.')
@hooks.hook('etcd-relation-changed', 'minions-api-relation-changed')
def relation_changed():
template_data = get_template_data()
# Check required keys
for k in ('etcd_servers',):
if not template_data.get(k):
print "Missing data for", k, template_data
return
print "Running with\n", template_data
# Render and restart as needed
for n in ('apiserver', 'controller-manager', 'scheduler'):
if render_file(n, template_data) or not host.service_running(n):
host.service_restart(n)
# Render the file that makes the kubernetes binaries available to minions.
if render_file(
'distribution', template_data,
'conf.tmpl', '/etc/nginx/sites-enabled/distribution') or \
not host.service_running('nginx'):
host.service_reload('nginx')
# Render the default nginx template.
if render_file(
'nginx', template_data,
'conf.tmpl', '/etc/nginx/sites-enabled/default') or \
not host.service_running('nginx'):
host.service_reload('nginx')
# Send api endpoint to minions
notify_minions()
@hooks.hook('network-relation-changed')
def network_relation_changed():
relation_id = hookenv.relation_id()
hookenv.relation_set(relation_id, ignore_errors=True)
def notify_minions():
print("Notify minions.")
config = hookenv.config()
for r in hookenv.relation_ids('minions-api'):
hookenv.relation_set(
r,
hostname=hookenv.unit_private_ip(),
port=8080,
version=config['version'])
print("Notified minions of version " + config['version'])
def get_template_data():
rels = hookenv.relations()
config = hookenv.config()
version = config['version']
template_data = {}
template_data['etcd_servers'] = ",".join([
"http://%s:%s" % (s[0], s[1]) for s in sorted(
get_rel_hosts('etcd', rels, ('hostname', 'port')))])
template_data['minions'] = ",".join(get_rel_hosts('minions-api', rels))
template_data['api_bind_address'] = _bind_addr(hookenv.unit_private_ip())
template_data['bind_address'] = "127.0.0.1"
template_data['api_server_address'] = "http://%s:%s" % (
hookenv.unit_private_ip(), 8080)
arch = subprocess.check_output(['dpkg', '--print-architecture']).strip()
template_data['web_uri'] = "/kubernetes/%s/local/bin/linux/%s/" % (version,
arch)
if version == 'local':
template_data['alias'] = hookenv.charm_dir() + '/files/output/'
else:
directory = '/opt/kubernetes/_output/local/bin/linux/%s/' % arch
template_data['alias'] = directory
_encode(template_data)
return template_data
def _bind_addr(addr):
if addr.replace('.', '').isdigit():
return addr
try:
return socket.gethostbyname(addr)
except socket.error:
raise ValueError("Could not resolve private address")
def _encode(d):
for k, v in d.items():
if isinstance(v, unicode):
d[k] = v.encode('utf8')
def get_rel_hosts(rel_name, rels, keys=('private-address',)):
hosts = []
for r, data in rels.get(rel_name, {}).items():
for unit_id, unit_data in data.items():
if unit_id == hookenv.local_unit():
continue
values = [unit_data.get(k) for k in keys]
if not all(values):
continue
hosts.append(len(values) == 1 and values[0] or values)
return hosts
def render_file(name, data, src_suffix="upstart.tmpl", tgt_path=None):
tmpl_path = os.path.join(
os.environ.get('CHARM_DIR'), 'files', '%s.%s' % (name, src_suffix))
with open(tmpl_path) as fh:
tmpl = fh.read()
rendered = tmpl % data
if tgt_path is None:
tgt_path = '/etc/init/%s.conf' % name
if os.path.exists(tgt_path):
with open(tgt_path) as fh:
contents = fh.read()
if contents == rendered:
return False
with open(tgt_path, 'w') as fh:
fh.write(rendered)
return True
if __name__ == '__main__':
hooks.execute(sys.argv)
|
mitchrule/Miscellaneous
|
refs/heads/master
|
Django_Project/django/Lib/_weakrefset.py
|
162
|
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard(object):
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet(object):
def __init__(self, data=None):
self.data = set()
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
l = self._pending_removals
discard = self.data.discard
while l:
discard(l.pop())
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
# Caveat: the iterator will keep a strong reference to
# `item` until it is resumed or closed.
yield item
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, item):
try:
wr = ref(item)
except TypeError:
return False
return wr in self.data
def __reduce__(self):
return (self.__class__, (list(self),),
getattr(self, '__dict__', None))
__hash__ = None
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet')
item = itemref()
if item is not None:
return item
def remove(self, item):
if self._pending_removals:
self._commit_removals()
self.data.remove(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
def difference(self, other):
newset = self.copy()
newset.difference_update(other)
return newset
__sub__ = difference
def difference_update(self, other):
self.__isub__(other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self.__class__(item for item in other if item in self)
__and__ = intersection
def intersection_update(self, other):
self.__iand__(other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__le__ = issubset
def __lt__(self, other):
return self.data < set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__ge__ = issuperset
def __gt__(self, other):
return self.data > set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(ref(item) for item in other)
def __ne__(self, other):
opposite = self.__eq__(other)
if opposite is NotImplemented:
return NotImplemented
return not opposite
def symmetric_difference(self, other):
newset = self.copy()
newset.symmetric_difference_update(other)
return newset
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
self.__ixor__(other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
return self
def union(self, other):
return self.__class__(e for s in (self, other) for e in s)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
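# Behaviour sketch (illustrative, not part of the original module): entries
# vanish from a WeakSet once the referenced objects are garbage collected
# (immediately under CPython's reference counting).
#
#     class Token(object):
#         pass
#
#     live = Token()
#     ws = WeakSet([live, Token()])   # the anonymous Token is collected
#     # only `live` is still reachable through ws, so len(ws) == 1 and
#     # (live in ws) is True.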
|
simonlynen/yaml-cpp.new-api
|
refs/heads/master
|
test/gmock-1.7.0/gtest/scripts/fuse_gtest_files.py
|
2577
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
"""Verifies that the given file exists; aborts on failure.
relative_path is the file path relative to the given directory.
"""
if not os.path.isfile(os.path.join(directory, relative_path)):
print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
directory)
print ('Please either specify a valid project root directory '
'or omit it on the command line.')
sys.exit(1)
def ValidateGTestRootDir(gtest_root):
"""Makes sure gtest_root points to a valid gtest root directory.
The function aborts the program on failure.
"""
VerifyFileExists(gtest_root, GTEST_H_SEED)
VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
"""Verifies that the given output file path is valid.
relative_path is relative to the output_dir directory.
"""
# Makes sure the output file either doesn't exist or can be overwritten.
output_file = os.path.join(output_dir, relative_path)
if os.path.exists(output_file):
# TODO(wan@google.com): The following user-interaction doesn't
# work with automated processes. We should provide a way for the
# Makefile to force overwriting the files.
print ('%s already exists in directory %s - overwrite it? (y/N) ' %
(relative_path, output_dir))
answer = sys.stdin.readline().strip()
if answer not in ['y', 'Y']:
print 'ABORTED.'
sys.exit(1)
# Makes sure the directory holding the output file exists; creates
# it and all its ancestors if necessary.
parent_directory = os.path.dirname(output_file)
if not os.path.isdir(parent_directory):
os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gtest headers we've processed.
def ProcessFile(gtest_header_path):
"""Processes the given gtest header file."""
# We don't process the same header twice.
if gtest_header_path in processed_files:
return
processed_files.add(gtest_header_path)
# Reads each line in the given gtest header.
for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GTEST_H_SEED)
output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
processed_files = sets.Set()
def ProcessFile(gtest_source_file):
"""Processes the given gtest source file."""
# We don't process the same #included file twice.
if gtest_source_file in processed_files:
return
processed_files.add(gtest_source_file)
# Reads each line in the given gtest source file.
for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
# It's '#include "gtest/gtest-spi.h"'. This file is not
# #included by "gtest/gtest.h", so we need to process it.
ProcessFile(GTEST_SPI_H_SEED)
else:
# It's '#include "gtest/foo.h"' where foo is not gtest-spi.
# We treat it as '#include "gtest/gtest.h"', as all other
# gtest headers are being fused into gtest.h and cannot be
# #included directly.
# There is no need to #include "gtest/gtest.h" more than once.
if not GTEST_H_SEED in processed_files:
processed_files.add(GTEST_H_SEED)
output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
else:
m = INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
output_file.write(line)
ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
FuseGTestAllCcToFile(gtest_root, output_file)
output_file.close()
def FuseGTest(gtest_root, output_dir):
"""Fuses gtest.h and gtest-all.cc."""
ValidateGTestRootDir(gtest_root)
ValidateOutputDir(output_dir)
FuseGTestH(gtest_root, output_dir)
FuseGTestAllCc(gtest_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gtest_files.py OUTPUT_DIR
FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
FuseGTest(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
|
seocam/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/unspecified_app_with_conflict/migrations/0001_initial.py
|
2995
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
"Tribble",
[
("id", models.AutoField(primary_key=True)),
("fluffy", models.BooleanField(default=True)),
],
)
]
|
ltilve/ChromiumGStreamerBackend
|
refs/heads/master
|
build/android/pylib/device/logcat_monitor.py
|
24
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=unused-wildcard-import
# pylint: disable=wildcard-import
from devil.android.logcat_monitor import *
|
bsipocz/seaborn
|
refs/heads/master
|
examples/paired_pointplots.py
|
26
|
"""
Paired discrete plots
=====================
"""
import seaborn as sns
sns.set(style="whitegrid")
# Load the example Titanic dataset
titanic = sns.load_dataset("titanic")
# Set up a grid to plot survival probability against several variables
g = sns.PairGrid(titanic, y_vars="survived",
x_vars=["class", "sex", "who", "alone"],
size=5, aspect=.5)
# Draw a seaborn pointplot onto each Axes
g.map(sns.pointplot, color=sns.xkcd_rgb["plum"])
g.set(ylim=(0, 1))
sns.despine(fig=g.fig, left=True)
|
thegapnetball/hello
|
refs/heads/master
|
guestbook.py
|
1
|
import jinja2
import os
import webapp2
from google.appengine.api import users
from google.appengine.ext import ndb
# We set a parent key on the 'Greetings' to ensure that they are all in the same
# entity group. Queries across the single entity group will be consistent.
# However, the write rate should be limited to ~1/second.
def guestbook_key(guestbook_name='default_guestbook'):
return ndb.Key('Guestbook', guestbook_name)
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class Greeting(ndb.Model):
author = ndb.UserProperty()
content = ndb.StringProperty(indexed=False)
date = ndb.DateTimeProperty(auto_now_add=True)
class MainPage(webapp2.RequestHandler):
def get(self):
greetings_query = Greeting.query(ancestor=guestbook_key()).order(-Greeting.date)
greetings = greetings_query.fetch(10)
if users.get_current_user():
url = users.create_logout_url(self.request.uri)
url_linktext = 'Logout'
else:
url = users.create_login_url(self.request.uri)
url_linktext = 'Logon'
template = jinja_environment.get_template('index.html')
self.response.out.write(template.render(greetings=greetings,
url=url,
url_linktext=url_linktext))
class Guestbook(webapp2.RequestHandler):
def post(self):
greeting = Greeting(parent=guestbook_key())
if users.get_current_user():
greeting.author = users.get_current_user()
greeting.content = self.request.get('content')
greeting.put()
self.redirect('/')
application = webapp2.WSGIApplication([
('/', MainPage),
('/sign', Guestbook),
], debug=True)
|
jdmonaco/knlx
|
refs/heads/master
|
lib/knlx.py
|
1
|
# encoding: utf-8
"""
knlx.py -- Library of functions for reading Neuralynx files for events (.Nev),
position tracking (Pos.p), and continuous records (.Ncs)
Requires: numpy, bitstring
Copyright (c) 2011-2013 Johns Hopkins University. All rights reserved.
This software is provided AS IS under the terms of the Open Source MIT License.
See http://www.opensource.org/licenses/mit-license.php.
"""
import os
import sys
import numpy as np
import numpy.core.records as rec
try:
import bitstring as bstr
except ImportError:
sys.stderr.write('scanr: bitstring must be installed to read Neuralynx files\n')
# Constants
DEBUG = True
NCS_SAMPLE_RATE = 1001.0
# File parameters
HDR_SIZE = 8 * 16384
EVENT_STR_LEN = 8 * 128
NCS_DATA_POINTS = 512
# Record descriptions
NCS_RECORD = dict( timestamp = 'uintle:64',
channel = 'uintle:32',
Fs = 'uintle:32',
valid = 'uintle:32',
samples = 'intle:16' )
POS_RECORD = dict( timestamp = 'floatle:64',
pos = 'floatle:32' )
NEV_RECORD = dict( byte = 'intle:16',
timestamp = 'uintle:64',
extra = 'intle:32' )
def read_event_file(fn):
"""Load event record data from Neuralynx .Nev file fn
Returns (timestamp, event_string) tuple where timestamp is an array of
timestamp values and event_string is a corresponding list of event strings.
"""
if fn is None or not (os.path.exists(fn) and fn.endswith('.Nev')):
raise ValueError, 'invalid Nev file specified: %s' % fn
out = sys.stdout.write
out('Loading %s...\n' % fn)
bits = bstr.ConstBitStream(filename=fn)
bits.read(HDR_SIZE) # skip 16k header
timestamp_list = []
string_list = []
while True:
try:
bits.read(NEV_RECORD['byte']) # nstx
bits.read(NEV_RECORD['byte']) # npkt_id
bits.read(NEV_RECORD['byte']) # npkt_data_size
timestamp_list.append(bits.read(NEV_RECORD['timestamp']))
bits.read(NEV_RECORD['byte']) # nevent_id
bits.read(NEV_RECORD['byte']) # nttl
bits.read(NEV_RECORD['byte']) # ncrc
bits.read(NEV_RECORD['byte']) # ndummy1
bits.read(NEV_RECORD['byte']) # ndummy2
bits.readlist([NEV_RECORD['extra']] * 8) # dnExtra (length 8 array)
# Read out the event string, truncate it, and advance the read position
evstr = bits[bits.pos:bits.pos + EVENT_STR_LEN].tobytes()
evstr = evstr[:evstr.find('\x00')]
string_list.append(evstr)
bits.pos += EVENT_STR_LEN
if DEBUG:
out('At T=%d: %s\n' % (timestamp_list[-1], evstr))
except bstr.ReadError:
if DEBUG:
out('Reached EOF\n')
break
return np.array(timestamp_list), string_list
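# Usage sketch (assumes a Neuralynx events file such as 'Events.Nev' exists
# in the working directory):
#
#     timestamps, strings = read_event_file('Events.Nev')
#     for ts, msg in zip(timestamps, strings):
#         print '%d\t%s' % (ts, msg)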
def read_position_file(fn):
"""Load position record data from Neuralynx Pos.p file fn
Returns (timestamp, x, y, dir) tuple where timestamp is an array of
timestamp values, x and y are position arrays, and dir is head direction.
"""
if fn is None or not (os.path.exists(fn) and fn.endswith('.p')):
raise ValueError, 'invalid position file specified: %s' % fn
out = sys.stdout.write
out('Loading %s...\n' % fn)
bits = bstr.ConstBitStream(filename=fn)
bits.pos = position_data_index(fn)
timestamp_list = []
x_list = []
y_list = []
dir_list = []
every = 1000
while True:
try:
timestamp_list.append(bits.read(POS_RECORD['timestamp']))
x_list.append(bits.read(POS_RECORD['pos']))
y_list.append(bits.read(POS_RECORD['pos']))
dir_list.append(bits.read(POS_RECORD['pos']))
if len(timestamp_list) % every == 0:
out('At T=%d: %.3f, %.3f\n' % (timestamp_list[-1], x_list[-1],
y_list[-1]))
except bstr.ReadError:
if DEBUG:
out('Reached EOF\n')
break
return np.array(timestamp_list, long), np.array(x_list, float), \
np.array(y_list, float), np.array(dir_list, float)
def write_position_ascii_file(fn='Pos.p'):
"""Write out a Pos.p.ascii file for the given Pos.p binary file
"""
# Get the data
ts, x, y, hd = read_position_file(fn)
# Get the header
bits = bstr.ConstBitStream(filename=fn)
header = bits[:position_data_index(fn)].tobytes()
# Write out ascii file
ascii_fn = os.path.join(os.path.split(fn)[0], 'Pos.p.ascii')
fd = file(ascii_fn, 'w')
fd.write(header)
for i in xrange(len(ts)):
fd.write('%d,%.4f,%.4f,%d\n'%(ts[i], x[i], y[i], int(hd[i])))
fd.close()
sys.stdout.write('Wrote %s.\n' % ascii_fn)
def position_data_index(fn):
"""Given the filename of a position file, return the bit index of the first
data record past the header.
"""
hdr_end = "%%ENDHEADER\r\n"
token = bstr.ConstBitArray(bytearray(hdr_end))
bits = bstr.ConstBitStream(filename=fn)
bits.find(token)
bits.read('bytes:%d'%len(hdr_end))
return bits.pos
def read_ncs_file(fn, verbose=True):
"""Load continuous record data from Neuralynx .Ncs file fn
Returns (timestamp, sample) tuple of data arrays.
"""
if fn is None or not (os.path.exists(fn) and fn.endswith('.Ncs')):
raise ValueError, 'invalid Ncs file specified: %s' % fn
if verbose:
out = sys.stdout.write
out('Loading %s...\n' % fn)
bits = bstr.ConstBitStream(filename=fn)
bits.read(HDR_SIZE) # skip 16k header
sample_read_str = [NCS_RECORD['samples']] * NCS_DATA_POINTS
timestamp_list = []
sample_list = []
prev_rec_ts = 0L
prev_rec_valid = 0
Fs_list = []
while True:
try:
rec_ts = bits.read(NCS_RECORD['timestamp'])
if verbose:
out('Reading record starting at timestamp %d:\n' % rec_ts)
rec_ch = bits.read(NCS_RECORD['channel'])
rec_fs = float(bits.read(NCS_RECORD['Fs']))
rec_valid = bits.read(NCS_RECORD['valid'])
if rec_fs not in Fs_list:
Fs_list.append(rec_fs)
if rec_valid != NCS_DATA_POINTS:
if verbose:
out('Found %d valid samples instead of %d.\n' % (rec_valid,
NCS_DATA_POINTS))
# Load samples, truncate to valid if necessary
samples = bits.readlist(sample_read_str)
sample_list.append(samples)
N_samples = len(samples)
if verbose:
out('Ch: %d, Fs: %.1f Hz, Nvalid = %d\n' % (rec_ch, rec_fs, rec_valid))
# Interpolate new timestamps
if prev_rec_ts:
delta = long(float(rec_ts - prev_rec_ts) / prev_rec_valid)
timestamp_list.append(
np.cumsum(
np.r_[long(prev_rec_ts),
np.repeat(delta, prev_rec_valid-1)]))
prev_rec_ts = rec_ts
prev_rec_valid = N_samples
except bstr.ReadError:
if verbose:
out('Reached EOF\n')
break
# Interpolate timestamps for last valid sample block before EOF
if len(timestamp_list):
timestamp_list.append(
np.linspace(
prev_rec_ts,
prev_rec_ts + prev_rec_valid*(np.diff(timestamp_list[-1][-2:])),
prev_rec_valid, endpoint=False).astype('i8'))
else:
        return np.array([], dtype='i8'), np.array([], dtype='i2')
if verbose:
if len(Fs_list) > 1:
out('Found multiple sample frequencies: %s\n' % str(Fs_list)[1:-1])
elif len(Fs_list) == 1 and Fs_list[0] != NCS_SAMPLE_RATE:
out('Found non-standard sample rate: %.1f Hz\n' % Fs_list[0])
return np.concatenate(timestamp_list).astype('i8'), \
np.concatenate(sample_list).astype('i2')
|
StevenBlack/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/checkout_unittest.py
|
115
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import os
import shutil
import tempfile
import unittest2 as unittest
from .checkout import Checkout
from .changelog import ChangeLogEntry
from .scm import CommitMessage, SCMDetector
from .scm.scm_mock import MockSCM
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.filesystem import FileSystem # FIXME: This should not be needed.
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
_changelog1entry1 = u"""2010-03-25 Tor Arne Vestb\u00f8 <vestbo@webkit.org>
Unreviewed build fix to un-break webkit-patch land.
Move commit_message_for_this_commit from scm to checkout
https://bugs.webkit.org/show_bug.cgi?id=36629
* Scripts/webkitpy/common/checkout/api.py: import scm.CommitMessage
"""
_changelog1entry2 = u"""2010-03-25 Adam Barth <abarth@webkit.org>
Reviewed by Eric Seidel.
Move commit_message_for_this_commit from scm to checkout
https://bugs.webkit.org/show_bug.cgi?id=36629
* Scripts/webkitpy/common/checkout/api.py:
"""
_changelog1 = u"\n".join([_changelog1entry1, _changelog1entry2])
_changelog2 = u"""2010-03-25 Tor Arne Vestb\u00f8 <vestbo@webkit.org>
Unreviewed build fix to un-break webkit-patch land.
Second part of this complicated change by me, Tor Arne Vestb\u00f8!
* Path/To/Complicated/File: Added.
2010-03-25 Adam Barth <abarth@webkit.org>
Reviewed by Eric Seidel.
Filler change.
"""
class CommitMessageForThisCommitTest(unittest.TestCase):
expected_commit_message = u"""Unreviewed build fix to un-break webkit-patch land.
Tools:
Move commit_message_for_this_commit from scm to checkout
https://bugs.webkit.org/show_bug.cgi?id=36629
* Scripts/webkitpy/common/checkout/api.py: import scm.CommitMessage
LayoutTests:
Second part of this complicated change by me, Tor Arne Vestb\u00f8!
* Path/To/Complicated/File: Added.
"""
def setUp(self):
# FIXME: This should not need to touch the filesystem, however
# ChangeLog is difficult to mock at current.
self.filesystem = FileSystem()
self.temp_dir = str(self.filesystem.mkdtemp(suffix="changelogs"))
self.old_cwd = self.filesystem.getcwd()
self.filesystem.chdir(self.temp_dir)
self.webkit_base = WebKitFinder(self.filesystem).webkit_base()
# Trick commit-log-editor into thinking we're in a Subversion working copy so it won't
# complain about not being able to figure out what SCM is in use.
# FIXME: VCSTools.pm is no longer so easily fooled. It logs because "svn info" doesn't
        # treat a bare .svn directory as being part of an svn checkout.
self.filesystem.maybe_make_directory(".svn")
self.changelogs = map(self.filesystem.abspath, (self.filesystem.join("Tools", "ChangeLog"), self.filesystem.join("LayoutTests", "ChangeLog")))
for path, contents in zip(self.changelogs, (_changelog1, _changelog2)):
self.filesystem.maybe_make_directory(self.filesystem.dirname(path))
self.filesystem.write_text_file(path, contents)
def tearDown(self):
self.filesystem.rmtree(self.temp_dir)
self.filesystem.chdir(self.old_cwd)
def test_commit_message_for_this_commit(self):
executive = Executive()
def mock_run(*args, **kwargs):
# Note that we use a real Executive here, not a MockExecutive, so we can test that we're
# invoking commit-log-editor correctly.
env = os.environ.copy()
env['CHANGE_LOG_EMAIL_ADDRESS'] = 'vestbo@webkit.org'
kwargs['env'] = env
return executive.run_command(*args, **kwargs)
detector = SCMDetector(self.filesystem, executive)
real_scm = detector.detect_scm_system(self.webkit_base)
mock_scm = MockSCM()
mock_scm.run = mock_run
mock_scm.script_path = real_scm.script_path
checkout = Checkout(mock_scm)
checkout.modified_changelogs = lambda git_commit, changed_files=None: self.changelogs
commit_message = checkout.commit_message_for_this_commit(git_commit=None, return_stderr=True)
# Throw away the first line - a warning about unknown VCS root.
commit_message.message_lines = commit_message.message_lines[1:]
self.assertMultiLineEqual(commit_message.message(), self.expected_commit_message)
class CheckoutTest(unittest.TestCase):
def _make_checkout(self):
return Checkout(scm=MockSCM(), filesystem=MockFileSystem(), executive=MockExecutive())
def test_latest_entry_for_changelog_at_revision(self):
def mock_contents_at_revision(changelog_path, revision):
self.assertEqual(changelog_path, "foo")
self.assertEqual(revision, "bar")
# contents_at_revision is expected to return a byte array (str)
# so we encode our unicode ChangeLog down to a utf-8 stream.
# The ChangeLog utf-8 decoding should ignore invalid codepoints.
invalid_utf8 = "\255"
return _changelog1.encode("utf-8") + invalid_utf8
checkout = self._make_checkout()
checkout._scm.contents_at_revision = mock_contents_at_revision
entry = checkout._latest_entry_for_changelog_at_revision("foo", "bar")
self.assertMultiLineEqual(entry.contents(), _changelog1entry1) # Pylint is confused about this line, pylint: disable=E1101
# FIXME: This tests a hack around our current changed_files handling.
# Right now changelog_entries_for_revision tries to fetch deleted files
# from revisions, resulting in a ScriptError exception. Test that we
# recover from those and still return the other ChangeLog entries.
def test_changelog_entries_for_revision(self):
checkout = self._make_checkout()
checkout._scm.changed_files_for_revision = lambda revision: ['foo/ChangeLog', 'bar/ChangeLog']
def mock_latest_entry_for_changelog_at_revision(path, revision):
if path == "foo/ChangeLog":
return 'foo'
raise ScriptError()
checkout._latest_entry_for_changelog_at_revision = mock_latest_entry_for_changelog_at_revision
# Even though fetching one of the entries failed, the other should succeed.
entries = checkout.changelog_entries_for_revision(1)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], 'foo')
def test_commit_info_for_revision(self):
checkout = self._make_checkout()
checkout._scm.changed_files_for_revision = lambda revision: ['path/to/file', 'another/file']
checkout._scm.committer_email_for_revision = lambda revision, changed_files=None: "committer@example.com"
checkout.changelog_entries_for_revision = lambda revision, changed_files=None: [ChangeLogEntry(_changelog1entry1)]
commitinfo = checkout.commit_info_for_revision(4)
self.assertEqual(commitinfo.bug_id(), 36629)
self.assertEqual(commitinfo.author_name(), u"Tor Arne Vestb\u00f8")
self.assertEqual(commitinfo.author_email(), "vestbo@webkit.org")
self.assertIsNone(commitinfo.reviewer_text())
self.assertIsNone(commitinfo.reviewer())
self.assertEqual(commitinfo.committer_email(), "committer@example.com")
self.assertIsNone(commitinfo.committer())
self.assertEqual(commitinfo.to_json(), {
'bug_id': 36629,
'author_email': 'vestbo@webkit.org',
'changed_files': [
'path/to/file',
'another/file',
],
'reviewer_text': None,
'author_name': u'Tor Arne Vestb\xf8',
})
checkout.changelog_entries_for_revision = lambda revision, changed_files=None: []
self.assertIsNone(checkout.commit_info_for_revision(1))
def test_bug_id_for_revision(self):
checkout = self._make_checkout()
checkout._scm.committer_email_for_revision = lambda revision: "committer@example.com"
checkout.changelog_entries_for_revision = lambda revision, changed_files=None: [ChangeLogEntry(_changelog1entry1)]
self.assertEqual(checkout.bug_id_for_revision(4), 36629)
def test_bug_id_for_this_commit(self):
checkout = self._make_checkout()
checkout.commit_message_for_this_commit = lambda git_commit, changed_files=None: CommitMessage(ChangeLogEntry(_changelog1entry1).contents().splitlines())
self.assertEqual(checkout.bug_id_for_this_commit(git_commit=None), 36629)
def test_modified_changelogs(self):
checkout = self._make_checkout()
checkout._scm.checkout_root = "/foo/bar"
checkout._scm.changed_files = lambda git_commit: ["file1", "ChangeLog", "relative/path/ChangeLog"]
expected_changlogs = ["/foo/bar/ChangeLog", "/foo/bar/relative/path/ChangeLog"]
self.assertEqual(checkout.modified_changelogs(git_commit=None), expected_changlogs)
def test_suggested_reviewers(self):
def mock_changelog_entries_for_revision(revision, changed_files=None):
if revision % 2 == 0:
return [ChangeLogEntry(_changelog1entry1)]
return [ChangeLogEntry(_changelog1entry2)]
def mock_revisions_changing_file(path, limit=5):
if path.endswith("ChangeLog"):
return [3]
return [4, 8]
checkout = self._make_checkout()
checkout._scm.checkout_root = "/foo/bar"
checkout._scm.changed_files = lambda git_commit: ["file1", "file2", "relative/path/ChangeLog"]
checkout._scm.revisions_changing_file = mock_revisions_changing_file
checkout.changelog_entries_for_revision = mock_changelog_entries_for_revision
reviewers = checkout.suggested_reviewers(git_commit=None)
reviewer_names = [reviewer.full_name for reviewer in reviewers]
self.assertEqual(reviewer_names, [u'Tor Arne Vestb\xf8'])
def test_apply_patch(self):
checkout = self._make_checkout()
checkout._executive = MockExecutive(should_log=True)
checkout._scm.script_path = lambda script: script
mock_patch = Mock()
mock_patch.contents = lambda: "foo"
mock_patch.reviewer = lambda: None
expected_logs = "MOCK run_command: ['svn-apply', '--force'], cwd=/mock-checkout, input=foo\n"
OutputCapture().assert_outputs(self, checkout.apply_patch, [mock_patch], expected_logs=expected_logs)
|
0x0all/nupic
|
refs/heads/master
|
py/nupic/frameworks/opf/exceptions.py
|
58
|
class CLAModelException(Exception):
""" base exception class for cla model exceptions """
def __init__(self, errorString, debugInfo=None):
"""
Parameters:
-----------------------------------------------------------------------
errorString: Error code/msg: e.g., "Invalid request object."
debugInfo: An optional sequence of debug information; must be
convertible to JSON; pass None to ignore
"""
super(CLAModelException, self).__init__(errorString, debugInfo)
self.errorString = errorString
self.debugInfo = debugInfo
return
class CLAModelInvalidArgument(CLAModelException):
"""
Raised when a supplied value to a method is invalid.
"""
pass
class CLAModelInvalidRangeError(CLAModelException):
"""
Raised when supplied ranges to a method are invalid.
"""
pass
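# Usage sketch (illustrative): debugInfo is an optional JSON-serializable
# payload attached to the exception for later inspection.
#
#     try:
#         raise CLAModelInvalidArgument("Invalid request object.",
#                                       debugInfo={"field": "predictedField"})
#     except CLAModelException as e:
#         print e.errorString, e.debugInfo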
|
abourget/formalchemy-abourget
|
refs/heads/master
|
formalchemy/tests/test_aliases.py
|
3
|
# -*- coding: utf-8 -*-
from formalchemy.tests import *
def test_aliases():
fs = FieldSet(Aliases)
fs.bind(Aliases)
assert fs.id.name == 'id'
def test_render_aliases():
"""
>>> alias = session.query(Aliases).first()
>>> alias
>>> fs = FieldSet(Aliases)
>>> print fs.render()
<div>
<label class="field_opt" for="Aliases--text">
Text
</label>
<input id="Aliases--text" name="Aliases--text" type="text" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Aliases--text").focus();
//]]>
</script>
"""
|
Pablo126/SSBW
|
refs/heads/master
|
Tarea4/tarea4/lib/python3.5/site-packages/pip/_vendor/distlib/_backport/__init__.py
|
1429
|
"""Modules copied from Python 3 standard libraries, for internal use only.
Individual classes and functions are found in d2._backport.misc. Intended
usage is to always import things missing from 3.1 from that module: the
built-in/stdlib objects will be used if found.
"""
|
xysec/androguard
|
refs/heads/master
|
androguard/decompiler/dad/tests/rpo_test.py
|
2
|
"""Tests for rpo."""
import sys
sys.path.append('.')
import unittest
from androguard.decompiler.dad import graph
from androguard.decompiler.dad import node
class NodeTest(node.Node):
def __init__(self, name):
super(NodeTest, self).__init__(name)
def __str__(self):
return '%s (%d)' % (self.name, self.num)
class RpoTest(unittest.TestCase):
def _getNode(self, node_map, n):
ret_node = node_map.get(n)
if not ret_node:
ret_node = node_map[n] = NodeTest(n)
self.graph.add_node(ret_node)
return ret_node
def _createGraphFrom(self, edges):
node_map = {}
for n, childs in edges.items():
if n is None:
continue
parent_node = self._getNode(node_map, n)
for child in childs:
child_node = self._getNode(node_map, child)
self.graph.add_edge(parent_node, child_node)
self.graph.entry = node_map[edges[None]]
return node_map
def _verifyRpo(self, node_map, expected_rpo):
for n1, n2 in expected_rpo.items():
self.assertEqual(node_map[n1].num, n2)
def setUp(self):
self.graph = graph.Graph()
def tearDown(self):
self.graph = None
def testTarjanGraph(self):
edges = {None: 'r',
'r': ['a', 'b', 'c'],
'a': ['d'],
'b': ['a', 'd', 'e'],
'c': ['f', 'g'],
'd': ['l'],
'e': ['h'],
'f': ['i'],
'g': ['i', 'j'],
'h': ['e', 'k'],
'i': ['k'],
'j': ['i'],
'k': ['i', 'r'],
'l': ['h']}
n_map = self._createGraphFrom(edges)
self.graph.compute_rpo()
#self.graph.draw('_testTarjan_graph', '/tmp')
expected_rpo = {'r': 1, 'a': 7, 'b': 6, 'c': 2,
'd': 8, 'e': 13, 'f': 5,
'g': 3, 'h': 10, 'i': 12,
'j': 4, 'k': 11, 'l': 9}
self._verifyRpo(n_map, expected_rpo)
def testFirstGraph(self):
edges = {None: 'r',
'r': ['w1', 'x1', 'z5'],
'w1': ['w2'], 'w2': ['w3'],
'w3': ['w4'], 'w4': ['w5'],
'x1': ['x2'], 'x2': ['x3'],
'x3': ['x4'], 'x4': ['x5'], 'x5': ['y1'],
'y1': ['w1', 'w2', 'w3', 'w4', 'w5', 'y2'],
'y2': ['w1', 'w2', 'w3', 'w4', 'w5', 'y3'],
'y3': ['w1', 'w2', 'w3', 'w4', 'w5', 'y4'],
'y4': ['w1', 'w2', 'w3', 'w4', 'w5', 'y5'],
'y5': ['w1', 'w2', 'w3', 'w4', 'w5', 'z1'],
'z1': ['z2'],
'z2': ['z1', 'z3'],
'z3': ['z2', 'z4'],
'z4': ['z3', 'z5'],
'z5': ['z4']}
n_map = self._createGraphFrom(edges)
self.graph.compute_rpo()
#self.graph.draw('_testFirst_graph', '/tmp')
expected_rpo = {'r': 1, 'x1': 2, 'x2': 3, 'x3': 4, 'x4': 5, 'x5': 6,
'w1': 17, 'w2': 18, 'w3': 19, 'w4': 20, 'w5': 21,
'y1': 7, 'y2': 8, 'y3': 9, 'y4': 10, 'y5': 11,
'z1': 12, 'z2': 13, 'z3': 14, 'z4': 15, 'z5': 16}
self._verifyRpo(n_map, expected_rpo)
def testSecondGraph(self):
edges = {None: 'r',
'r': ['y1', 'x12'],
'x11': ['x12', 'x22'],
'x12': ['x11'],
'x21': ['x22'],
'x22': ['x21'],
'y1': ['y2', 'x11'],
'y2': ['x21']}
n_map = self._createGraphFrom(edges)
self.graph.compute_rpo()
#self.graph.draw('_testSecond_graph', '/tmp')
expected_rpo = {'r': 1, 'x11': 3, 'x12': 4, 'x21': 6, 'x22': 7,
'y1': 2, 'y2': 5}
self._verifyRpo(n_map, expected_rpo)
def testThirdGraph(self):
edges = {None: 'r',
'r': ['w', 'y1'],
'w': ['x1', 'x2'],
'x2': ['x1'],
'y1': ['y2'],
'y2': ['x2']}
n_map = self._createGraphFrom(edges)
self.graph.compute_rpo()
##self.graph.draw('_testThird_graph', '/tmp')
expected_rpo = {'r': 1, 'w': 4, 'x1': 6, 'x2': 5, 'y1': 2, 'y2': 3}
self._verifyRpo(n_map, expected_rpo)
def testFourthGraph(self):
edges = {None: 'r',
'r': ['x1', 'y1', 'y2'],
'x1': ['x2'],
'x2': ['y1', 'y2']}
n_map = self._createGraphFrom(edges)
self.graph.compute_rpo()
#self.graph.draw('_testFourth_graph', '/tmp')
expected_rpo = {'r': 1, 'x1': 2, 'x2': 3, 'y1': 5, 'y2': 4}
self._verifyRpo(n_map, expected_rpo)
def testFifthGraph(self):
edges = {None: 'r',
'r': ['a', 'i'],
'a': ['b', 'c'],
'b': ['c', 'e', 'g'],
'c': ['d'],
'd': ['i'],
'e': ['c', 'f'],
'f': ['i'],
'g': ['h'],
'h': ['d', 'f', 'i']}
n_map = self._createGraphFrom(edges)
self.graph.compute_rpo()
#self.graph.draw('_testFifth_graph', '/tmp')
expected_rpo = {'r': 1, 'a': 2, 'b': 3, 'c': 8,
'd': 9, 'e': 6, 'f': 7, 'g': 4,
'h': 5, 'i': 10}
self._verifyRpo(n_map, expected_rpo)
def testLinearVitGraph(self):
edges = {None: 'r',
'r': ['w', 'y'],
'w': ['x1'],
'y': ['x7'],
'x1': ['x2'],
'x2': ['x1', 'x3'],
'x3': ['x2', 'x4'],
'x4': ['x3', 'x5'],
'x5': ['x4', 'x6'],
'x6': ['x5', 'x7'],
'x7': ['x6']}
n_map = self._createGraphFrom(edges)
self.graph.compute_rpo()
#self.graph.draw('_testLinearVit_graph', '/tmp')
expected_rpo = {'r': 1, 'w': 3, 'x1': 4, 'x2': 5, 'x3': 6,
'x4': 7, 'x5': 8, 'x6': 9, 'x7': 10, 'y': 2}
self._verifyRpo(n_map, expected_rpo)
def testCrossGraph(self):
edges = {None: 'r',
'r': ['a', 'd'],
'a': ['b'],
'b': ['c'],
'c': ['a', 'd', 'g'],
'd': ['e'],
'e': ['f'],
'f': ['a', 'd', 'g']}
n_map = self._createGraphFrom(edges)
self.graph.compute_rpo()
#self.graph.draw('_testCross_graph', '/tmp')
expected_rpo = {'r': 1, 'a': 2, 'b': 3, 'c': 4,
'd': 5, 'e': 6, 'f': 7, 'g': 8}
self._verifyRpo(n_map, expected_rpo)
def testTVerifyGraph(self):
edges = {None: 'n1',
'n1': ['n2', 'n8'],
'n2': ['n3'],
'n3': ['n4', 'n8', 'n9'],
'n4': ['n3', 'n5', 'n6', 'n7'],
'n5': ['n4'],
'n6': ['n5'],
'n7': ['n6'],
'n8': ['n9', 'n12'],
'n9': ['n10', 'n11', 'n12'],
'n10': ['n11'],
'n11': ['n7'],
'n12': ['n10']}
n_map = self._createGraphFrom(edges)
self.graph.compute_rpo()
#self.graph.draw('_testTVerify_graph', '/tmp')
expected_rpo = {'n1': 1, 'n2': 2, 'n3': 3,
'n4': 9, 'n5': 12, 'n6': 11,
'n7': 10, 'n8': 4, 'n9': 5,
'n10': 7, 'n11': 8, 'n12': 6}
self._verifyRpo(n_map, expected_rpo)
if __name__ == '__main__':
unittest.main()
|
rabitdash/practice
|
refs/heads/master
|
python-pj/mine_sweeper/main.py
|
4
|
from data.tools import Control
from data.states import init, run, halt
fuck = Control()
fuck.setup_states({'Init': init.Init(),
'Run': run.Run(),
'Halt': halt.Halt()
}, 'Init')
fuck.main()
|
dylanGeng/BuildingMachineLearningSystemsWithPython
|
refs/heads/master
|
ch10/neighbors.py
|
21
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
import numpy as np
import mahotas as mh
from glob import glob
from features import texture, color_histogram
from matplotlib import pyplot as plt
from sklearn.preprocessing import StandardScaler
from scipy.spatial import distance
basedir = '../SimpleImageDataset/'
haralicks = []
chists = []
print('Computing features...')
# Use glob to get all the images
images = glob('{}/*.jpg'.format(basedir))
# We sort the images to ensure that they are always processed in the same order
# Otherwise, this would introduce some variation just based on the random
# ordering that the filesystem uses
images.sort()
for fname in images:
imc = mh.imread(fname)
imc = imc[200:-200,200:-200]
haralicks.append(texture(mh.colors.rgb2grey(imc)))
chists.append(color_histogram(imc))
haralicks = np.array(haralicks)
chists = np.array(chists)
features = np.hstack([chists, haralicks])
print('Computing neighbors...')
sc = StandardScaler()
features = sc.fit_transform(features)
dists = distance.squareform(distance.pdist(features))
print('Plotting...')
fig, axes = plt.subplots(2, 9, figsize=(16,8))
# Remove ticks from all subplots
for ax in axes.flat:
ax.set_xticks([])
ax.set_yticks([])
for ci,i in enumerate(range(0,90,10)):
left = images[i]
dists_left = dists[i]
right = dists_left.argsort()
# right[0] is the same as left[i], so pick the next closest element
right = right[1]
right = images[right]
left = mh.imread(left)
right = mh.imread(right)
axes[0, ci].imshow(left)
axes[1, ci].imshow(right)
fig.tight_layout()
fig.savefig('figure_neighbors.png', dpi=300)
|
icejoywoo/tornado
|
refs/heads/master
|
demos/helloworld/helloworld.py
|
100
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
def main():
tornado.options.parse_command_line()
application = tornado.web.Application([
(r"/", MainHandler),
])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(options.port)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
|
Yukinoshita47/Yuki-Chan-The-Auto-Pentest
|
refs/heads/master
|
Module/Spaghetti/lib/__init__.py
|
1307
|
pass
|
cypod/arsenalsuite
|
refs/heads/master
|
cpp/lib/PyQt4/examples/network/fortuneserver.py
|
20
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
import random
from PyQt4 import QtCore, QtGui, QtNetwork
class Server(QtGui.QDialog):
def __init__(self, parent=None):
super(Server, self).__init__(parent)
statusLabel = QtGui.QLabel()
quitButton = QtGui.QPushButton("Quit")
quitButton.setAutoDefault(False)
self.tcpServer = QtNetwork.QTcpServer(self)
if not self.tcpServer.listen():
QtGui.QMessageBox.critical(self, "Fortune Server",
"Unable to start the server: %s." % self.tcpServer.errorString())
self.close()
return
statusLabel.setText("The server is running on port %d.\nRun the "
"Fortune Client example now." % self.tcpServer.serverPort())
self.fortunes = (
"You've been leading a dog's life. Stay off the furniture.",
"You've got to think about tomorrow.",
"You will be surprised by a loud noise.",
"You will feel hungry again in another hour.",
"You might have mail.",
"You cannot kill time without injuring eternity.",
"Computers are not intelligent. They only think they are.")
quitButton.clicked.connect(self.close)
self.tcpServer.newConnection.connect(self.sendFortune)
buttonLayout = QtGui.QHBoxLayout()
buttonLayout.addStretch(1)
buttonLayout.addWidget(quitButton)
buttonLayout.addStretch(1)
mainLayout = QtGui.QVBoxLayout()
mainLayout.addWidget(statusLabel)
mainLayout.addLayout(buttonLayout)
self.setLayout(mainLayout)
self.setWindowTitle("Fortune Server")
def sendFortune(self):
block = QtCore.QByteArray()
out = QtCore.QDataStream(block, QtCore.QIODevice.WriteOnly)
out.setVersion(QtCore.QDataStream.Qt_4_0)
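        # Length-prefixed framing: write a 16-bit placeholder for the block
        # size, append the payload, then seek back to the start and patch in
        # the real size (total bytes minus the two bytes of the prefix).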
out.writeUInt16(0)
fortune = self.fortunes[random.randint(0, len(self.fortunes) - 1)]
try:
# Python v3.
fortune = bytes(fortune, encoding='ascii')
except:
# Python v2.
pass
out.writeString(fortune)
out.device().seek(0)
out.writeUInt16(block.size() - 2)
clientConnection = self.tcpServer.nextPendingConnection()
clientConnection.disconnected.connect(clientConnection.deleteLater)
clientConnection.write(block)
clientConnection.disconnectFromHost()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
server = Server()
random.seed(None)
sys.exit(server.exec_())
|
sosguns2002/interactive-mining
|
refs/heads/master
|
interactive-mining-3rdparty-madis/madis/src/functions/sqltransform.py
|
4
|
# coding: utf-8
import setpath
import sqlparse.sql
import sqlparse
import re
from sqlparse.tokens import *
import zlib
import functions
try:
from collections import OrderedDict
except ImportError:
# Python 2.6
from lib.collections26 import OrderedDict
break_inversion_subquery = re.compile(r"""\s*((?:(?:(?:'[^']*?'|\w+:[^\s]+)\s*)*))((?i)of\s|from\s|)(.*?)\s*$""",
re.DOTALL | re.UNICODE)
find_parenthesis = re.compile(r"""\s*\((.*)\)\s*$""", re.DOTALL | re.UNICODE)
viewdetector = re.compile(r'(?i)\s*create\s+(?:temp|temporary)\s+view\s+', re.DOTALL | re.UNICODE)
inlineop = re.compile(r'\s*/\*\*+[\s\n]*((?:def\s+|class\s+).+)[^*]\*\*+/', re.DOTALL | re.UNICODE)
_statement_cache = OrderedDict()
_statement_cache_size = 1000
# delete reserved SQL keywords that collide with our vtables
if __name__ != "__main__":
for i in ['EXECUTE', 'NAMES', 'CACHE', 'EXEC', 'OUTPUT']:
if i in sqlparse.keywords.KEYWORDS:
del sqlparse.keywords.KEYWORDS[i]
#Parse comments for inline ops
def opcomments(s):
if r'/**' not in unicode(s):
return []
out = []
constr_comm = None
for i in s.tokens:
ui = unicode(i)
if type(i) == sqlparse.sql.Comment:
op = inlineop.match(ui)
if op != None:
out.append(op.groups()[0])
# Construct comment to work around sqlparse bug
if constr_comm is not None:
constr_comm += ui
if type(i) == sqlparse.sql.Token:
if ui == u'/*':
if constr_comm is not None:
constr_comm = None
else:
constr_comm = u'/*'
elif ui == u'*/':
if constr_comm is not None:
op = inlineop.match(constr_comm)
if op != None:
out.append(op.groups()[0])
constr_comm = None
return out
#Top level transform (runs once)
def transform(query, multiset_functions=None, vtables=[], row_functions=[], substitute=lambda x: x):
if type(query) not in (str, unicode):
        return (query, [], [], [])
s = query
subsquery = substitute(s)
# Check cache
if subsquery in _statement_cache:
return _statement_cache[subsquery]
enableInlineops = False
if r'/**' in subsquery:
enableInlineops = True
out_vtables = []
st = sqlparse.parse(subsquery)
trans = Transclass(multiset_functions, vtables, row_functions)
s_out = ''
sqp = ('', [], [])
inlineops = []
for s in st:
        # strip trailing semicolons and whitespace from the statement
strs = re.match(r"(.*?);*\s*$", unicode(s), re.DOTALL | re.UNICODE).groups()[0]
st1 = sqlparse.parse(strs)
if len(st1) > 0:
if enableInlineops:
inlineops.append(opcomments(st1[0]))
sqp = trans.rectransform(st1[0])
strs = unicode(sqp[0])
s_out += strs
s_out += ';'
# Detect create temp view and mark its vtables as permanent
if viewdetector.match(strs):
out_vtables += [x + (False,) for x in sqp[1]]
else:
out_vtables += sqp[1]
result = (s_out, vt_distinct(out_vtables), sqp[2], inlineops)
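    # Cache the transformed statement; once the cache is full, evict the
    # oldest entry (FIFO via OrderedDict.popitem(last=False)) before storing.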
if len(_statement_cache) < _statement_cache_size:
_statement_cache[subsquery] = result
else:
_statement_cache.popitem(last=False)
_statement_cache[subsquery] = result
return result
class Transclass:
direct_exec = []
multiset_functions = None
vtables = []
row_functions = []
def __init__(self, multiset_functions=None, vtables=[], row_functions=[]):
self.direct_exec = []
self.multiset_functions = multiset_functions
self.vtables = vtables
self.row_functions = row_functions
#recursive transform
def rectransform(self, s, s_orig=None):
if not (re.search(ur'(?i)(select|' + '|'.join([x for x in self.vtables]) + '|' + '|'.join(
self.multiset_functions) + '|' + '|'.join(self.row_functions) + ')', unicode(s), re.UNICODE)):
return unicode(s), [], self.direct_exec
out_vtables = []
if s_orig is None:
s_orig = s
query = None
# Expand functions with spaces between them and their parenthesis
for t in s_orig.tokens:
tfm = re.match('(\w+)\s\(', unicode(t), re.UNICODE)
if isinstance(t, sqlparse.sql.Function) and tfm and (
tfm.groups()[0] in self.vtables or tfm.groups()[0] in self.row_functions):
tidx = s_orig.token_index(t)
s_orig.tokens[tidx:tidx + 1] = t.tokens
fs = [x for x in expand_tokens(s)]
# Process external_query VTs
tmatch = re.match(r'\s*(\w+)\s+(.*|$)', unicode(s), re.DOTALL | re.UNICODE)
if tmatch is not None and tmatch.groups()[0].lower() in self.vtables:
op_for_inv = tmatch.groups()[0].lower()
if hasattr(self.vtables[op_for_inv], 'external_query'):
rest = tmatch.groups()[1]
op_for_inv = unicode(op_for_inv)
params, preposition, subq = break_inversion_subquery.match(rest).groups()
if subq != '':
paramslist = [format_query(subq)]
else:
paramslist = []
paramslist += [format_param(''.join(x)) for x in
re.findall(r"'([^']*?)'|(\w+:[^\s]+)", params, re.UNICODE)]
inv_s = ','.join(paramslist)
vname = vt_name(op_for_inv)
self.direct_exec += [(op_for_inv, paramslist, subq)]
s_orig.tokens[s_orig.token_index(s.tokens[0]):s_orig.token_index(s.tokens[-1]) + 1] = [sqlparse.sql.Token(Token.Keyword, 'select * from ' + vname + ' ')]
return unicode(s), vt_distinct([(vname, op_for_inv, inv_s)]), self.direct_exec
# Process internal parenthesis
for t in fs:
if type(t) is sqlparse.sql.Parenthesis:
subq = find_parenthesis.match(unicode(t))
if subq != None:
subq = subq.groups()[0]
t.tokens = sqlparse.parse(subq)[0].tokens
out_vtables += self.rectransform(t)[1]
t.tokens[0:0] = [sqlparse.sql.Token(Token.Punctuation, '(')]
t.tokens.append(sqlparse.sql.Token(Token.Punctuation, ')'))
# Process Inversions
#Process direct row inversion
t = re.match(r'\s*(\w+)(\s+.*|$)', unicode(s), re.DOTALL | re.UNICODE)
if t != None and t.groups()[0].lower() in self.row_functions:
op_for_inv = t.groups()[0]
rest = t.groups()[1]
params, preposition, subq = break_inversion_subquery.match(rest).groups()
paramslist = [format_param(''.join(x)) for x in re.findall(r"'([^']*?)'|(\w+:[^\s]+)", params, re.UNICODE)]
if subq != '':
if len(preposition) > 0:
subq, v, dv = self.rectransform(sqlparse.parse(subq)[0])
out_vtables += v
paramslist += ['(' + subq + ')']
else:
paramslist += [format_param(subq)]
inv_s = 'SELECT ' + op_for_inv + '(' + ','.join(paramslist) + ')'
subs = sqlparse.parse(inv_s)[0]
s_orig.tokens[s_orig.token_index(s.tokens[0]):s_orig.token_index(s.tokens[-1]) + 1] = subs.tokens
s = subs
fs = [x for x in expand_tokens(s)]
# Process vtable inversion
for t in fs:
if t.ttype == Token.Keyword.DML:
break
strt = unicode(t).lower()
if strt in self.vtables:
#print "FOUND INVERSION:", strt, fs
tindex = fs.index(t)
# Break if '.' exists before vtable
if tindex > 0 and unicode(fs[tindex - 1]) == '.':
break
op_for_inv = strt
try:
rest = ''.join([unicode(x) for x in fs[tindex + 1:]])
except KeyboardInterrupt:
raise
except:
rest = ''
params, preposition, subq = break_inversion_subquery.match(rest).groups()
orig_subq = subq
if subq != '':
subq, v, dv = self.rectransform(sqlparse.parse(subq)[0])
out_vtables += v
if not hasattr(self.vtables[strt], 'external_stream'):
if subq != '':
paramslist = [format_query(subq)]
else:
paramslist = []
paramslist += [format_param(''.join(x)) for x in
re.findall(r"'([^']*?)'|(\w+:[^\s]+)", params, re.UNICODE)]
inv_s = ''.join(
[unicode(x) for x in fs[:fs.index(t)]]) + 'SELECT * FROM ' + op_for_inv + '(' + ','.join(
paramslist) + ')'
else:
paramslist = [format_param(''.join(x)) for x in
re.findall(r"'([^']*?)'|(\w+:[^\s]+)", params, re.UNICODE)]
inv_s = ''.join(
[unicode(x) for x in fs[:fs.index(t)]]) + 'SELECT * FROM ' + op_for_inv + '(' + ','.join(
paramslist) + ') ' + subq
subs = sqlparse.parse(inv_s)[0]
self.direct_exec += [(op_for_inv, paramslist, orig_subq)]
s_orig.tokens[s_orig.token_index(s.tokens[0]):s_orig.token_index(s.tokens[-1]) + 1] = subs.tokens
s = subs
break
# find first select
s_start = s.token_next_match(0, Token.Keyword.DML, r'(?i)select', True)
if s_start is not None:
# find keyword that ends substatement
s_end = s.token_next_match(s.token_index(s_start), Token.Keyword, (
r'(?i)union', r'(?i)order', r'(?i)limit', r'(?i)intersect', r'(?i)except', r'(?i)having'), True)
if len(s.tokens) < 3:
return unicode(s), vt_distinct(out_vtables), self.direct_exec
if s_end is None:
if s.tokens[-1].value == ')':
s_end = s.tokens[-2]
else:
s_end = s.tokens[-1]
else:
if s.token_index(s_end) + 1 >= len(s.tokens):
raise functions.MadisError("'" + unicode(s_end).upper() + "' should be followed by something")
out_vtables += self.rectransform(
sqlparse.sql.Statement(s.tokens_between(s.tokens[s.token_index(s_end) + 1], s.tokens[-1])), s)[1]
s_end = s.tokens[s.token_index(s_end) - 1]
query = sqlparse.sql.Statement(s.tokens_between(s_start, s_end))
else:
return unicode(s), vt_distinct(out_vtables), self.direct_exec
# find from and select_parameters range
from_range = None
from_start = query.token_next_match(0, Token.Keyword, r'(?i)from', True)
# process virtual tables in from range
if from_start is not None:
from_end = query.token_next_by_instance(query.token_index(from_start), sqlparse.sql.Where)
if from_start == query.tokens[-1]:
raise functions.MadisError("Error in FROM range of: '" + str(query) + "'")
if from_end is None:
from_end = query.tokens[-1]
from_range = sqlparse.sql.Statement(
query.tokens_between(query.tokens[query.token_index(from_start) + 1], from_end))
else:
from_range = sqlparse.sql.Statement(
query.tokens_between(query.tokens[query.token_index(from_start) + 1], from_end, exclude_end=True))
for t in [x for x in expand_type(from_range, (sqlparse.sql.Identifier, sqlparse.sql.IdentifierList))]:
if unicode(t).lower() in ('group', 'order'):
break
if type(t) is sqlparse.sql.Function:
vname = vt_name(unicode(t))
fname = t.tokens[0].get_real_name().lower()
if fname in self.vtables:
out_vtables += [(vname, fname, unicode(t.tokens[1])[1:-1])]
t.tokens = [sqlparse.sql.Token(Token.Keyword, vname)]
else:
raise functions.MadisError("Virtual table '" + fname + "' does not exist")
if from_start is not None:
select_range = sqlparse.sql.Statement(query.tokens_between(query.tokens[1], from_start, exclude_end=True))
else:
select_range = sqlparse.sql.Statement(query.tokens_between(query.tokens[1], query.tokens[-1]))
# Process EXPAND functions
for t in flatten_with_type(select_range, sqlparse.sql.Function):
if hasattr(t.tokens[0], 'get_real_name'):
fname = t.tokens[0].get_real_name()
else:
fname = unicode(t.tokens[0])
fname = fname.lower().strip()
if fname in self.multiset_functions:
t = s_orig.group_tokens(sqlparse.sql.Parenthesis, s_orig.tokens_between(s_start, s_end))
vname = vt_name(unicode(t))
out_vtables += [(vname, 'expand', format_query(t))]
s_orig.tokens[s_orig.token_index(t)] = sqlparse.sql.Token(Token.Keyword, 'select * from ' + vname + ' ')
break
return unicode(s), vt_distinct(out_vtables), self.direct_exec
def vt_name(s):
tmp = re.sub(r'([^\w])', '_', 'vt_' + unicode(zlib.crc32(s.encode('utf-8'))), re.UNICODE)
return re.sub(r'_+', '_', tmp, re.UNICODE)
def format_query(s):
q = "'query:" + unicode(s).replace("'", "''") + "'"
q = q.replace('\n', ' ')
return q
def format_param(s):
return "'" + unicode(s).replace("'", "''") + "'"
def format_identifiers(s):
return unicode(s).replace(' ', '').replace('\t', '')
def flatten_with_type(inpt, clss):
"""Generator yielding ungrouped tokens.
This method is recursively called for all child tokens.
"""
for token in inpt.tokens:
if isinstance(token, clss):
yield token
else:
if token.is_group() or type(token) is sqlparse.sql.Parenthesis:
for i in flatten_with_type(token, clss):
yield i
def expand_type(inpt, clss):
"""Generator yielding ungrouped tokens.
This method is recursively called for all child tokens.
"""
for token in inpt.tokens:
if token.is_group() and isinstance(token, clss):
for i in expand_type(token, clss):
yield i
else:
yield token
def expand_tokens(inpt):
"""Generator yielding ungrouped tokens.
This method is recursively called for all child tokens.
"""
for token in inpt.tokens:
if (token.is_group() and isinstance(token, (
sqlparse.sql.Identifier, sqlparse.sql.IdentifierList, sqlparse.sql.Where))):
for i in expand_tokens(token):
yield i
else:
yield token
def vt_distinct(vt):
vtout = OrderedDict()
for i in vt:
if i[0] not in vtout:
vtout[i[0]] = i
else:
if not vtout[i[0]][-1] == False:
vtout[i[0]] = i
return vtout.values()
if __name__ == "__main__":
sql = []
multiset_functions = ['nnfunc1', 'nnfunc2', 'apriori', 'ontop', 'strsplit']
def file():
pass
file.external_stream = True
def execv():
pass
execv.no_results = True
vtables = {'file': file, 'lnf': True, 'funlalakis': True, 'filela': True, 'sendto': True, 'helpvt': True,
'output': True, 'names': True, 'cache': True, 'testvt': True, 'exec': execv, 'flow': True}
row_functions = ['help', 'set', 'execute', 'var', 'toggle', 'strsplit', 'min', 'ifthenelse', 'keywords']
sql += ["select a,b,(apriori(a,b,c,'fala:a')) from lalatable"]
sql += ["create table a from select a,b,(apriori(a,b,c,'fala:a')) from lalatable, lala14, lala15"]
sql += ["create table a from select a,b,(apriori(a,b,c,'fala:a')) from lalatable, lala14, lala15"]
sql += ["select a,b,(apriori(a,b,c,'fala:a')) from lalatable where a=15 and b=23 and c=(1234)"]
sql += ["select a,b,(apriori(a,b,c,'fala:a')) from lalatable where a=15 and b=23 and c=(1234) group by a order by"]
sql += [
"select a,b,(apriori(a,b,c,'fala:a')) from ('asdfadsf') where a=15 and b=23 and c=(1234) group by a order by"]
sql += [
"select a,b,(apriori(a,b,c,'fala:a')) from ('asdfadsf') where a=15 and b=23 and c=(1234) group by a order by b union select a,b from funlalakis('1234'), (select a from lnf('1234') )"]
sql += ["select c1,c2 from file('test.tsv', 'param1');select a from filela('test.tsv') group by la"]
sql += ['insert into la values(1,2,3,4)']
sql += ["select apriori(a) from (select apriori('b') from table2)"]
sql += [
"select userid, top1, top2 from (select userid,ontop(3,preference,collid,preference) from colpreferences group by userid)order by top2 ; "]
sql += ["select ontop(a), apriori(b) from lala"]
sql += ["select ontop(a) from (select apriori(b) from table) order by a"]
sql += [
"select userid,ontop(3,preference,collid,preference),ontop(1,preference,collid) from colpreferences group by userid;"]
sql += ["create table lala as select apriori(a) from table;"]
sql += [
"create table lila as select userid,ontop(3,preference,collid,preference),ontop(1,preference,collid) from colpreferences group by userid; "]
sql += ["select * from file(test.txt)"]
sql += ["select sum(b) from test_table group by a pivot b,c"]
sql += ["select * from (helpvt lala)"]
sql += ["output 'list'"]
sql += ["(help lala)"]
sql += [r"select * from tab1 union help 'lala'"]
sql += [r"select * from file('list'),(select * from file('list'))"]
sql += [r"create table ta as help list"]
sql += [r"select * from (help lala)"]
sql += [r"output 'lala' select apriori(a,b) from extable"]
sql += [r"select apriori(a,b) from extable"]
sql += [r"select * from file('/lala','param1:t')"]
sql += [r"output '/lala' 'param1' select * from tab"]
sql += [r"select apriori(a,b) from file(/lala/lalakis)"]
sql += ["(select a from (sendto 'fileout.lala' 'tsv' select * from file('file.lala')))"]
sql += ["sendto 'lala1' sendto 'fileout.lala' 'tsv' select * from file('file.lala'))"]
sql += ["help 'lala'"]
sql += ["names file 'lala'; helpvt lala"]
sql += [r"select * from file() as a, file() as b;"]
sql += [r"select file from (file 'alla) as lala"]
sql += [r" .help select * from file('lsls')"]
sql += [r" .execute select * from file('lsls')"]
sql += [r"limit 1"]
sql += [r"file 'lala'"]
sql += [r"select * from lala union file 'lala' union file 'lala'"]
sql += [r"file 'lala' limit 1"]
sql += [r"create table lala file 'lala'"]
sql += [r"SELECT * FROM (file 'lala')"]
sql += [r"(file 'lala') union (file 'lala1')"]
sql += [r"select (5+5) from (file 'lala1')"]
sql += [
r"select * from ( output 'bla' select * from file('collection-general.csv','dialect:line') where rowid!=1 ) "]
sql += [r"select * from testtable where x not in (file 'lalakis')"]
#sql+=[r".help ασδαδδ"]
sql += [r"names (file 'testfile')"]
#sql+=[r"select * from (select lala from table limit)"]
sql += [r"""create table session_to_country(
sesid text NOT NULL primary key,
geoip_ccode text
); """]
sql += [r"""create table ip_country as select iplong,CC from (cache select cast(C3 as integer) as ipfrom,cast(C4 as
integer) as ipto, C5 as CC from file('file:GeoIPCountryCSV_09_2007.zip','compression:t','dialect:csv') ),tmpdistlong
where iplong>=ipfrom and iplong <=ipto;
"""]
sql += [r"cache select * from lala;"]
sql += [r"var 'lala' from var 'lala1'"]
sql += [r"toggle tracing"]
sql += [r"select strsplit('8,9','dialect:csv')"]
sql += [r"testvt"]
sql += [r"select date('now')"]
sql += [r"exec select * from lala"]
sql += [r"var 'usercc' from select min(grade) from (testvt) where grade>5;"]
sql += [r"var 'usercc' from select 5;"]
sql += [r"(exec flow file 'lala' 'lala1' asdfasdf:asdfdsaf);"]
sql += [
r"UPDATE merged_similarity SET merged_similarity = ((ifthenelse(colsim,colsim,0)*0.3)+(ifthenelse(colsim,colsim,0)*0.3))"]
sql += [r"toggle tracing ;"]
sql += [r"select sesid, query from tac group by sesid having keywords('query')='lala'"]
sql += [
r"select sesid, query from tac group by sesid having keywords('query')='lala' union select * from file('lala')"]
sql += [r"select * from (select 5 as a) where a=4 or (a=5 and a not in (select 3));"]
sql += [r"select * from a where ((a.r in (select c1 from f)));"]
sql += [r"select upper(a.output) from a"]
sql += [r"select upper(execute) from a"]
sql += [r"exec select a.5 from (flow file 'lala')"]
sql += [r"select max( (select 5))"]
sql += [r"cache select 5; create temp view as cache select 7; cache select 7"]
sql += [r"select * from /** def lala(x): pass **/ tab"]
sql += [r"select * from /* def lala(x): pass **/ tab"]
sql += [r"/** def lala():return 6 **/ \n"]
sql += [r"/** def lala():return 6 **/ "]
for s in sql:
print "====== " + unicode(s) + " ==========="
a = transform(s, multiset_functions, vtables, row_functions)
print "Query In:", s
print "Query Out:", a[0].encode('utf-8')
print "Vtables:", a[1]
print "Direct exec:", a[2]
print "Inline Ops:", a[3]
|
colinnewell/odoo
|
refs/heads/8.0
|
openerp/conf/__init__.py
|
442
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Library-wide configuration variables.
For now, configuration code is in openerp.tools.config. It is mostly in
unprocessed form, e.g. addons_path is a string of comma-separated
paths. The aim is to have code related to configuration (command line
parsing, configuration file loading and saving, ...) in this module
and provide real Python variables, e.g. addons_paths is really a list
of paths.
To initialize this module properly, openerp.tools.config.parse_config()
must be used.
"""
import deprecation
# Paths to search for OpenERP addons.
addons_paths = []
# List of server-wide modules to load. Those modules are supposed to provide
# features not necessarily tied to a particular database. This is in contrast
# to modules that are always bound to a specific database when they are
# installed (i.e. the majority of OpenERP addons). This is set with the --load
# command-line option.
server_wide_modules = []
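# Hedged illustration (not part of the original module): the module docstring
# above says the aim is to expose real Python values instead of raw option
# strings. A minimal sketch of how a comma-separated addons_path option could
# be turned into the addons_paths list might look like the helper below; the
# actual processing lives in openerp.tools.config and may differ.
def _split_addons_path_sketch(raw_addons_path):
    """Split a comma-separated path string into a clean list of paths."""
    return [p.strip() for p in raw_addons_path.split(',') if p.strip()]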
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
zsdonghao/tensorlayer
|
refs/heads/master
|
examples/reinforcement_learning/tutorial_atari_pong.py
|
1
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""Monte-Carlo Policy Network π(a|s) (REINFORCE).
To understand Reinforcement Learning, we let the computer learn how to play
the Pong game from the original screen inputs. Before we start, we highly
recommend going through the famous blog post “Deep Reinforcement Learning:
Pong from Pixels”, a minimalistic implementation of deep reinforcement
learning using python-numpy and the OpenAI gym environment.
The code here is a reimplementation of Karpathy's blog using TensorLayer.
Compared with Karpathy's code, we store the observations for a whole batch,
whereas he stores observations for a single episode only and keeps gradients
instead (so we will use more memory if the observations are very large).
Link
-----
http://karpathy.github.io/2016/05/31/rl/
"""
import time
import gym
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import DenseLayer, InputLayer
tf.logging.set_verbosity(tf.logging.DEBUG)
tl.logging.set_verbosity(tl.logging.DEBUG)
# hyper-parameters
image_size = 80
D = image_size * image_size
H = 200
batch_size = 10
learning_rate = 1e-4
gamma = 0.99
decay_rate = 0.99
render = False # display the game environment
# resume = True # load existing policy network
model_file_name = "model_pong"
np.set_printoptions(threshold=np.nan)
def prepro(I):
"""Prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector."""
I = I[35:195]
I = I[::2, ::2, 0]
I[I == 144] = 0
I[I == 109] = 0
I[I != 0] = 1
return I.astype(np.float).ravel()
env = gym.make("Pong-v0")
observation = env.reset()
prev_x = None
running_reward = None
reward_sum = 0
episode_number = 0
xs, ys, rs = [], [], []
# observation for training and inference
t_states = tf.placeholder(tf.float32, shape=[None, D])
# policy network
network = InputLayer(t_states, name='input')
network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='hidden')
network = DenseLayer(network, n_units=3, name='output')
probs = network.outputs
sampling_prob = tf.nn.softmax(probs)
t_actions = tf.placeholder(tf.int32, shape=[None])
t_discount_rewards = tf.placeholder(tf.float32, shape=[None])
loss = tl.rein.cross_entropy_reward_loss(probs, t_actions, t_discount_rewards)
train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss)
with tf.Session() as sess:
tl.layers.initialize_global_variables(sess)
# if resume:
# load_params = tl.files.load_npz(name=model_file_name+'.npz')
# tl.files.assign_params(sess, load_params, network)
tl.files.load_and_assign_npz(sess, model_file_name + '.npz', network)
network.print_params()
network.print_layers()
start_time = time.time()
game_number = 0
while True:
if render:
env.render()
cur_x = prepro(observation)
x = cur_x - prev_x if prev_x is not None else np.zeros(D)
x = x.reshape(1, D)
prev_x = cur_x
prob = sess.run(sampling_prob, feed_dict={t_states: x})
# action. 1: STOP 2: UP 3: DOWN
# action = np.random.choice([1,2,3], p=prob.flatten())
action = tl.rein.choice_action_by_probs(prob.flatten(), [1, 2, 3])
observation, reward, done, _ = env.step(action)
reward_sum += reward
xs.append(x) # all observations in an episode
ys.append(action - 1) # all fake labels in an episode (action begins from 1, so minus 1)
rs.append(reward) # all rewards in an episode
if done:
episode_number += 1
game_number = 0
if episode_number % batch_size == 0:
print('batch over...... updating parameters......')
epx = np.vstack(xs)
epy = np.asarray(ys)
epr = np.asarray(rs)
disR = tl.rein.discount_episode_rewards(epr, gamma)
disR -= np.mean(disR)
disR /= np.std(disR)
xs, ys, rs = [], [], []
sess.run(train_op, feed_dict={t_states: epx, t_actions: epy, t_discount_rewards: disR})
if episode_number % (batch_size * 100) == 0:
tl.files.save_npz(network.all_params, name=model_file_name + '.npz')
running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
print('resetting env. episode reward total was %f. running mean: %f' % (reward_sum, running_reward))
reward_sum = 0
observation = env.reset() # reset env
prev_x = None
if reward != 0:
print(
(
'episode %d: game %d took %.5fs, reward: %f' %
(episode_number, game_number, time.time() - start_time, reward)
), ('' if reward == -1 else ' !!!!!!!!')
)
start_time = time.time()
game_number += 1
|
hfut721/RPN
|
refs/heads/master
|
lib/datasets/__init__.py
|
212
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
|
mikeireland/pynrm
|
refs/heads/master
|
popCSV.py
|
1
|
"""Notes:
This should additionally have the total integration time for the scan.
Also - a counter for the number of visits in a night for 1 target.
"""
import numpy as np
import pbclass, os, matplotlib.pyplot as plt
try:
import pyfits
except:
import astropy.io.fits as pyfits
import tools, warnings
warnings.filterwarnings('ignore')
#A global
last_fnum = 0
last_block_string = ""
last_name=""
counter = 0
last_start = ""
def write_textfile(g, block_string, name,f_ix,fnum_prefix="",add_filename=False,total_int=None,instname='NIRC2'):
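    """Write one summary line per contiguous block of exposures.

    A line (describing the *previous* block) is emitted only when the block
    keyword string changes (NIRC2) or when the template start time changes
    (NACO); module-level globals keep track of the previous block between
    calls.
    """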
global last_fnum
global last_block_string
global last_name
global counter
global last_start
if instname=='NACO':
newName = name
hdr = pyfits.getheader(newName)
name = hdr['ORIGFILE']
nexp = hdr['ESO TPL NEXP']
expno = hdr['ESO TPL EXPNO']
start = hdr['ESO TPL START']
counter+=1
if start==last_start:
return
last_counter = counter
counter = 0
#counter = nexp
#print(name,last_counter)
numstr = last_start+" {0:02d} ".format(last_counter)
last_start = start
if len(fnum_prefix) > 0 and fnum_prefix in name:
current_fnum = int(name[name.find(fnum_prefix)+len(fnum_prefix):name.find(".fit")])
else:
current_fnum=f_ix
elif instname=='NIRC2':
counter+=1
if (last_block_string == block_string):
return
last_counter = counter
counter = 0
if len(fnum_prefix) > 0 and fnum_prefix in name:
current_fnum = int(name[name.find(fnum_prefix)+len(fnum_prefix):name.find(".fit")])
else:
current_fnum=f_ix
if last_fnum==0:
last_fnum = current_fnum
numstr = "{0:04d} {1:02d} ".format(last_fnum, last_counter)
if (last_block_string == ""):
last_block_string=block_string
last_name=name
return
if add_filename:
g.write(numstr+last_block_string + ' ' + last_name)
else:
g.write(numstr+last_block_string)
if total_int:
g.write("{0:5.1f}".format(last_counter*total_int)+'\n')
else:
g.write('\n')
last_fnum = current_fnum
last_name = name
last_block_string=block_string
def popCSV(keys,operations,colheads,path,outfile,textfile='',blockkeys=[],threshold=20000,fnum_prefix="n",add_filename=False,total_int_keys=None,instname='NIRC2'):
"""Populate a CSV file containing information about the fits headers
Parameters
----------
threshold: int
The threshold before the file is considered saturated"""
total_int=None
if ( (len(textfile)>0) & (len(blockkeys)>0) ):
try:
g=open(textfile,'w')
allFiles = os.listdir(path)
if instname=='NACO':
g.write("START NF ")
elif instname=='NIRC2':
g.write("FNUM NF ")
for count, i in enumerate(blockkeys):
if (count < 3):
g.write("{0:20s}".format(i))
else:
g.write("{0:10s}".format(i))
g.write("\n")
except:
print("Can not open text file for writing!")
raise UserWarning
textfile_open=True
else:
print("No text file open - only creating CSV file")
textfile_open=False
with open(outfile,'w') as f:
# write out the column headers to the file.
f.write(",".join(colheads))
# The fits header keywords stored to retrieve values from files
# Use the walk command to step through all of the files in all directories starting at the "root" defined above
for root, dirs, files in tools.sortedWalk(path):
print('Surveying directory ',root)
pb=pbclass.progressbarClass(np.size(files)-1)
j=0
for f_ix,name in enumerate(files):
if "fit" in name:
pathAndName = os.path.join(root,name)
try:
prihdr = pyfits.getheader(pathAndName,ignore_missing_end=True) # get the primary header and values
image = pyfits.getdata(pathAndName,ignore_missing_end=True) # get the image data to be analysed
                    except Exception:
                        print('Unable to open fits file')
                        continue
values = [root, name] # The first two entries for the row
if instname=='NACO' and 'ORIGFILE' in prihdr.keys():
oldName = prihdr['ORIGFILE']
fnum_prefix = oldName[0:oldName.find('.fit')-4]
#name = oldName
#extract values from header in keys
if instname=='NACO':
for i in keys:
if i in prihdr:
if i=='OBJECT':
if prihdr[i] == 'Object name not set':
if 'Flat' in prihdr['ESO TPL ID']:
values.append('flats')
elif 'Dark' in prihdr['ESO TPL ID']:
values.append('darks')
else:
values.append("")
else:
values.append(str(prihdr[i]))
else:
values.append(str(prihdr[i]))
else:
if i=='SHRNAME':
if 'Dark' in prihdr['ESO TPL ID']:
values.append('closed')
else:
values.append('open')
elif i=='SLITNAME':
values.append('none')
elif i=='ESO TEL ALT':
values.append('45')
elif i=='COADDS':
values.append('1')
else:
values.append("")
elif instname=='NIRC2':
for i in keys:
try:
values.append(str(prihdr[i]))
except Exception:
values.append("")
#Now for the "Block":
block_string = ""
for count,i in enumerate(blockkeys):
try:
if i in prihdr:
if i=='OBJECT' and instname=='NACO':
if prihdr[i] == 'Object name not set':
if 'Flat' in prihdr['ESO TPL ID']:
objName = 'flats'
elif 'Dark' in prihdr['ESO TPL ID']:
objName = 'darks'
else:
objName = prihdr[i]
if (count<3):
block_string += "{0:20s}".format(objName)
else:
block_string += "{0:10s}".format(objName)
else:
if (count<3):
block_string += "{0:20s}".format(str(prihdr[i]))
else:
block_string += "{0:10s}".format(str(prihdr[i]))
else:
if i=='COADDS':
string = '1'
else:
string = 'unknown'
if (count<3):
block_string += "{0:20s}".format(string)
else:
block_string += "{0:10s}".format(string)
except Exception:
print("Error with key" + i)
#start with manual operations specific to telescopes
if ("CURRINST" in prihdr.keys() and prihdr["CURRINST"] == "NIRC2"):
if prihdr["SAMPMODE"] == 2:
values.append("1")
else:
values.append(str(prihdr["MULTISAM"]))
else:
values.append("")
# filtered version of the file used for peak and saturated
#filtered = tools.filter_image(pathAndName)
if len(image.shape)==2:
filtered = tools.filter_image(image)
else:
filtered = np.median(image,axis=0)
# peak pixel
peakpix = tools.peak_coords(filtered)
values.append(str(peakpix[0])) # X coord of peak pixel
values.append(str(peakpix[1])) # Y coord of peak pixel
values.append(str(peakpix[2])) # value of peak pixel
# median pixel value in the image
values.append(str(np.median(image)))
# saturated
#takes a numpy array, divides by the number of coadds and compares against threshold. returns true if peak pixel is above threshold
if "COADDS" in prihdr.keys():
saturated= np.max(image/prihdr["COADDS"]) > threshold
else:
saturated=np.max(image) > threshold
values.append(str(saturated))
line = "\n" + ",".join(values)
f.write(line)
#Now write our block text file...
if total_int_keys:
total_int=1
for akey in total_int_keys:
if akey in prihdr.keys():
total_int *= prihdr[akey]
if textfile_open:
write_textfile(g, block_string, name,f_ix,fnum_prefix=fnum_prefix,add_filename=add_filename,total_int=total_int,instname=instname)
j+=1
pb.progress(j)
if "fit" in name:
write_textfile(g, "", name,f_ix,fnum_prefix=fnum_prefix,add_filename=add_filename,total_int=total_int,instname=instname)
return 1
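# Hedged usage sketch (not part of the original script): a hypothetical call
# to popCSV with made-up FITS keywords and paths, guarded so it only runs
# when this module is executed directly. Every name and value below is an
# illustrative assumption, not something used by the original pipeline.
if __name__ == "__main__":
    example_keys = ['OBJECT', 'EXPTIME', 'FILTER', 'COADDS']  # hypothetical keywords
    example_heads = ['Directory', 'Filename'] + example_keys + \
                    ['MULTISAM', 'PeakX', 'PeakY', 'PeakValue', 'Median', 'Saturated']
    popCSV(example_keys, [], example_heads,
           path='/data/nirc2/2015-01-01',   # hypothetical data directory
           outfile='headers.csv',
           textfile='blocks.txt',
           blockkeys=['OBJECT', 'FILTER', 'EXPTIME'],
           instname='NIRC2')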
|
dave1010/doctrine2
|
refs/heads/master
|
docs/en/_exts/configurationblock.py
|
2577
|
#Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper
class configurationblock(nodes.General, nodes.Element):
pass
class ConfigurationBlock(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
formats = {
'html': 'HTML',
'xml': 'XML',
'php': 'PHP',
'yaml': 'YAML',
'jinja': 'Twig',
'html+jinja': 'Twig',
'jinja+html': 'Twig',
'php+html': 'PHP',
'html+php': 'PHP',
'ini': 'INI',
'php-annotations': 'Annotations',
}
def run(self):
env = self.state.document.settings.env
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
entries = []
for i, child in enumerate(node):
if isinstance(child, nodes.literal_block):
# add a title (the language name) before each block
#targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
#targetnode = nodes.target('', '', ids=[targetid])
#targetnode.append(child)
innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
para = nodes.paragraph()
para += [innernode, child]
entry = nodes.list_item('')
entry.append(para)
entries.append(entry)
resultnode = configurationblock()
resultnode.append(nodes.bullet_list('', *entries))
return [resultnode]
def visit_configurationblock_html(self, node):
self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
pass
def depart_configurationblock_latex(self, node):
pass
def setup(app):
app.add_node(configurationblock,
html=(visit_configurationblock_html, depart_configurationblock_html),
latex=(visit_configurationblock_latex, depart_configurationblock_latex))
app.add_directive('configuration-block', ConfigurationBlock)
|
HelloOrganization/facemimic
|
refs/heads/master
|
views/todos.py
|
1
|
# coding: utf-8
from leancloud import Object
from leancloud import Query
from leancloud import LeanCloudError
from flask import Blueprint
from flask import request
from flask import redirect
from flask import url_for
from flask import render_template
class Todo(Object):
pass
todos_view = Blueprint('todos', __name__)
@todos_view.route('')
def show():
try:
todos = Query(Todo).descending('createdAt').find()
except LeanCloudError, e:
        if e.code == 101:  # the corresponding Class has not been created on the server yet
todos = []
else:
raise e
return render_template('todos.html', todos=todos)
@todos_view.route('', methods=['POST'])
def add():
content = request.form['content']
todo = Todo(content=content)
todo.save()
return redirect(url_for('todos.show'))
|
doismellburning/edx-platform
|
refs/heads/master
|
lms/djangoapps/survey/migrations/0002_auto__add_field_surveyanswer_course_key.py
|
39
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SurveyAnswer.course_key'
db.add_column('survey_surveyanswer', 'course_key',
self.gf('xmodule_django.models.CourseKeyField')(max_length=255, null=True, db_index=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SurveyAnswer.course_key'
db.delete_column('survey_surveyanswer', 'course_key')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.surveyanswer': {
'Meta': {'object_name': 'SurveyAnswer'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'field_value': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyForm']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'survey.surveyform': {
'Meta': {'object_name': 'SurveyForm'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'form': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
}
}
complete_apps = ['survey']
|
WarrenWeckesser/scipy
|
refs/heads/master
|
scipy/spatial/tests/test_distance.py
|
10
|
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os.path
from functools import wraps, partial
import numpy as np
import warnings
from numpy.linalg import norm
from numpy.testing import (verbose, assert_,
assert_array_equal, assert_equal,
assert_almost_equal, assert_allclose,
suppress_warnings)
import pytest
from pytest import raises as assert_raises
import scipy.spatial.distance
from scipy.spatial import _distance_pybind
from scipy.spatial.distance import (
squareform, pdist, cdist, num_obs_y, num_obs_dm, is_valid_dm, is_valid_y,
_validate_vector, _METRICS_NAMES, _METRICS)
# these were missing: chebyshev cityblock kulsinski
# jensenshannon, matching and seuclidean are referenced by string name.
from scipy.spatial.distance import (braycurtis, canberra, chebyshev, cityblock,
correlation, cosine, dice, euclidean,
hamming, jaccard, jensenshannon,
kulsinski, mahalanobis, matching,
minkowski, rogerstanimoto, russellrao,
seuclidean, sokalmichener, sokalsneath,
sqeuclidean, yule)
from scipy.spatial.distance import wminkowski as old_wminkowski
_filenames = [
"cdist-X1.txt",
"cdist-X2.txt",
"iris.txt",
"pdist-boolean-inp.txt",
"pdist-chebyshev-ml-iris.txt",
"pdist-chebyshev-ml.txt",
"pdist-cityblock-ml-iris.txt",
"pdist-cityblock-ml.txt",
"pdist-correlation-ml-iris.txt",
"pdist-correlation-ml.txt",
"pdist-cosine-ml-iris.txt",
"pdist-cosine-ml.txt",
"pdist-double-inp.txt",
"pdist-euclidean-ml-iris.txt",
"pdist-euclidean-ml.txt",
"pdist-hamming-ml.txt",
"pdist-jaccard-ml.txt",
"pdist-jensenshannon-ml-iris.txt",
"pdist-jensenshannon-ml.txt",
"pdist-minkowski-3.2-ml-iris.txt",
"pdist-minkowski-3.2-ml.txt",
"pdist-minkowski-5.8-ml-iris.txt",
"pdist-seuclidean-ml-iris.txt",
"pdist-seuclidean-ml.txt",
"pdist-spearman-ml.txt",
"random-bool-data.txt",
"random-double-data.txt",
"random-int-data.txt",
"random-uint-data.txt",
]
_tdist = np.array([[0, 662, 877, 255, 412, 996],
[662, 0, 295, 468, 268, 400],
[877, 295, 0, 754, 564, 138],
[255, 468, 754, 0, 219, 869],
[412, 268, 564, 219, 0, 669],
[996, 400, 138, 869, 669, 0]], dtype='double')
_ytdist = squareform(_tdist)
# A hashmap of expected output arrays for the tests. These arrays
# come from a list of text files, which are read prior to testing.
# Each test loads inputs and outputs from this dictionary.
eo = {}
def load_testing_files():
for fn in _filenames:
name = fn.replace(".txt", "").replace("-ml", "")
fqfn = os.path.join(os.path.dirname(__file__), 'data', fn)
fp = open(fqfn)
eo[name] = np.loadtxt(fp)
fp.close()
eo['pdist-boolean-inp'] = np.bool_(eo['pdist-boolean-inp'])
eo['random-bool-data'] = np.bool_(eo['random-bool-data'])
eo['random-float32-data'] = np.float32(eo['random-double-data'])
eo['random-int-data'] = np.int_(eo['random-int-data'])
eo['random-uint-data'] = np.uint(eo['random-uint-data'])
load_testing_files()
def _is_32bit():
return np.intp(0).itemsize < 8
def _chk_asarrays(arrays, axis=None):
arrays = [np.asanyarray(a) for a in arrays]
if axis is None:
# np < 1.10 ravel removes subclass from arrays
arrays = [np.ravel(a) if a.ndim != 1 else a
for a in arrays]
axis = 0
arrays = tuple(np.atleast_1d(a) for a in arrays)
if axis < 0:
if not all(a.ndim == arrays[0].ndim for a in arrays):
raise ValueError("array ndim must be the same for neg axis")
axis = range(arrays[0].ndim)[axis]
return arrays + (axis,)
def _chk_weights(arrays, weights=None, axis=None,
force_weights=False, simplify_weights=True,
pos_only=False, neg_check=False,
nan_screen=False, mask_screen=False,
ddof=None):
chked = _chk_asarrays(arrays, axis=axis)
arrays, axis = chked[:-1], chked[-1]
simplify_weights = simplify_weights and not force_weights
if not force_weights and mask_screen:
force_weights = any(np.ma.getmask(a) is not np.ma.nomask for a in arrays)
if nan_screen:
has_nans = [np.isnan(np.sum(a)) for a in arrays]
if any(has_nans):
mask_screen = True
force_weights = True
arrays = tuple(np.ma.masked_invalid(a) if has_nan else a
for a, has_nan in zip(arrays, has_nans))
if weights is not None:
weights = np.asanyarray(weights)
elif force_weights:
weights = np.ones(arrays[0].shape[axis])
else:
return arrays + (weights, axis)
if ddof:
weights = _freq_weights(weights)
if mask_screen:
weights = _weight_masked(arrays, weights, axis)
if not all(weights.shape == (a.shape[axis],) for a in arrays):
raise ValueError("weights shape must match arrays along axis")
if neg_check and (weights < 0).any():
raise ValueError("weights cannot be negative")
if pos_only:
pos_weights = np.nonzero(weights > 0)[0]
if pos_weights.size < weights.size:
arrays = tuple(np.take(a, pos_weights, axis=axis) for a in arrays)
weights = weights[pos_weights]
if simplify_weights and (weights == 1).all():
weights = None
return arrays + (weights, axis)
def _freq_weights(weights):
if weights is None:
return weights
int_weights = weights.astype(int)
if (weights != int_weights).any():
raise ValueError("frequency (integer count-type) weights required %s" % weights)
return int_weights
def _weight_masked(arrays, weights, axis):
if axis is None:
axis = 0
weights = np.asanyarray(weights)
for a in arrays:
axis_mask = np.ma.getmask(a)
if axis_mask is np.ma.nomask:
continue
if a.ndim > 1:
not_axes = tuple(i for i in range(a.ndim) if i != axis)
axis_mask = axis_mask.any(axis=not_axes)
weights *= 1 - axis_mask.astype(int)
return weights
def within_tol(a, b, tol):
return np.abs(a - b).max() < tol
def _assert_within_tol(a, b, atol=0, rtol=0, verbose_=False):
if verbose_:
print(np.abs(a - b).max())
assert_allclose(a, b, rtol=rtol, atol=atol)
def _rand_split(arrays, weights, axis, split_per, seed=None):
# inverse operation for stats.collapse_weights
weights = np.array(weights, dtype=np.float64) # modified inplace; need a copy
seeded_rand = np.random.RandomState(seed)
def mytake(a, ix, axis):
record = np.asanyarray(np.take(a, ix, axis=axis))
return record.reshape([a.shape[i] if i != axis else 1
for i in range(a.ndim)])
n_obs = arrays[0].shape[axis]
assert all(a.shape[axis] == n_obs for a in arrays), "data must be aligned on sample axis"
for i in range(int(split_per) * n_obs):
split_ix = seeded_rand.randint(n_obs + i)
prev_w = weights[split_ix]
q = seeded_rand.rand()
weights[split_ix] = q * prev_w
weights = np.append(weights, (1. - q) * prev_w)
arrays = [np.append(a, mytake(a, split_ix, axis=axis),
axis=axis) for a in arrays]
return arrays, weights
def _rough_check(a, b, compare_assert=partial(assert_allclose, atol=1e-5),
key=lambda x: x, w=None):
check_a = key(a)
check_b = key(b)
try:
if np.array(check_a != check_b).any(): # try strict equality for string types
compare_assert(check_a, check_b)
except AttributeError: # masked array
compare_assert(check_a, check_b)
except (TypeError, ValueError): # nested data structure
for a_i, b_i in zip(check_a, check_b):
_rough_check(a_i, b_i, compare_assert=compare_assert)
# diff from test_stats:
# n_args=2, weight_arg='w', default_axis=None
# ma_safe = False, nan_safe = False
def _weight_checked(fn, n_args=2, default_axis=None, key=lambda x: x, weight_arg='w',
squeeze=True, silent=False,
ones_test=True, const_test=True, dup_test=True,
split_test=True, dud_test=True, ma_safe=False, ma_very_safe=False, nan_safe=False,
split_per=1.0, seed=0, compare_assert=partial(assert_allclose, atol=1e-5)):
"""runs fn on its arguments 2 or 3 ways, checks that the results are the same,
then returns the same thing it would have returned before"""
@wraps(fn)
def wrapped(*args, **kwargs):
result = fn(*args, **kwargs)
arrays = args[:n_args]
rest = args[n_args:]
weights = kwargs.get(weight_arg, None)
axis = kwargs.get('axis', default_axis)
chked = _chk_weights(arrays, weights=weights, axis=axis, force_weights=True, mask_screen=True)
arrays, weights, axis = chked[:-2], chked[-2], chked[-1]
if squeeze:
arrays = [np.atleast_1d(a.squeeze()) for a in arrays]
try:
            # WEIGHTS CHECK 1: EQUALLY WEIGHTED OBSERVATIONS
args = tuple(arrays) + rest
if ones_test:
kwargs[weight_arg] = weights
_rough_check(result, fn(*args, **kwargs), key=key)
if const_test:
kwargs[weight_arg] = weights * 101.0
_rough_check(result, fn(*args, **kwargs), key=key)
kwargs[weight_arg] = weights * 0.101
try:
_rough_check(result, fn(*args, **kwargs), key=key)
except Exception as e:
raise type(e)((e, arrays, weights)) from e
# WEIGHTS CHECK 2: ADDL 0-WEIGHTED OBS
if dud_test:
# add randomly resampled rows, weighted at 0
dud_arrays, dud_weights = _rand_split(arrays, weights, axis, split_per=split_per, seed=seed)
dud_weights[:weights.size] = weights # not exactly 1 because of masked arrays
dud_weights[weights.size:] = 0
dud_args = tuple(dud_arrays) + rest
kwargs[weight_arg] = dud_weights
_rough_check(result, fn(*dud_args, **kwargs), key=key)
# increase the value of those 0-weighted rows
for a in dud_arrays:
indexer = [slice(None)] * a.ndim
indexer[axis] = slice(weights.size, None)
indexer = tuple(indexer)
a[indexer] = a[indexer] * 101
dud_args = tuple(dud_arrays) + rest
_rough_check(result, fn(*dud_args, **kwargs), key=key)
# set those 0-weighted rows to NaNs
for a in dud_arrays:
indexer = [slice(None)] * a.ndim
indexer[axis] = slice(weights.size, None)
indexer = tuple(indexer)
a[indexer] = a[indexer] * np.nan
if kwargs.get("nan_policy", None) == "omit" and nan_safe:
dud_args = tuple(dud_arrays) + rest
_rough_check(result, fn(*dud_args, **kwargs), key=key)
# mask out those nan values
if ma_safe:
dud_arrays = [np.ma.masked_invalid(a) for a in dud_arrays]
dud_args = tuple(dud_arrays) + rest
_rough_check(result, fn(*dud_args, **kwargs), key=key)
if ma_very_safe:
kwargs[weight_arg] = None
_rough_check(result, fn(*dud_args, **kwargs), key=key)
del dud_arrays, dud_args, dud_weights
# WEIGHTS CHECK 3: DUPLICATE DATA (DUMB SPLITTING)
if dup_test:
dup_arrays = [np.append(a, a, axis=axis) for a in arrays]
dup_weights = np.append(weights, weights) / 2.0
dup_args = tuple(dup_arrays) + rest
kwargs[weight_arg] = dup_weights
_rough_check(result, fn(*dup_args, **kwargs), key=key)
del dup_args, dup_arrays, dup_weights
            # WEIGHTS CHECK 4: RANDOM SPLITTING
if split_test and split_per > 0:
split_arrays, split_weights = _rand_split(arrays, weights, axis, split_per=split_per, seed=seed)
split_args = tuple(split_arrays) + rest
kwargs[weight_arg] = split_weights
_rough_check(result, fn(*split_args, **kwargs), key=key)
except NotImplementedError as e:
# when some combination of arguments makes weighting impossible,
# this is the desired response
if not silent:
warnings.warn("%s NotImplemented weights: %s" % (fn.__name__, e))
return result
return wrapped
wcdist = _weight_checked(cdist, default_axis=1, squeeze=False)
wcdist_no_const = _weight_checked(cdist, default_axis=1, squeeze=False, const_test=False)
wpdist = _weight_checked(pdist, default_axis=1, squeeze=False, n_args=1)
wpdist_no_const = _weight_checked(pdist, default_axis=1, squeeze=False, const_test=False, n_args=1)
wrogerstanimoto = _weight_checked(rogerstanimoto)
wmatching = whamming = _weight_checked(hamming, dud_test=False)
wyule = _weight_checked(yule)
wdice = _weight_checked(dice)
wcityblock = _weight_checked(cityblock)
wchebyshev = _weight_checked(chebyshev)
wcosine = _weight_checked(cosine)
wcorrelation = _weight_checked(correlation)
wkulsinski = _weight_checked(kulsinski)
wminkowski = _weight_checked(minkowski, const_test=False)
wjaccard = _weight_checked(jaccard)
weuclidean = _weight_checked(euclidean, const_test=False)
wsqeuclidean = _weight_checked(sqeuclidean, const_test=False)
wbraycurtis = _weight_checked(braycurtis)
wcanberra = _weight_checked(canberra, const_test=False)
wsokalsneath = _weight_checked(sokalsneath)
wsokalmichener = _weight_checked(sokalmichener)
wrussellrao = _weight_checked(russellrao)
class TestCdist:
def setup_method(self):
self.rnd_eo_names = ['random-float32-data', 'random-int-data',
'random-uint-data', 'random-double-data',
'random-bool-data']
self.valid_upcasts = {'bool': [np.uint, np.int_, np.float32, np.double],
'uint': [np.int_, np.float32, np.double],
'int': [np.float32, np.double],
'float32': [np.double]}
def test_cdist_extra_args(self):
# Tests that args and kwargs are correctly handled
def _my_metric(x, y, arg, kwarg=1, kwarg2=2):
return arg + kwarg + kwarg2
X1 = [[1., 2., 3.], [1.2, 2.3, 3.4], [2.2, 2.3, 4.4]]
X2 = [[7., 5., 8.], [7.5, 5.8, 8.4], [5.5, 5.8, 4.4]]
kwargs = {'N0tV4l1D_p4raM': 3.14, "w":np.arange(3)}
args = [3.14] * 200
with suppress_warnings() as w:
w.filter(DeprecationWarning)
for metric in _METRICS_NAMES:
assert_raises(TypeError, cdist, X1, X2,
metric=metric, **kwargs)
assert_raises(TypeError, cdist, X1, X2,
metric=eval(metric), **kwargs)
assert_raises(TypeError, cdist, X1, X2,
metric="test_" + metric, **kwargs)
assert_raises(TypeError, cdist, X1, X2,
metric=metric, *args)
assert_raises(TypeError, cdist, X1, X2,
metric=eval(metric), *args)
assert_raises(TypeError, cdist, X1, X2,
metric="test_" + metric, *args)
assert_raises(TypeError, cdist, X1, X2, _my_metric)
assert_raises(TypeError, cdist, X1, X2, _my_metric, *args)
assert_raises(TypeError, cdist, X1, X2, _my_metric, **kwargs)
assert_raises(TypeError, cdist, X1, X2, _my_metric,
kwarg=2.2, kwarg2=3.3)
assert_raises(TypeError, cdist, X1, X2, _my_metric, 1, 2, kwarg=2.2)
assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1, 2.2, 3.3)
assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1, 2.2)
assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1)
assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1,
kwarg=2.2, kwarg2=3.3)
# this should work
assert_allclose(cdist(X1, X2, metric=_my_metric,
arg=1.1, kwarg2=3.3), 5.4)
def test_cdist_euclidean_random_unicode(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = wcdist_no_const(X1, X2, 'euclidean')
Y2 = wcdist_no_const(X1, X2, 'test_euclidean')
_assert_within_tol(Y1, Y2, eps, verbose > 2)
@pytest.mark.parametrize("p", [1.0, 1.23, 2.0, 3.8, 4.6, np.inf])
def test_cdist_minkowski_random(self, p):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = wcdist_no_const(X1, X2, 'minkowski', p=p)
Y2 = wcdist_no_const(X1, X2, 'test_minkowski', p=p)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_cosine_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = wcdist(X1, X2, 'cosine')
# Naive implementation
def norms(X):
return np.linalg.norm(X, axis=1).reshape(-1, 1)
Y2 = 1 - np.dot((X1 / norms(X1)), (X2 / norms(X2)).T)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_mahalanobis(self):
# 1-dimensional observations
x1 = np.array([[2], [3]])
x2 = np.array([[2], [5]])
dist = cdist(x1, x2, metric='mahalanobis')
assert_allclose(dist, [[0.0, np.sqrt(4.5)], [np.sqrt(0.5), np.sqrt(2)]])
# 2-dimensional observations
x1 = np.array([[0, 0], [-1, 0]])
x2 = np.array([[0, 2], [1, 0], [0, -2]])
dist = cdist(x1, x2, metric='mahalanobis')
rt2 = np.sqrt(2)
assert_allclose(dist, [[rt2, rt2, rt2], [2, 2 * rt2, 2]])
# Too few observations
assert_raises(ValueError,
cdist, [[0, 1]], [[2, 3]], metric='mahalanobis')
def test_cdist_custom_notdouble(self):
class myclass:
pass
def _my_metric(x, y):
if not isinstance(x[0], myclass) or not isinstance(y[0], myclass):
raise ValueError("Type has been changed")
return 1.123
data = np.array([[myclass()]], dtype=object)
cdist_y = cdist(data, data, metric=_my_metric)
right_y = 1.123
assert_equal(cdist_y, right_y, verbose=verbose > 2)
def _check_calling_conventions(self, X1, X2, metric, eps=1e-07, **kwargs):
# helper function for test_cdist_calling_conventions
try:
y1 = cdist(X1, X2, metric=metric, **kwargs)
y2 = cdist(X1, X2, metric=eval(metric), **kwargs)
y3 = cdist(X1, X2, metric="test_" + metric, **kwargs)
except Exception as e:
e_cls = e.__class__
if verbose > 2:
print(e_cls.__name__)
print(e)
assert_raises(e_cls, cdist, X1, X2, metric=metric, **kwargs)
assert_raises(e_cls, cdist, X1, X2, metric=eval(metric), **kwargs)
assert_raises(e_cls, cdist, X1, X2, metric="test_" + metric, **kwargs)
else:
_assert_within_tol(y1, y2, rtol=eps, verbose_=verbose > 2)
_assert_within_tol(y1, y3, rtol=eps, verbose_=verbose > 2)
def test_cdist_calling_conventions(self):
# Ensures that specifying the metric with a str or scipy function
# gives the same behaviour (i.e. same result or same exception).
        # NOTE: The correctness should be checked within each metric's tests.
for eo_name in self.rnd_eo_names:
# subsampling input data to speed-up tests
            # NOTE: the number of samples needs to be greater than the
            # number of dimensions for mahalanobis
X1 = eo[eo_name][::5, ::-2]
X2 = eo[eo_name][1::5, ::2]
for metric in _METRICS_NAMES:
if verbose > 2:
print("testing: ", metric, " with: ", eo_name)
if metric == 'wminkowski':
continue
if metric in {'dice', 'yule', 'kulsinski', 'matching',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath'} and 'bool' not in eo_name:
# python version permits non-bools e.g. for fuzzy logic
continue
self._check_calling_conventions(X1, X2, metric)
# Testing built-in metrics with extra args
if metric == "seuclidean":
X12 = np.vstack([X1, X2]).astype(np.double)
V = np.var(X12, axis=0, ddof=1)
self._check_calling_conventions(X1, X2, metric, V=V)
elif metric == "mahalanobis":
X12 = np.vstack([X1, X2]).astype(np.double)
V = np.atleast_2d(np.cov(X12.T))
VI = np.array(np.linalg.inv(V).T)
self._check_calling_conventions(X1, X2, metric, VI=VI)
def test_cdist_dtype_equivalence(self):
# Tests that the result is not affected by type up-casting
eps = 1e-07
tests = [(eo['random-bool-data'], self.valid_upcasts['bool']),
(eo['random-uint-data'], self.valid_upcasts['uint']),
(eo['random-int-data'], self.valid_upcasts['int']),
(eo['random-float32-data'], self.valid_upcasts['float32'])]
for metric in _METRICS_NAMES:
for test in tests:
X1 = test[0][::5, ::-2]
X2 = test[0][1::5, ::2]
try:
y1 = cdist(X1, X2, metric=metric)
except Exception as e:
e_cls = e.__class__
if verbose > 2:
print(e_cls.__name__)
print(e)
for new_type in test[1]:
X1new = new_type(X1)
X2new = new_type(X2)
assert_raises(e_cls, cdist, X1new, X2new, metric=metric)
else:
for new_type in test[1]:
y2 = cdist(new_type(X1), new_type(X2), metric=metric)
_assert_within_tol(y1, y2, eps, verbose > 2)
def test_cdist_out(self):
# Test that out parameter works properly
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
out_r, out_c = X1.shape[0], X2.shape[0]
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
message="'wminkowski' metric is deprecated")
for metric in _METRICS_NAMES:
kwargs = dict()
if metric in ['minkowski', 'wminkowski']:
kwargs['p'] = 1.23
if metric == 'wminkowski':
kwargs['w'] = 1.0 / X1.std(axis=0)
out1 = np.empty((out_r, out_c), dtype=np.double)
Y1 = cdist(X1, X2, metric, **kwargs)
Y2 = cdist(X1, X2, metric, out=out1, **kwargs)
# test that output is numerically equivalent
_assert_within_tol(Y1, Y2, eps, verbose > 2)
# test that Y_test1 and out1 are the same object
assert_(Y2 is out1)
# test for incorrect shape
out2 = np.empty((out_r-1, out_c+1), dtype=np.double)
assert_raises(ValueError,
cdist, X1, X2, metric, out=out2, **kwargs)
# test for C-contiguous order
out3 = np.empty(
(2 * out_r, 2 * out_c), dtype=np.double)[::2, ::2]
out4 = np.empty((out_r, out_c), dtype=np.double, order='F')
assert_raises(ValueError,
cdist, X1, X2, metric, out=out3, **kwargs)
assert_raises(ValueError,
cdist, X1, X2, metric, out=out4, **kwargs)
# test for incorrect dtype
out5 = np.empty((out_r, out_c), dtype=np.int64)
assert_raises(ValueError,
cdist, X1, X2, metric, out=out5, **kwargs)
def test_striding(self):
        # test that striding is handled correctly with calls to
# _copy_array_if_base_present
eps = 1e-07
X1 = eo['cdist-X1'][::2, ::2]
X2 = eo['cdist-X2'][::2, ::2]
X1_copy = X1.copy()
X2_copy = X2.copy()
# confirm equivalence
assert_equal(X1, X1_copy)
assert_equal(X2, X2_copy)
# confirm contiguity
assert_(not X1.flags.c_contiguous)
assert_(not X2.flags.c_contiguous)
assert_(X1_copy.flags.c_contiguous)
assert_(X2_copy.flags.c_contiguous)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "'wminkowski' metric is deprecated")
for metric in _METRICS_NAMES:
kwargs = dict()
if metric in ['minkowski', 'wminkowski']:
kwargs['p'] = 1.23
if metric == 'wminkowski':
kwargs['w'] = 1.0 / X1.std(axis=0)
Y1 = cdist(X1, X2, metric, **kwargs)
Y2 = cdist(X1_copy, X2_copy, metric, **kwargs)
# test that output is numerically equivalent
_assert_within_tol(Y1, Y2, eps, verbose > 2)
class TestPdist:
def setup_method(self):
self.rnd_eo_names = ['random-float32-data', 'random-int-data',
'random-uint-data', 'random-double-data',
'random-bool-data']
self.valid_upcasts = {'bool': [np.uint, np.int_, np.float32, np.double],
'uint': [np.int_, np.float32, np.double],
'int': [np.float32, np.double],
'float32': [np.double]}
def test_pdist_extra_args(self):
# Tests that args and kwargs are correctly handled
def _my_metric(x, y, arg, kwarg=1, kwarg2=2):
return arg + kwarg + kwarg2
X1 = [[1., 2.], [1.2, 2.3], [2.2, 2.3]]
        kwargs = {'N0tV4l1D_p4raM': 3.14, "w": np.arange(2)}
args = [3.14] * 200
with suppress_warnings() as w:
w.filter(DeprecationWarning)
for metric in _METRICS_NAMES:
assert_raises(TypeError, pdist, X1, metric=metric, **kwargs)
assert_raises(TypeError, pdist, X1,
metric=eval(metric), **kwargs)
assert_raises(TypeError, pdist, X1,
metric="test_" + metric, **kwargs)
assert_raises(TypeError, pdist, X1, metric=metric, *args)
assert_raises(TypeError, pdist, X1, metric=eval(metric), *args)
assert_raises(TypeError, pdist, X1,
metric="test_" + metric, *args)
assert_raises(TypeError, pdist, X1, _my_metric)
assert_raises(TypeError, pdist, X1, _my_metric, *args)
assert_raises(TypeError, pdist, X1, _my_metric, **kwargs)
assert_raises(TypeError, pdist, X1, _my_metric,
kwarg=2.2, kwarg2=3.3)
assert_raises(TypeError, pdist, X1, _my_metric, 1, 2, kwarg=2.2)
assert_raises(TypeError, pdist, X1, _my_metric, 1.1, 2.2, 3.3)
assert_raises(TypeError, pdist, X1, _my_metric, 1.1, 2.2)
assert_raises(TypeError, pdist, X1, _my_metric, 1.1)
assert_raises(TypeError, pdist, X1, _my_metric, 1.1,
kwarg=2.2, kwarg2=3.3)
# these should work
assert_allclose(pdist(X1, metric=_my_metric,
arg=1.1, kwarg2=3.3), 5.4)
def test_pdist_euclidean_random(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = wpdist_no_const(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_u(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = wpdist_no_const(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-euclidean']
Y_test1 = wpdist_no_const(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_nonC(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test2 = wpdist_no_const(X, 'test_euclidean')
_assert_within_tol(Y_test2, Y_right, eps)
@pytest.mark.slow
def test_pdist_euclidean_iris_double(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test1 = wpdist_no_const(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
@pytest.mark.slow
def test_pdist_euclidean_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-euclidean-iris']
Y_test1 = wpdist_no_const(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
@pytest.mark.slow
def test_pdist_euclidean_iris_nonC(self):
# Test pdist(X, 'test_euclidean') [the non-C implementation] on the
# Iris data set.
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test2 = wpdist_no_const(X, 'test_euclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_seuclidean_random(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_random_float32(self):
eps = 1e-05
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
        # Check that no error is raised when V has float32 dtype (#11171).
V = np.var(X, axis=0, ddof=1)
Y_test2 = pdist(X, 'seuclidean', V=V)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_seuclidean_random_nonC(self):
        # Test pdist(X, 'test_seuclidean') [the non-C implementation]
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-seuclidean']
Y_test2 = pdist(X, 'test_seuclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_seuclidean_iris(self):
eps = 1e-05
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_iris_float32(self):
# Tests pdist(X, 'seuclidean') on the Iris data set (float32).
eps = 1e-05
X = np.float32(eo['iris'])
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_iris_nonC(self):
# Test pdist(X, 'test_seuclidean') [the non-C implementation] on the
# Iris data set.
eps = 1e-05
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
Y_test2 = pdist(X, 'test_seuclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cosine_random(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test1 = wpdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cosine']
Y_test1 = wpdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_random_nonC(self):
# Test pdist(X, 'test_cosine') [the non-C implementation]
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test2 = wpdist(X, 'test_cosine')
_assert_within_tol(Y_test2, Y_right, eps)
@pytest.mark.slow
def test_pdist_cosine_iris(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test1 = wpdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
@pytest.mark.slow
def test_pdist_cosine_iris_float32(self):
eps = 1e-07
X = np.float32(eo['iris'])
Y_right = eo['pdist-cosine-iris']
Y_test1 = wpdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
@pytest.mark.slow
def test_pdist_cosine_iris_nonC(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test2 = wpdist(X, 'test_cosine')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cosine_bounds(self):
# Test adapted from @joernhees's example at gh-5208: case where
# cosine distance used to be negative. XXX: very sensitive to the
# specific norm computation.
x = np.abs(np.random.RandomState(1337).rand(91))
X = np.vstack([x, x])
assert_(wpdist(X, 'cosine')[0] >= 0,
msg='cosine distance should be non-negative')
def test_pdist_cityblock_random(self):
eps = 1e-06
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test1 = wpdist_no_const(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_random_float32(self):
eps = 1e-06
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cityblock']
Y_test1 = wpdist_no_const(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_random_nonC(self):
eps = 1e-06
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test2 = wpdist_no_const(X, 'test_cityblock')
_assert_within_tol(Y_test2, Y_right, eps)
@pytest.mark.slow
def test_pdist_cityblock_iris(self):
eps = 1e-14
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test1 = wpdist_no_const(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
@pytest.mark.slow
def test_pdist_cityblock_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-cityblock-iris']
Y_test1 = wpdist_no_const(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
@pytest.mark.slow
def test_pdist_cityblock_iris_nonC(self):
# Test pdist(X, 'test_cityblock') [the non-C implementation] on the
# Iris data set.
eps = 1e-14
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test2 = wpdist_no_const(X, 'test_cityblock')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_correlation_random(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test1 = wpdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-correlation']
Y_test1 = wpdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_random_nonC(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test2 = wpdist(X, 'test_correlation')
_assert_within_tol(Y_test2, Y_right, eps)
@pytest.mark.slow
def test_pdist_correlation_iris(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test1 = wpdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
@pytest.mark.slow
def test_pdist_correlation_iris_float32(self):
eps = 1e-07
X = eo['iris']
Y_right = np.float32(eo['pdist-correlation-iris'])
Y_test1 = wpdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
@pytest.mark.slow
def test_pdist_correlation_iris_nonC(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test2 = wpdist(X, 'test_correlation')
_assert_within_tol(Y_test2, Y_right, eps)
@pytest.mark.parametrize("p", [1.0, 2.0, 3.2, np.inf])
def test_pdist_minkowski_random_p(self, p):
eps = 1e-05
X = eo['pdist-double-inp']
Y1 = wpdist_no_const(X, 'minkowski', p=p)
Y2 = wpdist_no_const(X, 'test_minkowski', p=p)
_assert_within_tol(Y1, Y2, eps)
def test_pdist_minkowski_random(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_random_float32(self):
eps = 1e-05
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_random_nonC(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test2 = wpdist_no_const(X, 'test_minkowski', p=3.2)
_assert_within_tol(Y_test2, Y_right, eps)
@pytest.mark.slow
def test_pdist_minkowski_3_2_iris(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
_assert_within_tol(Y_test1, Y_right, eps)
@pytest.mark.slow
def test_pdist_minkowski_3_2_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
_assert_within_tol(Y_test1, Y_right, eps)
@pytest.mark.slow
def test_pdist_minkowski_3_2_iris_nonC(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test2 = wpdist_no_const(X, 'test_minkowski', p=3.2)
_assert_within_tol(Y_test2, Y_right, eps)
@pytest.mark.slow
def test_pdist_minkowski_5_8_iris(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = wpdist_no_const(X, 'minkowski', p=5.8)
_assert_within_tol(Y_test1, Y_right, eps)
@pytest.mark.slow
def test_pdist_minkowski_5_8_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = wpdist_no_const(X, 'minkowski', p=5.8)
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
@pytest.mark.slow
def test_pdist_minkowski_5_8_iris_nonC(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test2 = wpdist_no_const(X, 'test_minkowski', p=5.8)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_mahalanobis(self):
# 1-dimensional observations
x = np.array([2.0, 2.0, 3.0, 5.0]).reshape(-1, 1)
dist = pdist(x, metric='mahalanobis')
assert_allclose(dist, [0.0, np.sqrt(0.5), np.sqrt(4.5),
np.sqrt(0.5), np.sqrt(4.5), np.sqrt(2.0)])
# 2-dimensional observations
x = np.array([[0, 0], [-1, 0], [0, 2], [1, 0], [0, -2]])
dist = pdist(x, metric='mahalanobis')
rt2 = np.sqrt(2)
assert_allclose(dist, [rt2, rt2, rt2, rt2, 2, 2 * rt2, 2, 2, 2 * rt2, 2])
# Too few observations
assert_raises(ValueError,
wpdist, [[0, 1], [2, 3]], metric='mahalanobis')
def test_pdist_hamming_random(self):
eps = 1e-07
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test1 = wpdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_hamming_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = wpdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_hamming_random_nonC(self):
eps = 1e-07
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test2 = wpdist(X, 'test_hamming')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_dhamming_random(self):
eps = 1e-07
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = wpdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_dhamming_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = wpdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_dhamming_random_nonC(self):
eps = 1e-07
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test2 = wpdist(X, 'test_hamming')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_jaccard_random(self):
eps = 1e-08
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test1 = wpdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_jaccard_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = wpdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_jaccard_random_nonC(self):
eps = 1e-08
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test2 = wpdist(X, 'test_jaccard')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_djaccard_random(self):
eps = 1e-08
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = wpdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_djaccard_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = wpdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_djaccard_allzeros(self):
eps = 1e-08
Y = pdist(np.zeros((5, 3)), 'jaccard')
_assert_within_tol(np.zeros(10), Y, eps)
def test_pdist_djaccard_random_nonC(self):
eps = 1e-08
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test2 = wpdist(X, 'test_jaccard')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_jensenshannon_random(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-jensenshannon']
Y_test1 = pdist(X, 'jensenshannon')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_jensenshannon_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-jensenshannon']
Y_test1 = pdist(X, 'jensenshannon')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_jensenshannon_random_nonC(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-jensenshannon']
Y_test2 = pdist(X, 'test_jensenshannon')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_jensenshannon_iris(self):
if _is_32bit():
# Test failing on 32-bit Linux on Azure otherwise, see gh-12810
eps = 1.5e-10
else:
eps = 1e-12
X = eo['iris']
Y_right = eo['pdist-jensenshannon-iris']
Y_test1 = pdist(X, 'jensenshannon')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_jensenshannon_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-jensenshannon-iris']
Y_test1 = pdist(X, 'jensenshannon')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_jensenshannon_iris_nonC(self):
eps = 5e-12
X = eo['iris']
Y_right = eo['pdist-jensenshannon-iris']
Y_test2 = pdist(X, 'test_jensenshannon')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_djaccard_allzeros_nonC(self):
eps = 1e-08
Y = pdist(np.zeros((5, 3)), 'test_jaccard')
_assert_within_tol(np.zeros(10), Y, eps)
def test_pdist_chebyshev_random(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-chebyshev']
Y_test1 = pdist(X, 'chebyshev')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_chebyshev_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-chebyshev']
Y_test1 = pdist(X, 'chebyshev')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_chebyshev_random_nonC(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-chebyshev']
Y_test2 = pdist(X, 'test_chebyshev')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_chebyshev_iris(self):
eps = 1e-15
X = eo['iris']
Y_right = eo['pdist-chebyshev-iris']
Y_test1 = pdist(X, 'chebyshev')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_chebyshev_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-chebyshev-iris']
Y_test1 = pdist(X, 'chebyshev')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_chebyshev_iris_nonC(self):
eps = 1e-15
X = eo['iris']
Y_right = eo['pdist-chebyshev-iris']
Y_test2 = pdist(X, 'test_chebyshev')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_matching_mtica1(self):
# Test matching(*,*) with mtica example #1 (nums).
m = wmatching(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wmatching(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
assert_allclose(m, 0.6, rtol=0, atol=1e-10)
assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
def test_pdist_matching_mtica2(self):
# Test matching(*,*) with mtica example #2.
m = wmatching(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wmatching(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)
assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)
def test_pdist_jaccard_mtica1(self):
m = wjaccard(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wjaccard(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
assert_allclose(m, 0.6, rtol=0, atol=1e-10)
assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
def test_pdist_jaccard_mtica2(self):
m = wjaccard(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wjaccard(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)
assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)
def test_pdist_yule_mtica1(self):
m = wyule(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wyule(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 2, rtol=0, atol=1e-10)
assert_allclose(m2, 2, rtol=0, atol=1e-10)
def test_pdist_yule_mtica2(self):
m = wyule(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wyule(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 2, rtol=0, atol=1e-10)
assert_allclose(m2, 2, rtol=0, atol=1e-10)
def test_pdist_dice_mtica1(self):
m = wdice(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wdice(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 3 / 7, rtol=0, atol=1e-10)
assert_allclose(m2, 3 / 7, rtol=0, atol=1e-10)
def test_pdist_dice_mtica2(self):
m = wdice(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wdice(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 0.5, rtol=0, atol=1e-10)
assert_allclose(m2, 0.5, rtol=0, atol=1e-10)
def test_pdist_sokalsneath_mtica1(self):
m = sokalsneath(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = sokalsneath(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 3 / 4, rtol=0, atol=1e-10)
assert_allclose(m2, 3 / 4, rtol=0, atol=1e-10)
def test_pdist_sokalsneath_mtica2(self):
m = wsokalsneath(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wsokalsneath(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 4 / 5, rtol=0, atol=1e-10)
assert_allclose(m2, 4 / 5, rtol=0, atol=1e-10)
def test_pdist_rogerstanimoto_mtica1(self):
m = wrogerstanimoto(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wrogerstanimoto(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 3 / 4, rtol=0, atol=1e-10)
assert_allclose(m2, 3 / 4, rtol=0, atol=1e-10)
def test_pdist_rogerstanimoto_mtica2(self):
m = wrogerstanimoto(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wrogerstanimoto(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 4 / 5, rtol=0, atol=1e-10)
assert_allclose(m2, 4 / 5, rtol=0, atol=1e-10)
def test_pdist_russellrao_mtica1(self):
m = wrussellrao(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wrussellrao(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 3 / 5, rtol=0, atol=1e-10)
assert_allclose(m2, 3 / 5, rtol=0, atol=1e-10)
def test_pdist_russellrao_mtica2(self):
m = wrussellrao(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wrussellrao(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)
assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)
@pytest.mark.slow
def test_pdist_canberra_match(self):
D = eo['iris']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = wpdist_no_const(D, "canberra")
y2 = wpdist_no_const(D, "test_canberra")
_assert_within_tol(y1, y2, eps, verbose > 2)
def test_pdist_canberra_ticket_711(self):
# Test pdist(X, 'canberra') to see if Canberra gives the right result
# as reported on gh-1238.
eps = 1e-8
pdist_y = wpdist_no_const(([3.3], [3.4]), "canberra")
right_y = 0.01492537
_assert_within_tol(pdist_y, right_y, eps, verbose > 2)
def test_pdist_custom_notdouble(self):
# tests that when using a custom metric the data type is not altered
class myclass:
pass
def _my_metric(x, y):
if not isinstance(x[0], myclass) or not isinstance(y[0], myclass):
raise ValueError("Type has been changed")
return 1.123
data = np.array([[myclass()], [myclass()]], dtype=object)
pdist_y = pdist(data, metric=_my_metric)
right_y = 1.123
assert_equal(pdist_y, right_y, verbose=verbose > 2)
def _check_calling_conventions(self, X, metric, eps=1e-07, **kwargs):
# helper function for test_pdist_calling_conventions
try:
y1 = pdist(X, metric=metric, **kwargs)
y2 = pdist(X, metric=eval(metric), **kwargs)
y3 = pdist(X, metric="test_" + metric, **kwargs)
except Exception as e:
e_cls = e.__class__
if verbose > 2:
print(e_cls.__name__)
print(e)
assert_raises(e_cls, pdist, X, metric=metric, **kwargs)
assert_raises(e_cls, pdist, X, metric=eval(metric), **kwargs)
assert_raises(e_cls, pdist, X, metric="test_" + metric, **kwargs)
else:
_assert_within_tol(y1, y2, rtol=eps, verbose_=verbose > 2)
_assert_within_tol(y1, y3, rtol=eps, verbose_=verbose > 2)
def test_pdist_calling_conventions(self):
# Ensures that specifying the metric with a str or scipy function
# gives the same behaviour (i.e. same result or same exception).
        # NOTE: The correctness should be checked within each metric's tests.
# NOTE: Extra args should be checked with a dedicated test
for eo_name in self.rnd_eo_names:
# subsampling input data to speed-up tests
            # NOTE: the number of samples needs to be greater than the
            # number of dimensions for mahalanobis
X = eo[eo_name][::5, ::2]
for metric in _METRICS_NAMES:
if metric == 'wminkowski':
continue
if verbose > 2:
print("testing: ", metric, " with: ", eo_name)
if metric in {'dice', 'yule', 'kulsinski', 'matching',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath'} and 'bool' not in eo_name:
# python version permits non-bools e.g. for fuzzy logic
continue
self._check_calling_conventions(X, metric)
# Testing built-in metrics with extra args
if metric == "seuclidean":
V = np.var(X.astype(np.double), axis=0, ddof=1)
self._check_calling_conventions(X, metric, V=V)
elif metric == "mahalanobis":
V = np.atleast_2d(np.cov(X.astype(np.double).T))
VI = np.array(np.linalg.inv(V).T)
self._check_calling_conventions(X, metric, VI=VI)
def test_pdist_dtype_equivalence(self):
# Tests that the result is not affected by type up-casting
eps = 1e-07
tests = [(eo['random-bool-data'], self.valid_upcasts['bool']),
(eo['random-uint-data'], self.valid_upcasts['uint']),
(eo['random-int-data'], self.valid_upcasts['int']),
(eo['random-float32-data'], self.valid_upcasts['float32'])]
for metric in _METRICS_NAMES:
for test in tests:
X1 = test[0][::5, ::2]
try:
y1 = pdist(X1, metric=metric)
except Exception as e:
e_cls = e.__class__
if verbose > 2:
print(e_cls.__name__)
print(e)
for new_type in test[1]:
X2 = new_type(X1)
assert_raises(e_cls, pdist, X2, metric=metric)
else:
for new_type in test[1]:
y2 = pdist(new_type(X1), metric=metric)
_assert_within_tol(y1, y2, eps, verbose > 2)
def test_pdist_out(self):
# Test that out parameter works properly
eps = 1e-07
X = eo['random-float32-data'][::5, ::2]
out_size = int((X.shape[0] * (X.shape[0] - 1)) / 2)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "'wminkowski' metric is deprecated")
for metric in _METRICS_NAMES:
kwargs = dict()
if metric in ['minkowski', 'wminkowski']:
kwargs['p'] = 1.23
if metric == 'wminkowski':
kwargs['w'] = 1.0 / X.std(axis=0)
out1 = np.empty(out_size, dtype=np.double)
Y_right = pdist(X, metric, **kwargs)
Y_test1 = pdist(X, metric, out=out1, **kwargs)
# test that output is numerically equivalent
_assert_within_tol(Y_test1, Y_right, eps)
# test that Y_test1 and out1 are the same object
assert_(Y_test1 is out1)
# test for incorrect shape
out2 = np.empty(out_size + 3, dtype=np.double)
assert_raises(ValueError, pdist, X, metric, out=out2, **kwargs)
# test for (C-)contiguous output
out3 = np.empty(2 * out_size, dtype=np.double)[::2]
assert_raises(ValueError, pdist, X, metric, out=out3, **kwargs)
# test for incorrect dtype
out5 = np.empty(out_size, dtype=np.int64)
assert_raises(ValueError, pdist, X, metric, out=out5, **kwargs)
def test_striding(self):
        # test that striding is handled correctly with calls to
# _copy_array_if_base_present
eps = 1e-07
X = eo['random-float32-data'][::5, ::2]
X_copy = X.copy()
# confirm contiguity
assert_(not X.flags.c_contiguous)
assert_(X_copy.flags.c_contiguous)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
message="'wminkowski' metric is deprecated")
for metric in _METRICS_NAMES:
kwargs = dict()
if metric in ['minkowski', 'wminkowski']:
kwargs['p'] = 1.23
if metric == 'wminkowski':
kwargs['w'] = 1.0 / X.std(axis=0)
Y1 = pdist(X, metric, **kwargs)
Y2 = pdist(X_copy, metric, **kwargs)
# test that output is numerically equivalent
_assert_within_tol(Y1, Y2, eps, verbose > 2)
class TestSomeDistanceFunctions:
def setup_method(self):
# 1D arrays
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
self.cases = [(x, y)]
def test_minkowski(self):
for x, y in self.cases:
dist1 = wminkowski(x, y, p=1)
assert_almost_equal(dist1, 3.0)
dist1p5 = wminkowski(x, y, p=1.5)
assert_almost_equal(dist1p5, (1.0 + 2.0**1.5)**(2. / 3))
wminkowski(x, y, p=2)
# Check that casting input to minimum scalar type doesn't affect result
# (issue #10262). This could be extended to more test inputs with
# np.min_scalar_type(np.max(input_matrix)).
a = np.array([352, 916])
b = np.array([350, 660])
assert_equal(minkowski(a, b),
minkowski(a.astype('uint16'), b.astype('uint16')))
def test_old_wminkowski(self):
with suppress_warnings() as wrn:
wrn.filter(DeprecationWarning,
message=".*wminkowski is deprecated")
w = np.array([1.0, 2.0, 0.5])
for x, y in self.cases:
dist1 = old_wminkowski(x, y, p=1, w=w)
assert_almost_equal(dist1, 3.0)
dist1p5 = old_wminkowski(x, y, p=1.5, w=w)
assert_almost_equal(dist1p5, (2.0**1.5+1.0)**(2./3))
dist2 = old_wminkowski(x, y, p=2, w=w)
assert_almost_equal(dist2, np.sqrt(5))
# test weights Issue #7893
arr = np.arange(4)
w = np.full_like(arr, 4)
assert_almost_equal(old_wminkowski(arr, arr + 1, p=2, w=w), 8.0)
assert_almost_equal(wminkowski(arr, arr + 1, p=2, w=w), 4.0)
def test_euclidean(self):
for x, y in self.cases:
dist = weuclidean(x, y)
assert_almost_equal(dist, np.sqrt(5))
def test_sqeuclidean(self):
for x, y in self.cases:
dist = wsqeuclidean(x, y)
assert_almost_equal(dist, 5.0)
def test_cosine(self):
for x, y in self.cases:
dist = wcosine(x, y)
assert_almost_equal(dist, 1.0 - 18.0 / (np.sqrt(14) * np.sqrt(27)))
def test_correlation(self):
xm = np.array([-1.0, 0, 1.0])
ym = np.array([-4.0 / 3, -4.0 / 3, 5.0 - 7.0 / 3])
for x, y in self.cases:
dist = wcorrelation(x, y)
assert_almost_equal(dist, 1.0 - np.dot(xm, ym) / (norm(xm) * norm(ym)))
def test_correlation_positive(self):
        # Regression test for gh-12320 (negative return value due to rounding errors).
x = np.array([0., 0., 0., 0., 0., 0., -2., 0., 0., 0., -2., -2., -2.,
0., -2., 0., -2., 0., 0., -1., -2., 0., 1., 0., 0., -2.,
0., 0., -2., 0., -2., -2., -2., -2., -2., -2., 0.])
y = np.array([1., 1., 1., 1., 1., 1., -1., 1., 1., 1., -1., -1., -1.,
1., -1., 1., -1., 1., 1., 0., -1., 1., 2., 1., 1., -1.,
1., 1., -1., 1., -1., -1., -1., -1., -1., -1., 1.])
dist = correlation(x, y)
assert 0 <= dist <= 10 * np.finfo(np.float64).eps
def test_mahalanobis(self):
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
vi = np.array([[2.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])
for x, y in self.cases:
dist = mahalanobis(x, y, vi)
assert_almost_equal(dist, np.sqrt(6.0))
def construct_squeeze_tests():
# Construct a class like TestSomeDistanceFunctions but testing 2-d vectors
# with a length-1 dimension which is deprecated
def setup_method(self):
# 1D arrays
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
# 3x1 arrays
x31 = x[:, np.newaxis]
y31 = y[:, np.newaxis]
# 1x3 arrays
x13 = x31.T
y13 = y31.T
self.cases = [(x31, y31), (x13, y13), (x31, y13)]
sup = suppress_warnings()
sup.filter(DeprecationWarning,
".*distance metrics ignoring length-1 dimensions is deprecated.*")
base = TestSomeDistanceFunctions
attrs = {
name: sup(getattr(base, name))
for name in dir(base)
if name.startswith('test_')
}
attrs['setup_method'] = setup_method
name = 'TestDistanceFunctionsSqueeze'
globals()[name] = type(name, (base,), attrs)
construct_squeeze_tests()
class TestSquareForm:
checked_dtypes = [np.float64, np.float32, np.int32, np.int8, bool]
def test_squareform_matrix(self):
for dtype in self.checked_dtypes:
self.check_squareform_matrix(dtype)
def test_squareform_vector(self):
for dtype in self.checked_dtypes:
self.check_squareform_vector(dtype)
def check_squareform_matrix(self, dtype):
A = np.zeros((0, 0), dtype=dtype)
rA = squareform(A)
assert_equal(rA.shape, (0,))
assert_equal(rA.dtype, dtype)
A = np.zeros((1, 1), dtype=dtype)
rA = squareform(A)
assert_equal(rA.shape, (0,))
assert_equal(rA.dtype, dtype)
A = np.array([[0, 4.2], [4.2, 0]], dtype=dtype)
rA = squareform(A)
assert_equal(rA.shape, (1,))
assert_equal(rA.dtype, dtype)
assert_array_equal(rA, np.array([4.2], dtype=dtype))
def check_squareform_vector(self, dtype):
v = np.zeros((0,), dtype=dtype)
rv = squareform(v)
assert_equal(rv.shape, (1, 1))
assert_equal(rv.dtype, dtype)
assert_array_equal(rv, [[0]])
v = np.array([8.3], dtype=dtype)
rv = squareform(v)
assert_equal(rv.shape, (2, 2))
assert_equal(rv.dtype, dtype)
assert_array_equal(rv, np.array([[0, 8.3], [8.3, 0]], dtype=dtype))
def test_squareform_multi_matrix(self):
for n in range(2, 5):
self.check_squareform_multi_matrix(n)
def check_squareform_multi_matrix(self, n):
X = np.random.rand(n, 4)
Y = wpdist_no_const(X)
assert_equal(len(Y.shape), 1)
A = squareform(Y)
Yr = squareform(A)
s = A.shape
k = 0
if verbose >= 3:
print(A.shape, Y.shape, Yr.shape)
assert_equal(len(s), 2)
assert_equal(len(Yr.shape), 1)
assert_equal(s[0], s[1])
for i in range(0, s[0]):
for j in range(i + 1, s[1]):
if i != j:
assert_equal(A[i, j], Y[k])
k += 1
else:
assert_equal(A[i, j], 0)
class TestNumObsY:
def test_num_obs_y_multi_matrix(self):
for n in range(2, 10):
X = np.random.rand(n, 4)
Y = wpdist_no_const(X)
assert_equal(num_obs_y(Y), n)
def test_num_obs_y_1(self):
# Tests num_obs_y(y) on a condensed distance matrix over 1
        # observation. Expecting exception.
assert_raises(ValueError, self.check_y, 1)
def test_num_obs_y_2(self):
# Tests num_obs_y(y) on a condensed distance matrix over 2
# observations.
assert_(self.check_y(2))
def test_num_obs_y_3(self):
assert_(self.check_y(3))
def test_num_obs_y_4(self):
assert_(self.check_y(4))
def test_num_obs_y_5_10(self):
for i in range(5, 16):
self.minit(i)
def test_num_obs_y_2_100(self):
# Tests num_obs_y(y) on 100 improper condensed distance matrices.
# Expecting exception.
a = set([])
for n in range(2, 16):
a.add(n * (n - 1) / 2)
for i in range(5, 105):
if i not in a:
assert_raises(ValueError, self.bad_y, i)
def minit(self, n):
assert_(self.check_y(n))
def bad_y(self, n):
y = np.random.rand(n)
return num_obs_y(y)
def check_y(self, n):
return num_obs_y(self.make_y(n)) == n
def make_y(self, n):
return np.random.rand((n * (n - 1)) // 2)
class TestNumObsDM:
def test_num_obs_dm_multi_matrix(self):
for n in range(1, 10):
X = np.random.rand(n, 4)
Y = wpdist_no_const(X)
A = squareform(Y)
if verbose >= 3:
print(A.shape, Y.shape)
assert_equal(num_obs_dm(A), n)
def test_num_obs_dm_0(self):
        # Tests num_obs_dm(D) on a 0x0 distance matrix.
assert_(self.check_D(0))
def test_num_obs_dm_1(self):
# Tests num_obs_dm(D) on a 1x1 distance matrix.
assert_(self.check_D(1))
def test_num_obs_dm_2(self):
assert_(self.check_D(2))
def test_num_obs_dm_3(self):
        assert_(self.check_D(3))
def test_num_obs_dm_4(self):
assert_(self.check_D(4))
def check_D(self, n):
return num_obs_dm(self.make_D(n)) == n
def make_D(self, n):
return np.random.rand(n, n)
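# Thin wrapper so assert_raises can exercise is_valid_dm with throw=True.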
def is_valid_dm_throw(D):
return is_valid_dm(D, throw=True)
class TestIsValidDM:
def test_is_valid_dm_improper_shape_1D_E(self):
D = np.zeros((5,), dtype=np.double)
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_improper_shape_1D_F(self):
D = np.zeros((5,), dtype=np.double)
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_improper_shape_3D_E(self):
D = np.zeros((3, 3, 3), dtype=np.double)
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_improper_shape_3D_F(self):
D = np.zeros((3, 3, 3), dtype=np.double)
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_nonzero_diagonal_E(self):
y = np.random.rand(10)
D = squareform(y)
for i in range(0, 5):
D[i, i] = 2.0
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_nonzero_diagonal_F(self):
y = np.random.rand(10)
D = squareform(y)
for i in range(0, 5):
D[i, i] = 2.0
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_asymmetric_E(self):
y = np.random.rand(10)
D = squareform(y)
D[1, 3] = D[3, 1] + 1
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_asymmetric_F(self):
y = np.random.rand(10)
D = squareform(y)
D[1, 3] = D[3, 1] + 1
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_correct_1_by_1(self):
D = np.zeros((1, 1), dtype=np.double)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_2_by_2(self):
y = np.random.rand(1)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_3_by_3(self):
y = np.random.rand(3)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_4_by_4(self):
y = np.random.rand(6)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_5_by_5(self):
y = np.random.rand(10)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def is_valid_y_throw(y):
return is_valid_y(y, throw=True)
class TestIsValidY:
# If test case name ends on "_E" then an exception is expected for the
# given input, if it ends in "_F" then False is expected for the is_valid_y
# check. Otherwise the input is expected to be valid.
def test_is_valid_y_improper_shape_2D_E(self):
y = np.zeros((3, 3,), dtype=np.double)
assert_raises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_2D_F(self):
y = np.zeros((3, 3,), dtype=np.double)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_improper_shape_3D_E(self):
y = np.zeros((3, 3, 3), dtype=np.double)
assert_raises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_3D_F(self):
y = np.zeros((3, 3, 3), dtype=np.double)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_correct_2_by_2(self):
y = self.correct_n_by_n(2)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_3_by_3(self):
y = self.correct_n_by_n(3)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_4_by_4(self):
y = self.correct_n_by_n(4)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_5_by_5(self):
y = self.correct_n_by_n(5)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_2_100(self):
a = set([])
for n in range(2, 16):
a.add(n * (n - 1) / 2)
for i in range(5, 105):
if i not in a:
assert_raises(ValueError, self.bad_y, i)
def bad_y(self, n):
y = np.random.rand(n)
return is_valid_y(y, throw=True)
def correct_n_by_n(self, n):
y = np.random.rand((n * (n - 1)) // 2)
return y
def test_bad_p():
# Raise ValueError if p < 1.
p = 0.5
assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p)
assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p, [1, 1])
def test_sokalsneath_all_false():
# Regression test for ticket #876
assert_raises(ValueError, sokalsneath, [False, False, False], [False, False, False])
def test_canberra():
# Regression test for ticket #1430.
assert_equal(wcanberra([1, 2, 3], [2, 4, 6]), 1)
assert_equal(wcanberra([1, 1, 0, 0], [1, 0, 1, 0]), 2)
def test_braycurtis():
# Regression test for ticket #1430.
assert_almost_equal(wbraycurtis([1, 2, 3], [2, 4, 6]), 1. / 3, decimal=15)
assert_almost_equal(wbraycurtis([1, 1, 0, 0], [1, 0, 1, 0]), 0.5, decimal=15)
def test_euclideans():
# Regression test for ticket #1328.
x1 = np.array([1, 1, 1])
x2 = np.array([0, 0, 0])
# Basic test of the calculation.
assert_almost_equal(wsqeuclidean(x1, x2), 3.0, decimal=14)
assert_almost_equal(weuclidean(x1, x2), np.sqrt(3), decimal=14)
# Check flattening for (1, N) or (N, 1) inputs
with pytest.warns(DeprecationWarning,
match="ignoring length-1 dimensions is deprecated"):
assert_almost_equal(weuclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
np.sqrt(3), decimal=14)
with pytest.warns(DeprecationWarning,
match="ignoring length-1 dimensions is deprecated"):
assert_almost_equal(wsqeuclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
3.0, decimal=14)
with pytest.warns(DeprecationWarning,
match="ignoring length-1 dimensions is deprecated"):
assert_almost_equal(wsqeuclidean(x1[:, np.newaxis], x2[:, np.newaxis]),
3.0, decimal=14)
# Distance metrics only defined for vectors (= 1-D)
x = np.arange(4).reshape(2, 2)
assert_raises(ValueError, weuclidean, x, x)
assert_raises(ValueError, wsqeuclidean, x, x)
# Another check, with random data.
rs = np.random.RandomState(1234567890)
x = rs.rand(10)
y = rs.rand(10)
d1 = weuclidean(x, y)
d2 = wsqeuclidean(x, y)
assert_almost_equal(d1**2, d2, decimal=14)
def test_hamming_unequal_length():
# Regression test for gh-4290.
x = [0, 0, 1]
y = [1, 0, 1, 0]
# Used to give an AttributeError from ndarray.mean called on bool
assert_raises(ValueError, whamming, x, y)
def test_hamming_string_array():
# https://github.com/scikit-learn/scikit-learn/issues/4014
a = np.array(['eggs', 'spam', 'spam', 'eggs', 'spam', 'spam', 'spam',
'spam', 'spam', 'spam', 'spam', 'eggs', 'eggs', 'spam',
'eggs', 'eggs', 'eggs', 'eggs', 'eggs', 'spam'],
dtype='|S4')
b = np.array(['eggs', 'spam', 'spam', 'eggs', 'eggs', 'spam', 'spam',
'spam', 'spam', 'eggs', 'spam', 'eggs', 'spam', 'eggs',
'spam', 'spam', 'eggs', 'spam', 'spam', 'eggs'],
dtype='|S4')
desired = 0.45
assert_allclose(whamming(a, b), desired)
def test_minkowski_w():
# Regression test for gh-8142.
arr_in = np.array([[83.33333333, 100., 83.33333333, 100., 36.,
60., 90., 150., 24., 48.],
[83.33333333, 100., 83.33333333, 100., 36.,
60., 90., 150., 24., 48.]])
p0 = pdist(arr_in, metric='minkowski', p=1, w=None)
c0 = cdist(arr_in, arr_in, metric='minkowski', p=1, w=None)
p1 = pdist(arr_in, metric='minkowski', p=1)
c1 = cdist(arr_in, arr_in, metric='minkowski', p=1)
assert_allclose(p0, p1, rtol=1e-15)
assert_allclose(c0, c1, rtol=1e-15)
def test_sqeuclidean_dtypes():
# Assert that sqeuclidean returns the right types of values.
# Integer types should be converted to floating for stability.
# Floating point types should be the same as the input.
x = [1, 2, 3]
y = [4, 5, 6]
for dtype in [np.int8, np.int16, np.int32, np.int64]:
d = wsqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
assert_(np.issubdtype(d.dtype, np.floating))
for dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
d1 = wsqeuclidean([0], np.asarray([-1], dtype=dtype))
d2 = wsqeuclidean(np.asarray([-1], dtype=dtype), [0])
assert_equal(d1, d2)
assert_equal(d1, np.float64(np.iinfo(dtype).max)**2)
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
for dtype in ['float16', 'float128']:
# These aren't present in older numpy versions; float128 may also not
# be present on all platforms.
if hasattr(np, dtype):
dtypes.append(getattr(np, dtype))
for dtype in dtypes:
d = wsqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
assert_equal(d.dtype, dtype)
def test_sokalmichener():
# Test that sokalmichener has the same result for bool and int inputs.
p = [True, True, False]
q = [True, False, True]
x = [int(b) for b in p]
y = [int(b) for b in q]
dist1 = sokalmichener(p, q)
dist2 = sokalmichener(x, y)
# These should be exactly the same.
assert_equal(dist1, dist2)
def test_sokalmichener_with_weight():
    # Compare u = [1, 0] with v = [1, 1] under weights w = [1, 0.2]:
    #   position 1: u=1, v=1, weight 1.0
    #   position 2: u=0, v=1, weight 0.2
ntf = 0 * 1 + 0 * 0.2
nft = 0 * 1 + 1 * 0.2
ntt = 1 * 1 + 0 * 0.2
nff = 0 * 1 + 0 * 0.2
expected = 2 * (nft + ntf) / (ntt + nff + 2 * (nft + ntf))
assert_almost_equal(expected, 0.2857143)
actual = sokalmichener([1, 0], [1, 1], w=[1, 0.2])
assert_almost_equal(expected, actual)
a1 = [False, False, True, True, True, False, False, True, True, True, True,
True, True, False, True, False, False, False, True, True]
a2 = [True, True, True, False, False, True, True, True, False, True,
True, True, True, True, False, False, False, True, True, True]
for w in [0.05, 0.1, 1.0, 20.0]:
assert_almost_equal(sokalmichener(a2, a1, [w]), 0.6666666666666666)
def test_modifies_input():
# test whether cdist or pdist modifies input arrays
X1 = np.asarray([[1., 2., 3.],
[1.2, 2.3, 3.4],
[2.2, 2.3, 4.4],
[22.2, 23.3, 44.4]])
X1_copy = X1.copy()
with suppress_warnings() as w:
w.filter(message="'wminkowski' metric is deprecated")
for metric in _METRICS_NAMES:
kwargs = {"w": 1.0 / X1.std(axis=0)} if metric == "wminkowski" else {}
cdist(X1, X1, metric, **kwargs)
pdist(X1, metric, **kwargs)
assert_array_equal(X1, X1_copy)
def test_Xdist_deprecated_args():
# testing both cdist and pdist deprecated warnings
X1 = np.asarray([[1., 2., 3.],
[1.2, 2.3, 3.4],
[2.2, 2.3, 4.4],
[22.2, 23.3, 44.4]])
weights = np.arange(3)
for metric in _METRICS_NAMES:
kwargs = {"w": weights} if metric == "wminkowski" else dict()
with suppress_warnings() as w:
w.filter(DeprecationWarning,
message="'wminkowski' metric is deprecated")
with pytest.raises(TypeError):
cdist(X1, X1, metric, 2., **kwargs)
with pytest.raises(TypeError):
pdist(X1, metric, 2., **kwargs)
for arg in ["p", "V", "VI"]:
kwargs = {arg:"foo"}
if metric == "wminkowski":
if "p" in kwargs or "w" in kwargs:
continue
kwargs["w"] = weights
if((arg == "V" and metric == "seuclidean") or
(arg == "VI" and metric == "mahalanobis") or
(arg == "p" and metric == "minkowski")):
continue
with suppress_warnings() as w:
w.filter(DeprecationWarning,
message="'wminkowski' metric is deprecated")
with pytest.raises(TypeError):
cdist(X1, X1, metric, **kwargs)
with pytest.raises(TypeError):
pdist(X1, metric, **kwargs)
def test_Xdist_non_negative_weights():
X = eo['random-float32-data'][::5, ::2]
w = np.ones(X.shape[1])
w[::5] = -w[::5]
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
message="'wminkowski' metric is deprecated")
for metric in _METRICS_NAMES:
if metric in ['seuclidean', 'mahalanobis', 'jensenshannon']:
continue
for m in [metric, eval(metric), "test_" + metric]:
assert_raises(ValueError, pdist, X, m, w=w)
assert_raises(ValueError, cdist, X, X, m, w=w)
def test__validate_vector():
x = [1, 2, 3]
y = _validate_vector(x)
assert_array_equal(y, x)
y = _validate_vector(x, dtype=np.float64)
assert_array_equal(y, x)
assert_equal(y.dtype, np.float64)
x = [1]
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, x)
x = 1
with pytest.warns(DeprecationWarning,
match="ignoring length-1 dimensions is deprecated"):
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, [x])
x = np.arange(5).reshape(1, -1, 1)
with pytest.warns(DeprecationWarning,
match="ignoring length-1 dimensions is deprecated"):
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_array_equal(y, x[0, :, 0])
x = [[1, 2], [3, 4]]
assert_raises(ValueError, _validate_vector, x)
def test_yule_all_same():
# Test yule avoids a divide by zero when exactly equal
x = np.ones((2, 6), dtype=bool)
d = wyule(x[0], x[0])
assert d == 0.0
d = pdist(x, 'yule')
assert_equal(d, [0.0])
d = cdist(x[:1], x[:1], 'yule')
assert_equal(d, [[0.0]])
def test_jensenshannon():
assert_almost_equal(jensenshannon([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2.0),
1.0)
assert_almost_equal(jensenshannon([1.0, 0.0], [0.5, 0.5]),
0.46450140402245893)
assert_almost_equal(jensenshannon([1.0, 0.0, 0.0], [1.0, 0.0, 0.0]), 0.0)
assert_almost_equal(jensenshannon([[1.0, 2.0]], [[0.5, 1.5]], axis=0),
[0.0, 0.0])
assert_almost_equal(jensenshannon([[1.0, 2.0]], [[0.5, 1.5]], axis=1),
[0.0649045])
assert_almost_equal(jensenshannon([[1.0, 2.0]], [[0.5, 1.5]], axis=0,
keepdims=True), [[0.0, 0.0]])
assert_almost_equal(jensenshannon([[1.0, 2.0]], [[0.5, 1.5]], axis=1,
keepdims=True), [[0.0649045]])
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]])
b = np.array([[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]])
assert_almost_equal(jensenshannon(a, b, axis=0),
[0.1954288, 0.1447697, 0.1138377, 0.0927636])
assert_almost_equal(jensenshannon(a, b, axis=1),
[0.1402339, 0.0399106, 0.0201815])
|
ychen820/microblog
|
refs/heads/master
|
y/google-cloud-sdk/.install/.backup/platform/gsutil/third_party/boto/boto/cacerts/__init__.py
|
260
|
# Copyright 2010 Google Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
|
ryfeus/lambda-packs
|
refs/heads/master
|
Lxml_requests/source/affine/tests/test_pickle.py
|
5
|
"""
Validate that instances of `affine.Affine()` can be pickled and unpickled.
"""
import pickle
from multiprocessing import Pool
import affine
def test_pickle():
a = affine.Affine(1, 2, 3, 4, 5, 6)
assert pickle.loads(pickle.dumps(a)) == a
def _mp_proc(x):
# A helper function - needed for test_with_multiprocessing()
# Can't be defined inside the test because multiprocessing needs
# everything to be in __main__
assert isinstance(x, affine.Affine)
return x
def test_with_multiprocessing():
a1 = affine.Affine(1, 2, 3, 4, 5, 6)
a2 = affine.Affine(6, 5, 4, 3, 2, 1)
results = Pool(2).map(_mp_proc, [a1, a2])
for expected, actual in zip([a1, a2], results):
assert expected == actual
|
ibm-watson-iot/iot-python
|
refs/heads/master
|
src/wiotp/sdk/api/usage/__init__.py
|
2
|
# *****************************************************************************
# Copyright (c) 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from datetime import datetime
from collections import defaultdict
from wiotp.sdk.exceptions import ApiException
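# Dict-backed view of the data transfer usage summary returned by the API;
# entries under "days" are wrapped in DayDataTransfer objects.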
class DataTransferSummary(defaultdict):
def __init__(self, **kwargs):
daysAsObj = []
if "days" in kwargs and kwargs["days"] is not None:
for day in kwargs["days"]:
daysAsObj.append(DayDataTransfer(**day))
del kwargs["days"]
dict.__init__(self, days=daysAsObj, **kwargs)
@property
def start(self):
return datetime.strptime(self["start"], "%Y-%m-%d").date()
@property
def end(self):
return datetime.strptime(self["end"], "%Y-%m-%d").date()
@property
def average(self):
return self["average"]
@property
def total(self):
return self["total"]
@property
def days(self):
return self["days"]
class DayDataTransfer(defaultdict):
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
@property
def date(self):
return datetime.strptime(self["date"], "%Y-%m-%d").date()
@property
def total(self):
return self["total"]
class Usage:
def __init__(self, apiClient):
self._apiClient = apiClient
def dataTransfer(self, start, end, detail=False):
"""
        Retrieve the organization's data transfer usage for the period between start and end.
        In case of failure it throws APIException
"""
r = self._apiClient.get(
"api/v0002/usage/data-traffic?start=%s&end=%s&detail=%s"
% (start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d"), detail)
)
if r.status_code == 200:
return DataTransferSummary(**r.json())
else:
raise ApiException(r)
|
onceuponatimeforever/oh-mainline
|
refs/heads/master
|
vendor/packages/celery/celery/tests/test_task/test_result.py
|
18
|
from __future__ import absolute_import
from __future__ import with_statement
from celery import states
from celery.app import app_or_default
from celery.utils import uuid
from celery.utils.serialization import pickle
from celery.result import AsyncResult, EagerResult, TaskSetResult, ResultSet
from celery.exceptions import TimeoutError
from celery.task.base import Task
from celery.tests.utils import AppCase
from celery.tests.utils import skip_if_quick
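# Helpers: build a fake task entry and persist its outcome to the result
# backend so the AsyncResult tests below have something to query.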
def mock_task(name, status, result):
return dict(id=uuid(), name=name, status=status, result=result)
def save_result(task):
app = app_or_default()
traceback = "Some traceback"
if task["status"] == states.SUCCESS:
app.backend.mark_as_done(task["id"], task["result"])
elif task["status"] == states.RETRY:
app.backend.mark_as_retry(task["id"], task["result"],
traceback=traceback)
else:
app.backend.mark_as_failure(task["id"], task["result"],
traceback=traceback)
def make_mock_taskset(size=10):
tasks = [mock_task("ts%d" % i, states.SUCCESS, i) for i in xrange(size)]
[save_result(task) for task in tasks]
return [AsyncResult(task["id"]) for task in tasks]
class TestAsyncResult(AppCase):
def setup(self):
self.task1 = mock_task("task1", states.SUCCESS, "the")
self.task2 = mock_task("task2", states.SUCCESS, "quick")
self.task3 = mock_task("task3", states.FAILURE, KeyError("brown"))
self.task4 = mock_task("task3", states.RETRY, KeyError("red"))
for task in (self.task1, self.task2, self.task3, self.task4):
save_result(task)
def test_reduce(self):
a1 = AsyncResult("uuid", task_name="celery.ping")
restored = pickle.loads(pickle.dumps(a1))
self.assertEqual(restored.task_id, "uuid")
self.assertEqual(restored.task_name, "celery.ping")
a2 = AsyncResult("uuid")
self.assertEqual(pickle.loads(pickle.dumps(a2)).task_id, "uuid")
def test_successful(self):
ok_res = AsyncResult(self.task1["id"])
nok_res = AsyncResult(self.task3["id"])
nok_res2 = AsyncResult(self.task4["id"])
self.assertTrue(ok_res.successful())
self.assertFalse(nok_res.successful())
self.assertFalse(nok_res2.successful())
pending_res = AsyncResult(uuid())
self.assertFalse(pending_res.successful())
def test_str(self):
ok_res = AsyncResult(self.task1["id"])
ok2_res = AsyncResult(self.task2["id"])
nok_res = AsyncResult(self.task3["id"])
self.assertEqual(str(ok_res), self.task1["id"])
self.assertEqual(str(ok2_res), self.task2["id"])
self.assertEqual(str(nok_res), self.task3["id"])
pending_id = uuid()
pending_res = AsyncResult(pending_id)
self.assertEqual(str(pending_res), pending_id)
def test_repr(self):
ok_res = AsyncResult(self.task1["id"])
ok2_res = AsyncResult(self.task2["id"])
nok_res = AsyncResult(self.task3["id"])
self.assertEqual(repr(ok_res), "<AsyncResult: %s>" % (
self.task1["id"]))
self.assertEqual(repr(ok2_res), "<AsyncResult: %s>" % (
self.task2["id"]))
self.assertEqual(repr(nok_res), "<AsyncResult: %s>" % (
self.task3["id"]))
pending_id = uuid()
pending_res = AsyncResult(pending_id)
self.assertEqual(repr(pending_res), "<AsyncResult: %s>" % (
pending_id))
def test_hash(self):
self.assertEqual(hash(AsyncResult("x0w991")),
hash(AsyncResult("x0w991")))
self.assertNotEqual(hash(AsyncResult("x0w991")),
hash(AsyncResult("x1w991")))
def test_get_traceback(self):
ok_res = AsyncResult(self.task1["id"])
nok_res = AsyncResult(self.task3["id"])
nok_res2 = AsyncResult(self.task4["id"])
self.assertFalse(ok_res.traceback)
self.assertTrue(nok_res.traceback)
self.assertTrue(nok_res2.traceback)
pending_res = AsyncResult(uuid())
self.assertFalse(pending_res.traceback)
def test_get(self):
ok_res = AsyncResult(self.task1["id"])
ok2_res = AsyncResult(self.task2["id"])
nok_res = AsyncResult(self.task3["id"])
nok2_res = AsyncResult(self.task4["id"])
self.assertEqual(ok_res.get(), "the")
self.assertEqual(ok2_res.get(), "quick")
with self.assertRaises(KeyError):
nok_res.get()
self.assertIsInstance(nok2_res.result, KeyError)
self.assertEqual(ok_res.info, "the")
def test_get_timeout(self):
res = AsyncResult(self.task4["id"]) # has RETRY status
with self.assertRaises(TimeoutError):
res.get(timeout=0.1)
pending_res = AsyncResult(uuid())
with self.assertRaises(TimeoutError):
pending_res.get(timeout=0.1)
@skip_if_quick
def test_get_timeout_longer(self):
res = AsyncResult(self.task4["id"]) # has RETRY status
with self.assertRaises(TimeoutError):
res.get(timeout=1)
def test_ready(self):
oks = (AsyncResult(self.task1["id"]),
AsyncResult(self.task2["id"]),
AsyncResult(self.task3["id"]))
self.assertTrue(all(result.ready() for result in oks))
self.assertFalse(AsyncResult(self.task4["id"]).ready())
self.assertFalse(AsyncResult(uuid()).ready())
class test_ResultSet(AppCase):
def test_add_discard(self):
x = ResultSet([])
x.add(AsyncResult("1"))
self.assertIn(AsyncResult("1"), x.results)
x.discard(AsyncResult("1"))
x.discard(AsyncResult("1"))
x.discard("1")
self.assertNotIn(AsyncResult("1"), x.results)
x.update([AsyncResult("2")])
def test_clear(self):
x = ResultSet([])
r = x.results
x.clear()
self.assertIs(x.results, r)
class MockAsyncResultFailure(AsyncResult):
@property
def result(self):
return KeyError("baz")
@property
def status(self):
return states.FAILURE
def get(self, propagate=True, **kwargs):
if propagate:
raise self.result
return self.result
class MockAsyncResultSuccess(AsyncResult):
forgotten = False
def forget(self):
self.forgotten = True
@property
def result(self):
return 42
@property
def status(self):
return states.SUCCESS
def get(self, **kwargs):
return self.result
class SimpleBackend(object):
ids = []
def __init__(self, ids=[]):
self.ids = ids
def get_many(self, *args, **kwargs):
return ((id, {"result": i}) for i, id in enumerate(self.ids))
class TestTaskSetResult(AppCase):
def setup(self):
self.size = 10
self.ts = TaskSetResult(uuid(), make_mock_taskset(self.size))
def test_total(self):
self.assertEqual(self.ts.total, self.size)
def test_iterate_raises(self):
ar = MockAsyncResultFailure(uuid())
ts = TaskSetResult(uuid(), [ar])
it = iter(ts)
with self.assertRaises(KeyError):
it.next()
def test_forget(self):
subs = [MockAsyncResultSuccess(uuid()),
MockAsyncResultSuccess(uuid())]
ts = TaskSetResult(uuid(), subs)
ts.forget()
for sub in subs:
self.assertTrue(sub.forgotten)
def test_getitem(self):
subs = [MockAsyncResultSuccess(uuid()),
MockAsyncResultSuccess(uuid())]
ts = TaskSetResult(uuid(), subs)
self.assertIs(ts[0], subs[0])
def test_save_restore(self):
subs = [MockAsyncResultSuccess(uuid()),
MockAsyncResultSuccess(uuid())]
ts = TaskSetResult(uuid(), subs)
ts.save()
with self.assertRaises(AttributeError):
ts.save(backend=object())
self.assertEqual(TaskSetResult.restore(ts.taskset_id).subtasks,
ts.subtasks)
ts.delete()
self.assertIsNone(TaskSetResult.restore(ts.taskset_id))
with self.assertRaises(AttributeError):
TaskSetResult.restore(ts.taskset_id, backend=object())
def test_join_native(self):
backend = SimpleBackend()
subtasks = [AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = TaskSetResult(uuid(), subtasks)
backend.ids = [subtask.task_id for subtask in subtasks]
res = ts.join_native()
self.assertEqual(res, range(10))
def test_iter_native(self):
backend = SimpleBackend()
subtasks = [AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = TaskSetResult(uuid(), subtasks)
backend.ids = [subtask.task_id for subtask in subtasks]
self.assertEqual(len(list(ts.iter_native())), 10)
def test_iterate_yields(self):
ar = MockAsyncResultSuccess(uuid())
ar2 = MockAsyncResultSuccess(uuid())
ts = TaskSetResult(uuid(), [ar, ar2])
it = iter(ts)
self.assertEqual(it.next(), 42)
self.assertEqual(it.next(), 42)
def test_iterate_eager(self):
ar1 = EagerResult(uuid(), 42, states.SUCCESS)
ar2 = EagerResult(uuid(), 42, states.SUCCESS)
ts = TaskSetResult(uuid(), [ar1, ar2])
it = iter(ts)
self.assertEqual(it.next(), 42)
self.assertEqual(it.next(), 42)
def test_join_timeout(self):
ar = MockAsyncResultSuccess(uuid())
ar2 = MockAsyncResultSuccess(uuid())
ar3 = AsyncResult(uuid())
ts = TaskSetResult(uuid(), [ar, ar2, ar3])
with self.assertRaises(TimeoutError):
ts.join(timeout=0.0000001)
def test_itersubtasks(self):
it = self.ts.itersubtasks()
for i, t in enumerate(it):
self.assertEqual(t.get(), i)
def test___iter__(self):
it = iter(self.ts)
results = sorted(list(it))
self.assertListEqual(results, list(xrange(self.size)))
def test_join(self):
joined = self.ts.join()
self.assertListEqual(joined, list(xrange(self.size)))
def test_successful(self):
self.assertTrue(self.ts.successful())
def test_failed(self):
self.assertFalse(self.ts.failed())
def test_waiting(self):
self.assertFalse(self.ts.waiting())
def test_ready(self):
self.assertTrue(self.ts.ready())
def test_completed_count(self):
self.assertEqual(self.ts.completed_count(), self.ts.total)
class TestPendingAsyncResult(AppCase):
def setup(self):
self.task = AsyncResult(uuid())
def test_result(self):
self.assertIsNone(self.task.result)
class TestFailedTaskSetResult(TestTaskSetResult):
def setup(self):
self.size = 11
subtasks = make_mock_taskset(10)
failed = mock_task("ts11", states.FAILURE, KeyError("Baz"))
save_result(failed)
failed_res = AsyncResult(failed["id"])
self.ts = TaskSetResult(uuid(), subtasks + [failed_res])
def test_itersubtasks(self):
it = self.ts.itersubtasks()
for i in xrange(self.size - 1):
t = it.next()
self.assertEqual(t.get(), i)
with self.assertRaises(KeyError):
t = it.next() # need to do this in two lines or 2to3 borks.
t.get()
def test_completed_count(self):
self.assertEqual(self.ts.completed_count(), self.ts.total - 1)
def test___iter__(self):
it = iter(self.ts)
def consume():
return list(it)
with self.assertRaises(KeyError):
consume()
def test_join(self):
with self.assertRaises(KeyError):
self.ts.join()
def test_successful(self):
self.assertFalse(self.ts.successful())
def test_failed(self):
self.assertTrue(self.ts.failed())
class TestTaskSetPending(AppCase):
def setup(self):
self.ts = TaskSetResult(uuid(), [
AsyncResult(uuid()),
AsyncResult(uuid())])
def test_completed_count(self):
self.assertEqual(self.ts.completed_count(), 0)
def test_ready(self):
self.assertFalse(self.ts.ready())
def test_waiting(self):
self.assertTrue(self.ts.waiting())
def x_join(self):
with self.assertRaises(TimeoutError):
self.ts.join(timeout=0.001)
@skip_if_quick
def x_join_longer(self):
with self.assertRaises(TimeoutError):
self.ts.join(timeout=1)
class RaisingTask(Task):
def run(self, x, y):
raise KeyError("xy")
class TestEagerResult(AppCase):
def test_wait_raises(self):
res = RaisingTask.apply(args=[3, 3])
with self.assertRaises(KeyError):
res.wait()
def test_wait(self):
res = EagerResult("x", "x", states.RETRY)
res.wait()
self.assertEqual(res.state, states.RETRY)
self.assertEqual(res.status, states.RETRY)
def test_revoke(self):
res = RaisingTask.apply(args=[3, 3])
self.assertFalse(res.revoke())
|
dleecefft/pcapstats
|
refs/heads/master
|
pbin/csv2df1.py
|
1
|
#!/usr/bin/env python
#
import os, sys, getopt
from datetime import datetime
if __name__ == "__main__":
# input and output file data, adjust line grep statements if needed to match a different log line.
logfile=''
line1grep = 'Accepted'
line2grep = 'Recieved'
logoutlist = []
csv = True
writefile = False
outfile='parsedhpotlog'
# Need the epoch object to make millisecond timestamps
epoch = datetime.utcfromtimestamp(0)
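    # For example (illustrative only), a parsed datetime (call it `dt`) can be
    # converted to epoch milliseconds with:
    #   millis = int((dt - epoch).total_seconds() * 1000)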
# Use getopt to avoid param order errors
if len(sys.argv) < 4:
        print("Usage: %s -l 2016-04-04_rdphoney -f (csv|ascii_log) [ -w outputfileprefix ]" % sys.argv[0])
exit()
    opts, args = getopt.getopt(sys.argv[1:], "hl:f:w:")
for o, a in opts:
if o == '-l':
logfile=a
elif o == '-f':
outformat=a
elif o == '-w':
writefile = True
outfile =a
elif o == '-h':
print("Usage: %s -l 2016-04-04_rdphoney -f (csv|ascii_log) [ -w outputfileprefix ] " % sys.argv[0])
else:
print("Usage: %s -l 2016-04-04_rdphoney -f (csv|ascii_log) [ -w outputfileprefix ] " % sys.argv[0])
|
gangadhar-kadam/church-erpnext
|
refs/heads/master
|
patches/december_2012/deprecate_tds.py
|
3
|
def execute():
import webnotes
from webnotes.model import delete_doc
from webnotes.model.code import get_obj
from webnotes.model.doc import addchild
# delete doctypes and tables
for dt in ["TDS Payment", "TDS Return Acknowledgement", "Form 16A",
"TDS Rate Chart", "TDS Category", "TDS Control", "TDS Detail",
"TDS Payment Detail", "TDS Rate Detail", "TDS Category Account",
"Form 16A Ack Detail", "Form 16A Tax Detail"]:
delete_doc("DocType", dt)
webnotes.conn.commit()
webnotes.conn.sql("drop table if exists `tab%s`" % dt)
webnotes.conn.begin()
delete_doc("Search Criteria", "tds_return")
# Add tds entry in tax table for purchase invoice
pi_list = webnotes.conn.sql("""select name from `tabPurchase Invoice`
where ifnull(tax_code, '')!='' and ifnull(ded_amount, 0)!=0""")
for pi in pi_list:
piobj = get_obj("Purchase Invoice", pi[0], with_children=1)
ch = addchild(piobj.doc, 'taxes_and_charges', 'Purchase Taxes and Charges')
ch.charge_type = "Actual"
ch.account_head = piobj.doc.tax_code
ch.description = piobj.doc.tax_code
ch.rate = -1*piobj.doc.ded_amount
ch.tax_amount = -1*piobj.doc.ded_amount
ch.category = "Total"
ch.save(1)
# Add tds entry in entries table for journal voucher
jv_list = webnotes.conn.sql("""select name from `tabJournal Voucher`
where ifnull(tax_code, '')!='' and ifnull(ded_amount, 0)!=0""")
for jv in jv_list:
jvobj = get_obj("Journal Voucher", jv[0], with_children=1)
ch = addchild(jvobj.doc, 'entries', 'Journal Voucher Detail')
ch.account = jvobj.doc.tax_code
ch.credit = jvobj.doc.ded_amount
ch.save(1)
|
gitqwerty777/myGspread
|
refs/heads/master
|
tests/test.py
|
2
|
# -*- coding: utf-8 -*-
import os
import re
import time
import random
import hashlib
import unittest
import ConfigParser
import itertools
import gspread
class GspreadTest(unittest.TestCase):
def setUp(self):
creds_filename = "tests.config"
try:
config_filename = os.path.join(
os.path.dirname(__file__), creds_filename)
config = ConfigParser.ConfigParser()
config.readfp(open(config_filename))
email = config.get('Google Account', 'email')
password = config.get('Google Account', 'password')
self.config = config
self.gc = gspread.login(email, password)
self.assertTrue(isinstance(self.gc, gspread.Client))
except IOError:
msg = "Can't find %s for reading google account credentials. " \
"You can create it from %s.example in tests/ directory."
raise Exception(msg % (creds_filename, creds_filename))
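# Illustrative sketch of the tests.config layout implied by the options read in
# these tests (section and option names come from the code; values are
# placeholders):
#
#   [Google Account]
#   email = user@example.com
#   password = secret
#
#   [Spreadsheet]
#   title = ...
#   key = ...
#   url = ...
#   id = ...
#   sheet1_title = ...
#
#   [Worksheet]
#   id = ...
#   title = ...
#   row_count = ...
#   col_count = ...
#
#   [WorksheetDelete]
#   ws1_name = ...
#   ws2_name = ...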
class ClientTest(GspreadTest):
"""Test for gspread.client."""
def test_open(self):
title = self.config.get('Spreadsheet', 'title')
spreadsheet = self.gc.open(title)
self.assertTrue(isinstance(spreadsheet, gspread.Spreadsheet))
    def test_not_found_exception(self):
        nonexistent_title = "Please don't use this phrase as a name of a sheet."
        self.assertRaises(gspread.SpreadsheetNotFound,
                          self.gc.open,
                          nonexistent_title)
def test_open_by_key(self):
key = self.config.get('Spreadsheet', 'key')
spreadsheet = self.gc.open_by_key(key)
self.assertTrue(isinstance(spreadsheet, gspread.Spreadsheet))
def test_open_by_url(self):
url = self.config.get('Spreadsheet', 'url')
spreadsheet = self.gc.open_by_url(url)
self.assertTrue(isinstance(spreadsheet, gspread.Spreadsheet))
def test_openall(self):
spreadsheet_list = self.gc.openall()
for s in spreadsheet_list:
self.assertTrue(isinstance(s, gspread.Spreadsheet))
class SpreadsheetTest(GspreadTest):
"""Test for gspread.Spreadsheet."""
def setUp(self):
super(SpreadsheetTest, self).setUp()
title = self.config.get('Spreadsheet', 'title')
self.spreadsheet = self.gc.open(title)
def test_properties(self):
self.assertEqual(self.config.get('Spreadsheet', 'id'),
self.spreadsheet.id)
self.assertEqual(self.config.get('Spreadsheet', 'title'),
self.spreadsheet.title)
def test_sheet1(self):
sheet1 = self.spreadsheet.sheet1
self.assertTrue(isinstance(sheet1, gspread.Worksheet))
def test_get_worksheet(self):
sheet1 = self.spreadsheet.get_worksheet(0)
self.assertTrue(isinstance(sheet1, gspread.Worksheet))
def test_worksheet(self):
sheet_title = self.config.get('Spreadsheet', 'sheet1_title')
sheet = self.spreadsheet.worksheet(sheet_title)
self.assertTrue(isinstance(sheet, gspread.Worksheet))
class WorksheetTest(GspreadTest):
"""Test for gspread.Worksheet."""
def setUp(self):
super(WorksheetTest, self).setUp()
title = self.config.get('Spreadsheet', 'title')
self.spreadsheet = self.gc.open(title)
self.sheet = self.spreadsheet.sheet1
def test_properties(self):
self.assertEqual(self.sheet.id,
self.config.get('Worksheet', 'id'))
self.assertEqual(self.sheet.title,
self.config.get('Worksheet', 'title'))
self.assertEqual(self.sheet.row_count,
self.config.getint('Worksheet', 'row_count'))
self.assertEqual(self.sheet.col_count,
self.config.getint('Worksheet', 'col_count'))
def test_get_int_addr(self):
self.assertEqual(self.sheet.get_int_addr('ABC3'), (3, 731))
def test_get_addr_int(self):
self.assertEqual(self.sheet.get_addr_int(3, 731), 'ABC3')
self.assertEqual(self.sheet.get_addr_int(1, 104),'CZ1')
def test_addr_converters(self):
for row in range(1, 257):
for col in range(1, 512):
addr = self.sheet.get_addr_int(row, col)
(r, c) = self.sheet.get_int_addr(addr)
self.assertEqual((row, col), (r, c))
def test_acell(self):
cell = self.sheet.acell('A1')
self.assertTrue(isinstance(cell, gspread.Cell))
def test_cell(self):
cell = self.sheet.cell(1, 1)
self.assertTrue(isinstance(cell, gspread.Cell))
def test_range(self):
cell_range = self.sheet.range('A1:A5')
for c in cell_range:
self.assertTrue(isinstance(c, gspread.Cell))
def test_update_acell(self):
value = hashlib.md5(str(time.time())).hexdigest()
self.sheet.update_acell('A2', value)
self.assertEqual(self.sheet.acell('A2').value, value)
def test_update_cell(self):
value = hashlib.md5(str(time.time())).hexdigest()
self.sheet.update_cell(1, 2, value)
self.assertEqual(self.sheet.cell(1, 2).value, value)
self.sheet.update_cell(1, 2, 42)
self.assertEqual(self.sheet.cell(1, 2).value, '42')
self.sheet.update_cell(1, 2, 42)
self.assertEqual(self.sheet.cell(1, 2).value, '42')
self.sheet.update_cell(1, 2, 42.01)
self.assertEqual(self.sheet.cell(1, 2).value, '42.01')
self.sheet.update_cell(1, 2, u'Артур')
self.assertEqual(self.sheet.cell(1, 2).value, u'Артур')
def test_update_cell_multiline(self):
value = hashlib.md5(str(time.time())).hexdigest()
value = "%s\n%s" % (value, value)
self.sheet.update_cell(1, 2, value)
self.assertEqual(self.sheet.cell(1, 2).value, value)
def test_update_cells(self):
list_len = 10
value_list = [hashlib.md5(str(time.time() + i)).hexdigest()
for i in range(list_len)]
# Test multiline
value_list[0] = "%s\n%s" % (value_list[0], value_list[0])
range_label = 'A1:A%s' % list_len
cell_list = self.sheet.range(range_label)
for c, v in zip(cell_list, value_list):
c.value = v
self.sheet.update_cells(cell_list)
cell_list = self.sheet.range(range_label)
for c, v in zip(cell_list, value_list):
self.assertEqual(c.value, v)
def test_resize(self):
add_num = 10
new_rows = self.sheet.row_count + add_num
self.sheet.add_rows(add_num)
self.assertEqual(self.sheet.row_count, new_rows)
new_cols = self.sheet.col_count + add_num
self.sheet.add_cols(add_num)
self.assertEqual(self.sheet.col_count, new_cols)
new_rows -= add_num
new_cols -= add_num
self.sheet.resize(new_rows, new_cols)
self.assertEqual(self.sheet.row_count, new_rows)
self.assertEqual(self.sheet.col_count, new_cols)
def test_find(self):
sheet = self.sheet
value = hashlib.md5(str(time.time())).hexdigest()
sheet.update_cell(2, 10, value)
sheet.update_cell(2, 11, value)
cell = sheet.find(value)
self.assertEqual(cell.value, value)
value2 = hashlib.md5(str(time.time())).hexdigest()
value = "%so_O%s" % (value, value2)
sheet.update_cell(2, 11, value)
o_O_re = re.compile('[a-z]_[A-Z]%s' % value2)
cell = sheet.find(o_O_re)
self.assertEqual(cell.value, value)
def test_findall(self):
sheet = self.sheet
list_len = 10
range_label = 'A1:A%s' % list_len
cell_list = sheet.range(range_label)
value = hashlib.md5(str(time.time())).hexdigest()
for c in cell_list:
c.value = value
sheet.update_cells(cell_list)
result_list = sheet.findall(value)
self.assertEqual(list_len, len(result_list))
for c in result_list:
self.assertEqual(c.value, value)
cell_list = sheet.range(range_label)
value = hashlib.md5(str(time.time())).hexdigest()
for c in cell_list:
char = chr(random.randrange(ord('a'), ord('z')))
c.value = "%s%s_%s%s" % (c.value, char, char.upper(), value)
sheet.update_cells(cell_list)
o_O_re = re.compile('[a-z]_[A-Z]%s' % value)
result_list = sheet.findall(o_O_re)
self.assertEqual(list_len, len(result_list))
def test_get_all_values(self):
# make a new, clean worksheet
self.spreadsheet.add_worksheet('get_all_values_test', 10, 5)
sheet = self.spreadsheet.worksheet('get_all_values_test')
        # put in new values, built from a list of rows
rows = [["A1", "B1", "", "D1"],
["", "b2", "", ""],
["", "", "", ""],
["A4", "B4", "", "D4"]]
cell_list = sheet.range('A1:D1')
cell_list.extend(sheet.range('A2:D2'))
cell_list.extend(sheet.range('A3:D3'))
cell_list.extend(sheet.range('A4:D4'))
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
sheet.update_cells(cell_list)
# read values with get_all_values, get a list of lists
read_data = sheet.get_all_values()
# values should match with original lists
self.assertEqual(read_data, rows)
# clean up newly added worksheet
# will have to be done by hand; there is no delete worksheet method
def test_get_all_records(self):
# make a new, clean worksheet
# same as for test_all_values, find a way to refactor it
self.spreadsheet.add_worksheet('get_all_values_test', 10, 5)
sheet = self.spreadsheet.worksheet('get_all_values_test')
        # put in new values, built from a list of rows
rows = [["A1", "B1", "", "D1"],
[1, "b2", 1.45, ""],
["", "", "", ""],
["A4", 0.4, "", 4]]
cell_list = sheet.range('A1:D1')
cell_list.extend(sheet.range('A2:D2'))
cell_list.extend(sheet.range('A3:D3'))
cell_list.extend(sheet.range('A4:D4'))
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
sheet.update_cells(cell_list)
# first, read empty strings to empty strings
read_records = sheet.get_all_records()
d0 = dict(zip(rows[0], rows[1]))
d1 = dict(zip(rows[0], rows[2]))
d2 = dict(zip(rows[0], rows[3]))
self.assertEqual(read_records[0], d0)
self.assertEqual(read_records[1], d1)
self.assertEqual(read_records[2], d2)
# then, read empty strings to zeros
read_records = sheet.get_all_records(empty2zero=True)
d1 = dict(zip(rows[0], (0, 0, 0, 0)))
self.assertEqual(read_records[1], d1)
def test_get_all_records_different_header(self):
# make a new, clean worksheet
# same as for test_all_values, find a way to refactor it
self.spreadsheet.add_worksheet('get_all_records', 10, 5)
sheet = self.spreadsheet.worksheet('get_all_records')
        # put in new values, built from a list of rows
rows = [["", "", "", ""],
["", "", "", ""],
["A1", "B1", "", "D1"],
[1, "b2", 1.45, ""],
["", "", "", ""],
["A4", 0.4, "", 4]]
cell_list = sheet.range('A1:D1')
cell_list.extend(sheet.range('A2:D2'))
cell_list.extend(sheet.range('A3:D3'))
cell_list.extend(sheet.range('A4:D4'))
cell_list.extend(sheet.range('A5:D5'))
cell_list.extend(sheet.range('A6:D6'))
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
sheet.update_cells(cell_list)
# first, read empty strings to empty strings
read_records = sheet.get_all_records(head=3)
d0 = dict(zip(rows[2], rows[3]))
d1 = dict(zip(rows[2], rows[4]))
d2 = dict(zip(rows[2], rows[5]))
self.assertEqual(read_records[0], d0)
self.assertEqual(read_records[1], d1)
self.assertEqual(read_records[2], d2)
# then, read empty strings to zeros
read_records = sheet.get_all_records(empty2zero=True, head=3)
d1 = dict(zip(rows[2], (0, 0, 0, 0)))
self.assertEqual(read_records[1], d1)
self.gc.del_worksheet(sheet)
def test_append_row(self):
num_rows = self.sheet.row_count
num_cols = self.sheet.col_count
values = ['o_0'] * (num_cols + 4)
self.sheet.append_row(values)
self.assertEqual(self.sheet.row_count, num_rows + 1)
self.assertEqual(self.sheet.col_count, num_cols + 4)
read_values = self.sheet.row_values(self.sheet.row_count)
self.assertEqual(values, read_values)
# undo the appending and resizing
self.sheet.resize(num_rows, num_cols)
def test_insert_row(self):
num_rows = self.sheet.row_count
num_cols = self.sheet.col_count
values = ['o_0'] * (num_cols + 4)
self.sheet.insert_row(values, 1)
self.assertEqual(self.sheet.row_count, num_rows + 1)
self.assertEqual(self.sheet.col_count, num_cols + 4)
read_values = self.sheet.row_values(1)
self.assertEqual(values, read_values)
# undo the appending and resizing
# self.sheet.resize(num_rows, num_cols)
def test_export(self):
list_len = 10
time_md5 = hashlib.md5(str(time.time())).hexdigest()
wks_name = 'export_test_%s' % time_md5
self.spreadsheet.add_worksheet(wks_name, list_len, 5)
sheet = self.spreadsheet.worksheet(wks_name)
value_list = [hashlib.md5(str(time.time() + i)).hexdigest()
for i in range(list_len)]
range_label = 'A1:A%s' % list_len
cell_list = sheet.range(range_label)
for c, v in zip(cell_list, value_list):
c.value = v
sheet.update_cells(cell_list)
exported_data = sheet.export(format='csv').read()
csv_value = '\n'.join(value_list)
self.assertEqual(exported_data, csv_value)
class WorksheetDeleteTest(GspreadTest):
def setUp(self):
super(WorksheetDeleteTest, self).setUp()
title = self.config.get('Spreadsheet', 'title')
self.spreadsheet = self.gc.open(title)
ws1_name = self.config.get('WorksheetDelete', 'ws1_name')
ws2_name = self.config.get('WorksheetDelete', 'ws2_name')
self.ws1 = self.spreadsheet.add_worksheet(ws1_name, 1, 1)
self.ws2 = self.spreadsheet.add_worksheet(ws2_name, 1, 1)
def test_delete_multiple_worksheets(self):
self.spreadsheet.del_worksheet(self.ws1)
self.spreadsheet.del_worksheet(self.ws2)
class CellTest(GspreadTest):
"""Test for gspread.Cell."""
def setUp(self):
super(CellTest, self).setUp()
title = self.config.get('Spreadsheet', 'title')
sheet = self.gc.open(title).sheet1
self.sheet = sheet
def test_properties(self):
update_value = hashlib.md5(str(time.time())).hexdigest()
self.sheet.update_acell('A1', update_value)
cell = self.sheet.acell('A1')
self.assertEqual(cell.value, update_value)
self.assertEqual(cell.row, 1)
self.assertEqual(cell.col, 1)
def test_numeric_value(self):
numeric_value = 1.0 / 1024
# Use a formula here to avoid issues with differing decimal marks:
self.sheet.update_acell('A1', '= 1 / 1024')
cell = self.sheet.acell('A1')
self.assertEqual(cell.numeric_value, numeric_value)
self.assertIsInstance(cell.numeric_value, float)
self.sheet.update_acell('A1', 'Non-numeric value')
cell = self.sheet.acell('A1')
self.assertIs(cell.numeric_value, None)
|
malishevg/edugraph
|
refs/heads/master
|
cms/envs/dev_shared_preview.py
|
57
|
"""
This configuration has localdev use a preview.localhost hostname for the preview LMS so that we can share
the same process between preview and published
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .dev import *
FEATURES['PREVIEW_LMS_BASE'] = "preview.localhost:8000"
|
msebire/intellij-community
|
refs/heads/master
|
python/testData/mover/lastLine_afterDown.py
|
83
|
print "first"
print "second"
|
peteraldaron/runotar
|
refs/heads/master
|
main.py
|
1
|
'''
use python3
'''
import query, db, word, utils
q = query.Query()
word = q.getWordEntryForLanguage("-n", language="Finnish")
dataBase = db.DataBase()
dataBase.upsertOneToCollection(word.toObject(), word.hashValueDict, "name-test")
'''
print(word.suffix_match("sattua", "vapua"));
print(word.suffix_match_vowels("satama", "sorava"));
print(word.suffix_match_vowels("syväille", "ensikymäille"));
'''
#print(list(q.linksIterator))
#print(list(word.parseWikiEntry(q.getPageContent("olla")).keys()))
|
r-mart/scikit-learn
|
refs/heads/master
|
sklearn/manifold/setup.py
|
198
|
import os
import numpy
from numpy.distutils.misc_util import Configuration
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
|
TshepangRas/tshilo-dikotla
|
refs/heads/develop
|
td_maternal/tests/factories/rapid_test_result_factory.py
|
2
|
import factory
from django.utils import timezone
from edc_registration.tests.factories import RegisteredSubjectFactory
from edc_constants.choices import YES, NO, POS, NEG, NOT_APPLICABLE
from td_maternal.models import RapidTestResult
from .maternal_visit_factory import MaternalVisitFactory
class RapidTestResultFactory(factory.DjangoModelFactory):
class Meta:
model = RapidTestResult
maternal_visit = factory.SubFactory(MaternalVisitFactory)
report_datetime = timezone.now()
rapid_test_done = YES
result_date = timezone.datetime.today()
result = POS
|
saydulk/newfies-dialer
|
refs/heads/develop
|
newfies/dialer_cdr/constants.py
|
4
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from django.utils.translation import ugettext_lazy as _
from django_lets_go.utils import Choice
class CALLREQUEST_STATUS(Choice):
"""
Store the Call Request Status
"""
PENDING = 1, _("pending")
FAILURE = 2, _("failure")
RETRY = 3, _("retry")
SUCCESS = 4, _("success")
ABORT = 5, _("abort")
PAUSE = 6, _("pause")
CALLING = 7, _("calling")
class CALLREQUEST_TYPE(Choice):
"""
Store the Call Request Type
"""
ALLOW_RETRY = 1, _('ALLOW RETRY')
CANNOT_RETRY = 2, _('CANNOT RETRY')
RETRY_DONE = 3, _('RETRY DONE')
class LEG_TYPE(Choice):
"""
Store the Leg Type
"""
A_LEG = 1, _('A-Leg')
B_LEG = 2, _('B-Leg')
class CALL_DISPOSITION(Choice):
"""
Store the Call Disposition
"""
ANSWER = 'ANSWER', _('ANSWER')
BUSY = 'BUSY', _('BUSY')
NOANSWER = 'NOANSWER', _('NOANSWER')
CANCEL = 'CANCEL', _('CANCEL')
CONGESTION = 'CONGESTION', _('CONGESTION')
FAILED = 'FAILED', _('FAILED') # Added to catch all
# Column Name for the CDR Report
CDR_REPORT_COLUMN_NAME = {
'date': _('start date'),
'call_id': _('call ID'),
'leg': _('leg'),
'caller_id': _('caller ID'),
'phone_no': _('phone no'),
'gateway': _('gateway'),
'duration': _('duration'),
'bill_sec': _('bill sec'),
'disposition': _('disposition'),
'amd_status': _('amd status')
}
class VOIPCALL_AMD_STATUS(Choice):
"""
Store the AMD Status
"""
PERSON = 1, _("PERSON")
MACHINE = 2, _("MACHINE")
UNSURE = 3, _("UNSURE")
|
TomTranter/OpenPNM
|
refs/heads/master
|
openpnm/__init__.py
|
1
|
r"""
::
o-o o--o o o o o
o o | | |\ | |\ /|
| | o-o o-o o-o o--o | \ | | o |
o o | | |-' | | | | \| | |
o-o o-o o-o o o o o o o o
|
o
**OpenPNM**
OpenPNM is a package for performing pore network simulations of transport in
porous materials.
It consists of the following submodules:
+----------------+------------------------------------------------------------+
| Submodule | Contents and Description |
+================+============================================================+
| ``core`` | Houses the ``Base`` & ``Subdomain`` classes, & model |
| | related mixins |
+----------------+------------------------------------------------------------+
| ``network`` | ``GenericNetwork`` class plus various network generators |
+----------------+------------------------------------------------------------+
| ``geometry`` | ``GenericGeometry`` class plus some subclasses containing a|
| | predefined set of pore-scale models |
+----------------+------------------------------------------------------------+
| ``phases`` | ``GenericPhase`` class plus some subclasses containing |
| | predefined models for common fluids like Water |
+----------------+------------------------------------------------------------+
| ``physics`` | ``GenericPhysics`` class plus some subclasses containing a |
| | predefined set of pore-scale models |
+----------------+------------------------------------------------------------+
| ``algorithms`` | Algorithms for simulating transport and percolation |
+----------------+------------------------------------------------------------+
| ``materials`` | A collection of predefined projects consisting of a network|
| | with suitable topology and a geometry with necessary models|
+----------------+------------------------------------------------------------+
| ``topotools`` | Tools for querying and manipulating network topology |
+----------------+------------------------------------------------------------+
| ``io`` | Import from and export to various common data formats |
+----------------+------------------------------------------------------------+
| ``utils``      | Helper utilities & classes, including ``Workspace`` and   |
| | ``Project`` |
+----------------+------------------------------------------------------------+
| ``models`` | Library of pore-scale models for calculating geometric, |
| | thermodynamic, and physical properties |
+----------------+------------------------------------------------------------+
"""
__version__ = '2.3.1'
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
from . import utils
from .utils import Workspace, Project
from . import core
from . import models
from . import network
from . import topotools
from . import geometry
from . import phases
from . import physics
from . import algorithms
from . import io
from . import materials
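# Minimal usage sketch (illustrative, assuming the standard 2.x API exposed by
# the imports above): build a small cubic network and inspect it.
#
#   import openpnm as op
#   ws = op.Workspace()
#   pn = op.network.Cubic(shape=[5, 5, 5], spacing=1e-4)
#   print(pn)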
|
joglomedia/p2pool
|
refs/heads/master
|
p2pool/test/bitcoin/test_getwork.py
|
275
|
import unittest
from p2pool.bitcoin import getwork, data as bitcoin_data
class Test(unittest.TestCase):
def test_all(self):
cases = [
{
'target': '0000000000000000000000000000000000000000000000f2b944000000000000',
'midstate': '5982f893102dec03e374b472647c4f19b1b6d21ae4b2ac624f3d2f41b9719404',
'hash1': '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
'data': '0000000163930d52a5ffca79b29b95a659a302cd4e1654194780499000002274000000002e133d9e51f45bc0886d05252038e421e82bff18b67dc14b90d9c3c2f422cd5c4dd4598e1a44b9f200000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000'
},
{
'midstate' : 'f4a9b048c0cb9791bc94b13ee0eec21e713963d524fd140b58bb754dd7b0955f',
'data' : '000000019a1d7342fb62090bda686b22d90f9f73d0f5c418b9c980cd0000011a00000000680b07c8a2f97ecd831f951806857e09f98a3b81cdef1fa71982934fef8dc3444e18585d1a0abbcf00000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000',
'hash1' : '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
'target' : '0000000000000000000000000000000000000000000000cfbb0a000000000000',
'extrathing': 'hi!',
},
{
'data' : '000000019a1d7342fb62090bda686b22d90f9f73d0f5c418b9c980cd0000011a00000000680b07c8a2f97ecd831f951806857e09f98a3b81cdef1fa71982934fef8dc3444e18585d1a0abbcf00000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000',
'hash1' : '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
'target' : '0000000000000000000000000000000000000000000000cfbb0a000000000000',
'extrathing': 'hi!',
},
]
for case in cases:
ba = getwork.BlockAttempt.from_getwork(case)
extra = dict(case)
del extra['data'], extra['hash1'], extra['target']
extra.pop('midstate', None)
getwork_check = ba.getwork(**extra)
assert getwork_check == case or dict((k, v) for k, v in getwork_check.iteritems() if k != 'midstate') == case
case2s = [
getwork.BlockAttempt(
1,
0x148135e10208db85abb62754341a392eab1f186aab077a831cf7,
0x534ea08be1ab529f484369344b6d5423ef5a0767db9b3ebb4e182bbb67962520,
1305759879,
bitcoin_data.FloatingInteger.from_target_upper_bound(0x44b9f20000000000000000000000000000000000000000000000),
0x44b9f20000000000000000000000000000000000000000000000,
),
getwork.BlockAttempt(
1,
0x148135e10208db85abb62754341a392eab1f186aab077a831cf7,
0x534ea08be1ab529f484369344b6d5423ef5a0767db9b3ebb4e182bbb67962520,
1305759879,
bitcoin_data.FloatingInteger.from_target_upper_bound(0x44b9f20000000000000000000000000000000000000000000000),
432*2**230,
),
getwork.BlockAttempt(
1,
0x148135e10208db85abb62754341a392eab1f186aab077a831cf7,
0x534ea08be1ab529f484369344b6d5423ef5a0767db9b3ebb4e182bbb67962520,
1305759879,
bitcoin_data.FloatingInteger.from_target_upper_bound(0x44b9f20000000000000000000000000000000000000000000000),
7*2**240,
)
]
for case2 in case2s:
assert getwork.BlockAttempt.from_getwork(case2.getwork()) == case2
assert getwork.BlockAttempt.from_getwork(case2.getwork(ident='hi')) == case2
case2 = case2.update(previous_block=case2.previous_block - 10)
assert getwork.BlockAttempt.from_getwork(case2.getwork()) == case2
assert getwork.BlockAttempt.from_getwork(case2.getwork(ident='hi')) == case2
|
FireballDWF/cloud-custodian
|
refs/heads/master
|
tools/c7n_mailer/c7n_mailer/sqs_queue_processor.py
|
5
|
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SQS Message Processing
======================
"""
import base64
import json
import logging
import traceback
import zlib
import six
from .email_delivery import EmailDelivery
from .sns_delivery import SnsDelivery
from c7n_mailer.utils import kms_decrypt
DATA_MESSAGE = "maidmsg/1.0"
class MailerSqsQueueIterator(object):
# Copied from custodian to avoid runtime library dependency
msg_attributes = ['sequence_id', 'op', 'ser']
def __init__(self, aws_sqs, queue_url, logger, limit=0, timeout=10):
self.aws_sqs = aws_sqs
self.queue_url = queue_url
self.limit = limit
self.logger = logger
self.timeout = timeout
self.messages = []
# this and the next function make this object iterable with a for loop
def __iter__(self):
return self
def __next__(self):
if self.messages:
return self.messages.pop(0)
response = self.aws_sqs.receive_message(
QueueUrl=self.queue_url,
WaitTimeSeconds=self.timeout,
MaxNumberOfMessages=3,
MessageAttributeNames=self.msg_attributes,
AttributeNames=['SentTimestamp']
)
msgs = response.get('Messages', [])
self.logger.debug('Messages received %d', len(msgs))
for m in msgs:
self.messages.append(m)
if self.messages:
return self.messages.pop(0)
raise StopIteration()
next = __next__ # python2.7
def ack(self, m):
self.aws_sqs.delete_message(
QueueUrl=self.queue_url,
ReceiptHandle=m['ReceiptHandle'])
class MailerSqsQueueProcessor(object):
def __init__(self, config, session, logger, max_num_processes=16):
self.config = config
self.logger = logger
self.session = session
self.max_num_processes = max_num_processes
self.receive_queue = self.config['queue_url']
if self.config.get('debug', False):
self.logger.debug('debug logging is turned on from mailer config file.')
logger.setLevel(logging.DEBUG)
"""
Cases
- aws resource is tagged CreatorName: 'milton', ldap_tag_uids has CreatorName,
we do an ldap lookup, get milton's email and send him an email
        - you put an email in the to: field of the notify of your policy, we send an email
            for all resources enforced by that policy
        - you put an sns topic in the to: field of the notify of your policy, we send an sns
            message for all resources enforced by that policy
        - a lambda enforces a policy based on an event, we look up the event's aws username, get their
            ldap email and send them an email about the policy enforcement (from lambda) for the event
        - resource-owners has a list of tags, SupportEmail, OwnerEmail, or any others; if your resources
            include those tags with valid emails, we'll send an email for those resources
- resource-owners has a list of tags, SnSTopic, we'll deliver an sns message for
any resources with SnSTopic set with a value that is a valid sns topic.
"""
def run(self, parallel=False):
self.logger.info("Downloading messages from the SQS queue.")
aws_sqs = self.session.client('sqs')
sqs_messages = MailerSqsQueueIterator(aws_sqs, self.receive_queue, self.logger)
sqs_messages.msg_attributes = ['mtype', 'recipient']
# lambda doesn't support multiprocessing, so we don't instantiate any mp stuff
# unless it's being run from CLI on a normal system with SHM
if parallel:
import multiprocessing
process_pool = multiprocessing.Pool(processes=self.max_num_processes)
for sqs_message in sqs_messages:
self.logger.debug(
"Message id: %s received %s" % (
sqs_message['MessageId'], sqs_message.get('MessageAttributes', '')))
msg_kind = sqs_message.get('MessageAttributes', {}).get('mtype')
if msg_kind:
msg_kind = msg_kind['StringValue']
            if msg_kind != DATA_MESSAGE:
warning_msg = 'Unknown sqs_message or sns format %s' % (sqs_message['Body'][:50])
self.logger.warning(warning_msg)
if parallel:
                process_pool.apply_async(self.process_sqs_message, args=(sqs_message,))
else:
self.process_sqs_message(sqs_message)
self.logger.debug('Processed sqs_message')
sqs_messages.ack(sqs_message)
if parallel:
process_pool.close()
process_pool.join()
self.logger.info('No sqs_messages left on the queue, exiting c7n_mailer.')
return
    # When processing sqs messages, this function delivers them over email or sns
    # (plus slack, datadog, and splunk hec when configured).
# If you explicitly declare which tags are aws_usernames (synonymous with ldap uids)
# in the ldap_uid_tags section of your mailer.yml, we'll do a lookup of those emails
# (and their manager if that option is on) and also send emails there.
def process_sqs_message(self, encoded_sqs_message):
body = encoded_sqs_message['Body']
try:
body = json.dumps(json.loads(body)['Message'])
except ValueError:
pass
sqs_message = json.loads(zlib.decompress(base64.b64decode(body)))
self.logger.debug("Got account:%s message:%s %s:%d policy:%s recipients:%s" % (
sqs_message.get('account', 'na'),
encoded_sqs_message['MessageId'],
sqs_message['policy']['resource'],
len(sqs_message['resources']),
sqs_message['policy']['name'],
', '.join(sqs_message['action'].get('to'))))
# get the map of email_to_addresses to mimetext messages (with resources baked in)
# and send any emails (to SES or SMTP) if there are email addresses found
email_delivery = EmailDelivery(self.config, self.session, self.logger)
to_addrs_to_email_messages_map = email_delivery.get_to_addrs_email_messages_map(sqs_message)
for email_to_addrs, mimetext_msg in six.iteritems(to_addrs_to_email_messages_map):
email_delivery.send_c7n_email(sqs_message, list(email_to_addrs), mimetext_msg)
# this sections gets the map of sns_to_addresses to rendered_jinja messages
# (with resources baked in) and delivers the message to each sns topic
sns_delivery = SnsDelivery(self.config, self.session, self.logger)
sns_message_packages = sns_delivery.get_sns_message_packages(sqs_message)
sns_delivery.deliver_sns_messages(sns_message_packages, sqs_message)
# this section sends a notification to the resource owner via Slack
if any(e.startswith('slack') or e.startswith('https://hooks.slack.com/')
               for e in sqs_message.get('action', {}).get('to', []) +
               sqs_message.get('action', {}).get('owner_absent_contact', [])):
from .slack_delivery import SlackDelivery
if self.config.get('slack_token'):
self.config['slack_token'] = \
kms_decrypt(self.config, self.logger, self.session, 'slack_token')
slack_delivery = SlackDelivery(self.config, self.logger, email_delivery)
slack_messages = slack_delivery.get_to_addrs_slack_messages_map(sqs_message)
try:
slack_delivery.slack_handler(sqs_message, slack_messages)
except Exception:
traceback.print_exc()
pass
# this section gets the map of metrics to send to datadog and delivers it
        if any(e.startswith('datadog') for e in sqs_message.get('action', {}).get('to', [])):
from .datadog_delivery import DataDogDelivery
datadog_delivery = DataDogDelivery(self.config, self.session, self.logger)
datadog_message_packages = datadog_delivery.get_datadog_message_packages(sqs_message)
try:
datadog_delivery.deliver_datadog_messages(datadog_message_packages, sqs_message)
except Exception:
traceback.print_exc()
pass
# this section sends the full event to a Splunk HTTP Event Collector (HEC)
if any(
e.startswith('splunkhec://')
            for e in sqs_message.get('action', {}).get('to', [])
):
from .splunk_delivery import SplunkHecDelivery
splunk_delivery = SplunkHecDelivery(self.config, self.session, self.logger)
splunk_messages = splunk_delivery.get_splunk_payloads(
sqs_message, encoded_sqs_message['Attributes']['SentTimestamp']
)
try:
splunk_delivery.deliver_splunk_messages(splunk_messages)
except Exception:
traceback.print_exc()
pass
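# Illustrative sketch (not part of the original module): process_sqs_message()
# expects the SQS body to be a zlib-compressed, base64-encoded JSON document, so
# a test message can be produced by reversing the decode step used above
# (field values below are placeholders):
#
#   import base64, json, zlib
#   payload = {'account': 'na', 'policy': {'name': 'p', 'resource': 'ec2'},
#              'resources': [], 'action': {'to': []}}
#   body = base64.b64encode(zlib.compress(json.dumps(payload).encode('utf-8')))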
|
abhilashnta/edx-platform
|
refs/heads/master
|
common/djangoapps/third_party_auth/admin.py
|
40
|
# -*- coding: utf-8 -*-
"""
Admin site configuration for third party authentication
"""
from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin, KeyedConfigurationModelAdmin
from .models import OAuth2ProviderConfig, SAMLProviderConfig, SAMLConfiguration, SAMLProviderData
from .tasks import fetch_saml_metadata
admin.site.register(OAuth2ProviderConfig, KeyedConfigurationModelAdmin)
class SAMLProviderConfigAdmin(KeyedConfigurationModelAdmin):
""" Django Admin class for SAMLProviderConfig """
def get_list_display(self, request):
""" Don't show every single field in the admin change list """
return (
'name', 'enabled', 'backend_name', 'entity_id', 'metadata_source',
'has_data', 'icon_class', 'change_date', 'changed_by', 'edit_link'
)
def has_data(self, inst):
""" Do we have cached metadata for this SAML provider? """
if not inst.is_active:
return None # N/A
data = SAMLProviderData.current(inst.entity_id)
return bool(data and data.is_valid())
has_data.short_description = u'Metadata Ready'
has_data.boolean = True
def save_model(self, request, obj, form, change):
"""
Post save: Queue an asynchronous metadata fetch to update SAMLProviderData.
We only want to do this for manual edits done using the admin interface.
Note: This only works if the celery worker and the app worker are using the
same 'configuration' cache.
"""
super(SAMLProviderConfigAdmin, self).save_model(request, obj, form, change)
fetch_saml_metadata.apply_async((), countdown=2)
admin.site.register(SAMLProviderConfig, SAMLProviderConfigAdmin)
class SAMLConfigurationAdmin(ConfigurationModelAdmin):
""" Django Admin class for SAMLConfiguration """
def get_list_display(self, request):
""" Shorten the public/private keys in the change view """
return (
'change_date', 'changed_by', 'enabled', 'entity_id',
'org_info_str', 'key_summary',
)
def key_summary(self, inst):
""" Short summary of the key pairs configured """
if not inst.public_key or not inst.private_key:
return u'<em>Key pair incomplete/missing</em>'
pub1, pub2 = inst.public_key[0:10], inst.public_key[-10:]
priv1, priv2 = inst.private_key[0:10], inst.private_key[-10:]
return u'Public: {}…{}<br>Private: {}…{}'.format(pub1, pub2, priv1, priv2)
key_summary.allow_tags = True
admin.site.register(SAMLConfiguration, SAMLConfigurationAdmin)
class SAMLProviderDataAdmin(admin.ModelAdmin):
""" Django Admin class for SAMLProviderData (Read Only) """
list_display = ('entity_id', 'is_valid', 'fetched_at', 'expires_at', 'sso_url')
readonly_fields = ('is_valid', )
def get_readonly_fields(self, request, obj=None):
if obj: # editing an existing object
return self.model._meta.get_all_field_names() # pylint: disable=protected-access
return self.readonly_fields
admin.site.register(SAMLProviderData, SAMLProviderDataAdmin)
|
Moussee/Fun-with-StarWars-people
|
refs/heads/master
|
node_modules/node-gyp/gyp/setup.py
|
2462
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from setuptools import setup
setup(
name='gyp',
version='0.1',
description='Generate Your Projects',
author='Chromium Authors',
author_email='chromium-dev@googlegroups.com',
url='http://code.google.com/p/gyp',
package_dir = {'': 'pylib'},
packages=['gyp', 'gyp.generator'],
entry_points = {'console_scripts': ['gyp=gyp:script_main'] }
)
|
guewen/OpenUpgrade
|
refs/heads/master
|
addons/account/tests/test_search.py
|
204
|
from openerp.tests.common import TransactionCase
class TestSearch(TransactionCase):
"""Tests for search on name_search (account.account)
    The name search on account.account is quite complex, so make sure
we have all the correct results
"""
def setUp(self):
super(TestSearch, self).setUp()
cr, uid = self.cr, self.uid
self.account_model = self.registry('account.account')
self.account_type_model = self.registry('account.account.type')
ac_ids = self.account_type_model.search(cr, uid, [], limit=1)
self.atax = (int(self.account_model.create(cr, uid, dict(
name="Tax Received",
code="121",
user_type=ac_ids[0],
))), "121 Tax Received")
self.apurchase = (int(self.account_model.create(cr, uid, dict(
name="Purchased Stocks",
code="1101",
user_type=ac_ids[0],
))), "1101 Purchased Stocks")
self.asale = (int(self.account_model.create(cr, uid, dict(
name="Product Sales",
code="200",
user_type=ac_ids[0],
))), "200 Product Sales")
self.all_ids = [self.atax[0], self.apurchase[0], self.asale[0]]
def test_name_search(self):
cr, uid = self.cr, self.uid
atax_ids = self.account_model.name_search(cr, uid, name="Tax", operator='ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.atax[0]]), set([a[0] for a in atax_ids]), "name_search 'ilike Tax' should have returned Tax Received account only")
atax_ids = self.account_model.name_search(cr, uid, name="Tax", operator='not ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.apurchase[0], self.asale[0]]), set([a[0] for a in atax_ids]), "name_search 'not ilike Tax' should have returned all but Tax Received account")
apur_ids = self.account_model.name_search(cr, uid, name='1101', operator='ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.apurchase[0]]), set([a[0] for a in apur_ids]), "name_search 'ilike 1101' should have returned Purchased Stocks account only")
apur_ids = self.account_model.name_search(cr, uid, name='1101', operator='not ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.atax[0], self.asale[0]]), set([a[0] for a in apur_ids]), "name_search 'not ilike 1101' should have returned all but Purchased Stocks account")
asale_ids = self.account_model.name_search(cr, uid, name='200 Sales', operator='ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.asale[0]]), set([a[0] for a in asale_ids]), "name_search 'ilike 200 Sales' should have returned Product Sales account only")
asale_ids = self.account_model.name_search(cr, uid, name='200 Sales', operator='not ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.atax[0], self.apurchase[0]]), set([a[0] for a in asale_ids]), "name_search 'not ilike 200 Sales' should have returned all but Product Sales account")
asale_ids = self.account_model.name_search(cr, uid, name='Product Sales', operator='ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.asale[0]]), set([a[0] for a in asale_ids]), "name_search 'ilike Product Sales' should have returned Product Sales account only")
asale_ids = self.account_model.name_search(cr, uid, name='Product Sales', operator='not ilike', args=[('id', 'in', self.all_ids)])
self.assertEqual(set([self.atax[0], self.apurchase[0]]), set([a[0] for a in asale_ids]), "name_search 'not ilike Product Sales' should have returned all but Product Sales account")
|
devendermishrajio/nova
|
refs/heads/master
|
nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
|
18
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_serialization import jsonutils
from nova import objects
from nova.objects import base as obj_base
from nova.scheduler.filters import numa_topology_filter
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.scheduler import fakes
class TestNUMATopologyFilter(test.NoDBTestCase):
def setUp(self):
super(TestNUMATopologyFilter, self).setUp()
self.filt_cls = numa_topology_filter.NUMATopologyFilter()
def test_numa_topology_filter_pass(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = None
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_fail_fit(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([2]), memory=512),
objects.InstanceNUMACell(id=2, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_fail_memory(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
memory=1024),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_fail_cpu(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4, 5]),
memory=512)])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 1,
'ram_allocation_ratio': 1.5})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_pass_set_limit(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
instance.numa_topology = instance_topology
filter_properties = {
'request_spec': {
'instance_properties': jsonutils.to_primitive(
obj_base.obj_to_primitive(instance))}}
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 21,
'ram_allocation_ratio': 1.3})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
limits = host.limits['numa_topology']
self.assertEqual(limits.cpu_allocation_ratio, 21)
self.assertEqual(limits.ram_allocation_ratio, 1.3)
|
anhdiepmmk/yowsup
|
refs/heads/master
|
yowsup/layers/protocol_groups/protocolentities/iq_groups_create.py
|
41
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .iq_groups import GroupsIqProtocolEntity
class CreateGroupsIqProtocolEntity(GroupsIqProtocolEntity):
'''
    <iq type="set" id="{{id}}" xmlns="w:g2" to="g.us">
<create subject="{{subject}}">
<participant jid="{{jid}}"></participant>
</create>
</iq>
'''
def __init__(self, subject, _id = None, participants = None):
super(CreateGroupsIqProtocolEntity, self).__init__(to = "g.us", _id = _id, _type = "set")
self.setProps(subject)
self.setParticipants(participants or [])
def setProps(self, subject):
self.subject = subject
def setParticipants(self, participants):
self.participantList = participants
def toProtocolTreeNode(self):
node = super(CreateGroupsIqProtocolEntity, self).toProtocolTreeNode()
cnode = ProtocolTreeNode("create",{ "subject": self.subject})
participantNodes = [
ProtocolTreeNode("participant", {
"jid": participant
})
for participant in self.participantList
]
cnode.addChildren(participantNodes)
node.addChild(cnode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = super(CreateGroupsIqProtocolEntity,CreateGroupsIqProtocolEntity).fromProtocolTreeNode(node)
entity.__class__ = CreateGroupsIqProtocolEntity
entity.setProps(node.getChild("create").getAttributeValue("subject"))
participantList = []
for participantNode in node.getChild("create").getAllChildren():
participantList.append(participantNode["jid"])
entity.setParticipants(participantList)
return entity
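# Usage sketch (illustrative addition, not part of the original module): build
# the entity, serialize it to the XML documented above, and parse it back.
# The subject and participant jid below are hypothetical placeholders, and the
# round trip assumes the usual yowsup ProtocolTreeNode behaviour.
if __name__ == "__main__":
    entity = CreateGroupsIqProtocolEntity(
        "holidays", participants=["123456789@s.whatsapp.net"])
    node = entity.toProtocolTreeNode()
    print(node)  # <iq ...><create subject="holidays"><participant .../></create></iq>
    parsed = CreateGroupsIqProtocolEntity.fromProtocolTreeNode(node)
    print(parsed.subject, parsed.participantList)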
|
grshane/monthofmud
|
refs/heads/master
|
web/themes/custom/mom/node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py
|
2710
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
      environment: dictionary of environment variables for the debug run.
        (optional)
      working_directory: working directory for the debugged process. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
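# Usage sketch (illustrative addition, not part of the original gyp module):
# how the writer above is typically driven. The file name, command and the
# `version` object are hypothetical; `version` must provide ProjectVersion()
# as used by WriteIfChanged().
def _example_user_file(version):
  writer = Writer('example.vcproj.user', version, 'example')
  writer.AddConfig('Debug|Win32')
  # Arguments with spaces or embedded quotes are escaped by
  # _QuoteWin32CommandLineArgs, e.g. 'a b' is emitted as '"a b"'.
  writer.AddDebugSettings('Debug|Win32',
                          ['example.exe', '--flag', 'a b'],
                          environment={'EXAMPLE_VAR': '1'},
                          working_directory='.')
  writer.WriteIfChanged()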
|
vikas-Avnish/google-blog-converters-appengine
|
refs/heads/master
|
src/movabletype2blogger/mt2b.py
|
30
|
#!/usr/bin/python2.4
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import os.path
import logging
import re
import sys
import time
from xml.sax.saxutils import escape
import gdata
from gdata import atom
__author__ = 'JJ Lueck (jlueck@gmail.com)'
########################
# Constants
########################
CATEGORY_NS = 'http://www.blogger.com/atom/ns#'
CATEGORY_KIND = 'http://schemas.google.com/g/2005#kind'
POST_KIND = 'http://schemas.google.com/blogger/2008/kind#post'
COMMENT_KIND = 'http://schemas.google.com/blogger/2008/kind#comment'
ATOM_TYPE = 'application/atom+xml'
HTML_TYPE = 'text/html'
ATOM_THREADING_NS = 'http://purl.org/syndication/thread/1.0'
DUMMY_URI = 'http://www.blogger.com/'
###########################
# Helper Atom class
###########################
class BloggerGDataFeed(gdata.GDataFeed):
def _ToElementTree(self):
tree = gdata.GDataFeed._ToElementTree(self)
# Modify the tree such that entries are always the last elements
# of the top-level feed. This conforms to the Atom specification
# and fixes a bug where the Blog title may exist after the entries
# which causes Blogger to ignore the title.
for i in reversed(range(len(tree))):
if tree[i].tag.endswith('entry'):
break
subelem = tree[i]
tree.remove(subelem)
tree.insert(0, subelem)
return tree
class InReplyTo(atom.ExtensionElement):
"""Supplies the in-reply-to element from the Atom threading protocol."""
def __init__(self, ref, href=None):
"""Constructs an InReplyTo element."""
attrs = {}
attrs['ref'] = ref
attrs['type'] = ATOM_TYPE
if href:
attrs['href'] = href
atom.ExtensionElement.__init__(self, 'in-reply-to',
namespace=ATOM_THREADING_NS,
attributes=attrs)
###########################
# Translation class
###########################
class MovableType2Blogger(object):
"""Performs the translation of MovableType text export to Blogger
export format.
"""
def __init__(self):
self.next_id = 1
def Translate(self, infile, outfile):
"""Performs the actual translation to a Blogger export format.
Args:
infile: The input MovableType export file
outfile: The output file that should receive the translated document
"""
# Create the top-level feed object
feed = BloggerGDataFeed()
# Fill in the feed object with the boilerplate metadata
feed.generator = atom.Generator(text='Blogger')
feed.title = atom.Title(text='MovableType blog')
feed.link.append(
atom.Link(href=DUMMY_URI, rel='self', link_type=ATOM_TYPE))
feed.link.append(
atom.Link(href=DUMMY_URI, rel='alternate', link_type=HTML_TYPE))
# Calculate the last updated time by inspecting all of the posts
last_updated = 0
# These three variables keep the state as we parse the file
post_entry = self._GetNewEntry(POST_KIND) # The current post atom.Entry
comment_entry = None # The current comment atom.Entry
last_entry = None # The previous post atom.Entry if exists
tag_name = None # The current name of multi-line values
tag_contents = '' # The contents of multi-line values
# Loop through the text lines looking for key/value pairs
for line in infile:
# Remove whitespace
line = line.strip().lstrip(codecs.BOM_UTF8)
# Check for the post ending token
if line == '-' * 8:
# If the body tag is still being read, add what has been read.
if tag_name == 'BODY':
post_entry.content = atom.Content(
content_type='html', text=self._TranslateContents(tag_contents))
# Add the post to our feed
feed.entry.insert(0, post_entry)
last_entry = post_entry
# Reset the state variables
post_entry = self._GetNewEntry(POST_KIND)
comment_entry = None
tag_name = None
tag_contents = ''
continue
# Check for the tag ending separator
elif line == '-' * 5:
# Get the contents of the body and set the entry contents
if tag_name == 'BODY':
post_entry.content = atom.Content(
content_type='html', text=self._TranslateContents(tag_contents))
# This is the start of the COMMENT section. Create a new entry for
# the comment and add a link to the original post.
elif tag_name == 'COMMENT':
comment_entry.content = atom.Content(
content_type='html', text=self._TranslateContents(tag_contents))
comment_entry.title = atom.Title(
text=self._Encode(self._CreateSnippet(tag_contents)))
comment_entry.extension_elements.append(InReplyTo(post_entry.id.text))
feed.entry.append(comment_entry)
comment_entry = None
# Get the contents of the extended body and append it to the
# entry contents
elif tag_name == 'EXTENDED BODY':
if post_entry:
post_entry.content.text += '<br/>' + self._TranslateContents(tag_contents)
elif last_entry and last_entry.content:
last_entry.content.text += '<br/>' + self._TranslateContents(tag_contents)
# Convert any keywords (comma separated values) into Blogger labels
elif tag_name == 'KEYWORDS':
for keyword in tag_contents.split(','):
keyword = keyword.strip()
if keyword != '' and len(post_entry.category) < 20:
post_entry.category.append(
atom.Category(scheme=CATEGORY_NS, term=keyword))
# Reset the current tag and its contents
tag_name = None
tag_contents = ''
continue
# Split the line into key/value pairs
elems = line.split(':')
key = elems[0]
value = ''
if len(elems) > 1:
value = ':'.join(elems[1:]).strip()
# The author key indicates the start of a post as well as the author of
# the post entry or comment
if key == 'AUTHOR':
# Add the author's name
author_name = self._Encode(value)
if not author_name:
author_name = 'Anonymous'
if tag_name == 'COMMENT':
comment_entry.author.append(atom.Author(atom.Name(text=author_name)))
else:
post_entry.author.append(atom.Author(atom.Name(text=author_name)))
# The title only applies to new posts
elif key == 'TITLE' and tag_name != 'PING':
post_entry.title = atom.Title(text=self._Encode(value))
# If the status is a draft, mark it as so in the entry. If the status
# is 'Published' there's nothing to do here
elif key == 'STATUS':
if value == 'Draft':
post_entry.control = atom.Control(atom.Draft('yes'))
# Turn categories into labels
elif key == 'CATEGORY':
if value != '' and len(post_entry.category) < 20:
post_entry.category.append(
atom.Category(scheme=CATEGORY_NS, term=value))
# Convert the date and specify it as the published/updated time
elif key == 'DATE' and tag_name != 'PING':
time_val = self._FromMtTime(value)
entry = post_entry
if tag_name == 'COMMENT':
entry = comment_entry
entry.published = atom.Published(self._ToBlogTime(time_val))
entry.updated = atom.Updated(self._ToBlogTime(time_val))
# Check to see if this was the last post published (so far)
seconds = time.mktime(time_val)
last_updated = max(seconds, last_updated)
# Convert all tags into Blogger labels
elif key == 'TAGS':
for keyword in value.split(','):
keyword = keyword.strip()
if keyword != '' and len(post_entry.category) < 20:
post_entry.category.append(
atom.Category(scheme=CATEGORY_NS, term=keyword))
# Update the author's email if it is present and not empty
elif tag_name == 'COMMENT' and key == 'EMAIL' and len(value) > 0:
comment_entry.author[-1].email = atom.Email(text=value)
# Update the author's URI if it is present and not empty
elif tag_name == 'COMMENT' and key == 'URL' and len(value) > 0:
comment_entry.author[-1].uri = atom.Uri(text=value)
# If any of these keys are used, they contain information beyond this key
# on following lines
elif key in ('COMMENT', 'BODY', 'EXTENDED BODY', 'EXCERPT', 'KEYWORDS', 'PING'):
tag_name = key
if key == 'COMMENT':
comment_entry = self._GetNewEntry(COMMENT_KIND)
# These lines can be safely ignored
elif key in ('BASENAME', 'ALLOW COMMENTS', 'CONVERT BREAKS',
'ALLOW PINGS', 'PRIMARY CATEGORY', 'IP', 'URL', 'EMAIL'):
continue
# If the line is empty and we're processing the body, add an HTML line
# break
elif tag_name == 'BODY' and len(line) == 0:
tag_contents += '<br/>'
# This would be a line of content beyond a key/value pair
elif len(key) != 0:
tag_contents += line + '\n'
# Update the feed with the last updated time
feed.updated = atom.Updated(self._ToBlogTime(time.gmtime(last_updated)))
# Serialize the feed object
outfile.write(str(feed))
def _GetNewEntry(self, kind):
entry = gdata.GDataEntry()
entry.link.append(
atom.Link(href=DUMMY_URI, rel='self', link_type=ATOM_TYPE))
entry.link.append(
atom.Link(href=DUMMY_URI, rel='alternate', link_type=HTML_TYPE))
entry.id = atom.Id('post-' + self._GetNextId())
entry.category.append(
atom.Category(scheme=CATEGORY_KIND, term=kind))
return entry
def _GetNextId(self):
"""Returns the next entry identifier as a string."""
ret = self.next_id
self.next_id += 1
    return str(ret)
def _CreateSnippet(self, content):
"""Creates a snippet of content. The maximum size being 53 characters,
50 characters of data followed by elipses.
"""
content = re.sub('</?[^>/]+/?>', '', content)
if len(content) < 50:
return content
return content[0:49] + '...'
def _TranslateContents(self, content):
content = content.replace('\n', '<br/>')
return self._Encode(content)
def _Encode(self, content):
return content.decode('utf-8', 'replace').encode('utf-8')
def _FromMtTime(self, mt_time):
try:
return time.strptime(mt_time, "%m/%d/%Y %I:%M:%S %p")
except ValueError:
return time.gmtime()
def _ToBlogTime(self, time_tuple):
"""Converts a time struct to a Blogger time/date string."""
return time.strftime('%Y-%m-%dT%H:%M:%SZ', time_tuple)
if __name__ == '__main__':
if len(sys.argv) <= 1:
print 'Usage: %s <movabletype_export_file>' % os.path.basename(sys.argv[0])
print
print ' Outputs the converted Blogger export file to standard out.'
sys.exit(-1)
mt_file = open(sys.argv[1])
translator = MovableType2Blogger()
translator.Translate(mt_file, sys.stdout)
mt_file.close()
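# Illustrative note (not part of the original converter): Translate() above
# expects MovableType's key/value export format. A minimal, hypothetical
# fragment looks like this; five dashes end a multi-line section and eight
# dashes end a post:
#
#   AUTHOR: Jane Doe
#   TITLE: Hello world
#   DATE: 01/02/2008 10:20:30 AM
#   -----
#   BODY:
#   First paragraph of the post.
#   -----
#   --------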
|
iModels/ffci
|
refs/heads/master
|
moleculeci/models.py
|
1
|
# from django.db import models
# from django.conf import settings
#
#
# class UserProfile(models.Model):
#
# user = models.OneToOneField(settings.AUTH_USER_MODEL)
# github_name = models.CharField(max_length=255, default='github_name')
# class GithubInfo(models.Model):
#
# github_name = models.CharField(max_length=255, default='github_name')
# repo_name = models.CharField(max_length=255, default='repo_name')
# repo_path = models.CharField(max_length=255, default='repo_path')
# key = models.CharField(max_length=255, default='key')
# git_cmd = models.CharField(max_length=255, default='git_cmd')
|
poguez/datacats
|
refs/heads/master
|
datacats/cli/less.py
|
10
|
# Copyright 2014-2015 Boxkite Inc.
# This file is part of the DataCats package and is released under
# the terms of the GNU Affero General Public License version 3.0.
# See LICENSE.txt or http://www.fsf.org/licensing/licenses/agpl-3.0.html
from datacats.cli.util import require_extra_image
LESSC_IMAGE = 'datacats/lessc'
def less(environment, opts):
# pylint: disable=unused-argument
"""Recompiles less files in an environment.
Usage:
datacats less [ENVIRONMENT]
ENVIRONMENT may be an environment name or a path to an environment directory.
Default: '.'
"""
require_extra_image(LESSC_IMAGE)
print 'Converting .less files to .css...'
for log in environment.compile_less():
print log
|
shahar-stratoscale/nova
|
refs/heads/master
|
nova/tests/virt/libvirt/test_dmcrypt.py
|
21
|
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova import test
from nova import utils
from nova.virt.libvirt import dmcrypt
class LibvirtDmcryptTestCase(test.TestCase):
def setUp(self):
super(LibvirtDmcryptTestCase, self).setUp()
self.CIPHER = 'cipher'
self.KEY_SIZE = 256
self.NAME = 'disk'
self.TARGET = dmcrypt.volume_name(self.NAME)
self.PATH = '/dev/nova-lvm/instance_disk'
self.KEY = range(0, self.KEY_SIZE)
self.KEY_STR = ''.join(["%02x" % x for x in range(0, self.KEY_SIZE)])
self.executes = []
self.kwargs = {}
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
self.kwargs = kwargs
return None, None
def fake_listdir(path):
return [self.TARGET, '/dev/mapper/disk']
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os, 'listdir', fake_listdir)
def test_create_volume(self):
expected_commands = [('cryptsetup',
'create',
self.TARGET,
self.PATH,
'--cipher=' + self.CIPHER,
'--key-size=' + str(self.KEY_SIZE),
'--key-file=-')]
dmcrypt.create_volume(self.TARGET, self.PATH, self.CIPHER,
self.KEY_SIZE, self.KEY)
self.assertEqual(expected_commands, self.executes)
self.assertEqual(self.KEY_STR, self.kwargs['process_input'])
def test_delete_volume(self):
expected_commands = [('cryptsetup', 'remove', self.TARGET)]
dmcrypt.delete_volume(self.TARGET)
self.assertEqual(expected_commands, self.executes)
def test_list_volumes(self):
encrypted_volumes = dmcrypt.list_volumes()
self.assertEqual([self.TARGET], encrypted_volumes)
|
himleyb85/django
|
refs/heads/master
|
tests/template_tests/test_origin.py
|
165
|
from unittest import TestCase
from django.template import Engine
from .utils import TEMPLATE_DIR
class OriginTestCase(TestCase):
def setUp(self):
self.engine = Engine(dirs=[TEMPLATE_DIR])
def test_origin_compares_equal(self):
a = self.engine.get_template('index.html')
b = self.engine.get_template('index.html')
self.assertEqual(a.origin, b.origin)
self.assertTrue(a.origin == b.origin)
self.assertFalse(a.origin != b.origin)
def test_origin_compares_not_equal(self):
a = self.engine.get_template('first/test.html')
b = self.engine.get_template('second/test.html')
self.assertNotEqual(a.origin, b.origin)
self.assertFalse(a.origin == b.origin)
self.assertTrue(a.origin != b.origin)
|
aosagie/spark
|
refs/heads/master
|
examples/src/main/python/ml/generalized_linear_regression_example.py
|
52
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example demonstrating generalized linear regression.
Run with:
bin/spark-submit examples/src/main/python/ml/generalized_linear_regression_example.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.regression import GeneralizedLinearRegression
# $example off$
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("GeneralizedLinearRegressionExample")\
.getOrCreate()
# $example on$
# Load training data
dataset = spark.read.format("libsvm")\
.load("data/mllib/sample_linear_regression_data.txt")
glr = GeneralizedLinearRegression(family="gaussian", link="identity", maxIter=10, regParam=0.3)
# Fit the model
model = glr.fit(dataset)
# Print the coefficients and intercept for generalized linear regression model
print("Coefficients: " + str(model.coefficients))
print("Intercept: " + str(model.intercept))
# Summarize the model over the training set and print out some metrics
summary = model.summary
print("Coefficient Standard Errors: " + str(summary.coefficientStandardErrors))
print("T Values: " + str(summary.tValues))
print("P Values: " + str(summary.pValues))
print("Dispersion: " + str(summary.dispersion))
print("Null Deviance: " + str(summary.nullDeviance))
print("Residual Degree Of Freedom Null: " + str(summary.residualDegreeOfFreedomNull))
print("Deviance: " + str(summary.deviance))
print("Residual Degree Of Freedom: " + str(summary.residualDegreeOfFreedom))
print("AIC: " + str(summary.aic))
print("Deviance Residuals: ")
summary.residuals().show()
# $example off$
spark.stop()
|
atheed/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/webdriver/webdriver/transport.py
|
59
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import httplib
import json
import urlparse
HTTP_TIMEOUT = 5
class Response(object):
"""Describes an HTTP response received from a remote en"Describes an HTTP
response received from a remote end whose body has been read and parsed as
appropriate."""
def __init__(self, status, body):
self.status = status
self.body = body
def __repr__(self):
return "wdclient.Response(status=%d, body=%s)" % (self.status, self.body)
@staticmethod
def from_http_response(http_response):
status = http_response.status
body = http_response.read()
# SpecID: dfn-send-a-response
#
# > 3. Set the response's header with name and value with the following
# > values:
# >
# > "Content-Type"
# > "application/json; charset=utf-8"
# > "cache-control"
# > "no-cache"
assert http_response.getheader("Content-Type") == "application/json; charset=utf-8"
assert http_response.getheader("Cache-Control") == "no-cache"
if body:
body = json.loads(body)
# SpecID: dfn-send-a-response
#
# > 4. If data is not null, let response's body be a JSON Object
# with a key `value` set to the JSON Serialization of data.
assert "value" in body
return Response(status, body)
class HTTPWireProtocol(object):
"""Transports messages (commands and responses) over the WebDriver
wire protocol.
"""
def __init__(self, host, port, url_prefix="/", timeout=HTTP_TIMEOUT):
"""Construct interface for communicating with the remote server.
        :param host: Hostname or IP address of the remote WebDriver server.
        :param port: Port of the remote WebDriver server.
        :param url_prefix: Path prefix prepended to command URLs.
        :param timeout: HTTP connection timeout in seconds.
"""
self.host = host
self.port = port
self.url_prefix = url_prefix
self._timeout = timeout
def url(self, suffix):
        return urlparse.urljoin(self.url_prefix, suffix)
def send(self, method, url, body=None, headers=None):
"""Send a command to the remote.
:param method: "POST" or "GET".
:param url: "command part" of the requests URL path
:param body: Body of the request. Defaults to an empty dictionary
if ``method`` is "POST".
:param headers: Additional headers to include in the request.
:return: an instance of wdclient.Response describing the HTTP response
received from the remote end.
"""
if body is None and method == "POST":
body = {}
if isinstance(body, dict):
body = json.dumps(body)
if isinstance(body, unicode):
body = body.encode("utf-8")
if headers is None:
headers = {}
url = self.url_prefix + url
conn = httplib.HTTPConnection(
self.host, self.port, strict=True, timeout=self._timeout)
conn.request(method, url, body, headers)
try:
response = Response.from_http_response(conn.getresponse())
finally:
conn.close()
return response
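# Usage sketch (illustrative addition, not part of the original module): how
# the wire protocol client above is typically used. The host, port and command
# body are hypothetical; a WebDriver remote end must be listening for the
# request to succeed.
def _example_new_session():
    client = HTTPWireProtocol("localhost", 4444, url_prefix="/")
    response = client.send("POST", "session", body={"capabilities": {}})
    return response.status, response.body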
|
PepperPD/edx-pepper-platform
|
refs/heads/master
|
common/djangoapps/student/migrations/0012_auto__add_field_userprofile_gender__add_field_userprofile_date_of_birt.py
|
188
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.gender'
db.add_column('auth_userprofile', 'gender',
self.gf('django.db.models.fields.CharField')(max_length=6, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.date_of_birth'
db.add_column('auth_userprofile', 'date_of_birth',
self.gf('django.db.models.fields.DateField')(null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.mailing_address'
db.add_column('auth_userprofile', 'mailing_address',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.country'
db.add_column('auth_userprofile', 'country',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.telephone_number'
db.add_column('auth_userprofile', 'telephone_number',
self.gf('django.db.models.fields.CharField')(max_length=25, null=True, blank=True),
keep_default=False)
# Adding field 'UserProfile.occupation'
db.add_column('auth_userprofile', 'occupation',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.gender'
db.delete_column('auth_userprofile', 'gender')
# Deleting field 'UserProfile.date_of_birth'
db.delete_column('auth_userprofile', 'date_of_birth')
# Deleting field 'UserProfile.mailing_address'
db.delete_column('auth_userprofile', 'mailing_address')
# Deleting field 'UserProfile.country'
db.delete_column('auth_userprofile', 'country')
# Deleting field 'UserProfile.telephone_number'
db.delete_column('auth_userprofile', 'telephone_number')
# Deleting field 'UserProfile.occupation'
db.delete_column('auth_userprofile', 'occupation')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.courseenrollment': {
'Meta': {'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
|
atsuyim/pupy
|
refs/heads/master
|
client/sources/resources/iter_files.py
|
35
|
#!/usr/bin/env python
# -*- coding: UTF8 -*-
import marshal, zlib
modules = marshal.loads(zlib.decompress(open("library_compressed_string.txt",'rb').read()))
for f in sorted([x for x in modules.iterkeys()]):
print f
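# Illustrative helper (not part of the original script, never called here):
# the input file is assumed to be a zlib-compressed, marshal-serialized dict
# keyed by module name; a compatible file could be written like this.
def _write_compressed_library(modules, path="library_compressed_string.txt"):
    with open(path, 'wb') as out:
        out.write(zlib.compress(marshal.dumps(modules)))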
|
CTSRD-SOAAP/chromium-42.0.2311.135
|
refs/heads/master
|
tools/mac/symbolicate_crash.py
|
178
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script can take an Apple-style CrashReporter log and symbolicate it. This
is useful for when a user's reports aren't being uploaded, for example.
Only versions 6, 7, 8, and 9 reports are supported. For more information on the
file format, reference this document:
TN2123 <http://developer.apple.com/library/mac/#technotes/tn2004/tn2123.html>
Information on symbolication was gleaned from:
<http://developer.apple.com/tools/xcode/symbolizingcrashdumps.html>
"""
import optparse
import os.path
import re
import subprocess
import sys
# Maps binary image identifiers to binary names (minus the .dSYM portion) found
# in the archive. These are the only objects that will be looked up.
SYMBOL_IMAGE_MAP = {
'com.google.Chrome': 'Google Chrome.app',
'com.google.Chrome.framework': 'Google Chrome Framework.framework',
'com.google.Chrome.helper': 'Google Chrome Helper.app'
}
class CrashReport(object):
"""A parsed representation of an Apple CrashReport text file."""
def __init__(self, file_name):
super(CrashReport, self).__init__()
self.report_info = {}
self.threads = []
self._binary_images = {}
fd = open(file_name, 'r')
self._ParseHeader(fd)
# Try and get the report version. If it's not a version we handle, abort.
self.report_version = int(self.report_info['Report Version'])
# Version 6: 10.5 and 10.6 crash report
# Version 7: 10.6 spindump report
# Version 8: 10.7 spindump report
# Version 9: 10.7 crash report
valid_versions = (6, 7, 8, 9)
if self.report_version not in valid_versions:
raise Exception("Only crash reports of versions %s are accepted." %
str(valid_versions))
# If this is a spindump (version 7 or 8 report), use a special parser. The
# format is undocumented, but is similar to version 6. However, the spindump
# report contains user and kernel stacks for every process on the system.
if self.report_version == 7 or self.report_version == 8:
self._ParseSpindumpStack(fd)
else:
self._ParseStack(fd)
self._ParseBinaryImages(fd)
fd.close()
def Symbolicate(self, symbol_path):
"""Symbolicates a crash report stack trace."""
# In order to be efficient, collect all the offsets that will be passed to
# atos by the image name.
offsets_by_image = self._CollectAddressesForImages(SYMBOL_IMAGE_MAP.keys())
# For each image, run atos with the list of addresses.
for image_name, addresses in offsets_by_image.items():
# If this image was not loaded or is in no stacks, skip.
if image_name not in self._binary_images or not len(addresses):
continue
# Combine the |image_name| and |symbol_path| into the path of the dSYM.
dsym_file = self._GetDSymPath(symbol_path, image_name)
# From the list of 2-Tuples of (frame, address), create a list of just
# addresses.
address_list = map(lambda x: x[1], addresses)
# Look up the load address of the image.
binary_base = self._binary_images[image_name][0]
# This returns a list of just symbols. The indices will match up with the
# list of |addresses|.
symbol_names = self._RunAtos(binary_base, dsym_file, address_list)
if not symbol_names:
print 'Error loading symbols for ' + image_name
continue
# Attaches a list of symbol names to stack frames. This assumes that the
# order of |addresses| has stayed the same as |symbol_names|.
self._AddSymbolsToFrames(symbol_names, addresses)
def _ParseHeader(self, fd):
"""Parses the header section of a crash report, which contains the OS and
application version information."""
# The header is made up of different sections, depending on the type of
# report and the report version. Almost all have a format of a key and
# value separated by a colon. Accumulate all of these artifacts into a
# dictionary until the first thread stack is reached.
thread_re = re.compile('^[ \t]*Thread ([a-f0-9]+)')
line = ''
while not thread_re.match(line):
# Skip blank lines. There are typically three or four sections separated
# by newlines in the header.
line = line.strip()
if line:
parts = line.split(':', 1)
# Certain lines in different report versions don't follow the key-value
# format, so skip them.
if len(parts) == 2:
# There's a varying amount of space padding after the ':' to align all
# the values; strip that.
self.report_info[parts[0]] = parts[1].lstrip()
line = fd.readline()
# When this loop exits, the header has been read in full. However, the first
# thread stack heading has been read past. Seek backwards from the current
# position by the length of the line so that it is re-read when
# _ParseStack() is entered.
fd.seek(-len(line), os.SEEK_CUR)
def _ParseStack(self, fd):
"""Parses the stack dump of a crash report and creates a list of threads
and their stack traces."""
# Compile a regex that matches the start of a thread stack. Note that this
# must be specific to not include the thread state section, which comes
# right after all the stack traces.
line_re = re.compile('^Thread ([0-9]+)( Crashed)?:(.*)')
# On entry into this function, the fd has been walked up to the "Thread 0"
# line.
line = fd.readline().rstrip()
in_stack = False
thread = None
while line_re.match(line) or in_stack:
# Check for start of the thread stack.
matches = line_re.match(line)
if not line.strip():
# A blank line indicates a break in the thread stack.
in_stack = False
elif matches:
# If this is the start of a thread stack, create the CrashThread.
in_stack = True
thread = CrashThread(matches.group(1))
thread.name = matches.group(3)
thread.did_crash = matches.group(2) != None
self.threads.append(thread)
else:
# All other lines are stack frames.
thread.stack.append(self._ParseStackFrame(line))
# Read the next line.
line = fd.readline()
def _ParseStackFrame(self, line):
"""Takes in a single line of text and transforms it into a StackFrame."""
frame = StackFrame(line)
# A stack frame is in the format of:
# |<frame-number> <binary-image> 0x<address> <symbol> <offset>|.
regex = '^([0-9]+) +(.+)[ \t]+(0x[0-9a-f]+) (.*) \+ ([0-9]+)$'
matches = re.match(regex, line)
if matches is None:
return frame
# Create a stack frame with the information extracted from the regex.
frame.frame_id = matches.group(1)
frame.image = matches.group(2)
frame.address = int(matches.group(3), 0) # Convert HEX to an int.
frame.original_symbol = matches.group(4)
frame.offset = matches.group(5)
frame.line = None
return frame
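  # Illustrative note (not part of the original script): a report line such as
  #   3   com.google.Chrome.framework   0x0000000100345a2f Foo::Bar() + 42
  # is split by the regex above into the frame number, binary image, hex
  # address, original symbol and offset fields assigned to the frame.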
def _ParseSpindumpStack(self, fd):
"""Parses a spindump stack report. In this format, each thread stack has
both a user and kernel trace. Only the user traces are symbolicated."""
# The stack trace begins with the thread header, which is identified by a
# HEX number. The thread names appear to be incorrect in spindumps.
user_thread_re = re.compile('^ Thread ([0-9a-fx]+)')
# When this method is called, the fd has been walked right up to the first
# line.
line = fd.readline()
in_user_stack = False
in_kernel_stack = False
thread = None
frame_id = 0
while user_thread_re.match(line) or in_user_stack or in_kernel_stack:
# Check for the start of a thread.
matches = user_thread_re.match(line)
if not line.strip():
# A blank line indicates the start of a new thread. The blank line comes
# after the kernel stack before a new thread header.
in_kernel_stack = False
elif matches:
# This is the start of a thread header. The next line is the heading for
# the user stack, followed by the actual trace.
thread = CrashThread(matches.group(1))
frame_id = 0
self.threads.append(thread)
in_user_stack = True
line = fd.readline() # Read past the 'User stack:' header.
elif line.startswith(' Kernel stack:'):
# The kernel stack header comes immediately after the last frame (really
# the top frame) in the user stack, without a blank line.
in_user_stack = False
in_kernel_stack = True
elif in_user_stack:
# If this is a line while in the user stack, parse it as a stack frame.
thread.stack.append(self._ParseSpindumpStackFrame(line))
# Loop with the next line.
line = fd.readline()
# When the loop exits, the file has been read through the 'Binary images:'
# header. Seek backwards so that _ParseBinaryImages() does the right thing.
fd.seek(-len(line), os.SEEK_CUR)
def _ParseSpindumpStackFrame(self, line):
"""Parses a spindump-style stackframe."""
frame = StackFrame(line)
# The format of the frame is either:
# A: |<space><steps> <symbol> + <offset> (in <image-name>) [<address>]|
# B: |<space><steps> ??? (in <image-name> + <offset>) [<address>]|
regex_a = '^([ ]+[0-9]+) (.*) \+ ([0-9]+) \(in (.*)\) \[(0x[0-9a-f]+)\]'
regex_b = '^([ ]+[0-9]+) \?\?\?( \(in (.*) \+ ([0-9]+)\))? \[(0x[0-9a-f]+)\]'
# Create the stack frame with the information extracted from the regex.
matches = re.match(regex_a, line)
if matches:
frame.frame_id = matches.group(1)[4:] # Remove some leading spaces.
frame.original_symbol = matches.group(2)
frame.offset = matches.group(3)
frame.image = matches.group(4)
frame.address = int(matches.group(5), 0)
frame.line = None
return frame
# If pattern A didn't match (which it will most of the time), try B.
matches = re.match(regex_b, line)
if matches:
frame.frame_id = matches.group(1)[4:] # Remove some leading spaces.
frame.image = matches.group(3)
frame.offset = matches.group(4)
frame.address = int(matches.group(5), 0)
frame.line = None
return frame
# Otherwise, this frame could not be matched and just use the raw input.
frame.line = frame.line.strip()
return frame
def _ParseBinaryImages(self, fd):
"""Parses out the binary images section in order to get the load offset."""
# The parser skips some sections, so advance until the "Binary Images"
# header is reached.
while not fd.readline().lstrip().startswith("Binary Images:"): pass
# Create a regex to match the lines of format:
# |0x<start> - 0x<end> <binary-image> <version> (<version>) <<UUID>> <path>|
image_re = re.compile(
'[ ]*(0x[0-9a-f]+) -[ \t]+(0x[0-9a-f]+) [+ ]([a-zA-Z0-9._\-]+)')
# This section is in this format:
# |<start address> - <end address> <image name>|.
while True:
line = fd.readline()
if not line.strip():
# End when a blank line is hit.
return
# Match the line to the regex.
match = image_re.match(line)
if match:
# Store the offsets by image name so it can be referenced during
# symbolication. These are hex numbers with leading '0x', so int() can
# convert them to decimal if base=0.
address_range = (int(match.group(1), 0), int(match.group(2), 0))
self._binary_images[match.group(3)] = address_range
def _CollectAddressesForImages(self, images):
"""Iterates all the threads and stack frames and all the stack frames that
are in a list of binary |images|. The result is a dictionary, keyed by the
image name that maps to a list of tuples. Each is a 2-Tuple of
(stack_frame, address)"""
# Create the collection and initialize it with empty lists for each image.
collection = {}
for image in images:
collection[image] = []
# Perform the iteration.
for thread in self.threads:
for frame in thread.stack:
image_name = self._ImageForAddress(frame.address)
if image_name in images:
# Replace the image name in the frame in case it was elided.
frame.image = image_name
collection[frame.image].append((frame, frame.address))
# Return the result.
return collection
def _ImageForAddress(self, address):
"""Given a PC address, returns the bundle identifier of the image in which
the address resides."""
for image_name, address_range in self._binary_images.items():
if address >= address_range[0] and address <= address_range[1]:
return image_name
return None
def _GetDSymPath(self, base_path, image_name):
"""Takes a base path for the symbols and an image name. It looks the name up
in SYMBOL_IMAGE_MAP and creates a full path to the dSYM in the bundle."""
image_file = SYMBOL_IMAGE_MAP[image_name]
return os.path.join(base_path, image_file + '.dSYM', 'Contents',
'Resources', 'DWARF',
os.path.splitext(image_file)[0]) # Chop off the extension.
def _RunAtos(self, load_address, dsym_file, addresses):
"""Runs the atos with the provided arguments. |addresses| is used as stdin.
Returns a list of symbol information in the same order as |addresses|."""
args = ['atos', '-l', str(load_address), '-o', dsym_file]
# Get the arch type. This is of the format |X86 (Native)|.
if 'Code Type' in self.report_info:
arch = self.report_info['Code Type'].lower().split(' ')
if len(arch) == 2:
arch = arch[0]
if arch == 'x86':
# The crash report refers to i386 as x86, but atos doesn't know what
# that is.
arch = 'i386'
args.extend(['-arch', arch])
proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
addresses = map(hex, addresses)
(stdout, stderr) = proc.communicate(' '.join(addresses))
if proc.returncode:
return None
return stdout.rstrip().split('\n')
def _AddSymbolsToFrames(self, symbols, address_tuples):
"""Takes a single value (the list) from _CollectAddressesForImages and does
a smart-zip with the data returned by atos in |symbols|. Note that the
indices must match for this to succeed."""
if len(symbols) != len(address_tuples):
print 'symbols do not match'
# Each line of output from atos is in this format:
# |<symbol> (in <image>) (<file>:<line>)|.
line_regex = re.compile('(.+) \(in (.+)\) (\((.+):([0-9]+)\))?')
# Zip the two data sets together.
for i in range(len(symbols)):
symbol_parts = line_regex.match(symbols[i])
if not symbol_parts:
continue # Error.
frame = address_tuples[i][0]
frame.symbol = symbol_parts.group(1)
frame.image = symbol_parts.group(2)
frame.file_name = symbol_parts.group(4)
frame.line_number = symbol_parts.group(5)
class CrashThread(object):
"""A CrashThread represents a stacktrace of a single thread """
def __init__(self, thread_id):
super(CrashThread, self).__init__()
self.thread_id = thread_id
self.name = None
self.did_crash = False
self.stack = []
def __repr__(self):
name = ''
if self.name:
name = ': ' + self.name
return 'Thread ' + self.thread_id + name + '\n' + \
'\n'.join(map(str, self.stack))
class StackFrame(object):
"""A StackFrame is owned by a CrashThread."""
def __init__(self, line):
super(StackFrame, self).__init__()
    # The original line. This will be set to None if symbolication is
    # successful.
self.line = line
self.frame_id = 0
self.image = None
self.address = 0x0
self.original_symbol = None
self.offset = 0x0
# The following members are set after symbolication.
self.symbol = None
self.file_name = None
self.line_number = 0
def __repr__(self):
# If symbolication failed, just use the original line.
if self.line:
return ' %s' % self.line
# Use different location information depending on symbolicated data.
location = None
if self.file_name:
location = ' - %s:%s' % (self.file_name, self.line_number)
else:
location = ' + %s' % self.offset
# Same with the symbol information.
symbol = self.original_symbol
if self.symbol:
symbol = self.symbol
return ' %s\t0x%x\t[%s\t%s]\t%s' % (self.frame_id, self.address,
self.image, location, symbol)
def PrettyPrintReport(report):
"""Takes a crash report and prints it like the crash server would."""
print 'Process : ' + report.report_info['Process']
print 'Version : ' + report.report_info['Version']
print 'Date : ' + report.report_info['Date/Time']
print 'OS Version : ' + report.report_info['OS Version']
print
if 'Crashed Thread' in report.report_info:
print 'Crashed Thread : ' + report.report_info['Crashed Thread']
print
if 'Event' in report.report_info:
print 'Event : ' + report.report_info['Event']
print
for thread in report.threads:
print
if thread.did_crash:
exc_type = report.report_info['Exception Type'].split(' ')[0]
exc_code = report.report_info['Exception Codes'].replace('at', '@')
print '*CRASHED* ( ' + exc_type + ' / ' + exc_code + ' )'
# Version 7 reports have spindump-style output (with a stepped stack trace),
# so remove the first tab to get better alignment.
if report.report_version == 7:
for line in repr(thread).split('\n'):
print line.replace('\t', ' ', 1)
else:
print thread
def Main(args):
"""Program main."""
parser = optparse.OptionParser(
usage='%prog [options] symbol_path crash_report',
description='This will parse and symbolicate an Apple CrashReporter v6-9 '
'file.')
parser.add_option('-s', '--std-path', action='store_true', dest='std_path',
help='With this flag, the symbol_path is a containing '
                         'directory, in which dSYM files are stored in a '
'directory named by the version. Example: '
'[symbolicate_crash.py -s ./symbols/ report.crash] will '
'look for dSYMs in ./symbols/15.0.666.0/ if the report is '
                         'from that version.')
(options, args) = parser.parse_args(args[1:])
# Check that we have something to symbolicate.
if len(args) != 2:
parser.print_usage()
return 1
report = CrashReport(args[1])
symbol_path = None
# If not using the standard layout, this is a full path to the symbols.
if not options.std_path:
symbol_path = args[0]
# Otherwise, use the report version to locate symbols in a directory.
else:
# This is in the format of |M.N.B.P (B.P)|. Get just the part before the
# space.
chrome_version = report.report_info['Version'].split(' ')[0]
symbol_path = os.path.join(args[0], chrome_version)
# Check that the symbols exist.
if not os.path.isdir(symbol_path):
print >>sys.stderr, 'Symbol path %s is not a directory' % symbol_path
return 2
print >>sys.stderr, 'Using symbols from ' + symbol_path
print >>sys.stderr, '=' * 80
report.Symbolicate(symbol_path)
PrettyPrintReport(report)
return 0
if __name__ == '__main__':
sys.exit(Main(sys.argv))
|
rob-smallshire/pycpt
|
refs/heads/master
|
pycpt/colors.py
|
1
|
__author__ = 'rjs'
from collections import namedtuple
HSVColor = namedtuple('HSVColor', ['hue', 'saturation', 'value'])
RGBColor = namedtuple('RGBColor', ['red', 'green', 'blue'])
CMYKColor = namedtuple('CMYKColor', ['cyan', 'magenta', 'yellow', 'black'])
|