repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
shelbycruver/real-python-test | refs/heads/master | env/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py | 1727 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
    """Token-stream filter that drops start/end tags the HTML spec marks
    as optional (e.g. ``</li>`` before another ``<li>``).

    Iterate over an instance to obtain the shortened token stream.
    """

    def slider(self):
        """Yield ``(previous, current, next)`` triples over ``self.source``.

        ``previous`` is ``None`` for the first token and ``next`` is ``None``
        for the last, so the lookbehind/lookahead checks below can detect the
        stream boundaries.
        """
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        yield previous2, previous1, None

    def __iter__(self):
        for previous, token, next in self.slider():
            type = token["type"]
            if type == "StartTag":
                # A start tag carrying attributes can never be omitted; an
                # attribute-free one is dropped when the spec allows it here.
                if (token["data"] or
                        not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token

    def is_optional_start(self, tagname, previous, next):
        """Return True if *tagname*'s start tag may be omitted at this point.

        *previous*/*next* are the adjacent tokens from :meth:`slider`
        (either may be ``None`` at the stream boundaries).
        """
        type = next and next["type"] or None
        # BUG FIX: this used to read ``tagname in 'html'``, which is a
        # substring test (true for 'h', 'tm', ...); equality states the
        # intent.  No real element name is a substring of "html", so
        # observable behavior is unchanged.
        if tagname == 'html':
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if type in ("StartTag", "EmptyTag"):
                return True
            elif type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceeded by another colgroup element whose
            # end tag has been omitted.
            if type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceeded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if type == "StartTag":
                # omit the thead and tfoot elements' end tag when they are
                # immediately followed by a tbody element. See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                        previous['name'] in ('tbody', 'thead', 'tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False

    def is_optional_end(self, tagname, next):
        """Return True if *tagname*'s end tag may be omitted given the
        following token *next* (``None`` at end of stream)."""
        type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li element's end tag may be omitted if the li element is
            # immediately followed by another li element or if there is
            # no more content in the parent element.
            # An optgroup element's end tag may be omitted if the optgroup
            # element is immediately followed by another optgroup element,
            # or if there is no more content in the parent element.
            # A tr element's end tag may be omitted if the tr element is
            # immediately followed by another tr element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] == tagname
            else:
                return type == "EndTag" or type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by an address, article, aside,
            # blockquote, datagrid, dialog, dir, div, dl, fieldset,
            # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
            # nav, ol, p, pre, section, table, or ul, element, or if
            # there is no more content in the parent element.
            if type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element,
            # or if it is immediately followed by an <code>optgroup</code>
            # element, or if there is no more content in the parent
            # element.
            if type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return type == "EndTag" or type is None
        elif tagname in ('rt', 'rp'):
            # An rt element's end tag may be omitted if the rt element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            # An rp element's end tag may be omitted if the rp element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return type == "EndTag" or type is None
        elif tagname in ('td', 'th'):
            # A td element's end tag may be omitted if the td element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            # A th element's end tag may be omitted if the th element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return type == "EndTag" or type is None
        return False
|
hgomes88/ardupilot | refs/heads/master | Tools/autotest/apm_unit_tests/dev/arducopter_RTL.py | 250 | import arducopter
def unit_test(mavproxy, mav):
    '''A scripted flight plan'''
    # Run the mission as a sequence of guard clauses: each stage must
    # succeed before the next is attempted (e.g. never take off without
    # having armed), mirroring the short-circuit `and` chain this replaces.
    if not arducopter.calibrate_level(mavproxy, mav):
        return False
    if not arducopter.arm_motors(mavproxy, mav):
        return False
    if not arducopter.takeoff(mavproxy, mav, alt_min=80, takeoff_throttle=1510):
        return False
    if not arducopter.hover(mavproxy, mav, hover_throttle=1300):
        return False
    if not arducopter.fly_RTL(mavproxy, mav, side=80, timeout=80):
        return False
    return True
|
gerv/bedrock | refs/heads/master | bedrock/security/urls.py | 5 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf.urls import url
from bedrock.mozorg.util import page
from bedrock.security.views import (
AdvisoriesView,
AdvisoryView,
KVRedirectsView,
OldAdvisoriesListView,
OldAdvisoriesView,
ProductView,
ProductVersionView,
)
# Route table for the /security/ section.  Order matters: literal
# template-backed pages are listed before the catch-all regex views that
# could otherwise shadow them.
urlpatterns = (
    # Static, template-only pages.
    page('', 'security/index.html'),
    page('bug-bounty', 'security/bug-bounty.html'),
    page('client-bug-bounty', 'security/client-bug-bounty.html'),
    page('web-bug-bounty', 'security/web-bug-bounty.html'),
    page('bug-bounty/faq', 'security/bug-bounty/faq.html'),
    page('bug-bounty/faq-webapp', 'security/bug-bounty/faq-webapp.html'),
    page('bug-bounty/hall-of-fame', 'security/bug-bounty/hall-of-fame.html'),
    page('bug-bounty/web-eligible-sites', 'security/bug-bounty/web-eligible-sites.html'),
    page('bug-bounty/web-hall-of-fame', 'security/bug-bounty/web-hall-of-fame.html'),
    url(r'^advisories/$',
        AdvisoriesView.as_view(), name='security.advisories'),
    # Single advisory by MFSA id, e.g. /advisories/mfsa2015-01/
    url(r'^advisories/mfsa(?P<pk>\d{4}-\d{2,3})/$',
        AdvisoryView.as_view(), name='security.advisory'),
    page('known-vulnerabilities', 'security/known-vulnerabilities.html'),
    page('known-vulnerabilities/older-vulnerabilities', 'security/older-vulnerabilities.html'),
    # Product slug (lowercase + hyphens only), e.g. /known-vulnerabilities/firefox/
    url(r'^known-vulnerabilities/(?P<slug>[a-z-]+)/$',
        ProductView.as_view(), name='security.product-advisories'),
    # Product plus dotted version, e.g. /known-vulnerabilities/firefox-38.0/
    url(r'^known-vulnerabilities/(?P<product>[\w-]+)-(?P<version>\d{1,3}(\.\d{1,3})?)/$',
        ProductVersionView.as_view(), name='security.product-version-advisories'),
    # Legacy .html URLs handled by redirect views.
    url(r'^known-vulnerabilities/(?P<filename>.*)\.html$', KVRedirectsView.as_view()),
    url(r'^(?:announce|advisories)(?:/.*)?/mfsa(?P<pk>\d{4}-\d{2,3})\.html$',
        OldAdvisoriesView.as_view()),
    url(r'^announce/$', OldAdvisoriesListView.as_view()),
)
|
gustavofonseca/packtools | refs/heads/master | tests/test_schematron_1_9.py | 2 | # coding: utf-8
from __future__ import unicode_literals
import unittest
import io
from lxml import isoschematron, etree
from packtools.catalogs import SCHEMAS
SCH = etree.parse(SCHEMAS['sps-1.9'])
def TestPhase(phase_name, cache):
    """Factory of parsed Schematron phases.

    :param phase_name: the phase name
    :param cache: mapping type
    """
    # EAFP: reuse a previously parsed phase when available, otherwise
    # parse it once and memoize it in the caller-supplied mapping.
    try:
        return cache[phase_name]
    except KeyError:
        parsed_phase = isoschematron.Schematron(SCH, phase=phase_name)
        cache[phase_name] = parsed_phase
        return parsed_phase
class PhaseBasedTestCase(unittest.TestCase):
    """Base class for Schematron-phase test cases.

    Subclasses define ``sch_phase`` naming the phase under test.
    """
    # Class attribute shared by every subclass, so each phase is parsed
    # at most once per test run.
    cache = {}

    def _run_validation(self, sample):
        """Validate *sample* (a file-like XML stream) against ``sch_phase``."""
        phase = TestPhase(self.sch_phase, self.cache)
        document = etree.parse(sample)
        return phase.validate(document)
class JournalIdTests(PhaseBasedTestCase):
    """Tests for article/front/journal-meta/journal-id elements.

    Ticket #14 makes @journal-id-type="publisher-id" mandatory.
    Ref: https://github.com/scieloorg/scielo_publishing_schema/issues/14
    """
    sch_phase = 'phase.journal-id'

    def test_case1(self):
        """
        presence(@nlm-ta) is True
        presence(@publisher-id) is True
        """
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <journal-id journal-id-type="nlm-ta">
                            Rev Saude Publica
                          </journal-id>
                          <journal-id journal-id-type="publisher-id">
                            RSP
                          </journal-id>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_case2(self):
        """
        presence(@nlm-ta) is True
        presence(@publisher-id) is False
        """
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <journal-id journal-id-type="nlm-ta">
                            Rev Saude Publica
                          </journal-id>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_case3(self):
        """
        presence(@nlm-ta) is False
        presence(@publisher-id) is True
        """
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <journal-id journal-id-type="publisher-id">
                            RSP
                          </journal-id>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_case4(self):
        """
        presence(@nlm-ta) is False
        presence(@publisher-id) is False
        """
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <journal-id journal-id-type='doi'>
                            123.plin
                          </journal-id>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_publisher_id_cannot_be_empty(self):
        """A publisher-id journal-id with no text content must be rejected."""
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <journal-id journal-id-type="publisher-id"></journal-id>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class JournalTitleGroupTests(PhaseBasedTestCase):
    """Tests for article/front/journal-meta/journal-title-group elements.

    Both journal-title and abbrev-journal-title[@abbrev-type='publisher']
    must be present and non-empty for the phase to pass.
    """
    sch_phase = 'phase.journal-title-group'

    def test_journal_title_group_is_absent(self):
        sample = u"""<article>
                      <front>
                        <journal-meta>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_case1(self):
        """
        A: presence(journal-title) is True
        B: presence(abbrev-journal-title[@abbrev-type='publisher']) is True
        A ^ B is True
        """
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <journal-title-group>
                            <journal-title>
                              Revista de Saude Publica
                            </journal-title>
                            <abbrev-journal-title abbrev-type='publisher'>
                              Rev. Saude Publica
                            </abbrev-journal-title>
                          </journal-title-group>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_case2(self):
        """
        A: presence(journal-title) is True
        B: presence(abbrev-journal-title[@abbrev-type='publisher']) is False
        A ^ B is False
        """
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <journal-title-group>
                            <journal-title>
                              Revista de Saude Publica
                            </journal-title>
                          </journal-title-group>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_case3(self):
        """
        A: presence(journal-title) is False
        B: presence(abbrev-journal-title[@abbrev-type='publisher']) is True
        A ^ B is False
        """
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <journal-title-group>
                            <abbrev-journal-title abbrev-type='publisher'>
                              Rev. Saude Publica
                            </abbrev-journal-title>
                          </journal-title-group>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_case4(self):
        """
        A: presence(journal-title) is False
        B: presence(abbrev-journal-title[@abbrev-type='publisher']) is False
        A ^ B is False
        """
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <journal-title-group>
                          </journal-title-group>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_empty_journal_title(self):
        # Presence alone is not enough: journal-title must carry text.
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <journal-title-group>
                            <journal-title></journal-title>
                            <abbrev-journal-title abbrev-type='publisher'>Rev. Saude Publica</abbrev-journal-title>
                          </journal-title-group>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_empty_abbrev_journal_title(self):
        # The abbreviated title must also be non-empty.
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <journal-title-group>
                            <journal-title>Revista de Saude Publica</journal-title>
                            <abbrev-journal-title abbrev-type='publisher'></abbrev-journal-title>
                          </journal-title-group>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class PublisherTests(PhaseBasedTestCase):
    """Tests for article/front/journal-meta/publisher elements.

    The publisher-name element must exist and carry text.
    """
    sch_phase = 'phase.publisher'

    def _validate_snippet(self, snippet):
        # Common encode-and-validate step shared by every test below.
        return self._run_validation(io.BytesIO(snippet.encode('utf-8')))

    def test_publisher_is_present(self):
        snippet = u"""<article>
                      <front>
                        <journal-meta>
                          <publisher>
                            <publisher-name>British Medical Journal</publisher-name>
                          </publisher>
                        </journal-meta>
                      </front>
                    </article>
                 """
        self.assertTrue(self._validate_snippet(snippet))

    def test_publisher_is_absent(self):
        snippet = u"""<article>
                      <front>
                        <journal-meta>
                        </journal-meta>
                      </front>
                    </article>
                 """
        self.assertFalse(self._validate_snippet(snippet))

    def test_publisher_is_empty(self):
        snippet = u"""<article>
                      <front>
                        <journal-meta>
                          <publisher>
                            <publisher-name></publisher-name>
                          </publisher>
                        </journal-meta>
                      </front>
                    </article>
                 """
        self.assertFalse(self._validate_snippet(snippet))
class ArticleCategoriesTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/article-categories elements.

    The article-categories container must be present under article-meta.
    """
    sch_phase = 'phase.article-categories'

    def _validate_snippet(self, snippet):
        # Common encode-and-validate step shared by the tests below.
        return self._run_validation(io.BytesIO(snippet.encode('utf-8')))

    def test_article_categories_is_present(self):
        snippet = u"""<article>
                      <front>
                        <article-meta>
                          <article-categories>
                            <subj-group>
                              <subject>ISO/TC 908</subject>
                              <subject>
                                SC 2, Measurement and evaluation of...
                              </subject>
                            </subj-group>
                          </article-categories>
                        </article-meta>
                      </front>
                    </article>
                 """
        self.assertTrue(self._validate_snippet(snippet))

    def test_article_categories_is_absent(self):
        snippet = u"""<article>
                      <front>
                        <article-meta>
                        </article-meta>
                      </front>
                    </article>
                 """
        self.assertFalse(self._validate_snippet(snippet))
class fpage_OR_elocationTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/fpage or elocation-id elements.

    At least one of fpage / elocation-id must be present and non-empty.
    """
    sch_phase = 'phase.fpage_or_elocation-id'

    def test_case1(self):
        """
        fpage is True
        elocation-id is True
        fpage v elocation-id is True
        """
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <fpage>01</fpage>
                          <elocation-id>E27</elocation-id>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_case2(self):
        """
        fpage is True
        elocation-id is False
        fpage v elocation-id is True
        """
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <fpage>01</fpage>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_case3(self):
        """
        fpage is False
        elocation-id is True
        fpage v elocation-id is True
        """
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <elocation-id>E27</elocation-id>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_case4(self):
        """
        fpage is False
        elocation-id is False
        fpage v elocation-id is False
        """
        sample = u"""<article>
                      <front>
                        <article-meta>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_empty_fpage(self):
        # An empty fpage does not satisfy the requirement.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <fpage></fpage>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_empty_elocationid(self):
        # An empty elocation-id does not satisfy the requirement.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <elocation-id></elocation-id>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class ISSNTests(PhaseBasedTestCase):
    """Tests for article/front/journal-meta/issn elements.

    At least one issn with @pub-type of 'epub' or 'ppub' must be present
    and non-empty.
    """
    sch_phase = 'phase.issn'

    def test_case1(self):
        """
        A: @pub-type='epub' is True
        B: @pub-type='ppub' is True
        A v B is True
        """
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <issn pub-type="epub">
                            0959-8138
                          </issn>
                          <issn pub-type="ppub">
                            0959-813X
                          </issn>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_case2(self):
        """
        A: @pub-type='epub' is True
        B: @pub-type='ppub' is False
        A v B is True
        """
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <issn pub-type="epub">
                            0959-8138
                          </issn>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_case3(self):
        """
        A: @pub-type='epub' is False
        B: @pub-type='ppub' is True
        A v B is True
        """
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <issn pub-type="ppub">
                            0959-813X
                          </issn>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_case4(self):
        """
        A: @pub-type='epub' is False
        B: @pub-type='ppub' is False
        A v B is False
        """
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <issn>
                            0959-813X
                          </issn>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_empty_issn(self):
        # A typed but empty issn must be rejected.
        sample = u"""<article>
                      <front>
                        <journal-meta>
                          <issn pub-type="epub"></issn>
                        </journal-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class ArticleIdTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/article-id elements.

    Valid @pub-id-type values are 'doi', 'publisher-id' and 'other';
    the same restriction applies inside translation sub-articles.
    """
    sch_phase = 'phase.article-id'

    def test_article_id_is_absent(self):
        sample = u"""<article>
                      <front>
                        <article-meta>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_pub_id_type_doi_is_absent(self):
        # A doi-typed id is not mandatory as long as some valid id exists.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <article-id pub-id-type='other'>
                            10.1590/1414-431X20143435
                          </article-id>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_pub_id_type_doi(self):
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <article-id pub-id-type='doi'>
                            10.1590/1414-431X20143434
                          </article-id>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_pub_id_type_doi_is_empty(self):
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <article-id pub-id-type='doi'/>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_invalid_pub_id_type(self):
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <article-id pub-id-type='unknown'>
                            10.1590/1414-431X20143434
                          </article-id>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_invalid_pub_id_type_case2(self):
        # One invalid typed id fails the document even when a valid doi
        # id is also present.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <article-id pub-id-type='unknown'>
                            10.1590/1414-431X20143434
                          </article-id>
                          <article-id pub-id-type='doi'>
                            10.1590/1414-431X20143434
                          </article-id>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_valid_pub_id_type_values(self):
        # Exercise every accepted @pub-id-type value.
        for typ in ['doi', 'publisher-id', 'other']:
            sample = u"""<article>
                          <front>
                            <article-meta>
                              <article-id pub-id-type='%s'>
                                10.1590/1414-431X20143433
                              </article-id>
                              <article-id pub-id-type='doi'>
                                10.1590/1414-431X20143434
                              </article-id>
                            </article-meta>
                          </front>
                        </article>
                     """ % typ
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_valid_pub_id_type_values_on_translations(self):
        # The same valid values are accepted inside a translation
        # sub-article's front-stub.
        for typ in ['doi', 'publisher-id', 'other']:
            sample = u"""<article>
                          <sub-article article-type='translation'>
                            <front-stub>
                              <article-id pub-id-type="%s">pLjk3by</article-id>
                            </front-stub>
                          </sub-article>
                        </article>
                     """ % typ
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_invalid_pub_id_type_values_on_translations(self):
        sample = u"""<article>
                      <sub-article article-type='translation'>
                        <front-stub>
                          <article-id pub-id-type="invalid">pLjk3by</article-id>
                        </front-stub>
                      </sub-article>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class SubjGroupTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/article-categories/subj-group elements.

    Exactly one subj-group with @subj-group-type="heading" is required per
    article (and per translation sub-article's front-stub); it may appear
    at any nesting depth.
    """
    sch_phase = 'phase.subj-group'

    def test_subj_group_is_absent(self):
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <article-categories>
                          </article-categories>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_without_heading_type(self):
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <article-categories>
                            <subj-group subj-group-type="kwd">
                              <subject content-type="neurosci">
                                Cellular and Molecular Biology
                              </subject>
                              <subj-group>
                                <subject content-type="neurosci">
                                  Blood and brain barrier
                                </subject>
                              </subj-group>
                            </subj-group>
                          </article-categories>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_with_heading_type(self):
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <article-categories>
                            <subj-group subj-group-type="heading">
                              <subject>
                                Cellular and Molecular Biology
                              </subject>
                              <subj-group>
                                <subject content-type="neurosci">
                                  Blood and brain barrier
                                </subject>
                              </subj-group>
                            </subj-group>
                          </article-categories>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_with_heading_in_subarticle_pt(self):
        # A translation sub-article may carry its own single heading group.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <article-categories>
                            <subj-group subj-group-type="heading">
                              <subject>
                                Original Article
                              </subject>
                              <subj-group>
                                <subject content-type="neurosci">
                                  Blood and brain barrier
                                </subject>
                              </subj-group>
                            </subj-group>
                          </article-categories>
                        </article-meta>
                      </front>
                      <sub-article xml:lang="pt" article-type="translation" id="S01">
                        <front-stub>
                          <article-categories>
                            <subj-group subj-group-type="heading">
                              <subject>Artigos Originais</subject>
                            </subj-group>
                          </article-categories>
                        </front-stub>
                      </sub-article>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_with_many_heading_in_subarticle_pt(self):
        # More than one heading group in a sub-article is rejected.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <article-categories>
                            <subj-group subj-group-type="heading">
                              <subject>
                                Original Article
                              </subject>
                              <subj-group>
                                <subject content-type="neurosci">
                                  Blood and brain barrier
                                </subject>
                              </subj-group>
                            </subj-group>
                          </article-categories>
                        </article-meta>
                      </front>
                      <sub-article xml:lang="pt" article-type="translation" id="S01">
                        <front-stub>
                          <article-categories>
                            <subj-group subj-group-type="heading">
                              <subject>Artigos Originais</subject>
                            </subj-group>
                            <subj-group subj-group-type="heading">
                              <subject>Artigos Piratas</subject>
                            </subj-group>
                          </article-categories>
                        </front-stub>
                      </sub-article>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_with_heading_type_in_the_deep(self):
        # The heading group is accepted even when nested inside another
        # subj-group.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <article-categories>
                            <subj-group>
                              <subject>
                                Cellular and Molecular Biology
                              </subject>
                              <subj-group subj-group-type="heading">
                                <subject>
                                  Blood and brain barrier
                                </subject>
                              </subj-group>
                            </subj-group>
                          </article-categories>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_with_many_heading_type(self):
        # Two sibling heading groups in the main article are rejected.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <article-categories>
                            <subj-group subj-group-type="heading">
                              <subject>
                                Cellular and Molecular Biology
                              </subject>
                            </subj-group>
                            <subj-group subj-group-type="heading">
                              <subject>
                                Blood and brain barrier
                              </subject>
                            </subj-group>
                          </article-categories>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class AbstractLangTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/abstract elements.

    The main abstract must not carry @xml:lang (the article language is
    assumed); research/review articles must have an abstract or a
    trans-abstract; trans-abstract @abstract-type is restricted.
    """
    sch_phase = 'phase.abstract_lang'

    def test_is_present(self):
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <abstract>
                            <p>Differing socioeconomic positions in...</p>
                          </abstract>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_is_absent(self):
        # With no article-type declared, a missing abstract is tolerated.
        sample = u"""<article>
                      <front>
                        <article-meta>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_is_present_with_lang(self):
        # @xml:lang on the main abstract is not allowed.
        sample = u"""<?xml version="1.0" encoding="UTF-8"?>
                    <article>
                      <front>
                        <article-meta>
                          <abstract xml:lang="en">
                            <p>Differing socioeconomic positions in...</p>
                          </abstract>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_for_research_article(self):
        sample = u"""<?xml version="1.0" encoding="UTF-8"?>
                    <article article-type="research-article">
                      <front>
                        <article-meta>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_research_article(self):
        sample = u"""<?xml version="1.0" encoding="UTF-8"?>
                    <article article-type="research-article">
                      <front>
                        <article-meta>
                          <abstract>
                            <p>Differing socioeconomic positions in...</p>
                          </abstract>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_research_article_only_with_transabstract(self):
        # A trans-abstract alone satisfies the requirement.
        sample = u"""<?xml version="1.0" encoding="UTF-8"?>
                    <article article-type="research-article">
                      <front>
                        <article-meta>
                          <trans-abstract xml:lang="en">
                            <p>Differing socioeconomic positions in...</p>
                          </trans-abstract>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_missing_for_review_article(self):
        sample = u"""<?xml version="1.0" encoding="UTF-8"?>
                    <article article-type="review-article">
                      <front>
                        <article-meta>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_review_article(self):
        sample = u"""<?xml version="1.0" encoding="UTF-8"?>
                    <article article-type="review-article">
                      <front>
                        <article-meta>
                          <abstract>
                            <p>Differing socioeconomic positions in...</p>
                          </abstract>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_review_article_only_with_transabstract(self):
        sample = u"""<?xml version="1.0" encoding="UTF-8"?>
                    <article article-type="review-article">
                      <front>
                        <article-meta>
                          <trans-abstract xml:lang="en">
                            <p>Differing socioeconomic positions in...</p>
                          </trans-abstract>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_transabstract_allowed_types(self):
        # Exercise every accepted trans-abstract @abstract-type value.
        for value in ['graphical',]:
            sample = u"""<?xml version="1.0" encoding="UTF-8"?>
                        <article article-type="research-article">
                          <front>
                            <article-meta>
                              <trans-abstract abstract-type="%s" xml:lang="en">
                                <p>Differing socioeconomic positions in...</p>
                              </trans-abstract>
                            </article-meta>
                          </front>
                        </article>
                     """ % value
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_transabstract_disallowed_types(self):
        sample = u"""<?xml version="1.0" encoding="UTF-8"?>
                    <article article-type="research-article">
                      <front>
                        <article-meta>
                          <trans-abstract abstract-type="unknown" xml:lang="en">
                            <p>Differing socioeconomic positions in...</p>
                          </trans-abstract>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class ArticleTitleLangTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/title-group/article-title elements.

    The phase asserts that <article-title> must NOT carry an @xml:lang
    attribute, both in the article metadata and inside element-citation
    references.
    """
    sch_phase = 'phase.article-title_lang'

    def test_is_present_in_articlemeta(self):
        """article-title without @xml:lang in article-meta is valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <title-group>
                            <article-title>
                              Systematic review of day hospital care...
                            </article-title>
                          </title-group>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_is_present_in_articlemeta_with_lang(self):
        """article-title with @xml:lang in article-meta must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <title-group>
                            <article-title xml:lang="en">
                              Systematic review of day hospital care...
                            </article-title>
                          </title-group>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_is_present_in_elementcitation(self):
        """article-title without @xml:lang inside element-citation is valid."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <mixed-citation>Aires M, Paz AA, Perosa CT. Situação de saúde e grau de dependência de pessoas idosas institucionalizadas. <italic>Rev Gaucha Enferm.</italic> 2009;30(3):192-9.</mixed-citation>
                            <element-citation publication-type="journal">
                              <person-group person-group-type="author">
                                <name>
                                  <surname>Aires</surname>
                                  <given-names>M</given-names>
                                </name>
                                <name>
                                  <surname>Paz</surname>
                                  <given-names>AA</given-names>
                                </name>
                                <name>
                                  <surname>Perosa</surname>
                                  <given-names>CT</given-names>
                                </name>
                              </person-group>
                              <article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
                              <source>Rev Gaucha Enferm</source>
                              <year>2009</year>
                              <volume>30</volume>
                              <issue>3</issue>
                              <fpage>192</fpage>
                              <lpage>199</lpage>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_is_present_in_elementcitation_with_lang(self):
        """article-title with @xml:lang inside element-citation must fail."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <mixed-citation>Aires M, Paz AA, Perosa CT. Situação de saúde e grau de dependência de pessoas idosas institucionalizadas. <italic>Rev Gaucha Enferm.</italic> 2009;30(3):192-9.</mixed-citation>
                            <element-citation publication-type="journal">
                              <person-group person-group-type="author">
                                <name>
                                  <surname>Aires</surname>
                                  <given-names>M</given-names>
                                </name>
                                <name>
                                  <surname>Paz</surname>
                                  <given-names>AA</given-names>
                                </name>
                                <name>
                                  <surname>Perosa</surname>
                                  <given-names>CT</given-names>
                                </name>
                              </person-group>
                              <article-title xml:lang="pt">Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
                              <source>Rev Gaucha Enferm</source>
                              <year>2009</year>
                              <volume>30</volume>
                              <issue>3</issue>
                              <fpage>192</fpage>
                              <lpage>199</lpage>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class KwdGroupLangTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/kwd-group elements.

    The phase requires every <kwd-group> to declare its language via
    @xml:lang.
    """
    sch_phase = 'phase.kwd-group_lang'

    def test_single_occurence(self):
        """A single kwd-group without @xml:lang must fail."""
        # NOTE: method name keeps the historical misspelling "occurence".
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <kwd-group>
                            <kwd>gene expression</kwd>
                          </kwd-group>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_many_occurencies(self):
        """Multiple kwd-groups, each with @xml:lang, are valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <kwd-group xml:lang="en">
                            <kwd>gene expression</kwd>
                          </kwd-group>
                          <kwd-group xml:lang="pt">
                            <kwd>expressao do gene</kwd>
                          </kwd-group>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_many_occurencies_without_lang(self):
        """Multiple kwd-groups missing @xml:lang must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <kwd-group>
                            <kwd>gene expression</kwd>
                          </kwd-group>
                          <kwd-group>
                            <kwd>expressao do gene</kwd>
                          </kwd-group>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class AffContentTypeTests(PhaseBasedTestCase):
    """Tests for:
      - article/front/article-meta/contrib-group
      - article/front/article-meta

    The phase checks institution/@content-type inside <aff>: exactly one
    "original" per aff, plus an allowed set of auxiliary values
    (orgdiv1, orgdiv2, normalized, orgname).
    """
    sch_phase = 'phase.aff_contenttypes'

    def test_original_is_present(self):
        """One institution[@content-type='original'] per aff is valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_original_is_absent(self):
        """An aff whose institution lacks @content-type must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution>
                              Grupo de ...
                            </institution>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_many_original(self):
        """Two 'original' institutions in the same aff must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <institution content-type="original">
                              Galera de ...
                            </institution>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_original_is_present_and_absent(self):
        """Every aff needs its own 'original'; one missing fails the doc."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                          </aff>
                          <aff>
                            <institution>
                              Grupo de ...
                            </institution>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_original_is_present_and_present(self):
        """One 'original' in each of two affs is valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                          </aff>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_allowed_orgdiv1(self):
        """@content-type='orgdiv1' is an allowed auxiliary value."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <institution content-type="orgdiv1">
                              Instituto de Matematica e Estatistica
                            </institution>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_allowed_orgdiv2(self):
        """@content-type='orgdiv2' is an allowed auxiliary value."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <institution content-type="orgdiv2">
                              Instituto de Matematica e Estatistica
                            </institution>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_orgdiv3_is_not_allowed_anymore(self):
        """@content-type='orgdiv3' was dropped from the allowed set."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <institution content-type="orgdiv3">
                              Instituto de Matematica e Estatistica
                            </institution>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_allowed_normalized(self):
        """@content-type='normalized' is an allowed auxiliary value."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <institution content-type="normalized">
                              Instituto de Matematica e Estatistica
                            </institution>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_disallowed_orgdiv4(self):
        """@content-type='orgdiv4' is not in the allowed set."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <institution content-type="orgdiv4">
                              Instituto de Matematica e Estatistica
                            </institution>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_orgname_inside_contrib_group(self):
        """affs nested in contrib-group accept @content-type='orgname'."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <contrib-group>
                            <aff>
                              <institution content-type="original">
                                Grupo de ...
                              </institution>
                              <institution content-type="orgname">
                                Instituto de Matematica e Estatistica
                              </institution>
                            </aff>
                          </contrib-group>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
class CountsTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/counts elements.

    The phase cross-checks the declared <counts> children
    (table-count, ref-count, fig-count, equation-count, page-count)
    against the actual document content and page interval.
    """
    sch_phase = 'phase.counts'

    def test_absent(self):
        """A document with no <counts> element at all is valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <fpage>0</fpage>
                          <lpage>0</lpage>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_table_is_absent(self):
        """<counts> without table-count is accepted."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <counts>
                            <ref-count count="0"/>
                            <fig-count count="0"/>
                            <equation-count count="0"/>
                            <page-count count="0"/>
                          </counts>
                          <fpage>0</fpage>
                          <lpage>0</lpage>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_ref_is_absent(self):
        """<counts> without ref-count is accepted."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="0"/>
                            <fig-count count="0"/>
                            <equation-count count="0"/>
                            <page-count count="0"/>
                          </counts>
                          <fpage>0</fpage>
                          <lpage>0</lpage>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_fig_is_absent(self):
        """<counts> without fig-count is accepted."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="0"/>
                            <ref-count count="0"/>
                            <equation-count count="0"/>
                            <page-count count="0"/>
                          </counts>
                          <fpage>0</fpage>
                          <lpage>0</lpage>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_equation_is_absent(self):
        """<counts> without equation-count is accepted."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="0"/>
                            <ref-count count="0"/>
                            <fig-count count="0"/>
                            <page-count count="0"/>
                          </counts>
                          <fpage>0</fpage>
                          <lpage>0</lpage>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_page_is_absent(self):
        """<counts> without page-count (and no page interval) is accepted."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="0"/>
                            <ref-count count="0"/>
                            <fig-count count="0"/>
                            <equation-count count="0"/>
                          </counts>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_tables(self):
        """table-count=1 matching one actual <table-wrap>/<table> is valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="1"/>
                            <ref-count count="0"/>
                            <fig-count count="0"/>
                            <equation-count count="0"/>
                            <page-count count="0"/>
                          </counts>
                          <fpage>0</fpage>
                          <lpage>0</lpage>
                        </article-meta>
                      </front>
                      <body>
                        <sec>
                          <p>
                            <table-wrap>
                              <table frame="hsides" rules="groups">
                                <colgroup width="25%"><col/><col/><col/><col/></colgroup>
                                <thead>
                                  <tr>
                                    <th style="font-weight:normal" align="left">Modelo</th>
                                    <th style="font-weight:normal">Estrutura</th>
                                    <th style="font-weight:normal">Processos</th>
                                    <th style="font-weight:normal">Resultados</th>
                                  </tr>
                                </thead>
                                <tbody>
                                  <tr>
                                    <td valign="top">SIPA<sup>1,2</sup></td>
                                    <td valign="top">Urgência e hospitalar.</td>
                                    <td valign="top">Realiza triagem para fragilidade.</td>
                                    <td valign="top">Maior gasto comunitário, menor gasto.</td>
                                  </tr>
                                </tbody>
                              </table>
                            </table-wrap>
                          </p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_tables_as_graphic(self):
        """A table provided as a <graphic> still counts toward table-count."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="1"/>
                            <ref-count count="0"/>
                            <fig-count count="0"/>
                            <equation-count count="0"/>
                            <page-count count="0"/>
                          </counts>
                          <fpage>0</fpage>
                          <lpage>0</lpage>
                        </article-meta>
                      </front>
                      <body>
                        <sec>
                          <p>
                            <table-wrap id="t01">
                              <graphic mimetype="image"
                                       xlink:href="1414-431X-bjmbr-1414-431X20142875-gt001">
                              </graphic>
                            </table-wrap>
                          </p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_ref(self):
        """ref-count=1 matching one actual <ref> is valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="0"/>
                            <ref-count count="1"/>
                            <fig-count count="0"/>
                            <equation-count count="0"/>
                            <page-count count="0"/>
                          </counts>
                          <fpage>0</fpage>
                          <lpage>0</lpage>
                        </article-meta>
                      </front>
                      <back>
                        <ref-list>
                          <title>REFERÊNCIAS</title>
                          <ref id="B1">
                            <label>1</label>
                            <mixed-citation>
                              Béland F, Bergman H, Lebel P, Clarfield AM, Tousignant P, ...
                            </mixed-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_fig(self):
        """fig-count=1 matching one actual <fig> is valid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="0"/>
                            <ref-count count="0"/>
                            <fig-count count="1"/>
                            <equation-count count="0"/>
                            <page-count count="0"/>
                          </counts>
                          <fpage>0</fpage>
                          <lpage>0</lpage>
                        </article-meta>
                      </front>
                      <body>
                        <sec>
                          <p>
                            <fig id="f01">
                              <label>Figura 1</label>
                              <caption>
                                <title>Modelo das cinco etapas da pesquisa translacional.</title>
                              </caption>
                              <graphic xlink:href="0034-8910-rsp-48-2-0347-gf01"/>
                            </fig>
                          </p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_equation(self):
        """equation-count=1 matching one actual <disp-formula> is valid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="0"/>
                            <ref-count count="0"/>
                            <fig-count count="0"/>
                            <equation-count count="1"/>
                            <page-count count="0"/>
                          </counts>
                          <fpage>0</fpage>
                          <lpage>0</lpage>
                        </article-meta>
                      </front>
                      <body>
                        <sec>
                          <disp-formula>
                            <tex-math id="M1">
                            </tex-math>
                          </disp-formula>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_page(self):
        """page-count consistent with the fpage-lpage interval is valid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="0"/>
                            <ref-count count="0"/>
                            <fig-count count="0"/>
                            <equation-count count="0"/>
                            <page-count count="11"/>
                          </counts>
                          <fpage>140</fpage>
                          <lpage>150</lpage>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_page_wrong_count(self):
        """page-count inconsistent with the fpage-lpage interval must fail."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="0"/>
                            <ref-count count="0"/>
                            <fig-count count="0"/>
                            <equation-count count="0"/>
                            <page-count count="50"/>
                          </counts>
                          <fpage>140</fpage>
                          <lpage>150</lpage>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_non_digit_pages(self):
        """Non-digit page interval cannot be checked automatically.
        """
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="0"/>
                            <ref-count count="0"/>
                            <fig-count count="0"/>
                            <equation-count count="0"/>
                            <page-count count="11"/>
                          </counts>
                          <fpage>A140</fpage>
                          <lpage>A150</lpage>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_elocationid_pages(self):
        """Electronic pagination cannot be checked automatically.
        """
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <front>
                        <article-meta>
                          <counts>
                            <table-count count="0"/>
                            <ref-count count="0"/>
                            <fig-count count="0"/>
                            <equation-count count="0"/>
                            <page-count count="11"/>
                          </counts>
                          <elocation-id>A140</elocation-id>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
class AuthorNotesTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/author-notes elements.

    NOTE(review): the phase name is 'phase.fn-group' — author-notes/fn
    appears to share the fn-type whitelist with fn-group; confirm against
    the schematron definition.
    """
    sch_phase = 'phase.fn-group'

    def test_allowed_fn_types(self):
        """Each whitelisted fn/@fn-type value must validate."""
        for fn_type in ['author', 'con', 'conflict', 'corresp', 'current-aff',
                        'deceased', 'edited-by', 'equal', 'on-leave', 'participating-researchers',
                        'present-address', 'previously-at', 'study-group-members', 'other']:
            sample = u"""<article>
                          <front>
                            <article-meta>
                              <author-notes>
                                <fn fn-type="%s">
                                  <p>foobar</p>
                                </fn>
                              </author-notes>
                            </article-meta>
                          </front>
                        </article>
                     """ % fn_type
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))

    def test_disallowed_fn_types(self):
        """A fn/@fn-type value outside the whitelist must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <author-notes>
                            <fn fn-type="wtf">
                              <p>foobar</p>
                            </fn>
                          </author-notes>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class PubDateTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/pub-date elements.

    The phase requires @date-type and @publication-format on every
    pub-date, constrains their values, and constrains which date parts
    each date-type may carry.
    """
    sch_phase = 'phase.pub-date'

    def test_date_type_absent(self):
        """pub-date without @date-type must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date publication-format="electronic">
                            <day>17</day>
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_publication_format_absent(self):
        """pub-date without @publication-format must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub">
                            <day>17</day>
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_date_type_allowed_values_day_month_year(self):
        """date-type='pub' with the full day/month/year is valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub" publication-format="electronic">
                            <day>17</day>
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_date_type_allowed_values_day_month(self):
        """date-type='pub' missing <year> must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub" publication-format="electronic">
                            <day>17</day>
                            <month>03</month>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_date_type_allowed_values_day_year(self):
        """date-type='pub' missing <month> must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub" publication-format="electronic">
                            <day>17</day>
                            <year>2014</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_date_type_allowed_values_month_year(self):
        """date-type='pub' missing <day> must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub" publication-format="electronic">
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_date_type_allowed_values_year(self):
        """date-type='pub' with only <year> must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub" publication-format="electronic">
                            <year>2014</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_pub_type_disallowed_value(self):
        """An unknown @date-type value must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="unknown" publication-format="electronic">
                            <day>17</day>
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_publication_format_allowed_values(self):
        """Each allowed @publication-format value must validate."""
        for value in ['electronic']:
            sample = u"""<article>
                          <front>
                            <article-meta>
                              <pub-date date-type="pub" publication-format="%s">
                                <day>17</day>
                                <month>03</month>
                                <year>2014</year>
                              </pub-date>
                            </article-meta>
                          </front>
                        </article>
                     """ % value
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))

    def test_publication_format_disallowed_value(self):
        """An unknown @publication-format value must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub" publication-format="unknown">
                            <day>17</day>
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_date_type_collection_without_pub_is_not_allowed(self):
        """A 'collection' date requires a sibling 'pub' date."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="collection" publication-format="electronic">
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_date_type_collection_containing_month_year(self):
        """A 'collection' date with month+year (plus 'pub') is valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub" publication-format="electronic">
                            <day>17</day>
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                          <pub-date date-type="collection" publication-format="electronic">
                            <month>03</month>
                            <year>2015</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_date_type_collection_containing_day_month_year(self):
        """A 'collection' date must not carry a <day>."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub" publication-format="electronic">
                            <day>17</day>
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                          <pub-date date-type="collection" publication-format="electronic">
                            <day>12</day>
                            <month>03</month>
                            <year>2015</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_date_type_collection_containing_day_year(self):
        """A 'collection' date with day+year (no month) must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub" publication-format="electronic">
                            <day>17</day>
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                          <pub-date date-type="collection" publication-format="electronic">
                            <day>12</day>
                            <year>2015</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_date_type_collection_containing_year(self):
        """A 'collection' date with only <year> is valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub" publication-format="electronic">
                            <day>17</day>
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                          <pub-date date-type="collection" publication-format="electronic">
                            <year>2015</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_date_type_collection_containing_season(self):
        """A 'collection' date with <season> but no <year> must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub" publication-format="electronic">
                            <day>17</day>
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                          <pub-date date-type="collection" publication-format="electronic">
                            <season>Jan-Feb</season>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_date_type_collection_containing_season_year(self):
        """A 'collection' date with <season> and <year> is valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date date-type="pub" publication-format="electronic">
                            <day>17</day>
                            <month>03</month>
                            <year>2014</year>
                          </pub-date>
                          <pub-date date-type="collection" publication-format="electronic">
                            <season>Jan-Feb</season>
                            <year>2019</year>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
class VolumeTests(PhaseBasedTestCase):
    """Tests for:
      - article/front/article-meta/volume
      - article/back/ref-list/ref/element-citation/volume
    """
    sch_phase = 'phase.volume'

    def test_absent_in_front(self):
        """An article-meta without <volume> is accepted."""
        xml = u"""<article>
                   <front>
                     <article-meta>
                     </article-meta>
                   </front>
                 </article>
                 """
        self.assertTrue(self._run_validation(io.BytesIO(xml.encode('utf-8'))))

    def test_present_but_empty_in_front(self):
        """An empty <volume> element must fail."""
        xml = u"""<article>
                   <front>
                     <article-meta>
                       <volume></volume>
                     </article-meta>
                   </front>
                 </article>
                 """
        self.assertFalse(self._run_validation(io.BytesIO(xml.encode('utf-8'))))

    def test_present_in_front(self):
        """A non-empty <volume> element validates."""
        xml = u"""<article>
                   <front>
                     <article-meta>
                       <volume>10</volume>
                     </article-meta>
                   </front>
                 </article>
                 """
        self.assertTrue(self._run_validation(io.BytesIO(xml.encode('utf-8'))))
class IssueTests(PhaseBasedTestCase):
    """Tests for:
      - article/front/article-meta/issue
      - article/back/ref-list/ref/element-citation/issue

    <issue> is optional, but when present it must be non-empty and must
    not repeat inside an element-citation.
    """
    sch_phase = 'phase.issue'

    def test_absent_in_front(self):
        """An article-meta without <issue> is accepted."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_present_but_empty_in_front(self):
        """An empty <issue> element must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <issue></issue>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_present_in_front(self):
        """A non-empty numeric <issue> validates."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <issue>10</issue>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_special_number_support(self):
        """The special-number marker 'spe' is an accepted issue value."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <issue>spe</issue>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_is_present_in_elementcitation(self):
        """A single <issue> inside element-citation validates."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                              <issue>10</issue>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_is_present_twice_in_elementcitation(self):
        """Duplicated <issue> inside element-citation must fail."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                              <issue>02</issue>
                              <issue>02</issue>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_is_absent_in_elementcitation(self):
        """element-citation without <issue> is accepted."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
class SupplementTests(PhaseBasedTestCase):
    """Tests for:
      - article/front/article-meta/supplement
    """
    sch_phase = 'phase.supplement'

    def test_absent(self):
        """An article-meta without <supplement> is accepted."""
        xml = u"""<article>
                   <front>
                     <article-meta>
                     </article-meta>
                   </front>
                 </article>
                 """
        self.assertTrue(self._run_validation(io.BytesIO(xml.encode('utf-8'))))

    def test_present(self):
        """The <supplement> element is not allowed by this phase."""
        xml = u"""<article>
                   <front>
                     <article-meta>
                       <supplement>Suppl 2</supplement>
                     </article-meta>
                   </front>
                 </article>
                 """
        self.assertFalse(self._run_validation(io.BytesIO(xml.encode('utf-8'))))
class ElocationIdTests(PhaseBasedTestCase):
    """Tests for:
      - article/front/article-meta/elocation-id
      - article/back/ref-list/ref/element-citation/elocation-id

    <elocation-id> and <fpage> are mutually exclusive in both contexts.
    """
    sch_phase = 'phase.elocation-id'

    def test_absent(self):
        """article-meta without elocation-id is accepted."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_with_fpage(self):
        """elocation-id together with fpage in article-meta must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <elocation-id>E27</elocation-id>
                          <fpage>12</fpage>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_without_fpage(self):
        """elocation-id alone in article-meta is valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <elocation-id>E27</elocation-id>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_absent_back(self):
        """element-citation without elocation-id is accepted."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_with_fpage_back(self):
        """elocation-id together with fpage in element-citation must fail."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                              <elocation-id>E27</elocation-id>
                              <fpage>12</fpage>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_without_fpage_back(self):
        """elocation-id alone in element-citation is valid."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                              <elocation-id>E27</elocation-id>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_with_and_without_fpage_back(self):
        """One offending citation is enough to fail the whole document."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                              <elocation-id>E27</elocation-id>
                              <fpage>12</fpage>
                            </element-citation>
                          </ref>
                          <ref>
                            <element-citation>
                              <elocation-id>E27</elocation-id>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class HistoryTests(PhaseBasedTestCase):
    """Tests for:
      - article/front/article-meta/history

    <history> is optional; when present, each <date> must use a
    whitelisted @date-type value.
    """
    sch_phase = 'phase.history'

    def test_absent(self):
        """An article-meta without <history> is accepted."""
        # A duplicate of this test with the truncated name ``test_`` was
        # removed: it was an unfinished stub identical to this method and
        # added no coverage.
        sample = u"""<article>
                      <front>
                        <article-meta>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_date_type_allowed_values(self):
        """Each whitelisted date/@date-type value must validate."""
        for pub_type in ['accepted', 'corrected', 'pub', 'preprint',
                         'retracted', 'received', 'rev-recd', 'rev-request']:
            sample = u"""<article>
                          <front>
                            <article-meta>
                              <history>
                                <date date-type="%s">
                                  <day>17</day>
                                  <month>03</month>
                                  <year>2014</year>
                                </date>
                              </history>
                            </article-meta>
                          </front>
                        </article>
                     """ % pub_type
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))

    def test_date_type_disallowed_values(self):
        """A date/@date-type value outside the whitelist must fail."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <history>
                            <date date-type="invalid">
                              <day>17</day>
                              <month>03</month>
                              <year>2014</year>
                            </date>
                          </history>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_date_type_allowed_values_multi(self):
        """Several dates with distinct allowed @date-type values are valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <history>
                            <date date-type="received">
                              <day>17</day>
                              <month>03</month>
                              <year>2014</year>
                            </date>
                            <date date-type="accepted">
                              <day>17</day>
                              <month>03</month>
                              <year>2014</year>
                            </date>
                          </history>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
class ProductTests(PhaseBasedTestCase):
"""Tests for:
- article/front/article-meta/product
"""
sch_phase = 'phase.product'
def test_absent(self):
sample = u"""<article>
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_absent_allowed_types(self):
for art_type in ['book-review', 'product-review']:
sample = u"""<article article-type="%s">
<front>
<article-meta>
</article-meta>
</front>
</article>
""" % art_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
    def test_allowed_types(self):
        """<product product-type='book'> inside a book-review is valid."""
        for art_type in ['book-review']:
            sample = u"""<article article-type="%s">
                          <front>
                            <article-meta>
                              <product product-type="book">
                                <person-group person-group-type="author">
                                  <name>
                                    <surname>Sobrenome do autor</surname>
                                    <given-names>Prenomes do autor</given-names>
                                  </name>
                                </person-group>
                                <source>Título do livro</source>
                                <year>Ano de publicação</year>
                                <publisher-name>Nome da casa publicadora/Editora</publisher-name>
                                <publisher-loc>Local de publicação</publisher-loc>
                                <page-count count="total de paginação do livro (opcional)"/>
                                <isbn>ISBN do livro, se houver</isbn>
                                <inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
                              </product>
                            </article-meta>
                          </front>
                        </article>
                     """ % art_type
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))
    def test_disallowed_types(self):
        """<product> inside a research-article must fail."""
        sample = u"""<article article-type="research-article">
                      <front>
                        <article-meta>
                          <product product-type="book">
                            <person-group person-group-type="author">
                              <name>
                                <surname>Sobrenome do autor</surname>
                                <given-names>Prenomes do autor</given-names>
                              </name>
                            </person-group>
                            <source>Título do livro</source>
                            <year>Ano de publicação</year>
                            <publisher-name>Nome da casa publicadora/Editora</publisher-name>
                            <publisher-loc>Local de publicação</publisher-loc>
                            <page-count count="total de paginação do livro (opcional)"/>
                            <isbn>ISBN do livro, se houver</isbn>
                            <inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
                          </product>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_no_type(self):
        """<product> must be rejected when the article-type attribute is absent."""
        sample = u"""<article>
<front>
<article-meta>
<product product-type="book">
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_product_type(self):
        """<product> must carry a product-type attribute; its absence fails."""
        sample = u"""<article article-type="book-review">
<front>
<article-meta>
<product>
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_allowed_product_types(self):
        """Every value in the allowed product-type list must validate."""
        for prod_type in ['book', 'article', 'issue', 'website', 'film',
                          'software', 'hardware', 'other']:
            sample = u"""<article article-type="book-review">
<front>
<article-meta>
<product product-type="%s">
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
""" % prod_type
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))
    def test_disallowed_product_types(self):
        """A product-type outside the allowed list must fail validation."""
        sample = u"""<article article-type="book-review">
<front>
<article-meta>
<product product-type="invalid">
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_formatting_and_punctuation(self):
        """Loose punctuation between <product> child elements is tolerated."""
        sample = u"""<article article-type="book-review">
<front>
<article-meta>
<product product-type="book">
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>,
<given-names>Prenomes do autor</given-names>;
</name>
</person-group>
<source>Título do livro</source>,
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
class SecTitleTests(PhaseBasedTestCase):
    """Tests for:
      - article/body/sec/title

    A <sec> element must contain a non-empty <title>.
    """
    # Schematron phase exercised by every test in this class.
    sch_phase = 'phase.sectitle'
    def test_absent(self):
        """A <sec> without any <title> must fail validation."""
        sample = u"""<article>
<body>
<sec>
<p>Foo bar</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_has_title(self):
        """A <sec> with a non-empty <title> is valid."""
        sample = u"""<article>
<body>
<sec>
<title>Introduction</title>
<p>Foo bar</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_has_empty_title(self):
        """An empty <title> is treated the same as a missing one."""
        sample = u"""<article>
<body>
<sec>
<title></title>
<p>Foo bar</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class ParagraphTests(PhaseBasedTestCase):
    """Tests for //p

    <p> elements must not carry an @id attribute, whether they appear
    directly under <body> or nested inside a <sec>.
    """
    # Schematron phase exercised by every test in this class.
    sch_phase = 'phase.paragraph'
    def test_sec_without_id(self):
        """A <p> without @id inside a <sec> is valid."""
        sample = u"""<article>
<body>
<sec>
<title>Intro</title>
<p>Foo bar</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_sec_with_id(self):
        """A <p> carrying @id inside a <sec> must fail."""
        sample = u"""<article>
<body>
<sec>
<title>Intro</title>
<p id="p01">Foo bar</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_body_without_id(self):
        """A <p> without @id directly under <body> is valid."""
        sample = u"""<article>
<body>
<p>Foo bar</p>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_body_with_id(self):
        """A <p> carrying @id directly under <body> must fail."""
        sample = u"""<article>
<body>
<p id="p01">Foo bar</p>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class XrefRidTests(PhaseBasedTestCase):
    """Tests for //xref[@rid]

    Every @rid must resolve to an element with a matching @id, and the
    target element kind must agree with the xref's @ref-type.
    """
    # Schematron phase exercised by every test in this class.
    sch_phase = 'phase.rid_integrity'
    def test_mismatching_rid(self):
        """An xref whose @rid points to no existing @id must fail."""
        sample = u"""<article>
<front>
<article-meta>
<contrib-group>
<contrib>
<xref ref-type="aff" rid="aff1">
<sup>I</sup>
</xref>
</contrib>
</contrib-group>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_matching_rid(self):
        """An xref whose @rid resolves to an existing <aff id> is valid."""
        sample = u"""<article>
<front>
<article-meta>
<contrib-group>
<contrib>
<xref ref-type="aff" rid="aff1">
<sup>I</sup>
</xref>
</contrib>
</contrib-group>
<aff id="aff1">
<label>I</label>
<institution content-type="orgname">
Secretaria Municipal de Saude de Belo Horizonte
</institution>
<addr-line>
<named-content content-type="city">Belo Horizonte</named-content>
<named-content content-type="state">MG</named-content>
</addr-line>
<country>Brasil</country>
<institution content-type="original">
Secretaria Municipal de Saude de Belo Horizonte. Belo Horizonte, MG, Brasil
</institution>
</aff>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_mismatching_reftype(self):
        """An @rid resolving to an element of the wrong kind must fail
        (here ref-type="aff" points at a <table-wrap>)."""
        sample = u"""<article>
<body>
<sec>
<table-wrap id="t01">
</table-wrap>
</sec>
<sec>
<p>
<xref ref-type="aff" rid="t01">table 1</xref>
</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class XrefRefTypeTests(PhaseBasedTestCase):
    """Tests for //xref[@ref-type]

    The @ref-type attribute must take one of the values accepted by the
    schema; anything else must be rejected.
    """
    # Schematron phase exercised by every test in this class.
    sch_phase = 'phase.xref_reftype_integrity'
    def test_allowed_ref_types(self):
        """Every value in the allowed ref-type list must validate."""
        for reftype in ['aff', 'app', 'author-notes', 'bibr', 'contrib',
                        'corresp', 'disp-formula', 'fig', 'fn', 'sec',
                        'supplementary-material', 'table', 'table-fn',
                        'boxed-text']:
            sample = u"""<article>
<body>
<sec>
<p>
<xref ref-type="%s">foo</xref>
</p>
</sec>
</body>
</article>
""" % reftype
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))
    def test_disallowed_ref_types(self):
        """Every value outside the allowed ref-type list must fail.

        BUGFIX: a missing comma previously concatenated 'plate' and
        'scheme' into the single string 'platescheme', so neither value
        was actually exercised by this loop.
        """
        for reftype in ['chem', 'kwd', 'list', 'other', 'plate',
                        'scheme', 'statement']:
            sample = u"""<article>
<body>
<sec>
<p>
<xref ref-type="%s">foo</xref>
</p>
</sec>
</body>
</article>
""" % reftype
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertFalse(self._run_validation(sample))
class CaptionTests(PhaseBasedTestCase):
    """Tests for //caption

    A <caption> must contain a <title>; other children may accompany it.
    """
    # Schematron phase exercised by every test in this class.
    sch_phase = 'phase.caption'
    def test_with_title(self):
        """A <caption> containing a <title> is valid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<fig id="f03">
<label>Figura 3</label>
<caption>
<title>
Percentual de atividade mitocondrial.
</title>
</caption>
<graphic xlink:href="1234-5678-rctb-45-05-0110-gf01.tif"/>
</fig>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_without_title(self):
        """A <caption> without a <title> must fail, even with other children."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<fig id="f03">
<label>Figura 3</label>
<caption>
<label>
Percentual de atividade mitocondrial.
</label>
</caption>
<graphic xlink:href="1234-5678-rctb-45-05-0110-gf01.tif"/>
</fig>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_with_title_and_more(self):
        """A <caption> with a <title> plus extra children is still valid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<fig id="f03">
<label>Figura 3</label>
<caption>
<title>
Percentual de atividade mitocondrial.
</title>
<label>
Percentual de atividade mitocondrial.
</label>
</caption>
<graphic xlink:href="1234-5678-rctb-45-05-0110-gf01.tif"/>
</fig>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
class LicenseTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/permissions/license element.

    The main article license must be present, carry an allowed
    license-type and a Creative Commons xlink:href, and its xml:lang
    must be consistent with the article's language.
    """
    # Schematron phase exercised by every test in this class.
    sch_phase = 'phase.license'
    def test_missing_permissions_elem(self):
        """An article-meta without <permissions> must fail."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_missing_license(self):
        """A <permissions> without a <license> child must fail."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
</permissions>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_allowed_license_type(self):
        """license-type="open-access" is accepted."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="http://creativecommons.org/licenses/by/4.0/"
xml:lang="en">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_disallowed_license_type(self):
        """Any license-type other than the allowed one(s) must fail."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="closed-access"
xlink:href="http://creativecommons.org/licenses/by/4.0/"
xml:lang="en">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_allowed_license_href(self):
        """Each whitelisted Creative Commons URL (http scheme) must pass."""
        allowed_licenses = [
            'http://creativecommons.org/licenses/by-nc/4.0/',
            'http://creativecommons.org/licenses/by-nc/3.0/',
            'http://creativecommons.org/licenses/by/4.0/',
            'http://creativecommons.org/licenses/by/3.0/',
            'http://creativecommons.org/licenses/by-nc-nd/4.0/',
            'http://creativecommons.org/licenses/by-nc-nd/3.0/',
            'http://creativecommons.org/licenses/by/3.0/igo/',
            'http://creativecommons.org/licenses/by-nc/3.0/igo/',
            'http://creativecommons.org/licenses/by-nc-nd/3.0/igo/',
        ]
        for license in allowed_licenses:
            sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="%s"
xml:lang="en">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
""" % license
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))
    def test_allowed_license_href_https_scheme(self):
        """The same whitelisted URLs must also pass with the https scheme."""
        allowed_licenses = [
            'https://creativecommons.org/licenses/by-nc/4.0/',
            'https://creativecommons.org/licenses/by-nc/3.0/',
            'https://creativecommons.org/licenses/by/4.0/',
            'https://creativecommons.org/licenses/by/3.0/',
            'https://creativecommons.org/licenses/by-nc-nd/4.0/',
            'https://creativecommons.org/licenses/by-nc-nd/3.0/',
            'https://creativecommons.org/licenses/by/3.0/igo/',
            'https://creativecommons.org/licenses/by-nc/3.0/igo/',
            'https://creativecommons.org/licenses/by-nc-nd/3.0/igo/',
        ]
        for license in allowed_licenses:
            sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="%s"
xml:lang="en">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
""" % license
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))
    def test_disallowed_license_href(self):
        """A non-Creative-Commons href (e.g. MIT) must fail."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="http://opensource.org/licenses/MIT"
xml:lang="en">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_missing_trailing_slash(self):
        """A whitelisted URL without the trailing slash is still accepted."""
        allowed_licenses = [
            'https://creativecommons.org/licenses/by-nc/4.0',
        ]
        for license in allowed_licenses:
            sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="%s"
xml:lang="en">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
""" % license
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))
    def test_permissions_within_elements_of_the_body(self):
        """<permissions> on body elements (e.g. <fig>) is not restricted
        by this phase as long as the main article license is valid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="http://creativecommons.org/licenses/by/4.0/"
xml:lang="en">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
<body>
<sec>
<p>
<fig id="f01">
<label>Fig. 1</label>
<caption>
<title>título da imagem</title>
</caption>
<graphic xlink:href="1234-5678-rctb-45-05-0110-gf01.tif"/>
<permissions>
<copyright-statement>Copyright © 2014 SciELO</copyright-statement>
<copyright-year>2014</copyright-year>
<copyright-holder>SciELO</copyright-holder>
<license license-type="open-access"
xlink:href="http://creativecommons.org/licenses/by-nc-sa/4.0/"
xml:lang="en">
<license-p>This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.</license-p>
</license>
</permissions>
</fig>
</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_copyrighted_elements_within_the_body(self):
        """Copyright-only <permissions> (no license) on a body element is valid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="http://creativecommons.org/licenses/by/4.0/"
xml:lang="en">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
<body>
<sec>
<p>
<fig id="f01">
<label>Fig. 1</label>
<caption>
<title>título da imagem</title>
</caption>
<graphic xlink:href="1234-5678-rctb-45-05-0110-gf01.tif"/>
<permissions>
<copyright-statement>Copyright © 2014 SciELO</copyright-statement>
<copyright-year>2014</copyright-year>
<copyright-holder>SciELO</copyright-holder>
</permissions>
</fig>
</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_irrestrict_use_licenses_within_elements_in_the_body(self):
        """Body-element licenses may use URLs outside the main whitelist
        (e.g. CC BY 2.0) without failing this phase."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="http://creativecommons.org/licenses/by/4.0/"
xml:lang="en">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
<body>
<sec>
<p>
<fig id="f01">
<label>Fig. 1</label>
<caption>
<title>título da imagem</title>
</caption>
<graphic xlink:href="1234-5678-rctb-45-05-0110-gf01.tif"/>
<permissions>
<copyright-statement>Copyright © 2014 SciELO</copyright-statement>
<copyright-year>2014</copyright-year>
<copyright-holder>SciELO</copyright-holder>
<license license-type="open-access"
xlink:href="http://creativecommons.org/licenses/by/2.0/"
xml:lang="en">
<license-p>This is an open-access article distributed under the terms of...</license-p>
</license>
</permissions>
</fig>
</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_main_article_copyright_info(self):
        """Full copyright info alongside the main license is valid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<copyright-statement>Copyright © 2014 SciELO</copyright-statement>
<copyright-year>2014</copyright-year>
<copyright-holder>SciELO</copyright-holder>
<license license-type="open-access"
xlink:href="http://creativecommons.org/licenses/by/4.0/"
xml:lang="en">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_lang_mismatch(self):
        """A license xml:lang differing from the article's must fail."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="en">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="http://creativecommons.org/licenses/by/4.0/"
xml:lang="pt">
<license-p>
Texto em pt-br...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_lang_mismatch_is_ignored_if_lang_is_en(self):
        """A mismatch is tolerated when the license itself is in English."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="pt">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="http://creativecommons.org/licenses/by/4.0/"
xml:lang="en">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_missing_lang_attribute(self):
        """A license without xml:lang must fail."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink" xml:lang="pt">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="http://creativecommons.org/licenses/by/4.0/">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class AckTests(PhaseBasedTestCase):
    """Tests for article/back/ack element.

    The acknowledgments section must not be structured with <sec>.
    """
    # Schematron phase exercised by every test in this class.
    sch_phase = 'phase.ack'
    def test_with_sec(self):
        """An <ack> containing a <sec> must fail."""
        sample = u"""<article>
<back>
<ack>
<sec>
<p>Some</p>
</sec>
</ack>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_without_sec(self):
        """An <ack> with only <title> and <p> children is valid."""
        sample = u"""<article>
<back>
<ack>
<title>Acknowledgment</title>
<p>Some text</p>
</ack>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
class ElementCitationTests(PhaseBasedTestCase):
    """Tests for article/back/ref-list/ref/element-citation element.

    <name>, <etal> and <collab> must be wrapped in <person-group>;
    publication-type must be an allowed value; and <element-citation>
    may only appear inside <ref>.
    """
    # Schematron phase exercised by every test in this class.
    sch_phase = 'phase.element-citation'
    def test_with_name_outside_persongroup(self):
        """<name> directly under <element-citation> must fail."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<name>Foo</name>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_with_name_inside_persongroup(self):
        """<name> wrapped in <person-group> is valid."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<person-group>
<name>Foo</name>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_with_etal_outside_persongroup(self):
        """<etal> directly under <element-citation> must fail."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<etal>Foo</etal>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_with_etal_inside_persongroup(self):
        """<etal> wrapped in <person-group> is valid."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<person-group>
<etal>Foo</etal>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_with_collab_outside_persongroup(self):
        """<collab> directly under <element-citation> must fail."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<collab>Foo</collab>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_with_collab_inside_persongroup(self):
        """<collab> wrapped in <person-group> is valid."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<person-group>
<collab>Foo</collab>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_allowed_publication_types(self):
        """Every value in the allowed publication-type list must validate."""
        for pub_type in ['journal', 'book', 'webpage', 'thesis', 'confproc',
                         'patent', 'software', 'database', 'legal-doc', 'newspaper',
                         'other', 'report', 'data']:
            sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="%s">
</element-citation>
</ref>
</ref-list>
</back>
</article>
""" % pub_type
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))
    def test_disallowed_publication_types(self):
        """A publication-type outside the allowed list must fail."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="invalid">
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_outside_ref(self):
        """<element-citation> anywhere other than inside <ref> must fail."""
        sample = u"""<article>
<body>
<sec>
<p>
<element-citation publication-type="journal">
<person-group>
<collab>Foo</collab>
</person-group>
</element-citation>
</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class ChapterTitleTests(PhaseBasedTestCase):
    """Tests for
      - article/back/ref-list/ref/element-citation/chapter-title
      - article/front/article-meta/product/chapter-title

    <chapter-title> is optional but must not occur more than once.
    """
    # Schematron phase exercised by every test in this class.
    sch_phase = 'phase.chapter-title'
    def test_absent_in_element_citation(self):
        """A missing <chapter-title> in element-citation is valid."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_present_in_element_citation(self):
        """A single <chapter-title> in element-citation is valid."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<chapter-title>Título do capítulo</chapter-title>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_present_twice_in_element_citation(self):
        """Two <chapter-title> elements in element-citation must fail."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<chapter-title>Título do capítulo</chapter-title>
<chapter-title> Outro título do capítulo</chapter-title>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_absent_in_product(self):
        """A missing <chapter-title> in <product> is valid."""
        sample = u"""<article article-type="book-review">
<front>
<article-meta>
<product product-type="chapter">
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_present_in_product(self):
        """A single <chapter-title> in <product> is valid."""
        sample = u"""<article article-type="book-review">
<front>
<article-meta>
<product product-type="chapter">
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<chapter-title>Título do capítulo do livro</chapter-title>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_present_twice_in_product(self):
        """Two <chapter-title> elements in <product> must fail."""
        sample = u"""<article article-type="book-review">
<front>
<article-meta>
<product product-type="chapter">
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<chapter-title>Título do capítulo do livro</chapter-title>
<chapter-title>Outro título do capítulo do livro</chapter-title>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class PersonGroupTests(PhaseBasedTestCase):
    """Tests for
      - article/back/ref-list/ref/element-citation/person-group
      - article/front/article-meta/product/person-group

    <person-group> must carry an allowed person-group-type and must not
    contain loose text between its children.
    """
    # Schematron phase exercised by every test in this class.
    sch_phase = 'phase.person-group'
    def test_missing_type(self):
        """A <person-group> without person-group-type in element-citation fails."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<person-group>
<name>Foo</name>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_missing_type_at_product(self):
        """A <person-group> without person-group-type in <product> fails."""
        sample = u"""<article>
<front>
<article-meta>
<product>
<person-group>
<name>Foo</name>
</person-group>
</product>
</article-meta>
</front>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_with_type(self):
        """A <person-group> with a person-group-type is valid."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<person-group person-group-type="author">
<name>Foo</name>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_allowed_types(self):
        """Every value in the allowed person-group-type list must validate."""
        for group_type in ['author', 'compiler', 'editor', 'translator']:
            sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<person-group person-group-type="%s">
<name>Foo</name>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
""" % group_type
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))
    def test_disallowed_type(self):
        """A person-group-type outside the allowed list must fail."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<person-group person-group-type="invalid">
<name>Foo</name>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_loose_text_below_element_citation_node(self):
        """Loose text inside a person-group under element-citation must fail."""
        sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<person-group person-group-type="author">HERE
<collab>Foo</collab>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_loose_text_below_product_node(self):
        """Loose text inside a person-group under <product> must fail, even
        when a well-formed person-group exists elsewhere in the document."""
        sample = u"""<article>
<front>
<article-meta>
<product>
<person-group person-group-type="author">HERE
<collab>Foo</collab>
</person-group>
</product>
</article-meta>
</front>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<collab>Foo</collab>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class FNGroupTests(PhaseBasedTestCase):
    """Tests for article/back/fn-group/fn element.

    <fn> must carry an allowed fn-type; the group may have at most one
    <title>.
    """
    # Schematron phase exercised by every test in this class.
    sch_phase = 'phase.fn-group'
    def test_allowed_fn_types(self):
        """Every value in the allowed fn-type list must validate."""
        for fn_type in ['abbr', 'com', 'financial-disclosure', 'supported-by',
                        'presented-at', 'supplementary-material', 'other']:
            sample = u"""<article>
<back>
<fn-group>
<fn fn-type="%s">
<p>foobar</p>
</fn>
</fn-group>
</back>
</article>
""" % fn_type
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))
    def test_disallowed_fn_types(self):
        """An fn-type outside the allowed list must fail."""
        sample = u"""<article>
<back>
<fn-group>
<fn fn-type="invalid">
<p>foobar</p>
</fn>
</fn-group>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_group_title(self):
        """A single <title> on the fn-group is valid."""
        sample = u"""<article>
<back>
<fn-group>
<title>Notes</title>
<fn fn-type="other">
<p>foobar</p>
</fn>
</fn-group>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_many_group_titles_are_not_allowed(self):
        """More than one <title> on the fn-group must fail."""
        sample = u"""<article>
<back>
<fn-group>
<title>Notes</title>
<title>Notes again</title>
<fn fn-type="other">
<p>foobar</p>
</fn>
</fn-group>
</back>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class XHTMLTableTests(PhaseBasedTestCase):
    """Tests for //table elements.

    Tables must follow the XHTML table model: only certain elements at
    the top level, <th> cells only inside <thead>, <td> cells outside it.
    """
    # Schematron phase exercised by every test in this class.
    sch_phase = 'phase.xhtml-table'
    def test_valid_toplevel(self):
        """Each allowed top-level table child must validate."""
        for elem in ['caption', 'summary', 'col', 'colgroup', 'thead', 'tfoot', 'tbody']:
            sample = u"""<article>
<body>
<sec>
<p>
<table>
<%s></%s>
</table>
</p>
</sec>
</body>
</article>
""" % (elem, elem)
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))
    def test_invalid_toplevel(self):
        """<tr> directly under <table> (outside a row group) must fail."""
        for elem in ['tr']:
            sample = u"""<article>
<body>
<sec>
<p>
<table>
<%s></%s>
</table>
</p>
</sec>
</body>
</article>
""" % (elem, elem)
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertFalse(self._run_validation(sample))
    def test_tbody_upon_th(self):
        """<th> inside <tbody> must fail."""
        sample = u"""<article>
<body>
<sec>
<p>
<table>
<tbody>
<tr>
<th>Foo</th>
</tr>
</tbody>
</table>
</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
    def test_thead_upon_th(self):
        """<th> inside <thead> is valid."""
        sample = u"""<article>
<body>
<sec>
<p>
<table>
<thead>
<tr>
<th>Foo</th>
</tr>
</thead>
</table>
</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
    def test_thead_upon_td(self):
        """<td> inside <thead> must fail."""
        sample = u"""<article>
<body>
<sec>
<p>
<table>
<thead>
<tr>
<td>Foo</td>
</tr>
</thead>
</table>
</p>
</sec>
</body>
</article>
"""
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class SupplementaryMaterialMimetypeTests(PhaseBasedTestCase):
    """Tests for article//supplementary-material elements.

    The @mimetype and @mime-subtype attributes must be present together:
    only the case where both are given is valid (a conjunction, as the
    four cases below demonstrate).
    """
    sch_phase = 'phase.supplementary-material'

    def test_case1(self):
        """mimetype present and mime-subtype present -> valid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <supplementary-material id="S1"
                                                xlink:title="local_file"
                                                xlink:href="1471-2105-1-1-s1.pdf"
                                                mimetype="application"
                                                mime-subtype="pdf">
                          <label>Additional material</label>
                          <caption>
                            <p>Supplementary PDF file supplied by authors.</p>
                          </caption>
                        </supplementary-material>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_case2(self):
        """mimetype present but mime-subtype missing -> invalid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <supplementary-material id="S1"
                                                xlink:title="local_file"
                                                xlink:href="1471-2105-1-1-s1.pdf"
                                                mimetype="application">
                          <label>Additional material</label>
                          <caption>
                            <p>Supplementary PDF file supplied by authors.</p>
                          </caption>
                        </supplementary-material>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_case3(self):
        """mime-subtype present but mimetype missing -> invalid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <supplementary-material id="S1"
                                                xlink:title="local_file"
                                                xlink:href="1471-2105-1-1-s1.pdf"
                                                mime-subtype="pdf">
                          <label>Additional material</label>
                          <caption>
                            <p>Supplementary PDF file supplied by authors.</p>
                          </caption>
                        </supplementary-material>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_case4(self):
        """Both mimetype and mime-subtype missing -> invalid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <supplementary-material id="S1"
                                                xlink:title="local_file"
                                                xlink:href="1471-2105-1-1-s1.pdf">
                          <label>Additional material</label>
                          <caption>
                            <p>Supplementary PDF file supplied by authors.</p>
                          </caption>
                        </supplementary-material>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class AuthorNotesFNTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/author-notes/fn element.

    The @fn-type values accepted inside author-notes differ from those
    accepted in back/fn-group (both share the 'phase.fn-group' phase).
    """
    sch_phase = 'phase.fn-group'

    def test_allowed_fn_types(self):
        """Every value of the closed @fn-type list is accepted."""
        for fn_type in ['author', 'con', 'conflict', 'corresp', 'current-aff',
                        'deceased', 'edited-by', 'equal', 'on-leave',
                        'participating-researchers', 'present-address',
                        'previously-at', 'study-group-members', 'other',
                        'presented-at', 'presented-by']:
            sample = u"""<article>
                          <front>
                            <article-meta>
                              <author-notes>
                                <fn fn-type="%s">
                                  <p>foobar</p>
                                </fn>
                              </author-notes>
                            </article-meta>
                          </front>
                        </article>
                     """ % fn_type
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_disallowed_fn_types(self):
        """An unknown @fn-type is rejected."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <author-notes>
                            <fn fn-type="invalid">
                              <p>foobar</p>
                            </fn>
                          </author-notes>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class ArticleAttributesTests(PhaseBasedTestCase):
    """Tests for the root article element.

    The root element must carry @article-type (from a closed list),
    @xml:lang, @dtd-version and @specific-use (the SPS version).  Each
    negative test below removes or corrupts exactly one attribute, so a
    failure pinpoints the rule at fault.
    """
    sch_phase = 'phase.article-attrs'

    def test_allowed_article_types(self):
        """Every value of the closed @article-type list is accepted."""
        for art_type in ['addendum', 'research-article', 'review-article',
                'letter', 'article-commentary', 'brief-report', 'rapid-communication',
                'oration', 'discussion', 'editorial', 'interview', 'correction',
                'guidelines', 'other', 'obituary', 'case-report', 'book-review',
                'reply', 'retraction', 'partial-retraction', 'clinical-trial',
                'announcement', 'calendar', 'in-brief', 'book-received', 'news',
                'reprint', 'meeting-report', 'abstract', 'product-review',
                'dissertation', 'translation', 'data-article']:

            sample = u"""<article article-type="%s" xml:lang="en" dtd-version="1.0" specific-use="sps-1.9">
                        </article>
                     """ % art_type
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_disallowed_article_type(self):
        """An unknown @article-type is rejected.

        All other required attributes (including xml:lang, which the
        original sample omitted) are present and valid, so the failure
        can only be caused by the invalid @article-type.
        """
        sample = u"""<article article-type="invalid" xml:lang="en" dtd-version="1.0" specific-use="sps-1.9">
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_article_type(self):
        """@article-type is required."""
        sample = u"""<article xml:lang="en" dtd-version="1.0" specific-use="sps-1.9">
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_xmllang(self):
        """@xml:lang is required."""
        sample = u"""<article article-type="research-article" dtd-version="1.0" specific-use="sps-1.9">
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_dtdversion(self):
        """@dtd-version is required."""
        sample = u"""<article article-type="research-article" xml:lang="en" specific-use="sps-1.9">
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_sps_version(self):
        """@specific-use (the SPS version) is required."""
        sample = u"""<article article-type="research-article" dtd-version="1.0" xml:lang="en">
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_invalid_sps_version(self):
        """An SPS version other than the one under test is rejected."""
        sample = u"""<article article-type="research-article" dtd-version="1.0" xml:lang="en" specific-use="sps-1.0">
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class NamedContentTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/aff/addr-line/named-content elements.

    @content-type is required and restricted to 'city' or 'state'.
    """
    sch_phase = 'phase.named-content_attrs'

    def test_missing_contenttype(self):
        """@content-type is required on named-content."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <addr-line>
                              <named-content>Foo</named-content>
                            </addr-line>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_allowed_contenttype(self):
        """Both allowed @content-type values are accepted."""
        for ctype in ['city', 'state']:
            sample = u"""<article>
                          <front>
                            <article-meta>
                              <aff>
                                <addr-line>
                                  <named-content content-type="%s">Foo</named-content>
                                </addr-line>
                              </aff>
                            </article-meta>
                          </front>
                        </article>
                     """ % ctype
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_disallowed_contenttype(self):
        """An unknown @content-type is rejected."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <addr-line>
                              <named-content content-type="invalid">Foo</named-content>
                            </addr-line>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class MonthTests(PhaseBasedTestCase):
    """Tests for //month elements.

    <month> must be an integer in 1..12 (zero-padded form accepted) and
    may appear at most once per element-citation.
    """
    sch_phase = 'phase.month'

    def test_range_1_12(self):
        """Unpadded month numbers 1-12 are accepted."""
        for month in range(1, 13):
            sample = u"""<article>
                          <front>
                            <article-meta>
                              <pub-date>
                                <month>%s</month>
                              </pub-date>
                            </article-meta>
                          </front>
                        </article>
                     """ % month
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_range_01_12(self):
        """Zero-padded month numbers 01-12 are accepted."""
        for month in range(1, 13):
            sample = u"""<article>
                          <front>
                            <article-meta>
                              <pub-date>
                                <month>%02d</month>
                              </pub-date>
                            </article-meta>
                          </front>
                        </article>
                     """ % month
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_out_of_range(self):
        """Values outside 1..12 (boundaries 0 and 13) are rejected."""
        for month in [0, 13]:
            sample = u"""<article>
                          <front>
                            <article-meta>
                              <pub-date>
                                <month>%s</month>
                              </pub-date>
                            </article-meta>
                          </front>
                        </article>
                     """ % month
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertFalse(self._run_validation(sample))

    def test_must_be_integer(self):
        """Month names (e.g. 'January') are rejected; only integers are valid."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <pub-date>
                            <month>January</month>
                          </pub-date>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_is_present_in_elementcitation(self):
        """A single <month> inside element-citation is accepted."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                              <month>02</month>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_is_present_twice_in_elementcitation(self):
        """<month> may occur at most once per element-citation."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                              <month>02</month>
                              <month>02</month>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_is_absent_in_elementcitation(self):
        """<month> is optional inside element-citation."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))
class SizeTests(PhaseBasedTestCase):
    """Tests for:
      - article/front/article-meta/product/size
      - article/back/ref-list/ref/element-citation/size

    <size> is optional, may occur at most once per parent, and when
    present must carry @units="pages".
    """
    sch_phase = 'phase.size'

    def test_in_element_citation(self):
        """A single <size units="pages"> in element-citation is accepted."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                              <size units="pages">2</size>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_in_product(self):
        """A single <size units="pages"> in product is accepted."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <product>
                            <size units="pages">2</size>
                          </product>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_missing_in_element_citation(self):
        """<size> is optional in element-citation."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_missing_in_product(self):
        """<size> is optional in product."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <product>
                          </product>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_twice_in_element_citation(self):
        """<size> may occur at most once per element-citation."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                              <size units="pages">2</size>
                              <size units="pages">2</size>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_twice_in_product(self):
        """<size> may occur at most once per product."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <product>
                            <size units="pages">2</size>
                            <size units="pages">2</size>
                          </product>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_units_in_product(self):
        """@units is required on <size> (product context)."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <product>
                            <size>2</size>
                          </product>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_units_in_element_citation(self):
        """@units is required on <size> (element-citation context)."""
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation>
                              <size>2</size>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_invalid_units_value(self):
        """@units accepts only the value 'pages'."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <product>
                            <size units="invalid">2</size>
                          </product>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class ListTests(PhaseBasedTestCase):
    """Tests for list elements.

    @list-type is required on every <list>, including nested ones, and
    is restricted to a closed set of values.
    """
    sch_phase = 'phase.list'

    def test_allowed_list_type(self):
        """Every allowed @list-type is accepted, on outer and nested lists."""
        for list_type in ['order', 'bullet', 'alpha-lower', 'alpha-upper',
                          'roman-lower', 'roman-upper', 'simple']:
            sample = u"""<article>
                          <body>
                            <sec>
                              <p>
                                <list list-type="%s">
                                  <title>Lista Númerica</title>
                                  <list-item>
                                    <p>Nullam gravida tellus eget condimentum egestas.</p>
                                  </list-item>
                                  <list-item>
                                    <list list-type="%s">
                                      <list-item>
                                        <p>Curabitur luctus lorem ac feugiat pretium.</p>
                                      </list-item>
                                    </list>
                                  </list-item>
                                  <list-item>
                                    <p>Donec pulvinar odio ut enim lobortis, eu dignissim elit accumsan.</p>
                                  </list-item>
                                </list>
                              </p>
                            </sec>
                          </body>
                        </article>
                     """ % (list_type, list_type)
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_disallowed_list_type(self):
        """An unknown @list-type is rejected (here on both nesting levels)."""
        sample = u"""<article>
                      <body>
                        <sec>
                          <p>
                            <list list-type="invalid">
                              <title>Lista Númerica</title>
                              <list-item>
                                <p>Nullam gravida tellus eget condimentum egestas.</p>
                              </list-item>
                              <list-item>
                                <list list-type="invalid">
                                  <list-item>
                                    <p>Curabitur luctus lorem ac feugiat pretium.</p>
                                  </list-item>
                                </list>
                              </list-item>
                              <list-item>
                                <p>Donec pulvinar odio ut enim lobortis, eu dignissim elit accumsan.</p>
                              </list-item>
                            </list>
                          </p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_disallowed_sub_list_type(self):
        """An unknown @list-type on only the nested list is still rejected."""
        sample = u"""<article>
                      <body>
                        <sec>
                          <p>
                            <list list-type="order">
                              <title>Lista Númerica</title>
                              <list-item>
                                <p>Nullam gravida tellus eget condimentum egestas.</p>
                              </list-item>
                              <list-item>
                                <list list-type="invalid">
                                  <list-item>
                                    <p>Curabitur luctus lorem ac feugiat pretium.</p>
                                  </list-item>
                                </list>
                              </list-item>
                              <list-item>
                                <p>Donec pulvinar odio ut enim lobortis, eu dignissim elit accumsan.</p>
                              </list-item>
                            </list>
                          </p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_list_type(self):
        """@list-type is required on the outer list."""
        sample = u"""<article>
                      <body>
                        <sec>
                          <p>
                            <list>
                              <title>Lista Númerica</title>
                              <list-item>
                                <p>Nullam gravida tellus eget condimentum egestas.</p>
                              </list-item>
                              <list-item>
                                <list>
                                  <list-item>
                                    <p>Curabitur luctus lorem ac feugiat pretium.</p>
                                  </list-item>
                                </list>
                              </list-item>
                              <list-item>
                                <p>Donec pulvinar odio ut enim lobortis, eu dignissim elit accumsan.</p>
                              </list-item>
                            </list>
                          </p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_sub_list_type(self):
        """@list-type is required on nested lists as well."""
        sample = u"""<article>
                      <body>
                        <sec>
                          <p>
                            <list list-type="order">
                              <title>Lista Númerica</title>
                              <list-item>
                                <p>Nullam gravida tellus eget condimentum egestas.</p>
                              </list-item>
                              <list-item>
                                <list>
                                  <list-item>
                                    <p>Curabitur luctus lorem ac feugiat pretium.</p>
                                  </list-item>
                                </list>
                              </list-item>
                              <list-item>
                                <p>Donec pulvinar odio ut enim lobortis, eu dignissim elit accumsan.</p>
                              </list-item>
                            </list>
                          </p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class MediaTests(PhaseBasedTestCase):
    """Tests for article/body//p/media elements.

    @mimetype, @mime-subtype and @xlink:href are all required.
    """
    sch_phase = 'phase.media_attributes'

    def test_missing_mimetype(self):
        """@mimetype is required."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <p><media mime-subtype="mp4" xlink:href="1234-5678-rctb-45-05-0110-m01.mp4"/></p>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_mime_subtype(self):
        """@mime-subtype is required."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <p><media mimetype="video" xlink:href="1234-5678-rctb-45-05-0110-m01.mp4"/></p>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_href(self):
        """@xlink:href is required."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <p><media mimetype="video" mime-subtype="mp4"/></p>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_all_present(self):
        """With all three attributes present the element is valid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <p><media mimetype="video" mime-subtype="mp4" xlink:href="1234-5678-rctb-45-05-0110-m01.mp4"/></p>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))
class ExtLinkTests(PhaseBasedTestCase):
    """Tests for ext-link elements.

    @ext-link-type and @xlink:href are required; the href must be an
    absolute URI with an allowed scheme (file: is explicitly banned).
    """
    sch_phase = 'phase.ext-link'

    def test_complete(self):
        """A fully-attributed ext-link is valid."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <sec>
                          <p>Neque porro quisquam est <ext-link ext-link-type="uri" xlink:href="http://www.scielo.org">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_allowed_extlinktype(self):
        """Every allowed @ext-link-type value is accepted."""
        for link_type in ['uri', 'clinical-trial' ]:
            sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                          <body>
                            <sec>
                              <p>Neque porro quisquam est <ext-link ext-link-type="%s" xlink:href="http://www.scielo.org">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
                            </sec>
                          </body>
                        </article>
                     """ % link_type
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_disallowed_extlinktype(self):
        """An unknown @ext-link-type is rejected."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <sec>
                          <p>Neque porro quisquam est <ext-link ext-link-type="invalid" xlink:href="http://www.scielo.org">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_extlinktype(self):
        """@ext-link-type is required."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <sec>
                          <p>Neque porro quisquam est <ext-link xlink:href="http://www.scielo.org">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_xlinkhref(self):
        """@xlink:href is required."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <sec>
                          <p>Neque porro quisquam est <ext-link ext-link-type="uri">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_uri_without_scheme(self):
        """A scheme-relative href (no http://, ftp://, ...) is rejected."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <sec>
                          <p>Neque porro quisquam est <ext-link ext-link-type="uri" xlink:href="www.scielo.org">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_file_scheme_is_not_allowed(self):
        """The file: scheme is explicitly banned."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <body>
                        <sec>
                          <p>Neque porro quisquam est <ext-link ext-link-type="uri" xlink:href="file:///etc/passwd">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
                        </sec>
                      </body>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_commonly_used_uri_schemes(self):
        """ftp, http and urn schemes are all accepted."""
        for uri in ['ftp://ftp.scielo.org', 'http://www.scielo.org', 'urn:foo:bar']:
            sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                          <body>
                            <sec>
                              <p>Neque porro quisquam est <ext-link ext-link-type="uri" xlink:href="{uri}">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
                            </sec>
                          </body>
                        </article>
                     """.format(uri=uri)
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))
class SubArticleAttributesTests(PhaseBasedTestCase):
    """Tests for sub-article element.

    A <sub-article> must carry @article-type (a closed list narrower
    than the root article's), @xml:lang and @id.
    """
    sch_phase = 'phase.sub-article-attrs'

    def test_allowed_article_types(self):
        """Every allowed sub-article @article-type is accepted."""
        for art_type in ['abstract', 'letter', 'reply', 'translation']:
            sample = u"""<article article-type="research-article" xml:lang="en" dtd-version="1.0" specific-use="sps-1.9">
                          <sub-article article-type="%s" xml:lang="pt" id="sa1"></sub-article>
                        </article>
                     """ % art_type
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_disallowed_article_type(self):
        """An unknown sub-article @article-type is rejected."""
        sample = u"""<article article-type="research-article" dtd-version="1.0" specific-use="sps-1.9">
                      <sub-article article-type="invalid" xml:lang="pt" id="trans_pt"></sub-article>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_article_type(self):
        """@article-type is required on sub-article."""
        sample = u"""<article article-type="research-article" dtd-version="1.0" specific-use="sps-1.9">
                      <sub-article xml:lang="pt" id="trans_pt"></sub-article>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_xmllang(self):
        """@xml:lang is required on sub-article."""
        sample = u"""<article article-type="research-article" dtd-version="1.0" specific-use="sps-1.9">
                      <sub-article article-type="translation" id="trans_pt"></sub-article>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_id(self):
        """@id is required on sub-article."""
        sample = u"""<article article-type="research-article" dtd-version="1.0" specific-use="sps-1.9">
                      <sub-article article-type="translation" xml:lang="pt"></sub-article>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class ResponseAttributesTests(PhaseBasedTestCase):
    """Tests for the response element.

    A <response> must carry @response-type (one of addendum, discussion,
    reply), @xml:lang and @id.  Each negative test removes or corrupts
    exactly one attribute, so a failure pinpoints the rule at fault.
    """
    sch_phase = 'phase.response-attrs'

    def test_allowed_response_types(self):
        """Every value of the closed @response-type list is accepted."""
        # 'response_type' rather than 'type' to avoid shadowing the builtin.
        for response_type in ['addendum', 'discussion', 'reply']:
            sample = u"""<article>
                          <response response-type="%s" xml:lang="pt" id="r1"></response>
                        </article>
                     """ % response_type
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_disallowed_response_type(self):
        """An unknown @response-type is rejected."""
        sample = u"""<article>
                      <response response-type="invalid" xml:lang="pt" id="r1"></response>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_response_type(self):
        """@response-type is required."""
        sample = u"""<article>
                      <response xml:lang="pt" id="r1"></response>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_xmllang(self):
        """@xml:lang is required.

        Uses a valid @response-type ('reply') so the assertion can only
        fail because of the missing @xml:lang — the original sample used
        an invalid type, which masked what was being tested.
        """
        sample = u"""<article>
                      <response response-type="reply" id="r1"></response>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_id(self):
        """@id is required.

        Uses a valid @response-type ('reply') so the assertion can only
        fail because of the missing @id.
        """
        sample = u"""<article>
                      <response response-type="reply" xml:lang="pt"></response>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class ResponseReplyAttributeTests(PhaseBasedTestCase):
    """Tests for response[@response-type='reply'] elements.

    A reply's front-stub/related-article must carry @vol plus either
    @page or @elocation-id.
    """
    sch_phase = 'phase.response-reply-type'

    def test_reply_type_demands_an_article_type(self):
        """ the article-type of value `article-commentary` is required
        """
        sample = u"""<article article-type="article-commentary">
                      <response response-type="reply" xml:lang="pt" id="r1">
                        <front-stub>
                          <related-article related-article-type="commentary-article" id="ra1" vol="109" page="87-92"/>
                        </front-stub>
                      </response>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_reply_with_article_types_different_than_article_commentary(self):
        """ anything different from `article-commentary` is now valid (03/2018)
        """
        sample = u"""<article article-type="research-article">
                      <response response-type="reply" xml:lang="pt" id="r1">
                        <front-stub>
                          <related-article related-article-type="commentary-article" id="ra1" vol="109" page="87-92"/>
                        </front-stub>
                      </response>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_reply_type_missing_related_article(self):
        """ the article-type of value `article-commentary` is not required anymore
        """
        sample = u"""<article article-type="article-commentary">
                      <response response-type="reply" xml:lang="pt" id="r1">
                        <front-stub>
                        </front-stub>
                      </response>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_related_article_missing_vol(self):
        """@vol is required on the reply's related-article."""
        sample = u"""<article article-type="article-commentary">
                      <response response-type="reply" xml:lang="pt" id="r1">
                        <front-stub>
                          <related-article related-article-type="commentary-article" id="ra1" page="87-92"/>
                        </front-stub>
                      </response>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_related_article_missing_page(self):
        """@page is not needed when @elocation-id is present."""
        sample = u"""<article article-type="article-commentary">
                      <response response-type="reply" xml:lang="pt" id="r1">
                        <front-stub>
                          <related-article related-article-type="commentary-article" id="ra1" vol="109" elocation-id="1q2w"/>
                        </front-stub>
                      </response>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_related_article_missing_elocationid(self):
        """@elocation-id is not needed when @page is present."""
        sample = u"""<article article-type="article-commentary">
                      <response response-type="reply" xml:lang="pt" id="r1">
                        <front-stub>
                          <related-article related-article-type="commentary-article" id="ra1" vol="109" page="87-92"/>
                        </front-stub>
                      </response>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_related_article_missing_page_and_elocationid(self):
        """At least one of @page / @elocation-id must be present."""
        sample = u"""<article article-type="article-commentary">
                      <response response-type="reply" xml:lang="pt" id="r1">
                        <front-stub>
                          <related-article related-article-type="commentary-article" id="ra1" vol="109"/>
                        </front-stub>
                      </response>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class RelatedArticleTypesTests(PhaseBasedTestCase):
    """Tests for related-article element.

    @related-article-type and @id are always required; on
    corrected-article relations @ext-link-type (only 'doi' is allowed)
    and @xlink:href are required as well.
    """
    sch_phase = 'phase.related-article-attrs'

    def test_allowed_related_article_types(self):
        """Every value of the closed @related-article-type list is accepted."""
        # 'rel_type' rather than 'type' to avoid shadowing the builtin.
        for rel_type in ['corrected-article', 'commentary-article',
                         'letter', 'partial-retraction', 'retracted-article']:
            sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                          <front>
                            <article-meta>
                              <related-article related-article-type="%s" id="01" ext-link-type="doi" xlink:href="foo"/>
                            </article-meta>
                          </front>
                        </article>
                     """ % rel_type
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_disallowed_related_article_type(self):
        """An unknown @related-article-type is rejected."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <related-article related-article-type="invalid" id="01"/>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_id(self):
        """@id is required on related-article."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <related-article related-article-type="corrected-article"/>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_related_article_type(self):
        """@related-article-type is required."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <related-article id="01"/>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_allowed_ext_link_types(self):
        """Every allowed @ext-link-type value ('doi') is accepted."""
        for link_type in ['doi']:
            sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                          <front>
                            <article-meta>
                              <related-article related-article-type="corrected-article" id="01" ext-link-type="%s" xlink:href="foo"/>
                            </article-meta>
                          </front>
                        </article>
                     """ % link_type
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertTrue(self._run_validation(sample))

    def test_invalid_ext_link_types(self):
        """An unknown @ext-link-type is rejected."""
        for link_type in ['invalid',]:
            sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                          <front>
                            <article-meta>
                              <related-article related-article-type="corrected-article" id="01" ext-link-type="%s" xlink:href="foo"/>
                            </article-meta>
                          </front>
                        </article>
                     """ % link_type
            sample = io.BytesIO(sample.encode('utf-8'))

            self.assertFalse(self._run_validation(sample))

    def test_missing_ext_link_type_on_corrected_articles(self):
        """@ext-link-type is required on corrected-article relations."""
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <front>
                        <article-meta>
                          <related-article related-article-type="corrected-article" id="01" xlink:href="foo"/>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_missing_xlinkhref_on_corrected_articles(self):
        """@xlink:href is required on corrected-article relations.

        Uses the valid @ext-link-type 'doi' so the assertion can only
        fail because of the missing @xlink:href — the original sample
        used the invalid value 'corrected-article', which masked what
        was being tested.
        """
        sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
                      <front>
                        <article-meta>
                          <related-article related-article-type="corrected-article" id="01" ext-link-type="doi"/>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class CorrectionTests(PhaseBasedTestCase):
    """Tests for article[@article-type="correction"] element.

    A correction must reference the article it corrects, and only
    corrections may do so with @related-article-type='corrected-article'.
    """
    sch_phase = 'phase.correction'

    def test_expected_elements(self):
        """A correction with a corrected-article relation is valid."""
        sample = u"""<article article-type="correction">
                      <front>
                        <article-meta>
                          <related-article related-article-type="corrected-article" id="01"/>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_missing_related_article(self):
        """ must have a related-article[@related-article-type='corrected-article']
        element.
        """
        sample = u"""<article article-type="correction">
                      <front>
                        <article-meta>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_article_type_must_be_correction(self):
        """A corrected-article relation requires article-type='correction'."""
        sample = u"""<article article-type="research-article">
                      <front>
                        <article-meta>
                          <related-article related-article-type="corrected-article" id="01"/>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class InBriefTests(PhaseBasedTestCase):
    """Tests for article[@article-type="in-brief"] element.

    An in-brief must reference its full article via
    related-article[@related-article-type='article-reference'], and only
    in-brief articles may carry that relation.
    """
    sch_phase = 'phase.in-brief'

    def test_expected_elements(self):
        """An in-brief with an article-reference relation is valid."""
        sample = u"""<article article-type="in-brief">
                      <front>
                        <article-meta>
                          <related-article related-article-type="article-reference" id="01"/>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_missing_related_article(self):
        """ must have a related-article[@related-article-type='article-reference']
        element.
        """
        sample = u"""<article article-type="in-brief">
                      <front>
                        <article-meta>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_article_type_must_be_in_brief(self):
        """An article-reference relation requires article-type='in-brief'."""
        sample = u"""<article article-type="research-article">
                      <front>
                        <article-meta>
                          <related-article related-article-type="article-reference" id="01"/>
                        </article-meta>
                      </front>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class FundingGroupTests(PhaseBasedTestCase):
    """Tests for article/front/article-meta/funding-group elements.

    When a financial-disclosure footnote exists in back/fn-group, the
    funding-group must contain both an award-group and a
    funding-statement.
    """
    sch_phase = 'phase.funding-group'

    def test_funding_statement_when_fn_is_present_missing_award_group(self):
        """funding-statement alone (no award-group) is not enough."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <funding-group>
                            <funding-statement>This study was supported by FAPEST #12345</funding-statement>
                          </funding-group>
                        </article-meta>
                      </front>
                      <back>
                        <fn-group>
                          <fn id="fn01" fn-type="financial-disclosure">
                            <p>This study was supported by FAPEST #12345</p>
                          </fn>
                        </fn-group>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))

    def test_funding_statement_when_fn_is_present(self):
        """award-group plus funding-statement satisfies the disclosure fn."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <funding-group>
                            <award-group>
                              <funding-source>FAPEST</funding-source>
                              <award-id>12345</award-id>
                            </award-group>
                            <funding-statement>This study was supported by FAPEST #12345</funding-statement>
                          </funding-group>
                        </article-meta>
                      </front>
                      <back>
                        <fn-group>
                          <fn id="fn01" fn-type="financial-disclosure">
                            <p>This study was supported by FAPEST #12345</p>
                          </fn>
                        </fn-group>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertTrue(self._run_validation(sample))

    def test_missing_funding_statement_when_fn_is_present(self):
        """award-group alone (no funding-statement) is not enough."""
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <funding-group>
                            <award-group>
                              <funding-source>FAPEST</funding-source>
                              <award-id>12345</award-id>
                            </award-group>
                          </funding-group>
                        </article-meta>
                      </front>
                      <back>
                        <fn-group>
                          <fn id="fn01" fn-type="financial-disclosure">
                            <p>This study was supported by FAPEST #12345</p>
                          </fn>
                        </fn-group>
                      </back>
                    </article>
                 """
        sample = io.BytesIO(sample.encode('utf-8'))

        self.assertFalse(self._run_validation(sample))
class AffCountryTests(PhaseBasedTestCase):
    """ //aff/country/@country is required.

    See: https://github.com/scieloorg/packtools/issues/44
    """
    sch_phase = 'phase.aff_country'

    def test_country_attribute_is_present(self):
        # <country> carrying a @country attribute is valid.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <country country="BR">Brasil</country>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_country_attribute_is_absent(self):
        # Missing @country attribute must be rejected.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <country>Brasil</country>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_country_attribute_value_is_not_validated(self):
        # Only presence is checked: "XZ" is not a real ISO code but passes.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <country country="XZ">Brasil</country>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_country_cannot_be_empty(self):
        # An empty <country> element must be rejected even when the
        # attribute is present.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <country country="XZ"></country>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_country_cannot_be_empty_closed_element(self):
        # Same as above, with the self-closing element form.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <country country="XZ"/>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class RefTests(PhaseBasedTestCase):
    """Tests for article/back/ref-list/ref element.

    Each <ref> must carry exactly one non-empty <mixed-citation> and
    exactly one <element-citation>.
    """
    sch_phase = 'phase.ref'

    def test_element_and_mixed_citation_elements(self):
        # One mixed-citation plus one element-citation: the valid shape.
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <mixed-citation>Aires M, Paz AA, Perosa CT. Situação de saúde e grau de dependência de pessoas idosas institucionalizadas. <italic>Rev Gaucha Enferm.</italic> 2009;30(3):192-9.</mixed-citation>
                            <element-citation publication-type="journal">
                              <person-group person-group-type="author">
                                <name>
                                  <surname>Aires</surname>
                                  <given-names>M</given-names>
                                </name>
                                <name>
                                  <surname>Paz</surname>
                                  <given-names>AA</given-names>
                                </name>
                                <name>
                                  <surname>Perosa</surname>
                                  <given-names>CT</given-names>
                                </name>
                              </person-group>
                              <article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
                              <source>Rev Gaucha Enferm</source>
                              <year>2009</year>
                              <volume>30</volume>
                              <issue>3</issue>
                              <fpage>192</fpage>
                              <lpage>199</lpage>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_element_citation_cannot_be_present_twice(self):
        # Two <element-citation> children in a single <ref> must fail.
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <mixed-citation>Aires M, Paz AA, Perosa CT. Situação de saúde e grau de dependência de pessoas idosas institucionalizadas. <italic>Rev Gaucha Enferm.</italic> 2009;30(3):192-9.</mixed-citation>
                            <element-citation publication-type="journal">
                              <person-group person-group-type="author">
                                <name>
                                  <surname>Aires</surname>
                                  <given-names>M</given-names>
                                </name>
                                <name>
                                  <surname>Paz</surname>
                                  <given-names>AA</given-names>
                                </name>
                                <name>
                                  <surname>Perosa</surname>
                                  <given-names>CT</given-names>
                                </name>
                              </person-group>
                              <article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
                              <source>Rev Gaucha Enferm</source>
                              <year>2009</year>
                              <volume>30</volume>
                              <issue>3</issue>
                              <fpage>192</fpage>
                              <lpage>199</lpage>
                            </element-citation>
                            <element-citation publication-type="journal">
                              <person-group person-group-type="author">
                                <name>
                                  <surname>Aires</surname>
                                  <given-names>M</given-names>
                                </name>
                                <name>
                                  <surname>Paz</surname>
                                  <given-names>AA</given-names>
                                </name>
                                <name>
                                  <surname>Perosa</surname>
                                  <given-names>CT</given-names>
                                </name>
                              </person-group>
                              <article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
                              <source>Rev Gaucha Enferm</source>
                              <year>2009</year>
                              <volume>30</volume>
                              <issue>3</issue>
                              <fpage>192</fpage>
                              <lpage>199</lpage>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_missing_element_citation(self):
        # mixed-citation without its element-citation counterpart: invalid.
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <mixed-citation>Aires M, Paz AA, Perosa CT. Situação de saúde e grau de dependência de pessoas idosas institucionalizadas. <italic>Rev Gaucha Enferm.</italic> 2009;30(3):192-9.</mixed-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_missing_mixed_citation(self):
        # element-citation without a mixed-citation: invalid.
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <element-citation publication-type="journal">
                              <person-group person-group-type="author">
                                <name>
                                  <surname>Aires</surname>
                                  <given-names>M</given-names>
                                </name>
                                <name>
                                  <surname>Paz</surname>
                                  <given-names>AA</given-names>
                                </name>
                                <name>
                                  <surname>Perosa</surname>
                                  <given-names>CT</given-names>
                                </name>
                              </person-group>
                              <article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
                              <source>Rev Gaucha Enferm</source>
                              <year>2009</year>
                              <volume>30</volume>
                              <issue>3</issue>
                              <fpage>192</fpage>
                              <lpage>199</lpage>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_mixed_citation_cannot_be_empty(self):
        # An empty <mixed-citation> must be rejected.
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <mixed-citation></mixed-citation>
                            <element-citation publication-type="journal">
                              <person-group person-group-type="author">
                                <name>
                                  <surname>Aires</surname>
                                  <given-names>M</given-names>
                                </name>
                                <name>
                                  <surname>Paz</surname>
                                  <given-names>AA</given-names>
                                </name>
                                <name>
                                  <surname>Perosa</surname>
                                  <given-names>CT</given-names>
                                </name>
                              </person-group>
                              <article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
                              <source>Rev Gaucha Enferm</source>
                              <year>2009</year>
                              <volume>30</volume>
                              <issue>3</issue>
                              <fpage>192</fpage>
                              <lpage>199</lpage>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class ContribIdTests(PhaseBasedTestCase):
    """Tests for contrib-id element.

    The phase validates @contrib-id-type on <contrib-id>: the attribute
    must be present and hold one of the known registry names
    (lattes, orcid, researchid, scopus).
    """
    sch_phase = 'phase.contrib-id'

    def test_allowed_contrib_id_type_attrs(self):
        # Every whitelisted registry name must validate.
        for type in ['lattes', 'orcid', 'researchid', 'scopus']:
            sample = u"""<article>
                          <front>
                            <article-meta>
                              <contrib-group>
                                <contrib-id contrib-id-type="%s">some id</contrib-id>
                                <aff>
                                  <institution content-type="original">
                                    Grupo de ...
                                  </institution>
                                  <institution content-type="orgname">
                                    Instituto de Matematica e Estatistica
                                  </institution>
                                </aff>
                              </contrib-group>
                            </article-meta>
                          </front>
                        </article>
                        """ % type
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))

    def test_disallowed_contrib_id_type(self):
        # Renamed from ``test_disallowed_related_article_type``: this method
        # exercises @contrib-id-type, not @related-article-type; the old name
        # was a copy-paste leftover from the related-article tests.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <contrib-group>
                            <contrib-id contrib-id-type="invalid">some id</contrib-id>
                            <aff>
                              <institution content-type="original">
                                Grupo de ...
                              </institution>
                              <institution content-type="orgname">
                                Instituto de Matematica e Estatistica
                              </institution>
                            </aff>
                          </contrib-group>
                        </article-meta>
                      </front>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_missing_contrib_id_type(self):
        # The attribute is mandatory: a bare <contrib-id> must fail.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <contrib-group>
                            <contrib-id>some id</contrib-id>
                            <aff>
                              <institution content-type="original">
                                Grupo de ...
                              </institution>
                              <institution content-type="orgname">
                                Instituto de Matematica e Estatistica
                              </institution>
                            </aff>
                          </contrib-group>
                        </article-meta>
                      </front>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
class AffTests(PhaseBasedTestCase):
    """ /article//aff is required.

    NOTE(review): the docstring above is inherited from the original file,
    but the tests below only exercise the optionality of <country> inside
    <aff> for the main article and for sub-articles -- confirm the phase's
    actual scope against the schematron definition.
    """
    sch_phase = 'phase.aff'

    def test_country_is_present(self):
        # aff with a country element: valid.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <country country="BR">Brasil</country>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_country_is_absent(self):
        # In this phase an aff without <country> still validates.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                          </aff>
                        </article-meta>
                      </front>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_country_is_absent_in_subarticle(self):
        # Sub-articles of type abstract/letter/reply may omit <country>
        # in their front-stub affiliations.
        for typ in ['abstract', 'letter', 'reply']:
            sample = u"""<article>
                          <front>
                            <article-meta>
                              <aff>
                                <institution content-type="original">
                                  Grupo de ...
                                </institution>
                                <country country="BR">Brasil</country>
                              </aff>
                            </article-meta>
                          </front>
                          <sub-article article-type="{type}"
                                       xml:lang="en"
                                       id="s1">
                            <front-stub>
                              <article-categories>
                                <subj-group subj-group-type="heading">
                                  <subject>Artigos Originais</subject>
                                </subj-group>
                              </article-categories>
                              <aff>
                                <institution content-type="original">
                                  Grupo de ...
                                </institution>
                              </aff>
                            </front-stub>
                          </sub-article>
                        </article>
                        """.format(type=typ)
            sample = io.BytesIO(sample.encode('utf-8'))
            self.assertTrue(self._run_validation(sample))

    def test_country_is_absent_in_subarticle_type_translation(self):
        # Translation sub-articles may also omit <country>.
        sample = u"""<article>
                      <front>
                        <article-meta>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                            <country country="BR">Brasil</country>
                          </aff>
                        </article-meta>
                      </front>
                      <sub-article article-type="translation"
                                   xml:lang="en"
                                   id="s1">
                        <front-stub>
                          <article-categories>
                            <subj-group subj-group-type="heading">
                              <subject>Artigos Originais</subject>
                            </subj-group>
                          </article-categories>
                          <aff>
                            <institution content-type="original">
                              Grupo de ...
                            </institution>
                          </aff>
                        </front-stub>
                      </sub-article>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))
class SourceTests(PhaseBasedTestCase):
    """Tests for article//source element.

    The phase checks that <source> does not occur more than once inside
    <element-citation> or <product>.
    """
    sch_phase = 'phase.source'

    def test_source_is_absent_in_elementcitation(self):
        # NOTE(review): despite its name, this sample DOES contain a
        # <source>, making it a duplicate of the test below. The <source>
        # line was probably meant to be removed -- confirm whether the
        # schema accepts an element-citation without <source> before fixing.
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <mixed-citation>Aires M, Paz AA, Perosa CT. Situação de saúde e grau de dependência de pessoas idosas institucionalizadas. <italic>Rev Gaucha Enferm.</italic> 2009;30(3):192-9.</mixed-citation>
                            <element-citation publication-type="journal">
                              <person-group person-group-type="author">
                                <name>
                                  <surname>Aires</surname>
                                  <given-names>M</given-names>
                                </name>
                                <name>
                                  <surname>Paz</surname>
                                  <given-names>AA</given-names>
                                </name>
                                <name>
                                  <surname>Perosa</surname>
                                  <given-names>CT</given-names>
                                </name>
                              </person-group>
                              <article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
                              <source>Rev Gaucha Enferm</source>
                              <year>2009</year>
                              <volume>30</volume>
                              <issue>3</issue>
                              <fpage>192</fpage>
                              <lpage>199</lpage>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_source_is_present_in_elementcitation(self):
        # A single <source> inside element-citation is valid.
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <mixed-citation>Aires M, Paz AA, Perosa CT. Situação de saúde e grau de dependência de pessoas idosas institucionalizadas. <italic>Rev Gaucha Enferm.</italic> 2009;30(3):192-9.</mixed-citation>
                            <element-citation publication-type="journal">
                              <person-group person-group-type="author">
                                <name>
                                  <surname>Aires</surname>
                                  <given-names>M</given-names>
                                </name>
                                <name>
                                  <surname>Paz</surname>
                                  <given-names>AA</given-names>
                                </name>
                                <name>
                                  <surname>Perosa</surname>
                                  <given-names>CT</given-names>
                                </name>
                              </person-group>
                              <article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
                              <source>Rev Gaucha Enferm</source>
                              <year>2009</year>
                              <volume>30</volume>
                              <issue>3</issue>
                              <fpage>192</fpage>
                              <lpage>199</lpage>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_source_is_present_more_than_once_in_elementcitation(self):
        # Two <source> elements in one element-citation must fail.
        sample = u"""<article>
                      <back>
                        <ref-list>
                          <ref>
                            <mixed-citation>Aires M, Paz AA, Perosa CT. Situação de saúde e grau de dependência de pessoas idosas institucionalizadas. <italic>Rev Gaucha Enferm.</italic> 2009;30(3):192-9.</mixed-citation>
                            <element-citation publication-type="journal">
                              <person-group person-group-type="author">
                                <name>
                                  <surname>Aires</surname>
                                  <given-names>M</given-names>
                                </name>
                                <name>
                                  <surname>Paz</surname>
                                  <given-names>AA</given-names>
                                </name>
                                <name>
                                  <surname>Perosa</surname>
                                  <given-names>CT</given-names>
                                </name>
                              </person-group>
                              <article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
                              <source>Rev Gaucha Enferm</source>
                              <source>Rev Gaucha Foo</source>
                              <year>2009</year>
                              <volume>30</volume>
                              <issue>3</issue>
                              <fpage>192</fpage>
                              <lpage>199</lpage>
                            </element-citation>
                          </ref>
                        </ref-list>
                      </back>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))

    def test_source_is_absent_in_product(self):
        # <product> without a <source> validates in this phase.
        sample = u"""<article article-type="book-review">
                      <front>
                        <article-meta>
                          <product product-type="book">
                            <person-group person-group-type="author">
                              <name>
                                <surname>Sobrenome do autor</surname>
                                <given-names>Prenomes do autor</given-names>
                              </name>
                            </person-group>
                            <year>Ano de publicação</year>
                            <publisher-name>Nome da casa publicadora/Editora</publisher-name>
                            <publisher-loc>Local de publicação</publisher-loc>
                            <page-count count="total de paginação do livro (opcional)"/>
                            <isbn>ISBN do livro, se houver</isbn>
                            <inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
                          </product>
                        </article-meta>
                      </front>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_source_is_present_in_product(self):
        # A single <source> inside product is valid.
        sample = u"""<article article-type="book-review">
                      <front>
                        <article-meta>
                          <product product-type="book">
                            <person-group person-group-type="author">
                              <name>
                                <surname>Sobrenome do autor</surname>
                                <given-names>Prenomes do autor</given-names>
                              </name>
                            </person-group>
                            <source>Título do livro</source>
                            <year>Ano de publicação</year>
                            <publisher-name>Nome da casa publicadora/Editora</publisher-name>
                            <publisher-loc>Local de publicação</publisher-loc>
                            <page-count count="total de paginação do livro (opcional)"/>
                            <isbn>ISBN do livro, se houver</isbn>
                            <inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
                          </product>
                        </article-meta>
                      </front>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertTrue(self._run_validation(sample))

    def test_source_is_present_more_than_once_in_product(self):
        # Duplicated <source> inside product must fail.
        sample = u"""<article article-type="book-review">
                      <front>
                        <article-meta>
                          <product product-type="book">
                            <person-group person-group-type="author">
                              <name>
                                <surname>Sobrenome do autor</surname>
                                <given-names>Prenomes do autor</given-names>
                              </name>
                            </person-group>
                            <source>Título do livro</source>
                            <source>Título do livro</source>
                            <year>Ano de publicação</year>
                            <publisher-name>Nome da casa publicadora/Editora</publisher-name>
                            <publisher-loc>Local de publicação</publisher-loc>
                            <page-count count="total de paginação do livro (opcional)"/>
                            <isbn>ISBN do livro, se houver</isbn>
                            <inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
                          </product>
                        </article-meta>
                      </front>
                    </article>
                    """
        sample = io.BytesIO(sample.encode('utf-8'))
        self.assertFalse(self._run_validation(sample))
|
kchodorow/tensorflow | refs/heads/master | tensorflow/contrib/factorization/python/__init__.py | 182 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The python module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
valkyriesavage/gasustainability | refs/heads/master | django/contrib/localflavor/pl/forms.py | 273 | """
Polish-specific form helpers
"""
import re
from django.forms import ValidationError
from django.forms.fields import Select, RegexField
from django.utils.translation import ugettext_lazy as _
from django.core.validators import EMPTY_VALUES
class PLProvinceSelect(Select):
    """
    A select widget with list of Polish administrative provinces as choices.
    """
    def __init__(self, attrs=None):
        # Imported inside __init__, presumably so the choices table is only
        # loaded when the widget is actually instantiated.
        from pl_voivodeships import VOIVODESHIP_CHOICES
        super(PLProvinceSelect, self).__init__(attrs, choices=VOIVODESHIP_CHOICES)
class PLCountySelect(Select):
    """
    A select widget with list of Polish administrative units as choices.
    """
    def __init__(self, attrs=None):
        # Imported inside __init__, presumably so the choices table is only
        # loaded when the widget is actually instantiated.
        from pl_administrativeunits import ADMINISTRATIVE_UNIT_CHOICES
        super(PLCountySelect, self).__init__(attrs, choices=ADMINISTRATIVE_UNIT_CHOICES)
class PLPESELField(RegexField):
    """
    A form field that validates as Polish Identification Number (PESEL).

    Checks the following rules:
        * the length consist of 11 digits
        * has a valid checksum

    The algorithm is documented at http://en.wikipedia.org/wiki/PESEL.
    """
    default_error_messages = {
        'invalid': _(u'National Identification Number consists of 11 digits.'),
        'checksum': _(u'Wrong checksum for the National Identification Number.'),
    }

    def __init__(self, *args, **kwargs):
        super(PLPESELField, self).__init__(r'^\d{11}$',
            max_length=None, min_length=None, *args, **kwargs)

    def clean(self, value):
        # RegexField.clean rejects anything that is not exactly 11 digits,
        # so the checksum below always sees a well-formed string.
        super(PLPESELField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        if not self.has_valid_checksum(value):
            raise ValidationError(self.error_messages['checksum'])
        return u'%s' % value

    def has_valid_checksum(self, number):
        """
        Calculates a checksum with the provided algorithm.
        """
        weights = (1, 3, 7, 9, 1, 3, 7, 9, 1, 3, 1)
        # The PESEL is valid when the weighted digit sum (check digit
        # included, weight 1) is divisible by 10.
        total = sum(int(digit) * weight
                    for digit, weight in zip(number, weights))
        return total % 10 == 0
class PLNIPField(RegexField):
    """
    A form field that validates as Polish Tax Number (NIP).
    Valid forms are: XXX-XXX-YY-YY or XX-XX-YYY-YYY.

    Checksum algorithm based on documentation at
    http://wipos.p.lodz.pl/zylla/ut/nip-rego.html
    """
    default_error_messages = {
        'invalid': _(u'Enter a tax number field (NIP) in the format XXX-XXX-XX-XX or XX-XX-XXX-XXX.'),
        'checksum': _(u'Wrong checksum for the Tax Number (NIP).'),
    }

    def __init__(self, *args, **kwargs):
        super(PLNIPField, self).__init__(r'^\d{3}-\d{3}-\d{2}-\d{2}$|^\d{2}-\d{2}-\d{3}-\d{3}$',
            max_length=None, min_length=None, *args, **kwargs)

    def clean(self, value):
        # RegexField.clean enforces one of the two dashed layouts first.
        super(PLNIPField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Normalize to 10 bare digits before checksumming.
        value = re.sub("[-]", "", value)
        if not self.has_valid_checksum(value):
            raise ValidationError(self.error_messages['checksum'])
        return u'%s' % value

    def has_valid_checksum(self, number):
        """
        Calculates a checksum with the provided algorithm.

        Returns True when the weighted sum of the first nine digits,
        modulo 11, equals the tenth (check) digit.
        """
        multiple_table = (6, 5, 7, 2, 3, 4, 5, 6, 7)
        result = 0
        for i in range(len(number) - 1):
            result += int(number[i]) * multiple_table[i]
        result %= 11
        # A remainder of 10 can never match a single check digit, so the
        # comparison also rejects that case.  (Replaces the verbose
        # ``if ...: return True / else: return False`` idiom.)
        return result == int(number[-1])
class PLREGONField(RegexField):
    """
    A form field that validates its input is a REGON number.

    Valid regon number consists of 9 or 14 digits.
    See http://www.stat.gov.pl/bip/regon_ENG_HTML.htm for more information.
    """
    default_error_messages = {
        'invalid': _(u'National Business Register Number (REGON) consists of 9 or 14 digits.'),
        'checksum': _(u'Wrong checksum for the National Business Register Number (REGON).'),
    }

    def __init__(self, *args, **kwargs):
        super(PLREGONField, self).__init__(r'^\d{9,14}$',
            max_length=None, min_length=None, *args, **kwargs)

    def clean(self, value):
        super(PLREGONField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        if not self.has_valid_checksum(value):
            raise ValidationError(self.error_messages['checksum'])
        return u'%s' % value

    def has_valid_checksum(self, number):
        """
        Calculates a checksum with the provided algorithm.
        """
        # Three weight tables: 9-digit REGON, 14-digit REGON, and the
        # 9-digit prefix embedded in a 14-digit REGON (zero-padded so it
        # matches the 14-character input).  The -1 weight subtracts the
        # check digit itself from the weighted sum.
        weights = (
            (8, 9, 2, 3, 4, 5, 6, 7, -1),
            (2, 4, 8, 5, 0, 9, 7, 3, 6, 1, 2, 4, 8, -1),
            (8, 9, 2, 3, 4, 5, 6, 7, -1, 0, 0, 0, 0, 0),
        )
        # Keep only the tables whose length matches the input, so a
        # 14-digit number is checked against both applicable tables.
        weights = [table for table in weights if len(table) == len(number)]
        for table in weights:
            checksum = sum([int(n) * w for n, w in zip(number, table)])
            # NOTE(review): ``% 11 % 10`` appears to treat a remainder of 10
            # as 0, which matches the REGON rule that a modulus of 10 yields
            # check digit 0 -- confirm against the official specification.
            if checksum % 11 % 10:
                return False
        # False for lengths with no matching table (10-13 digits).
        return bool(weights)
class PLPostalCodeField(RegexField):
    """
    A form field that validates as Polish postal code.
    Valid code is XX-XXX where X is digit.
    """
    default_error_messages = {
        'invalid': _(u'Enter a postal code in the format XX-XXX.'),
    }
    def __init__(self, *args, **kwargs):
        # Pure pattern validation; no checksum exists for postal codes.
        super(PLPostalCodeField, self).__init__(r'^\d{2}-\d{3}$',
            max_length=None, min_length=None, *args, **kwargs)
|
BFriedland/UserDataBase-Heroku | refs/heads/master | venv/Lib/site-packages/pip/_vendor/html5lib/treewalkers/genshistream.py | 1730 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
    """Tree walker that translates a genshi markup stream into html5lib
    treewalker tokens."""

    def __iter__(self):
        # Buffer the events so we can pass in the following one
        previous = None
        for event in self.tree:
            if previous is not None:
                for token in self.tokens(previous, event):
                    yield token
            previous = event

        # Don't forget the final event!
        if previous is not None:
            for token in self.tokens(previous, None):
                yield token

    def tokens(self, event, next):
        """Yield html5lib tokens for a single genshi *event*.

        *next* is the event that follows (None at end of stream); it is
        needed to detect a START immediately closed by its own END, which
        determines the ``hasChildren`` flag of void elements.
        """
        kind, data, pos = event
        if kind == START:
            tag, attribs = data
            name = tag.localname
            namespace = tag.namespace
            # Normalize attribute names to (namespace, localname) pairs.
            converted_attribs = {}
            for k, v in attribs:
                if isinstance(k, QName):
                    converted_attribs[(k.namespace, k.localname)] = v
                else:
                    # Un-namespaced attributes arrive as plain strings.
                    converted_attribs[(None, k)] = v

            if namespace == namespaces["html"] and name in voidElements:
                # hasChildren is True unless the very next event ends
                # this same tag.
                for token in self.emptyTag(namespace, name, converted_attribs,
                                           not next or next[0] != END
                                           or next[1] != tag):
                    yield token
            else:
                yield self.startTag(namespace, name, converted_attribs)

        elif kind == END:
            name = data.localname
            namespace = data.namespace
            # Void elements were already emitted as empty tags above.
            if name not in voidElements:
                yield self.endTag(namespace, name)

        elif kind == COMMENT:
            yield self.comment(data)

        elif kind == TEXT:
            for token in self.text(data):
                yield token

        elif kind == DOCTYPE:
            yield self.doctype(*data)

        elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
                      START_CDATA, END_CDATA, PI):
            # Structural bookkeeping events carry no HTML content.
            # NOTE(review): DOCTYPE here is unreachable (handled above), and
            # XML_NAMESPACE is a namespace URI constant rather than an event
            # kind -- confirm against genshi.core before relying on either.
            pass

        else:
            yield self.unknown(kind)
|
h4ck3rm1k3/pip | refs/heads/develop | pip/_vendor/__init__.py | 252 | """
pip._vendor is for vendoring dependencies of pip to prevent needing pip to
depend on something external.
Files inside of pip._vendor should be considered immutable and should only be
updated to versions from upstream.
"""
from __future__ import absolute_import
import glob
import os.path
import sys
# Downstream redistributors which have debundled our dependencies should also
# patch this value to be true. This will trigger the additional patching
# to cause things like "six" to be available as pip.
DEBUNDLED = False
# By default, look in this directory for a bunch of .whl files which we will
# add to the beginning of sys.path before attempting to import anything. This
# is done to support downstream re-distributors like Debian and Fedora who
# wish to create their own Wheels for our dependencies to aid in debundling.
WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
# Define a small helper function to alias our vendored modules to the real ones
# if the vendored ones do not exist. This idea of this was taken from
# https://github.com/kennethreitz/requests/pull/2567.
def vendored(modulename):
    """Make ``<package>.<modulename>`` importable, falling back to the
    system-wide module when the vendored copy is missing.

    On fallback the real module is registered in ``sys.modules`` under the
    vendored name and attached as an attribute of this package, so both
    import styles keep working.
    """
    vendored_name = "{0}.{1}".format(__name__, modulename)

    try:
        __import__(vendored_name, globals(), locals(), level=0)
    except ImportError:
        # No vendored copy: import the top-level module and alias it.
        __import__(modulename, globals(), locals(), level=0)
        real_module = sys.modules[modulename]
        sys.modules[vendored_name] = real_module
        parent_name, attribute = vendored_name.rsplit(".", 1)
        setattr(sys.modules[parent_name], attribute, real_module)
# If we're operating in a debundled setup, then we want to go ahead and trigger
# the aliasing of our vendored libraries as well as looking for wheels to add
# to our sys.path. This will cause all of this code to be a no-op typically
# however downstream redistributors can enable it in a consistent way across
# all platforms.
if DEBUNDLED:
    # Actually look inside of WHEEL_DIR to find .whl files and add them to the
    # front of our sys.path.  This must happen before the vendored() calls
    # below so the aliases resolve against the wheels when present.
    sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path

    # Actually alias all of our vendored dependencies.
    vendored("cachecontrol")
    vendored("colorama")
    vendored("distlib")
    vendored("html5lib")
    vendored("lockfile")
    vendored("six")
    vendored("six.moves")
    vendored("six.moves.urllib")
    vendored("packaging")
    vendored("packaging.version")
    vendored("packaging.specifiers")
    vendored("pkg_resources")
    vendored("progress")
    vendored("retrying")
    vendored("requests")
|
jonathanf/portalfacturas-e | refs/heads/master | portalfacturas/__init__.py | 1 | #
# Copyright (C) 2015 Jonathan Finlay <jfinlay@riseup.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import res_partner
import controllers
import ftp_server
import history_log
import mail_message
import ir_attachment
|
rogallic/learnen.ru | refs/heads/master | parsDict/concatJSONs.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import listdir
from os.path import isfile, join
# Merge every thread's partial JSON dictionary into a single JSON object by
# stripping each part's outer braces and joining the bodies with commas.
with open('concatedDict-en.json', 'w') as w:
    w.write('{\n')
    counter = 0
    dn = 'tmpThreads'
    pref = 'parsWiki-1416645459-en'
    for fn in listdir(dn):
        # startswith replaces the manual fn[0:len(pref)] == pref slice.
        if fn.startswith(pref):
            if counter:
                # Separate the bodies of consecutive parts.
                w.write(',')
            counter += 1
            # join() (already imported) instead of dn + '/' + fn, so the
            # path is built portably.
            with open(join(dn, fn), 'r') as r:
                for ln in r:
                    # Skip each part's opening '{\n' line and its bare
                    # closing '}' (final line, no trailing newline).
                    if ln != '}' and ln != '{\n':
                        w.write(ln)
    w.write('}')
alanjw/GreenOpenERP-Win-X86 | refs/heads/7.0 | python/Lib/site-packages/win32/test/test_win32guistruct.py | 4 | import unittest
import win32gui
import win32gui_struct
import win32con
import array
import pythoncom
class TestBase(unittest.TestCase):
    """Shared helper: compare a dict against keyword-supplied fields."""

    def assertDictEquals(self, d, **kw):
        # Verify every expected field, remembering which keys we compared.
        seen = {}
        for key, expected in kw.iteritems():
            self.failUnlessEqual(expected, d[key],
                                 "'%s' doesn't match: %r != %r" % (key, expected, d[key]))
            seen[key] = True
        # Guard against typos: the checked keys must be exactly the
        # keyword arguments that were passed in.
        self.failUnlessEqual(sorted(seen.keys()), sorted(kw.keys()))
class TestMenuItemInfo(TestBase):
    """Round-trip tests for MENUITEMINFO packing/unpacking."""

    def _testPackUnpack(self, text):
        # Pack a MENUITEMINFO with known values, unpack it, and check every
        # field survives the round trip.  *text* may be None.
        vals = dict(fType=win32con.MFT_MENUBARBREAK,
                    fState=win32con.MFS_CHECKED,
                    wID=123,
                    hSubMenu=1234,
                    hbmpChecked=12345,
                    hbmpUnchecked=123456,
                    dwItemData=1234567,
                    text=text,
                    hbmpItem=321)
        mii, extras = win32gui_struct.PackMENUITEMINFO(**vals)
        fType, fState, wID, hSubMenu, hbmpChecked, hbmpUnchecked, \
            dwItemData, text, hbmpItem = win32gui_struct.UnpackMENUITEMINFO(mii)
        self.assertDictEquals(vals, fType=fType, fState=fState, wID=wID,
                              hSubMenu=hSubMenu, hbmpChecked=hbmpChecked,
                              hbmpUnchecked=hbmpUnchecked,
                              dwItemData=dwItemData, text=text,
                              hbmpItem=hbmpItem)

    def testPackUnpack(self):
        self._testPackUnpack("Hello")

    def testPackUnpackNone(self):
        # A None text pointer must round-trip too.
        self._testPackUnpack(None)

    def testEmptyMenuItemInfo(self):
        # All fields of an empty structure must unpack as zero.
        mii, extra = win32gui_struct.EmptyMENUITEMINFO()
        fType, fState, wID, hSubMenu, hbmpChecked, hbmpUnchecked, \
            dwItemData, text, hbmpItem = win32gui_struct.UnpackMENUITEMINFO(mii)
        self.failUnlessEqual(fType, 0)
        self.failUnlessEqual(fState, 0)
        self.failUnlessEqual(wID, 0)
        self.failUnlessEqual(hSubMenu, 0)
        self.failUnlessEqual(hbmpChecked, 0)
        self.failUnlessEqual(hbmpUnchecked, 0)
        self.failUnlessEqual(dwItemData, 0)
        self.failUnlessEqual(hbmpItem, 0)
        # it's not clear if UnpackMENUITEMINFO() should ignore cch, instead
        # assuming it is a buffer size rather than 'current length' - but it
        # never has (and this gives us every \0 in the string), and actually
        # helps us test the unicode/str semantics.
        self.failUnlessEqual(text, '\0' * len(text))
class TestMenuInfo(TestBase):
    """Round-trip and default-value tests for MENUINFO packing."""

    def testPackUnpack(self):
        # Pack known values, unpack, and verify every field survives.
        expected = dict(dwStyle=1, cyMax=2, hbrBack=3, dwContextHelpID=4,
                        dwMenuData=5)
        packed = win32gui_struct.PackMENUINFO(**expected)
        (dwStyle, cyMax, hbrBack,
         dwContextHelpID, dwMenuData) = win32gui_struct.UnpackMENUINFO(packed)
        self.assertDictEquals(expected, dwStyle=dwStyle, cyMax=cyMax,
                              hbrBack=hbrBack,
                              dwContextHelpID=dwContextHelpID,
                              dwMenuData=dwMenuData)

    def testEmptyMenuItemInfo(self):
        # An empty MENUINFO must unpack with every field zeroed.
        packed = win32gui_struct.EmptyMENUINFO()
        for field_value in win32gui_struct.UnpackMENUINFO(packed):
            self.failUnlessEqual(field_value, 0)
class TestTreeViewItem(TestBase):
    """Round-trip tests for TVITEM packing/unpacking."""

    def _testPackUnpack(self, text):
        # Pack a TVITEM with known values and verify the round trip.
        # *text* may be None.
        vals = dict(hitem=1, state=2, stateMask=3, text=text, image=4,
                    selimage=5, citems=6, param=7)
        ti, extra = win32gui_struct.PackTVITEM(**vals)
        hitem, state, stateMask, text, image, selimage, citems, param = \
            win32gui_struct.UnpackTVITEM(ti)
        self.assertDictEquals(vals, hitem=hitem, state=state,
                              stateMask=stateMask, text=text, image=image,
                              selimage=selimage, citems=citems, param=param)

    def testPackUnpack(self):
        self._testPackUnpack("Hello")

    def testPackUnpackNone(self):
        # A None text pointer must round-trip too.
        self._testPackUnpack(None)

    def testEmpty(self):
        # All fields of an empty TVITEM must unpack as zero / empty string.
        ti, extras = win32gui_struct.EmptyTVITEM(0)
        hitem, state, stateMask, text, image, selimage, citems, param = \
            win32gui_struct.UnpackTVITEM(ti)
        self.failUnlessEqual(hitem, 0)
        self.failUnlessEqual(state, 0)
        self.failUnlessEqual(stateMask, 0)
        self.failUnlessEqual(text, '')
        self.failUnlessEqual(image, 0)
        self.failUnlessEqual(selimage, 0)
        self.failUnlessEqual(citems, 0)
        self.failUnlessEqual(param, 0)
class TestListViewItem(TestBase):
    """Round-trip tests for LVITEM packing/unpacking."""

    def _testPackUnpack(self, text):
        # item/subItem are passed as None and must default to 0 on unpack.
        vals = dict(item=None, subItem=None, state=1, stateMask=2,
                    text=text, image=3, param=4, indent=5)
        ti, extra = win32gui_struct.PackLVITEM(**vals)
        item, subItem, state, stateMask, text, image, param, indent = \
            win32gui_struct.UnpackLVITEM(ti)
        # patch expected values.
        vals['item'] = 0
        vals['subItem'] = 0
        self.assertDictEquals(vals, item=item, subItem=subItem, state=state,
                              stateMask=stateMask, text=text, image=image,
                              param=param, indent=indent)

    def testPackUnpack(self):
        self._testPackUnpack("Hello")

    def testPackUnpackNone(self):
        # A None text pointer must round-trip too.
        self._testPackUnpack(None)

    def testEmpty(self):
        # The requested item/subItem indexes are preserved; all other
        # fields of an empty LVITEM unpack as zero / empty string.
        ti, extras = win32gui_struct.EmptyLVITEM(1, 2)
        item, subItem, state, stateMask, text, image, param, indent = \
            win32gui_struct.UnpackLVITEM(ti)
        self.failUnlessEqual(item, 1)
        self.failUnlessEqual(subItem, 2)
        self.failUnlessEqual(state, 0)
        self.failUnlessEqual(stateMask, 0)
        self.failUnlessEqual(text, '')
        self.failUnlessEqual(image, 0)
        self.failUnlessEqual(param, 0)
        self.failUnlessEqual(indent, 0)
class TestLVColumn(TestBase):
    """Round-trip tests for LVCOLUMN packing/unpacking."""

    def _testPackUnpack(self, text):
        # Pack an LVCOLUMN with known values and verify the round trip.
        # *text* may be None.
        vals = dict(fmt=1, cx=2, text=text, subItem=3, image=4, order=5)
        ti, extra = win32gui_struct.PackLVCOLUMN(**vals)
        fmt, cx, text, subItem, image, order = \
            win32gui_struct.UnpackLVCOLUMN(ti)
        self.assertDictEquals(vals, fmt=fmt, cx=cx, text=text, subItem=subItem,
                              image=image, order=order)

    def testPackUnpack(self):
        self._testPackUnpack("Hello")

    def testPackUnpackNone(self):
        self._testPackUnpack(None)

    def testEmpty(self):
        # All fields of an empty LVCOLUMN unpack as zero / empty string.
        ti, extras = win32gui_struct.EmptyLVCOLUMN()
        fmt, cx, text, subItem, image, order = \
            win32gui_struct.UnpackLVCOLUMN(ti)
        self.failUnlessEqual(fmt, 0)
        self.failUnlessEqual(cx, 0)
        self.failUnlessEqual(text, '')
        self.failUnlessEqual(subItem, 0)
        self.failUnlessEqual(image, 0)
        self.failUnlessEqual(order, 0)
class TestDEV_BROADCAST_HANDLE(TestBase):
    """Round-trip tests for DEV_BROADCAST_HANDLE pack/unpack."""

    def testPackUnpack(self):
        s = win32gui_struct.PackDEV_BROADCAST_HANDLE(123)
        # UnpackDEV_BROADCAST takes a raw memory address, so copy the packed
        # bytes into an array to obtain a stable buffer address.
        c = array.array("b", s)
        got = win32gui_struct.UnpackDEV_BROADCAST(c.buffer_info()[0])
        # failUnlessEqual is a deprecated alias of assertEqual (removed in
        # Python 3.12) -- use the canonical name.
        self.assertEqual(got.handle, 123)

    def testGUID(self):
        s = win32gui_struct.PackDEV_BROADCAST_HANDLE(123,
                                                     guid=pythoncom.IID_IUnknown)
        c = array.array("b", s)
        got = win32gui_struct.UnpackDEV_BROADCAST(c.buffer_info()[0])
        self.assertEqual(got.handle, 123)
        self.assertEqual(got.eventguid, pythoncom.IID_IUnknown)
class TestDEV_BROADCAST_DEVICEINTERFACE(TestBase):
    """Round-trip test for DEV_BROADCAST_DEVICEINTERFACE pack/unpack."""

    def testPackUnpack(self):
        s = win32gui_struct.PackDEV_BROADCAST_DEVICEINTERFACE(pythoncom.IID_IUnknown,
                                                              "hello")
        # UnpackDEV_BROADCAST takes a raw memory address, so copy the packed
        # bytes into an array to obtain a stable buffer address.
        c = array.array("b", s)
        got = win32gui_struct.UnpackDEV_BROADCAST(c.buffer_info()[0])
        # failUnlessEqual is a deprecated alias of assertEqual (removed in
        # Python 3.12) -- use the canonical name.
        self.assertEqual(got.classguid, pythoncom.IID_IUnknown)
        self.assertEqual(got.name, "hello")
class TestDEV_BROADCAST_VOLUME(TestBase):
    """Round-trip test for DEV_BROADCAST_VOLUME pack/unpack."""

    def testPackUnpack(self):
        s = win32gui_struct.PackDEV_BROADCAST_VOLUME(123, 456)
        # UnpackDEV_BROADCAST takes a raw memory address, so copy the packed
        # bytes into an array to obtain a stable buffer address.
        c = array.array("b", s)
        got = win32gui_struct.UnpackDEV_BROADCAST(c.buffer_info()[0])
        # failUnlessEqual is a deprecated alias of assertEqual (removed in
        # Python 3.12) -- use the canonical name.
        self.assertEqual(got.unitmask, 123)
        self.assertEqual(got.flags, 456)
# Run every TestCase in this module when executed directly.
if __name__=='__main__':
    unittest.main()
|
fragaria/suds | refs/heads/master | suds/xsd/__init__.py | 206 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{schema} module provides a intelligent representation of
an XSD schema. The I{raw} model is the XML tree and the I{model}
is the denormalized, objectified and intelligent view of the schema.
Most of the I{value-add} provided by the model is centered around
transparent referenced type resolution and targeted denormalization.
"""
from logging import getLogger
from suds import *
from suds.sax import Namespace, splitPrefix
log = getLogger(__name__)
def qualify(ref, resolvers, defns=Namespace.default):
    """
    Get a reference that is I{qualified} by namespace.
    @param ref: A referenced schema type name, optionally prefixed.
    @type ref: str
    @param resolvers: One object, or a list of objects, used to resolve
        namespace prefixes.
    @type resolvers: [L{sax.element.Element},]
    @param defns: An optional target namespace used to qualify references
        when no prefix is specified.
    @type defns: A default namespace I{tuple: (prefix,uri)} used when ref not prefixed.
    @return: A qualified reference.
    @rtype: (name, namespace-uri)
    """
    prefix, name = splitPrefix(ref)
    if prefix is None:
        # Unprefixed references fall back to the supplied default namespace.
        return (name, defns[1])
    if not isinstance(resolvers, (list, tuple)):
        resolvers = (resolvers,)
    # Ask each resolver in turn; the first one that knows the prefix wins.
    namespace = None
    for resolver in resolvers:
        candidate = resolver.resolvePrefix(prefix)
        if candidate[1] is not None:
            namespace = candidate
            break
    if namespace is None:
        raise Exception('prefix (%s) not resolved' % prefix)
    return (name, namespace[1])
def isqref(object):
    """
    Get whether the object is a I{qualified reference}: a 2-tuple of
    (name, namespace-uri) strings.
    @param object: An object to be tested.
    @type object: I{any}
    @rtype: boolean
    @see: L{qualify}
    """
    if not isinstance(object, tuple) or len(object) != 2:
        return False
    name, uri = object
    return isinstance(name, basestring) and isinstance(uri, basestring)
class Filter:
    """
    A simple membership filter.
    In I{inclusive} mode, only the listed items pass the C{in} test;
    otherwise (the default, exclusive mode) everything EXCEPT the listed
    items passes.
    """
    def __init__(self, inclusive=False, *items):
        self.inclusive = inclusive
        self.items = items
    def __contains__(self, x):
        listed = x in self.items
        return listed if self.inclusive else not listed
|
eonpatapon/nova | refs/heads/master | nova/api/openstack/compute/schemas/v3/cells.py | 70 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
# JSON schema validating the POST /os-cells (create cell) request body.
create = {
    'type': 'object',
    'properties': {
        'cell': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name,
                # A cell is either a 'parent' or a 'child' of the current cell.
                'type': {
                    'type': 'string',
                    'enum': ['parent', 'child'],
                },
                # NOTE: In unparse_transport_url(), a url consists of the
                # following parameters:
                # "qpid://<username>:<password>@<rpc_host>:<rpc_port>/"
                # or
                # "rabbit://<username>:<password>@<rpc_host>:<rpc_port>/"
                # Then the url is stored into transport_url of cells table
                # which is defined with String(255).
                'username': {
                    'type': 'string', 'maxLength': 255,
                    'pattern': '^[a-zA-Z0-9-_]*$'
                },
                'password': {
                    # Allow to specify any string for strong password.
                    'type': 'string', 'maxLength': 255,
                },
                'rpc_host': parameter_types.hostname_or_ip_address,
                'rpc_port': parameter_types.tcp_udp_port,
                'rpc_virtual_host': parameter_types.hostname_or_ip_address,
            },
            'required': ['name'],
            'additionalProperties': False,
        },
    },
    'required': ['cell'],
    'additionalProperties': False,
}
# JSON schema validating the PUT /os-cells/{id} (update cell) request body.
# Mirrors ``create`` above, except no property is required: any subset of
# fields may be updated.
update = {
    'type': 'object',
    'properties': {
        'cell': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name,
                'type': {
                    'type': 'string',
                    'enum': ['parent', 'child'],
                },
                # See the transport-url note on ``create``: these values are
                # combined into a transport_url column of at most 255 chars.
                'username': {
                    'type': 'string', 'maxLength': 255,
                    'pattern': '^[a-zA-Z0-9-_]*$'
                },
                'password': {
                    'type': 'string', 'maxLength': 255,
                },
                'rpc_host': parameter_types.hostname_or_ip_address,
                'rpc_port': parameter_types.tcp_udp_port,
                'rpc_virtual_host': parameter_types.hostname_or_ip_address,
            },
            'additionalProperties': False,
        },
    },
    'required': ['cell'],
    'additionalProperties': False,
}
# JSON schema validating the sync_instances action body.  All filters are
# optional; 'updated_since' restricts the sync to recently changed rows.
sync_instances = {
    'type': 'object',
    'properties': {
        'project_id': parameter_types.project_id,
        'deleted': parameter_types.boolean,
        'updated_since': {
            'type': 'string',
            'format': 'date-time',
        },
    },
    'additionalProperties': False,
}
|
KokareIITP/django | refs/heads/master | tests/null_fk_ordering/models.py | 210 | """
Regression tests for proper working of ForeignKey(null=True). Tests these bugs:
    * #7512: including a nullable foreign key reference in Meta ordering has unexpected results
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# The first two models represent a very simple null FK ordering case.
class Author(models.Model):
    # Target of the nullable FK below; ordering Article by author__name must
    # tolerate articles whose author is NULL.
    name = models.CharField(max_length=150)
@python_2_unicode_compatible
class Article(models.Model):
    title = models.CharField(max_length=150)
    # Nullable FK: the regression under test is ordering across this NULL.
    author = models.ForeignKey(Author, models.SET_NULL, null=True)
    def __str__(self):
        return 'Article titled: %s' % (self.title, )
    class Meta:
        # Order by a field reached through the nullable FK (see module doc).
        ordering = ['author__name', ]
# These following 4 models represent a far more complex ordering case.
class SystemInfo(models.Model):
    system_name = models.CharField(max_length=32)
class Forum(models.Model):
    system_info = models.ForeignKey(SystemInfo, models.CASCADE)
    forum_name = models.CharField(max_length=32)
@python_2_unicode_compatible
class Post(models.Model):
    # Nullable link in the middle of the Comment ordering chain below.
    forum = models.ForeignKey(Forum, models.SET_NULL, null=True)
    title = models.CharField(max_length=32)
    def __str__(self):
        return self.title
@python_2_unicode_compatible
class Comment(models.Model):
    post = models.ForeignKey(Post, models.SET_NULL, null=True)
    comment_text = models.CharField(max_length=250)
    class Meta:
        # Orders across two nullable FKs (post, then post.forum) -- the
        # complex case of the null-FK-ordering regression.
        ordering = ['post__forum__system_info__system_name', 'comment_text']
    def __str__(self):
        return self.comment_text
|
androbwebb/JenniferVirtualAssistant | refs/heads/master | quick_run.py | 1 | #!/usr/bin/env python
from server.brain import JenniferBrain
from ioclients.terminal import JenniferTerminalClient
from ioclients.terminal_with_sound import JenniferTerminalWithSoundClient
# Wire the assistant brain (network plugins enabled) to the terminal client
# that also produces sound output, then hand control to its run loop.
brain = JenniferBrain(allow_network_plugins=True)
client = JenniferTerminalWithSoundClient(brain)
client.run()
"""
$: dollar
$ -$ --$ A$ C$ HK$ M$ NZ$ S$ U.S.$ US$
'': closing quotation mark
' ''
(: opening parenthesis
( [ {
): closing parenthesis
) ] }
,: comma
,
--: dash
--
.: sentence terminator
. ! ?
:: colon or ellipsis
: ; ...
CC: conjunction, coordinating
& 'n and both but either et for less minus neither nor or plus so
therefore times v. versus vs. whether yet
CD: numeral, cardinal
mid-1890 nine-thirty forty-two one-tenth ten million 0.5 one forty-
seven 1987 twenty '79 zero two 78-degrees eighty-four IX '60s .025
fifteen 271,124 dozen quintillion DM2,000 ...
DT: determiner
all an another any both del each either every half la many much nary
neither no some such that the them these this those
EX: existential there
there
FW: foreign word
gemeinschaft hund ich jeux habeas Haementeria Herr K'ang-si vous
lutihaw alai je jour objets salutaris fille quibusdam pas trop Monte
terram fiche oui corporis ...
IN: preposition or conjunction, subordinating
astride among uppon whether out inside pro despite on by throughout
below within for towards near behind atop around if like until below
next into if beside ...
JJ: adjective or numeral, ordinal
third ill-mannered pre-war regrettable oiled calamitous first separable
ectoplasmic battery-powered participatory fourth still-to-be-named
multilingual multi-disciplinary ...
JJR: adjective, comparative
bleaker braver breezier briefer brighter brisker broader bumper busier
calmer cheaper choosier cleaner clearer closer colder commoner costlier
cozier creamier crunchier cuter ...
JJS: adjective, superlative
calmest cheapest choicest classiest cleanest clearest closest commonest
corniest costliest crassest creepiest crudest cutest darkest deadliest
dearest deepest densest dinkiest ...
LS: list item marker
A A. B B. C C. D E F First G H I J K One SP-44001 SP-44002 SP-44005
SP-44007 Second Third Three Two * a b c d first five four one six three
two
MD: modal auxiliary
can cannot could couldn't dare may might must need ought shall should
shouldn't will would
NN: noun, common, singular or mass
common-carrier cabbage knuckle-duster Casino afghan shed thermostat
investment slide humour falloff slick wind hyena override subhumanity
machinist ...
NNP: noun, proper, singular
Motown Venneboerger Czestochwa Ranzer Conchita Trumplane Christos
Oceanside Escobar Kreisler Sawyer Cougar Yvette Ervin ODI Darryl CTCA
Shannon A.K.C. Meltex Liverpool ...
NNPS: noun, proper, plural
Americans Americas Amharas Amityvilles Amusements Anarcho-Syndicalists
Andalusians Andes Andruses Angels Animals Anthony Antilles Antiques
Apache Apaches Apocrypha ...
NNS: noun, common, plural
undergraduates scotches bric-a-brac products bodyguards facets coasts
divestitures storehouses designs clubs fragrances averages
subjectivists apprehensions muses factory-jobs ...
PDT: pre-determiner
all both half many quite such sure this
POS: genitive marker
' 's
PRP: pronoun, personal
hers herself him himself hisself it itself me myself one oneself ours
ourselves ownself self she thee theirs them themselves they thou thy us
PRP$: pronoun, possessive
her his mine my our ours their thy your
RB: adverb
occasionally unabatingly maddeningly adventurously professedly
stirringly prominently technologically magisterially predominately
swiftly fiscally pitilessly ...
RBR: adverb, comparative
further gloomier grander graver greater grimmer harder harsher
healthier heavier higher however larger later leaner lengthier less-
perfectly lesser lonelier longer louder lower more ...
RBS: adverb, superlative
best biggest bluntest earliest farthest first furthest hardest
heartiest highest largest least less most nearest second tightest worst
RP: particle
aboard about across along apart around aside at away back before behind
by crop down ever fast for forth from go high i.e. in into just later
low more off on open out over per pie raising start teeth that through
under unto up up-pp upon whole with you
SYM: symbol
% & ' '' ''. ) ). * + ,. < = > @ A[fj] U.S U.S.S.R * ** ***
TO: "to" as preposition or infinitive marker
to
UH: interjection
Goodbye Goody Gosh Wow Jeepers Jee-sus Hubba Hey Kee-reist Oops amen
huh howdy uh dammit whammo shucks heck anyways whodunnit honey golly
man baby diddle hush sonuvabitch ...
VB: verb, base form
ask assemble assess assign assume atone attention avoid bake balkanize
bank begin behold believe bend benefit bevel beware bless boil bomb
boost brace break bring broil brush build ...
VBD: verb, past tense
dipped pleaded swiped regummed soaked tidied convened halted registered
cushioned exacted snubbed strode aimed adopted belied figgered
speculated wore appreciated contemplated ...
VBG: verb, present participle or gerund
telegraphing stirring focusing angering judging stalling lactating
hankerin' alleging veering capping approaching traveling besieging
encrypting interrupting erasing wincing ...
VBN: verb, past participle
multihulled dilapidated aerosolized chaired languished panelized used
experimented flourished imitated reunifed factored condensed sheared
unsettled primed dubbed desired ...
VBP: verb, present tense, not 3rd person singular
predominate wrap resort sue twist spill cure lengthen brush terminate
appear tend stray glisten obtain comprise detest tease attract
emphasize mold postpone sever return wag ...
VBZ: verb, present tense, 3rd person singular
bases reconstructs marks mixes displeases seals carps weaves snatches
slumps stretches authorizes smolders pictures emerges stockpiles
seduces fizzes uses bolsters slaps speaks pleads ...
WDT: WH-determiner
that what whatever which whichever
WP: WH-pronoun
that what whatever whatsoever which who whom whosoever
WP$: WH-pronoun, possessive
whose
WRB: Wh-adverb
how however whence whenever where whereby whereever wherein whereof why
``: opening quotation mark
""" |
StuartChaffe/responsive-table | refs/heads/master | node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | 1812 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# A dictionary mapping supported GYP target types to the Windows file
# extension of their primary linker output (used to derive default PDB names).
TARGET_TYPE_EXT = {
  'executable': 'exe',
  'loadable_module': 'dll',
  'shared_library': 'dll',
  'static_library': 'lib',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
  """Add a shard number to the end of a target.

  Arguments:
    name: name of the target (foo#target)
    number: shard number
  Returns:
    Target name with shard added (foo_1#target)
  """
  shard_suffix = str(number)
  return _SuffixName(name, shard_suffix)
def ShardTargets(target_list, target_dicts):
  """Shard some targets apart to work around the linkers limits.

  Targets carrying an 'msvs_shard' count are replaced by that many copies
  (foo_0#target .. foo_N#target), with the source files distributed among
  the copies round-robin, and every dependency on a sharded target fanned
  out to all of its shards.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
  Returns:
    Tuple of the new sharded versions of the inputs.
  """
  # Gather the targets to shard, and how many pieces.
  targets_to_shard = {}
  for t in target_dicts:
    shards = int(target_dicts[t].get('msvs_shard', 0))
    if shards:
      targets_to_shard[t] = shards
  # Shard target_list.
  new_target_list = []
  for t in target_list:
    if t in targets_to_shard:
      for i in range(targets_to_shard[t]):
        new_target_list.append(_ShardName(t, i))
    else:
      new_target_list.append(t)
  # Shard target_dict.
  new_target_dicts = {}
  for t in target_dicts:
    if t in targets_to_shard:
      for i in range(targets_to_shard[t]):
        name = _ShardName(t, i)
        new_target_dicts[name] = copy.copy(target_dicts[t])
        new_target_dicts[name]['target_name'] = _ShardName(
             new_target_dicts[name]['target_name'], i)
        # Round-robin split: shard i takes sources i, i+N, i+2N, ...
        sources = new_target_dicts[name].get('sources', [])
        new_sources = []
        for pos in range(i, len(sources), targets_to_shard[t]):
          new_sources.append(sources[pos])
        new_target_dicts[name]['sources'] = new_sources
    else:
      new_target_dicts[t] = target_dicts[t]
  # Shard dependencies: anything that depended on a sharded target must now
  # depend on every one of its shards.
  for t in new_target_dicts:
    for deptype in ('dependencies', 'dependencies_original'):
      dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
      new_dependencies = []
      for d in dependencies:
        if d in targets_to_shard:
          for i in range(targets_to_shard[d]):
            new_dependencies.append(_ShardName(d, i))
        else:
          new_dependencies.append(d)
      new_target_dicts[t][deptype] = new_dependencies
  return (new_target_list, new_target_dicts)
def _GetPdbPath(target_dict, config_name, vars):
  """Returns the path to the PDB file that will be generated by a given
  configuration.

  The lookup proceeds as follows:
  - Look for an explicit path in the VCLinkerTool configuration block.
  - Look for an 'msvs_large_pdb_path' variable.
  - Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
    specified.
  - Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.

  Arguments:
    target_dict: The target dictionary to be searched.
    config_name: The name of the configuration of interest.
    vars: A dictionary of common GYP variables with generator-specific values.
  Returns:
    The path of the corresponding PDB file.
  """
  config = target_dict['configurations'][config_name]
  # setdefault keeps the original's side effect of ensuring the
  # 'msvs_settings' dict exists on the configuration.
  linker = config.setdefault('msvs_settings', {}).get('VCLinkerTool', {})
  explicit_path = linker.get('ProgramDatabaseFile')
  if explicit_path:
    return explicit_path
  override = target_dict.get('variables', {}).get('msvs_large_pdb_path', None)
  if override:
    return override
  base_name = target_dict.get('product_name', target_dict['target_name'])
  extension = TARGET_TYPE_EXT[target_dict['type']]
  return '%s/%s.%s.pdb' % (vars['PRODUCT_DIR'], base_name, extension)
def InsertLargePdbShims(target_list, target_dicts, vars):
  """Insert a shim target that forces the linker to use 4KB pagesize PDBs.

  This is a workaround for targets with PDBs greater than 1GB in size, the
  limit for the 1KB pagesize PDBs created by the linker by default.

  For each target with 'msvs_large_pdb' set, two helper targets are added: a
  'none' target that copies the shim source into the intermediate dir, and a
  static_library shim built from that copy whose compile step creates the
  PDB with the larger page size.  The original target then depends on the
  shim and is pointed at the same PDB file.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    vars: A dictionary of common GYP variables with generator-specific values.
  Returns:
    Tuple of the shimmed version of the inputs.
  """
  # Determine which targets need shimming.
  targets_to_shim = []
  for t in target_dicts:
    target_dict = target_dicts[t]

    # We only want to shim targets that have msvs_large_pdb enabled.
    if not int(target_dict.get('msvs_large_pdb', 0)):
      continue

    # This is intended for executable, shared_library and loadable_module
    # targets where every configuration is set up to produce a PDB output.
    # If any of these conditions is not true then the shim logic will fail
    # below.
    targets_to_shim.append(t)

  large_pdb_shim_cc = _GetLargePdbShimCcPath()

  for t in targets_to_shim:
    target_dict = target_dicts[t]
    target_name = target_dict.get('target_name')

    base_dict = _DeepCopySomeKeys(target_dict,
          ['configurations', 'default_configuration', 'toolset'])

    # This is the dict for copying the source file (part of the GYP tree)
    # to the intermediate directory of the project. This is necessary because
    # we can't always build a relative path to the shim source file (on Windows
    # GYP and the project may be on different drives), and Ninja hates absolute
    # paths (it ends up generating the .obj and .obj.d alongside the source
    # file, polluting GYPs tree).
    copy_suffix = 'large_pdb_copy'
    copy_target_name = target_name + '_' + copy_suffix
    full_copy_target_name = _SuffixName(t, copy_suffix)
    shim_cc_basename = os.path.basename(large_pdb_shim_cc)
    shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
    shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
    copy_dict = copy.deepcopy(base_dict)
    copy_dict['target_name'] = copy_target_name
    copy_dict['type'] = 'none'
    copy_dict['sources'] = [ large_pdb_shim_cc ]
    copy_dict['copies'] = [{
      'destination': shim_cc_dir,
      'files': [ large_pdb_shim_cc ]
    }]

    # This is the dict for the PDB generating shim target. It depends on the
    # copy target.
    shim_suffix = 'large_pdb_shim'
    shim_target_name = target_name + '_' + shim_suffix
    full_shim_target_name = _SuffixName(t, shim_suffix)
    shim_dict = copy.deepcopy(base_dict)
    shim_dict['target_name'] = shim_target_name
    shim_dict['type'] = 'static_library'
    shim_dict['sources'] = [ shim_cc_path ]
    shim_dict['dependencies'] = [ full_copy_target_name ]

    # Set up the shim to output its PDB to the same location as the final linker
    # target.
    # NOTE: iteritems() keeps this module Python 2 only.
    for config_name, config in shim_dict.get('configurations').iteritems():
      pdb_path = _GetPdbPath(target_dict, config_name, vars)

      # A few keys that we don't want to propagate.
      for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
        config.pop(key, None)

      msvs = config.setdefault('msvs_settings', {})

      # Update the compiler directives in the shim target.
      compiler = msvs.setdefault('VCCLCompilerTool', {})
      # '3' selects /Zi (program-database debug info) so the compile step
      # creates the PDB file named below.
      compiler['DebugInformationFormat'] = '3'
      compiler['ProgramDataBaseFileName'] = pdb_path

      # Set the explicit PDB path in the appropriate configuration of the
      # original target.
      config = target_dict['configurations'][config_name]
      msvs = config.setdefault('msvs_settings', {})
      linker = msvs.setdefault('VCLinkerTool', {})
      linker['GenerateDebugInformation'] = 'true'
      linker['ProgramDatabaseFile'] = pdb_path

    # Add the new targets. They must go to the beginning of the list so that
    # the dependency generation works as expected in ninja.
    target_list.insert(0, full_copy_target_name)
    target_list.insert(0, full_shim_target_name)
    target_dicts[full_copy_target_name] = copy_dict
    target_dicts[full_shim_target_name] = shim_dict

    # Update the original target to depend on the shim target.
    target_dict.setdefault('dependencies', []).append(full_shim_target_name)

  return (target_list, target_dicts)
|
titasakgm/brc-stock | refs/heads/master | openerp/addons/account/wizard/account_journal_select.py | 56 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_journal_select(osv.osv_memory):
    """
    Account Journal Select

    Transient wizard that opens the journal-items list view filtered to the
    journal/period of the selected account.journal.period record.
    """
    _name = "account.journal.select"
    _description = "Account Journal Select"

    def action_open_window(self, cr, uid, ids, context=None):
        # Returns an ir.actions.act_window description that the client opens.
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        if context is None:
            context = {}
        # Base action defined in the 'account' module's XML data.
        result = mod_obj.get_object_reference(cr, uid, 'account', 'action_move_line_select')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id])[0]
        # context['active_id'] is presumably the account.journal.period the
        # user launched the wizard from -- TODO confirm against the caller.
        cr.execute('select journal_id, period_id from account_journal_period where id=%s', (context['active_id'],))
        res = cr.fetchone()
        if res:
            journal_id, period_id = res
            # Restrict the opened list to this journal/period pair.
            result['domain'] = str([('journal_id', '=', journal_id), ('period_id', '=', period_id)])
            result['context'] = str({'journal_id': journal_id, 'period_id': period_id})
        return result
account_journal_select()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Jaiz909/youtube-dl | refs/heads/master | youtube_dl/extractor/youku.py | 36 | # coding: utf-8
from __future__ import unicode_literals
import math
import random
import re
import time
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class YoukuIE(InfoExtractor):
    """Extractor for Youku videos (v.youku.com / player.youku.com)."""
    _VALID_URL = r'''(?x)
        (?:
            http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
            youku:)
        (?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
    '''
    _TEST = {
        'url': 'http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html',
        'md5': 'ffe3f2e435663dc2d1eea34faeff5b5b',
        'params': {
            'test': False
        },
        'info_dict': {
            'id': 'XNDgyMDQ2NTQw_part00',
            'ext': 'flv',
            'title': 'youtube-dl test video "\'/\\ä↭𝕐'
        }
    }

    def _gen_sid(self):
        # Session id: current millis plus two random components, concatenated.
        nowTime = int(time.time() * 1000)
        random1 = random.randint(1000, 1998)
        random2 = random.randint(1000, 9999)
        return "%d%d%d" % (nowTime, random1, random2)

    def _get_file_ID_mix_string(self, seed):
        # Deterministic shuffle of the character alphabet driven by a linear
        # congruential generator seeded with the server-provided 'seed' --
        # presumably mirrors the scrambling done by Youku's player.
        mixed = []
        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
        seed = float(seed)
        for i in range(len(source)):
            seed = (seed * 211 + 30031) % 65536
            index = math.floor(seed / 65536 * len(source))
            mixed.append(source[int(index)])
            source.remove(source[int(index)])
        # return ''.join(mixed)
        return mixed

    def _get_file_id(self, fileId, seed):
        # fileId is a '*'-separated list of indexes into the mixed alphabet;
        # translate each index back to its character.
        mixed = self._get_file_ID_mix_string(seed)
        ids = fileId.split('*')
        realId = []
        for ch in ids:
            if ch:
                realId.append(mixed[int(ch)])
        return ''.join(realId)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id

        config = self._download_json(info_url, video_id)

        error_code = config['data'][0].get('error_code')
        if error_code:
            # -8 means blocked outside China.
            error = config['data'][0].get('error')  # Chinese and English, separated by newline.
            raise ExtractorError(error or 'Server reported error %i' % error_code,
                expected=True)

        video_title = config['data'][0]['title']
        seed = config['data'][0]['seed']

        format = self._downloader.params.get('format', None)
        supported_format = list(config['data'][0]['streamfileids'].keys())

        # TODO proper format selection
        if format is None or format == 'best':
            if 'hd2' in supported_format:
                format = 'hd2'
            else:
                format = 'flv'
            ext = 'flv'
        elif format == 'worst':
            format = 'mp4'
            ext = 'mp4'
        else:
            format = 'flv'
            ext = 'flv'

        fileid = config['data'][0]['streamfileids'][format]
        keys = [s['k'] for s in config['data'][0]['segs'][format]]
        # segs is usually a dictionary, but an empty *list* if an error occurred.

        files_info = []
        sid = self._gen_sid()
        fileid = self._get_file_id(fileid, seed)

        # column 8,9 of fileid represent the segment number
        # fileid[7:9] should be changed
        for index, key in enumerate(keys):
            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
            download_url = 'http://k.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)

            info = {
                'id': '%s_part%02d' % (video_id, index),
                'url': download_url,
                'uploader': None,
                'upload_date': None,
                'title': video_title,
                'ext': ext,
            }
            files_info.append(info)

        return files_info
|
CptLemming/paramiko | refs/heads/master | demos/demo_sftp.py | 32 | #!/usr/bin/env python
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
# based on code provided by raymond mosteller (thanks!)
import base64
import getpass
import os
import socket
import sys
import traceback
import paramiko
from paramiko.py3compat import input
# setup logging
paramiko.util.log_to_file('demo_sftp.log')

# Paramiko client configuration
UseGSSAPI = True             # enable GSS-API / SSPI authentication
DoGSSAPIKeyExchange = True
Port = 22

# get hostname: from argv[1] (optionally user@host) or interactively;
# an optional ':port' suffix overrides the default SSH port.
username = ''
if len(sys.argv) > 1:
    hostname = sys.argv[1]
    if hostname.find('@') >= 0:
        username, hostname = hostname.split('@')
else:
    hostname = input('Hostname: ')
if len(hostname) == 0:
    print('*** Hostname required.')
    sys.exit(1)

if hostname.find(':') >= 0:
    hostname, portstr = hostname.split(':')
    Port = int(portstr)

# get username: default to the local login name when none was given
if username == '':
    default_username = getpass.getuser()
    username = input('Username [%s]: ' % default_username)
    if len(username) == 0:
        username = default_username
# Only prompt for a password when GSS-API auth is disabled.
if not UseGSSAPI:
    password = getpass.getpass('Password for %s@%s: ' % (username, hostname))
else:
    password = None

# get host key, if we know one
hostkeytype = None
hostkey = None
try:
    host_keys = paramiko.util.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
except IOError:
    try:
        # try ~/ssh/ too, because windows can't have a folder named ~/.ssh/
        host_keys = paramiko.util.load_host_keys(os.path.expanduser('~/ssh/known_hosts'))
    except IOError:
        print('*** Unable to open host keys file')
        host_keys = {}

if hostname in host_keys:
    hostkeytype = host_keys[hostname].keys()[0]
    hostkey = host_keys[hostname][hostkeytype]
    print('Using host key of type %s' % hostkeytype)
# now, connect and use paramiko Transport to negotiate SSH2 across the connection
try:
    t = paramiko.Transport((hostname, Port))
    t.connect(hostkey, username, password, gss_host=socket.getfqdn(hostname),
              gss_auth=UseGSSAPI, gss_kex=DoGSSAPIKeyExchange)
    sftp = paramiko.SFTPClient.from_transport(t)

    # dirlist on remote host
    dirlist = sftp.listdir('.')
    print("Dirlist: %s" % dirlist)

    # copy this demo onto the server
    try:
        sftp.mkdir("demo_sftp_folder")
    except IOError:
        # mkdir raises IOError when the directory already exists; that is
        # fine for a demo re-run.
        print('(assuming demo_sftp_folder/ already exists)')
    with sftp.open('demo_sftp_folder/README', 'w') as f:
        f.write('This was created by demo_sftp.py.\n')
    with open('demo_sftp.py', 'r') as f:
        data = f.read()
    # NOTE(review): this file handle is never closed explicitly (unlike the
    # 'with' blocks above) -- it relies on garbage collection to flush.
    sftp.open('demo_sftp_folder/demo_sftp.py', 'w').write(data)
    print('created demo_sftp_folder/ on the server')

    # copy the README back here
    with sftp.open('demo_sftp_folder/README', 'r') as f:
        data = f.read()
    with open('README_demo_sftp', 'w') as f:
        f.write(data)
    print('copied README back here')

    # BETTER: use the get() and put() methods
    sftp.put('demo_sftp.py', 'demo_sftp_folder/demo_sftp.py')
    sftp.get('demo_sftp_folder/README', 'README_demo_sftp')

    t.close()

except Exception as e:
    print('*** Caught exception: %s: %s' % (e.__class__, e))
    traceback.print_exc()
    # Best-effort cleanup: t may not exist if Transport() itself failed.
    try:
        t.close()
    except:
        pass
    sys.exit(1)
|
VisionSystemsInc/voxel_globe | refs/heads/master | voxel_globe/tools/__init__.py | 2 | from .voxel_dir import task_dir, storage_dir, image_dir, image_sha_dir, get_image_sha_dir, log_dir |
SGCreations/Flask | refs/heads/master | Work/Trivia - Module 5/env/Lib/site-packages/flask/testsuite/deprecations.py | 563 | # -*- coding: utf-8 -*-
"""
flask.testsuite.deprecations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests deprecation support.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase, catch_warnings
class DeprecationsTestCase(FlaskTestCase):
    """Placeholder for deprecation-support tests; not used currently."""
def suite():
    """Build and return the unittest suite for this module's test cases."""
    deprecation_suite = unittest.TestSuite()
    deprecation_suite.addTest(unittest.makeSuite(DeprecationsTestCase))
    return deprecation_suite
|
solashirai/edx-platform | refs/heads/master | lms/djangoapps/courseware/tests/test_tabs.py | 6 | """
Test cases for tabs.
"""
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404
from mock import MagicMock, Mock, patch
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.courses import get_course_by_id
from courseware.tabs import (
get_course_tab_list, CoursewareTab, CourseInfoTab, ProgressTab,
ExternalDiscussionCourseTab, ExternalLinkCourseTab
)
from courseware.tests.helpers import get_request_for_user, LoginEnrollmentTestCase
from courseware.tests.factories import InstructorFactory, StaffFactory
from courseware.views import get_static_tab_contents, static_tab
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from util.milestones_helpers import (
get_milestone_relationship_types,
add_milestone,
add_course_milestone,
add_course_content_milestone
)
from milestones.tests.utils import MilestonesTestCaseMixin
from xmodule import tabs as xmodule_tabs
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MIXED_TOY_MODULESTORE, TEST_DATA_MIXED_CLOSED_MODULESTORE,
SharedModuleStoreTestCase)
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
class TabTestCase(SharedModuleStoreTestCase):
    """Base class for Tab-related test cases."""

    @classmethod
    def setUpClass(cls):
        super(TabTestCase, cls).setUpClass()
        cls.course = CourseFactory.create(org='edX', course='toy', run='2012_Fall')
        cls.fake_dict_tab = {'fake_key': 'fake_value'}
        cls.books = None

    def setUp(self):
        super(TabTestCase, self).setUp()
        # Fake reverse-URL helper so link checks do not depend on real URL conf.
        self.reverse = lambda name, args: "name/{0}/args/{1}".format(name, ",".join(str(a) for a in args))

    def create_mock_user(self, is_authenticated=True, is_staff=True, is_enrolled=True):
        """
        Creates a mock user with the specified properties.
        """
        user = UserFactory()
        user.name = 'mock_user'
        user.is_staff = is_staff
        user.is_enrolled = is_enrolled
        # Django's User exposes is_authenticated as a method in this codebase.
        user.is_authenticated = lambda: is_authenticated
        return user

    def is_tab_enabled(self, tab, course, user):
        """
        Returns true if the specified tab is enabled.
        """
        return tab.is_enabled(course, user=user)

    def set_up_books(self, num_books):
        """Initializes the textbooks in the course and adds the given number of books to each textbook"""
        self.books = [MagicMock() for _ in range(num_books)]
        for book_index, book in enumerate(self.books):
            book.title = 'Book{0}'.format(book_index)
        # The same mock book list is shared by all three textbook collections.
        self.course.textbooks = self.books
        self.course.pdf_textbooks = self.books
        self.course.html_textbooks = self.books

    def check_tab(
            self,
            tab_class,
            dict_tab,
            expected_link,
            expected_tab_id,
            expected_name='same',
            invalid_dict_tab=None,
    ):
        """
        Helper method to verify a tab class.

        'tab_class' is the class of the tab that is being tested
        'dict_tab' is the raw dictionary value of the tab
        'expected_link' is the expected value for the hyperlink of the tab
        'expected_tab_id' is the expected value for the unique id of the tab
        'expected_name' is the expected value for the name of the tab
        'invalid_dict_tab' is an invalid dictionary value for the tab.
            Can be 'None' if the given tab class does not have any keys to validate.
        """
        # create tab
        tab = tab_class(tab_dict=dict_tab)
        # name is as expected
        self.assertEqual(tab.name, expected_name)
        # link is as expected
        self.assertEqual(tab.link_func(self.course, self.reverse), expected_link)
        # verify active page name
        self.assertEqual(tab.tab_id, expected_tab_id)
        # validate tab
        self.assertTrue(tab.validate(dict_tab))
        if invalid_dict_tab:
            with self.assertRaises(xmodule_tabs.InvalidTabsException):
                tab.validate(invalid_dict_tab)
        # check get and set methods
        self.check_get_and_set_methods(tab)
        # check to_json and from_json methods
        self.check_tab_json_methods(tab)
        # check equality methods
        self.check_tab_equality(tab, dict_tab)
        # return tab for any additional tests
        return tab

    def check_tab_equality(self, tab, dict_tab):
        """Tests the equality methods on the given tab"""
        self.assertEquals(tab, dict_tab)  # test __eq__
        # NOTE(review): this aliases dict_tab (no copy), so the caller's dict
        # is mutated by the 'type' assignment below — confirm intended.
        ne_dict_tab = dict_tab
        ne_dict_tab['type'] = 'fake_type'
        self.assertNotEquals(tab, ne_dict_tab)  # test __ne__: incorrect type
        self.assertNotEquals(tab, {'fake_key': 'fake_value'})  # test __ne__: missing type

    def check_tab_json_methods(self, tab):
        """Tests the json from and to methods on the given tab"""
        serialized_tab = tab.to_json()
        deserialized_tab = tab.from_json(serialized_tab)
        self.assertEquals(serialized_tab, deserialized_tab)

    def check_can_display_results(
            self,
            tab,
            expected_value=True,
            for_authenticated_users_only=False,
            for_staff_only=False,
            for_enrolled_users_only=False
    ):
        """Checks can display results for various users"""
        if for_staff_only:
            user = self.create_mock_user(is_authenticated=True, is_staff=True, is_enrolled=True)
            self.assertEquals(expected_value, self.is_tab_enabled(tab, self.course, user))
        if for_authenticated_users_only:
            user = self.create_mock_user(is_authenticated=True, is_staff=False, is_enrolled=False)
            self.assertEquals(expected_value, self.is_tab_enabled(tab, self.course, user))
        if not for_staff_only and not for_authenticated_users_only and not for_enrolled_users_only:
            # No restriction flags set: the tab should behave the same even for
            # an anonymous, non-staff, non-enrolled user.
            user = self.create_mock_user(is_authenticated=False, is_staff=False, is_enrolled=False)
            self.assertEquals(expected_value, self.is_tab_enabled(tab, self.course, user))
        if for_enrolled_users_only:
            user = self.create_mock_user(is_authenticated=True, is_staff=False, is_enrolled=True)
            self.assertEquals(expected_value, self.is_tab_enabled(tab, self.course, user))

    def check_get_and_set_methods(self, tab):
        """Test __getitem__ and __setitem__ calls"""
        self.assertEquals(tab['type'], tab.type)
        self.assertEquals(tab['tab_id'], tab.tab_id)
        with self.assertRaises(KeyError):
            _ = tab['invalid_key']
        self.check_get_and_set_method_for_key(tab, 'name')
        self.check_get_and_set_method_for_key(tab, 'tab_id')
        with self.assertRaises(KeyError):
            tab['invalid_key'] = 'New Value'

    def check_get_and_set_method_for_key(self, tab, key):
        """Test __getitem__ and __setitem__ for the given key"""
        old_value = tab[key]
        new_value = 'New Value'
        tab[key] = new_value
        self.assertEquals(tab[key], new_value)
        # Restore the original value so the tab is unchanged for later checks.
        tab[key] = old_value
        self.assertEquals(tab[key], old_value)
class TextbooksTestCase(TabTestCase):
    """Test cases for Textbook Tab."""

    def setUp(self):
        super(TextbooksTestCase, self).setUp()
        self.set_up_books(2)
        self.dict_tab = MagicMock()
        # One collection tab of each textbook flavor.
        self.course.tabs = [
            xmodule_tabs.CourseTab.load('textbooks'),
            xmodule_tabs.CourseTab.load('pdf_textbooks'),
            xmodule_tabs.CourseTab.load('html_textbooks'),
        ]
        self.num_textbook_tabs = sum(1 for tab in self.course.tabs if tab.type in [
            'textbooks', 'pdf_textbooks', 'html_textbooks'
        ])
        self.num_textbooks = self.num_textbook_tabs * len(self.books)

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_TEXTBOOK": True})
    def test_textbooks_enabled(self):
        """Each textbook collection expands into per-book tabs with correct links."""
        type_to_reverse_name = {'textbook': 'book', 'pdftextbook': 'pdf_book', 'htmltextbook': 'html_book'}
        num_textbooks_found = 0
        user = self.create_mock_user(is_authenticated=True, is_staff=False, is_enrolled=True)
        for tab in xmodule_tabs.CourseTabList.iterate_displayable(self.course, user=user):
            # verify all textbook type tabs
            if tab.type == 'single_textbook':
                # tab_id is "<book_type>/<book_index>"
                book_type, book_index = tab.tab_id.split("/", 1)
                expected_link = self.reverse(
                    type_to_reverse_name[book_type],
                    args=[self.course.id.to_deprecated_string(), book_index]
                )
                self.assertEqual(tab.link_func(self.course, self.reverse), expected_link)
                self.assertTrue(tab.name.startswith('Book{0}'.format(book_index)))
                num_textbooks_found = num_textbooks_found + 1
        self.assertEquals(num_textbooks_found, self.num_textbooks)
@attr('shard_1')
class StaticTabDateTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
    """Test cases for Static Tab Dates."""
    MODULESTORE = TEST_DATA_MIXED_TOY_MODULESTORE

    @classmethod
    def setUpClass(cls):
        super(StaticTabDateTestCase, cls).setUpClass()
        cls.course = CourseFactory.create()
        cls.page = ItemFactory.create(
            category="static_tab", parent_location=cls.course.location,
            data="OOGIE BLOOGIE", display_name="new_tab"
        )
        cls.course.tabs.append(xmodule_tabs.CourseTab.load('static_tab', name='New Tab', url_slug='new_tab'))
        cls.course.save()
        cls.toy_course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')

    def setUp(self):
        super(StaticTabDateTestCase, self).setUp()

    def test_logged_in(self):
        """A logged-in user can view the static tab content."""
        self.setup_user()
        url = reverse('static_tab', args=[self.course.id.to_deprecated_string(), 'new_tab'])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("OOGIE BLOOGIE", resp.content)

    def test_anonymous_user(self):
        """An anonymous user can also view the static tab content."""
        url = reverse('static_tab', args=[self.course.id.to_deprecated_string(), 'new_tab'])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("OOGIE BLOOGIE", resp.content)

    def test_invalid_course_key(self):
        """A malformed course id makes the static_tab view raise Http404."""
        self.setup_user()
        request = get_request_for_user(self.user)
        with self.assertRaises(Http404):
            static_tab(request, course_id='edX/toy', tab_slug='new_tab')

    def test_get_static_tab_contents(self):
        """get_static_tab_contents renders the tab and degrades gracefully on render errors."""
        self.setup_user()
        course = get_course_by_id(self.toy_course_key)
        request = get_request_for_user(self.user)
        tab = xmodule_tabs.CourseTabList.get_tab_by_slug(course.tabs, 'resources')

        # Test render works okay
        tab_content = get_static_tab_contents(request, course, tab)
        self.assertIn(self.toy_course_key.to_deprecated_string(), tab_content)
        self.assertIn('static_tab', tab_content)

        # Test when render raises an exception
        with patch('courseware.views.get_module') as mock_module_render:
            mock_module_render.return_value = MagicMock(
                render=Mock(side_effect=Exception('Render failed!'))
            )
            # NOTE(review): this local name shadows the imported static_tab view
            # within this method — works, but worth renaming.
            static_tab = get_static_tab_contents(request, course, tab)
            self.assertIn("this module is temporarily unavailable", static_tab)
@attr('shard_1')
class StaticTabDateTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
    """
    Tests for the static tab dates of an XML course
    """
    MODULESTORE = TEST_DATA_MIXED_CLOSED_MODULESTORE

    # The following XML test course (which lives at common/test/data/2014)
    # is closed; we're testing that tabs still appear when
    # the course is already closed
    xml_course_key = SlashSeparatedCourseKey('edX', 'detached_pages', '2014')

    # this text appears in the test course's tab
    # common/test/data/2014/tabs/8e4cce2b4aaf4ba28b1220804619e41f.html
    xml_data = "static 463139"
    xml_url = "8e4cce2b4aaf4ba28b1220804619e41f"

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_logged_in_xml(self):
        """A logged-in user sees the static tab of the closed XML course."""
        self.setup_user()
        url = reverse('static_tab', args=[self.xml_course_key.to_deprecated_string(), self.xml_url])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.xml_data, resp.content)

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_anonymous_user_xml(self):
        """An anonymous user sees the static tab of the closed XML course."""
        url = reverse('static_tab', args=[self.xml_course_key.to_deprecated_string(), self.xml_url])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.xml_data, resp.content)
@attr('shard_1')
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
class EntranceExamsTabsTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase, MilestonesTestCaseMixin):
    """
    Validate tab behavior when dealing with Entrance Exams
    """
    MODULESTORE = TEST_DATA_MIXED_CLOSED_MODULESTORE

    @patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
    def setUp(self):
        """
        Test case scaffolding
        """
        super(EntranceExamsTabsTestCase, self).setUp()
        self.course = CourseFactory.create()
        self.instructor_tab = ItemFactory.create(
            category="instructor", parent_location=self.course.location,
            data="Instructor Tab", display_name="Instructor"
        )
        self.extra_tab_2 = ItemFactory.create(
            category="static_tab", parent_location=self.course.location,
            data="Extra Tab", display_name="Extra Tab 2"
        )
        self.extra_tab_3 = ItemFactory.create(
            category="static_tab", parent_location=self.course.location,
            data="Extra Tab", display_name="Extra Tab 3"
        )
        self.setup_user()
        self.enroll(self.course)
        self.user.is_staff = True
        self.relationship_types = get_milestone_relationship_types()

    def test_get_course_tabs_list_entrance_exam_enabled(self):
        """
        Unit Test: test_get_course_tabs_list_entrance_exam_enabled

        With an unfulfilled entrance-exam milestone, a non-staff user's tab
        list collapses to a single courseware tab named after the exam.
        """
        entrance_exam = ItemFactory.create(
            category="chapter",
            parent_location=self.course.location,
            data="Exam Data",
            display_name="Entrance Exam",
            is_entrance_exam=True
        )
        milestone = {
            'name': 'Test Milestone',
            'namespace': '{}.entrance_exams'.format(unicode(self.course.id)),
            'description': 'Testing Courseware Tabs'
        }
        self.user.is_staff = False
        request = get_request_for_user(self.user)
        self.course.entrance_exam_enabled = True
        self.course.entrance_exam_id = unicode(entrance_exam.location)
        # add_milestone returns the persisted milestone record; rebind over the dict.
        milestone = add_milestone(milestone)
        add_course_milestone(
            unicode(self.course.id),
            self.relationship_types['REQUIRES'],
            milestone
        )
        add_course_content_milestone(
            unicode(self.course.id),
            unicode(entrance_exam.location),
            self.relationship_types['FULFILLS'],
            milestone
        )
        course_tab_list = get_course_tab_list(request, self.course)
        self.assertEqual(len(course_tab_list), 1)
        self.assertEqual(course_tab_list[0]['tab_id'], 'courseware')
        self.assertEqual(course_tab_list[0]['name'], 'Entrance Exam')

    def test_get_course_tabs_list_skipped_entrance_exam(self):
        """
        Tests tab list is not limited if user is allowed to skip entrance exam.
        """
        # create a user
        student = UserFactory()
        # login as instructor hit skip entrance exam api in instructor app
        instructor = InstructorFactory(course_key=self.course.id)
        self.client.logout()
        self.client.login(username=instructor.username, password='test')
        url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
        response = self.client.post(url, {
            'unique_student_identifier': student.email,
        })
        self.assertEqual(response.status_code, 200)
        # log in again as student
        self.client.logout()
        self.login(self.email, self.password)
        request = get_request_for_user(self.user)
        course_tab_list = get_course_tab_list(request, self.course)
        self.assertEqual(len(course_tab_list), 5)

    def test_course_tabs_list_for_staff_members(self):
        """
        Tests tab list is not limited if user is member of staff
        and has not passed entrance exam.
        """
        # Login as member of staff
        self.client.logout()
        staff_user = StaffFactory(course_key=self.course.id)
        self.client.login(username=staff_user.username, password='test')
        request = get_request_for_user(staff_user)
        course_tab_list = get_course_tab_list(request, self.course)
        self.assertEqual(len(course_tab_list), 5)
@attr('shard_1')
class TextBookCourseViewsTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
    """
    Validate tab behavior when dealing with textbooks.
    """
    MODULESTORE = TEST_DATA_MIXED_TOY_MODULESTORE

    @classmethod
    def setUpClass(cls):
        super(TextBookCourseViewsTestCase, cls).setUpClass()
        cls.course = CourseFactory.create()

    def setUp(self):
        super(TextBookCourseViewsTestCase, self).setUp()
        self.set_up_books(2)
        self.setup_user()
        self.enroll(self.course)
        self.num_textbook_tabs = sum(1 for tab in self.course.tabs if tab.type in [
            'textbooks', 'pdf_textbooks', 'html_textbooks'
        ])
        self.num_textbooks = self.num_textbook_tabs * len(self.books)

    # NOTE(review): duplicates TabTestCase.set_up_books — this class does not
    # inherit TabTestCase, which is presumably why; consider a shared mixin.
    def set_up_books(self, num_books):
        """Initializes the textbooks in the course and adds the given number of books to each textbook"""
        self.books = [MagicMock() for _ in range(num_books)]
        for book_index, book in enumerate(self.books):
            book.title = 'Book{0}'.format(book_index)
        self.course.textbooks = self.books
        self.course.pdf_textbooks = self.books
        self.course.html_textbooks = self.books

    def test_pdf_textbook_tabs(self):
        """
        Test that all textbooks tab links generating correctly.
        """
        type_to_reverse_name = {'textbook': 'book', 'pdftextbook': 'pdf_book', 'htmltextbook': 'html_book'}
        request = get_request_for_user(self.user)
        course_tab_list = get_course_tab_list(request, self.course)
        num_of_textbooks_found = 0
        for tab in course_tab_list:
            # Verify links of all textbook type tabs.
            if tab.type == 'single_textbook':
                book_type, book_index = tab.tab_id.split("/", 1)
                expected_link = reverse(
                    type_to_reverse_name[book_type],
                    args=[self.course.id.to_deprecated_string(), book_index]
                )
                tab_link = tab.link_func(self.course, reverse)
                self.assertEqual(tab_link, expected_link)
                num_of_textbooks_found += 1
        self.assertEqual(num_of_textbooks_found, self.num_textbooks)

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_TEXTBOOK": False})
    def test_textbooks_disabled(self):
        """With the feature flag off, the textbooks tab is disabled."""
        tab = xmodule_tabs.CourseTab.load('textbooks')
        self.assertFalse(tab.is_enabled(self.course, self.user))
class TabListTestCase(TabTestCase):
    """Base class for Test cases involving tab lists."""

    def setUp(self):
        super(TabListTestCase, self).setUp()

        # invalid tabs
        self.invalid_tabs = [
            # less than 2 tabs
            [{'type': CoursewareTab.type}],
            # missing course_info
            [{'type': CoursewareTab.type}, {'type': 'discussion', 'name': 'fake_name'}],
            [{'type': 'unknown_type'}],
            # incorrect order
            [{'type': 'discussion', 'name': 'fake_name'},
             {'type': CourseInfoTab.type, 'name': 'fake_name'}, {'type': CoursewareTab.type}],
        ]

        # tab types that should appear only once
        unique_tab_types = [
            CoursewareTab.type,
            CourseInfoTab.type,
            'textbooks',
            'pdf_textbooks',
            'html_textbooks',
        ]
        for unique_tab_type in unique_tab_types:
            self.invalid_tabs.append([
                {'type': CoursewareTab.type},
                {'type': CourseInfoTab.type, 'name': 'fake_name'},
                # add the unique tab multiple times
                {'type': unique_tab_type},
                {'type': unique_tab_type},
            ])

        # valid tabs
        self.valid_tabs = [
            # any empty list is valid because a default list of tabs will be
            # generated to replace the empty list.
            [],
            # all valid tabs
            [
                {'type': CoursewareTab.type},
                {'type': CourseInfoTab.type, 'name': 'fake_name'},
                {'type': 'discussion', 'name': 'fake_name'},
                {'type': ExternalLinkCourseTab.type, 'name': 'fake_name', 'link': 'fake_link'},
                {'type': ExternalLinkCourseTab.type, 'name': 'fake_name', 'link': 'fake_link'},
                {'type': 'textbooks'},
                {'type': 'pdf_textbooks'},
                {'type': 'html_textbooks'},
                {'type': ProgressTab.type, 'name': 'fake_name'},
                {'type': xmodule_tabs.StaticTab.type, 'name': 'fake_name', 'url_slug': 'schlug'},
                {'type': 'syllabus'},
            ],
            # with external discussion
            [
                {'type': CoursewareTab.type},
                {'type': CourseInfoTab.type, 'name': 'fake_name'},
                {'type': ExternalDiscussionCourseTab.type, 'name': 'fake_name', 'link': 'fake_link'}
            ],
        ]
        # Convenience handle on the "all valid tabs" fixture above.
        self.all_valid_tab_list = xmodule_tabs.CourseTabList().from_json(self.valid_tabs[1])
@attr('shard_1')
class ValidateTabsTestCase(TabListTestCase):
    """Test cases for validating tabs."""

    def test_validate_tabs(self):
        """Invalid tab lists raise InvalidTabsException; valid ones round-trip."""
        tab_list = xmodule_tabs.CourseTabList()
        for invalid_tab_list in self.invalid_tabs:
            with self.assertRaises(xmodule_tabs.InvalidTabsException):
                tab_list.from_json(invalid_tab_list)
        for valid_tab_list in self.valid_tabs:
            from_json_result = tab_list.from_json(valid_tab_list)
            self.assertEquals(len(from_json_result), len(valid_tab_list))

    def test_invalid_tab_type(self):
        """
        Verifies that having an unrecognized tab type does not cause
        the tabs to be undisplayable.
        """
        tab_list = xmodule_tabs.CourseTabList()
        # The unknown tab is silently dropped, leaving the two known tabs.
        self.assertEquals(
            len(tab_list.from_json([
                {'type': CoursewareTab.type},
                {'type': CourseInfoTab.type, 'name': 'fake_name'},
                {'type': 'no_such_type'}
            ])),
            2
        )
@attr('shard_1')
class CourseTabListTestCase(TabListTestCase):
    """Testing the generator method for iterating through displayable tabs"""

    def has_tab(self, tab_list, tab_type):
        """ Searches the given lab_list for a given tab_type. """
        for tab in tab_list:
            if tab.type == tab_type:
                return True
        return False

    def test_initialize_default_without_syllabus(self):
        """No syllabus tab is generated when the course has no syllabus."""
        self.course.tabs = []
        self.course.syllabus_present = False
        xmodule_tabs.CourseTabList.initialize_default(self.course)
        self.assertFalse(self.has_tab(self.course.tabs, 'syllabus'))

    def test_initialize_default_with_syllabus(self):
        """A syllabus tab is generated when the course has a syllabus."""
        self.course.tabs = []
        self.course.syllabus_present = True
        xmodule_tabs.CourseTabList.initialize_default(self.course)
        self.assertTrue(self.has_tab(self.course.tabs, 'syllabus'))

    def test_initialize_default_with_external_link(self):
        """An external discussion link replaces the built-in discussion tab."""
        self.course.tabs = []
        self.course.discussion_link = "other_discussion_link"
        xmodule_tabs.CourseTabList.initialize_default(self.course)
        self.assertTrue(self.has_tab(self.course.tabs, 'external_discussion'))
        self.assertFalse(self.has_tab(self.course.tabs, 'discussion'))

    def test_initialize_default_without_external_link(self):
        """Without an external link, the built-in discussion tab is used."""
        self.course.tabs = []
        self.course.discussion_link = ""
        xmodule_tabs.CourseTabList.initialize_default(self.course)
        self.assertFalse(self.has_tab(self.course.tabs, 'external_discussion'))
        self.assertTrue(self.has_tab(self.course.tabs, 'discussion'))

    @patch.dict("django.conf.settings.FEATURES", {
        "ENABLE_TEXTBOOK": True,
        "ENABLE_DISCUSSION_SERVICE": True,
        "ENABLE_STUDENT_NOTES": True,
        "ENABLE_EDXNOTES": True,
    })
    def test_iterate_displayable(self):
        """iterate_displayable yields tabs in order, expanding collections for users."""
        self.course.hide_progress_tab = False

        # create 1 book per textbook type
        self.set_up_books(1)

        # initialize the course tabs to a list of all valid tabs
        self.course.tabs = self.all_valid_tab_list

        # enumerate the tabs with no user
        for i, tab in enumerate(xmodule_tabs.CourseTabList.iterate_displayable(
                self.course,
                inline_collections=False
        )):
            self.assertEquals(tab.type, self.course.tabs[i].type)

        # enumerate the tabs with a staff user
        user = UserFactory(is_staff=True)
        CourseEnrollment.enroll(user, self.course.id)
        for i, tab in enumerate(xmodule_tabs.CourseTabList.iterate_displayable(self.course, user=user)):
            if getattr(tab, 'is_collection_item', False):
                # a collection item was found as a result of a collection tab
                self.assertTrue(getattr(self.course.tabs[i], 'is_collection', False))
            else:
                # all other tabs must match the expected type
                self.assertEquals(tab.type, self.course.tabs[i].type)

        # test including non-empty collections
        self.assertIn(
            {'type': 'html_textbooks'},
            list(xmodule_tabs.CourseTabList.iterate_displayable(self.course, inline_collections=False)),
        )

        # test not including empty collections
        self.course.html_textbooks = []
        self.assertNotIn(
            {'type': 'html_textbooks'},
            list(xmodule_tabs.CourseTabList.iterate_displayable(self.course, inline_collections=False)),
        )

    def test_get_tab_by_methods(self):
        """Tests the get_tab methods in CourseTabList"""
        self.course.tabs = self.all_valid_tab_list
        for tab in self.course.tabs:
            # get tab by type
            self.assertEquals(xmodule_tabs.CourseTabList.get_tab_by_type(self.course.tabs, tab.type), tab)
            # get tab by id
            self.assertEquals(xmodule_tabs.CourseTabList.get_tab_by_id(self.course.tabs, tab.tab_id), tab)
@attr('shard_1')
class ProgressTestCase(TabTestCase):
    """Test cases for Progress Tab."""

    def check_progress_tab(self):
        """Helper function for verifying the progress tab."""
        return self.check_tab(
            tab_class=ProgressTab,
            dict_tab={'type': ProgressTab.type, 'name': 'same'},
            expected_link=self.reverse('progress', args=[self.course.id.to_deprecated_string()]),
            expected_tab_id=ProgressTab.type,
            invalid_dict_tab=None,
        )

    @patch('student.models.CourseEnrollment.is_enrolled')
    def test_progress(self, is_enrolled):
        """Progress tab shows for enrolled users unless hide_progress_tab is set."""
        is_enrolled.return_value = True
        self.course.hide_progress_tab = False
        tab = self.check_progress_tab()
        self.check_can_display_results(
            tab, for_staff_only=True, for_enrolled_users_only=True
        )
        self.course.hide_progress_tab = True
        # NOTE(review): the return value of this second call is discarded;
        # the checks below reuse the tab from the first call — confirm intended.
        self.check_progress_tab()
        self.check_can_display_results(
            tab, for_staff_only=True, for_enrolled_users_only=True, expected_value=False
        )
@attr('shard_1')
class StaticTabTestCase(TabTestCase):
    """Test cases for Static Tab."""

    def test_static_tab(self):
        """A static tab derives its id from the url_slug and is always displayable."""
        url_slug = 'schmug'
        tab = self.check_tab(
            tab_class=xmodule_tabs.StaticTab,
            dict_tab={'type': xmodule_tabs.StaticTab.type, 'name': 'same', 'url_slug': url_slug},
            expected_link=self.reverse('static_tab', args=[self.course.id.to_deprecated_string(), url_slug]),
            expected_tab_id='static_tab_schmug',
            invalid_dict_tab=self.fake_dict_tab,
        )
        self.check_can_display_results(tab)
        self.check_get_and_set_method_for_key(tab, 'url_slug')
@attr('shard_1')
class DiscussionLinkTestCase(TabTestCase):
    """Test cases for discussion link tab."""

    def setUp(self):
        super(DiscussionLinkTestCase, self).setUp()
        self.tabs_with_discussion = [
            xmodule_tabs.CourseTab.load('discussion'),
        ]
        self.tabs_without_discussion = [
        ]

    @staticmethod
    def _reverse(course):
        """Custom reverse function"""
        def reverse_discussion_link(viewname, args):
            """reverse lookup for discussion link"""
            # Returns None for any other view name/args, which the tests treat
            # as "no discussion link".
            if viewname == "django_comment_client.forum.views.forum_form_discussion" and args == [unicode(course.id)]:
                return "default_discussion_link"
        return reverse_discussion_link

    def check_discussion(
            self, tab_list,
            expected_discussion_link,
            expected_can_display_value,
            discussion_link_in_course="",
            is_staff=True,
            is_enrolled=True,
    ):
        """Helper function to verify whether the discussion tab exists and can be displayed"""
        self.course.tabs = tab_list
        self.course.discussion_link = discussion_link_in_course
        discussion_tab = xmodule_tabs.CourseTabList.get_discussion(self.course)
        user = self.create_mock_user(is_authenticated=True, is_staff=is_staff, is_enrolled=is_enrolled)
        with patch('student.models.CourseEnrollment.is_enrolled') as check_is_enrolled:
            check_is_enrolled.return_value = is_enrolled
            self.assertEquals(
                (
                    discussion_tab is not None and
                    self.is_tab_enabled(discussion_tab, self.course, user) and
                    (discussion_tab.link_func(self.course, self._reverse(self.course)) == expected_discussion_link)
                ),
                expected_can_display_value
            )

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": False})
    def test_explicit_discussion_link(self):
        """Test that setting discussion_link overrides everything else"""
        self.check_discussion(
            tab_list=self.tabs_with_discussion,
            discussion_link_in_course="other_discussion_link",
            expected_discussion_link="other_discussion_link",
            expected_can_display_value=True,
        )

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": False})
    def test_discussions_disabled(self):
        """Test that other cases return None with discussions disabled"""
        for tab_list in [[], self.tabs_with_discussion, self.tabs_without_discussion]:
            self.check_discussion(
                tab_list=tab_list,
                # NOTE(review): `not None` evaluates to True — the link is only
                # required to differ from the real one; confirm this is deliberate.
                expected_discussion_link=not None,
                expected_can_display_value=False,
            )

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def test_tabs_with_discussion(self):
        """Test a course with a discussion tab configured"""
        self.check_discussion(
            tab_list=self.tabs_with_discussion,
            expected_discussion_link="default_discussion_link",
            expected_can_display_value=True,
        )

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def test_tabs_without_discussion(self):
        """Test a course with tabs configured but without a discussion tab"""
        self.check_discussion(
            tab_list=self.tabs_without_discussion,
            expected_discussion_link=not None,
            expected_can_display_value=False,
        )

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def test_tabs_enrolled_or_staff(self):
        """Either enrollment or staff status is enough to see the discussion tab."""
        for is_enrolled, is_staff in [(True, False), (False, True)]:
            self.check_discussion(
                tab_list=self.tabs_with_discussion,
                expected_discussion_link="default_discussion_link",
                expected_can_display_value=True,
                is_enrolled=is_enrolled,
                is_staff=is_staff
            )

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def test_tabs_not_enrolled_or_staff(self):
        """Neither enrolled nor staff: the discussion tab is not displayable."""
        is_enrolled = is_staff = False
        self.check_discussion(
            tab_list=self.tabs_with_discussion,
            expected_discussion_link="default_discussion_link",
            expected_can_display_value=False,
            is_enrolled=is_enrolled,
            is_staff=is_staff
        )
|
sauloal/pycluster | refs/heads/master | pypy-1.9_64/lib-python/2.7/plat-mac/Carbon/Resources.py | 81 | # Generated from 'Resources.h'
# Resource attribute flags (bit masks).
resSysHeap = 64
resPurgeable = 32
resLocked = 16
resProtected = 8
resPreload = 4
resChanged = 2

# Resource map attribute flags (bit masks).
mapReadOnly = 128
mapCompact = 64
mapChanged = 32

# Bit numbers corresponding to the resource attribute flags above.
resSysRefBit = 7
resSysHeapBit = 6
resPurgeableBit = 5
resLockedBit = 4
resProtectedBit = 3
resPreloadBit = 2
resChangedBit = 1

# Bit numbers corresponding to the resource map attribute flags above.
mapReadOnlyBit = 7
mapCompactBit = 6
mapChangedBit = 5

# Special resource-file reference values.
kResFileNotOpened = -1
kSystemResFile = 0

# Resource chain location constants.
kRsrcChainBelowSystemMap = 0
kRsrcChainBelowApplicationMap = 1
kRsrcChainAboveApplicationMap = 2
kRsrcChainAboveAllMaps = 4
|
chintal/tendril-sofficehelpers | refs/heads/master | sofficehelpers/ooutils.py | 1 | # LibreOffice utils.
#
# Based on code from:
# PyODConverter (Python OpenDocument Converter) v1.0.0 - 2008-05-05
# Copyright (C) 2008 Mirko Nasato <mirko@artofsolving.com>
# Licensed under the GNU LGPL v2.1 - or any later version.
# http://www.gnu.org/licenses/lgpl-2.1.html
#
import sys
import os
import time
import atexit
# Default UNO socket port used to talk to a headless OpenOffice instance.
OPENOFFICE_PORT = 8100

# Find OpenOffice.
# Each entry is (program directory, UNO python library directory).
_oopaths = (
    ('/usr/lib64/ooo-2.0/program', '/usr/lib64/ooo-2.0/program'),
    ('/opt/openoffice.org3/program', '/opt/openoffice.org/basis3.1/program'),
    ('/usr/lib/libreoffice/program', '/usr/lib/libreoffice/program'),
    ('/usr/lib64/libreoffice/program', '/usr/lib/libreoffice/program'),
)

# Use the first installation directory that exists on this machine.
for p in _oopaths:
    if os.path.exists(p[0]):
        OPENOFFICE_PATH = p[0]
        OPENOFFICE_BIN = os.path.join(OPENOFFICE_PATH, 'soffice')
        OPENOFFICE_LIBPATH = p[1]

        # Add to path so we can find uno.
        if sys.path.count(OPENOFFICE_LIBPATH) == 0:
            sys.path.insert(0, OPENOFFICE_LIBPATH)
        break
import uno
from com.sun.star.beans import PropertyValue
from com.sun.star.connection import NoConnectException
class OORunner:
    """
    Start, stop, and connect to OpenOffice.
    """

    def __init__(self, port=OPENOFFICE_PORT):
        """ Create OORunner that connects on the specified port. """
        self.port = port

    def connect(self, no_startup=False):
        """
        Connect to OpenOffice.

        If a connection cannot be established try to start OpenOffice
        (unless no_startup is True). Returns the UNO Desktop object;
        raises Exception on failure.
        """
        localContext = uno.getComponentContext()
        resolver = localContext.ServiceManager.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", localContext)
        context = None
        did_start = False

        # Up to six attempts, one second apart, before giving up.
        n = 0
        while n < 6:
            try:
                context = resolver.resolve("uno:socket,host=localhost,port=%d;urp;StarOffice.ComponentContext" % self.port)
                break
            except NoConnectException:
                pass

            # If first connect failed then try starting OpenOffice.
            if n == 0:
                # Exit loop if startup not desired.
                if no_startup:
                    break
                self.startup()
                did_start = True

            # Pause and try again to connect
            time.sleep(1)
            n += 1

        if not context:
            raise Exception("Failed to connect to OpenOffice on port %d" % self.port)

        desktop = context.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", context)

        if not desktop:
            raise Exception("Failed to create OpenOffice desktop on port %d" % self.port)

        if did_start:
            # Remember desktops we spawned so they are terminated at exit.
            _started_desktops[self.port] = desktop

        return desktop

    def startup(self):
        """
        Start a headless instance of OpenOffice.
        """
        args = [OPENOFFICE_BIN,
                '--accept=socket,host=localhost,port=%d;urp;StarOffice.ServiceManager' % self.port,
                '--norestore',
                '--nodefault',
                '--nologo',
                '--headless',
                ]
        # Minimal environment so soffice can locate its own libraries.
        env = {'PATH': '/bin:/usr/bin:%s' % OPENOFFICE_PATH,
               'PYTHONPATH': OPENOFFICE_LIBPATH,
               }

        try:
            pid = os.spawnve(os.P_NOWAIT, args[0], args, env)
        except Exception as e:
            raise Exception("Failed to start OpenOffice on port %d: %s" % (self.port, e))

        if pid <= 0:
            raise Exception("Failed to start OpenOffice on port %d" % self.port)

    def shutdown(self):
        """
        Shutdown OpenOffice.
        """
        try:
            # Only terminate instances this module started itself.
            if _started_desktops.get(self.port):
                _started_desktops[self.port].terminate()
                del _started_desktops[self.port]
        except Exception as e:
            # Best-effort: ignore failures when the instance is already gone.
            pass
# Keep track of started desktops and shut them down on exit.
# Maps port number -> UNO desktop object for instances spawned by this module.
_started_desktops = {}
def _shutdown_desktops():
    """Shutdown all OpenOffice desktops that were started by the program."""
    # Iterate over a snapshot of the values: the ports are never used, and a
    # terminate() callback must not be able to mutate the dict mid-iteration.
    for desktop in list(_started_desktops.values()):
        try:
            if desktop:
                desktop.terminate()
        except Exception:
            # Best-effort cleanup at interpreter exit: a desktop that is
            # already gone must not abort the shutdown of the others.
            pass
# Terminate any desktops we spawned when the interpreter exits.
atexit.register(_shutdown_desktops)
def oo_shutdown_if_running(port=OPENOFFICE_PORT):
    """ Shutdown OpenOffice if it's running on the specified port. """
    oorunner = OORunner(port)
    try:
        # no_startup=True: only connect to an already-running instance.
        desktop = oorunner.connect(no_startup=True)
        desktop.terminate()
    except Exception:
        # No running instance (or terminate failed): nothing to do. The
        # exception value was previously bound but never used.
        pass
def oo_properties(**args):
    """
    Convert keyword arguments to a tuple of OpenOffice PropertyValue
    objects (one per key/value pair), as expected by the UNO API.
    """
    props = []
    # Iterate key/value pairs directly instead of re-indexing the dict.
    for key, value in args.items():
        prop = PropertyValue()
        prop.Name = key
        prop.Value = value
        props.append(prop)
    return tuple(props)
|
CodeDJ/qt5-hidpi | refs/heads/master | qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/reftests/__init__.py | 6014 | # Required for Python to search this directory for module files
|
shaheemirza/pupy | refs/heads/master | pupy/genpayload.py | 18 | #!/usr/bin/env python
# -*- coding: UTF8 -*-
# ---------------------------------------------------------------
# Copyright (c) 2015, Nicolas VERDIER (contact@n1nj4.eu)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
# ---------------------------------------------------------------
import argparse
import sys
import os.path
def get_edit_binary(path, host, ip):
    """
    Patch a payload template so it connects back to host:port.

    The template contains a fixed-size placeholder string; "host:port"
    plus a NUL terminator is written over its beginning, so the size of
    the binary never changes.

    path -- path of the template binary to read
    host -- connect-back host name or address
    ip   -- connect-back port (parameter name kept for compatibility)
    """
    placeholder = (b"<default_connect_back_host>:<default_connect_back_port>"
                   + b"\x00" * 20)
    binary = b""
    with open(path, 'rb') as f:
        binary = f.read()

    # Collect every occurrence of the placeholder; exactly one is expected.
    # The previous loop started searching at index 1 and would have missed
    # a placeholder located at offset 0.
    offsets = []
    i = binary.find(placeholder)
    while i != -1:
        offsets.append(i)
        i = binary.find(placeholder, i + 1)

    if not offsets:
        raise Exception("Error: the offset to edit IP:PORT have not been found")
    elif len(offsets) != 1:
        raise Exception("Error: multiple offsets to edit IP:PORT have been found")

    new_host = ("%s:%s" % (host, ip)).encode('ascii') + b"\x00\x00\x00\x00"
    # The replacement must fit inside the placeholder (76 bytes); the old
    # limit of 100 allowed overwriting data that follows the placeholder.
    if len(new_host) > len(placeholder):
        raise Exception("Error: host too long")
    binary = binary[0:offsets[0]] + new_host + binary[offsets[0] + len(new_host):]
    return binary
if __name__ == "__main__":
    # CLI: embed a connect-back host:port into one of the prebuilt
    # payload templates and write the patched binary out.
    parser = argparse.ArgumentParser(
        description='Generate a pupy payload with an embedded connect-back host.')
    parser.add_argument('-t', '--type', default='exe_x86', help="exe_x86/dll_x86 exe_x64/dll_x64 (default: exe_x86)")
    parser.add_argument('-o', '--output', help="output path")
    # Help text previously said "ip" although this is the port.
    parser.add_argument('-p', '--port', type=int, default=443, help="connect back port (default: 443)")
    parser.add_argument('host', help="connect back host")
    args = parser.parse_args()

    # One table instead of four copy-pasted branches: payload type ->
    # template/default output file name.
    payload_files = {
        'exe_x86': 'pupyx86.exe',
        'exe_x64': 'pupyx64.exe',
        'dll_x86': 'pupyx86.dll',
        'dll_x64': 'pupyx64.dll',
    }
    if args.type not in payload_files:
        exit("Type %s is invalid." % (args.type))

    binary = get_edit_binary(os.path.join("payloads", payload_files[args.type]),
                             args.host, args.port)
    outpath = args.output if args.output else payload_files[args.type]
    with open(outpath, 'wb') as w:
        w.write(binary)
    # print() with a single argument works identically on Python 2 and 3.
    print("binary generated to %s with HOST=%s" % (outpath, (args.host, args.port)))
|
romain-dartigues/ansible | refs/heads/devel | lib/ansible/modules/network/vyos/vyos_vlan.py | 13 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_vlan
version_added: "2.5"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage VLANs on VyOS network devices
description:
- This module provides declarative management of VLANs
on VyOS network devices.
notes:
- Tested against VYOS 1.1.7
options:
name:
description:
- Name of the VLAN.
address:
description:
- Configure Virtual interface address.
vlan_id:
description:
- ID of the VLAN. Range 0-4094.
required: true
interfaces:
description:
- List of interfaces that should be associated to the VLAN.
required: true
associated_interfaces:
description:
- This is a intent option and checks the operational state of the for given vlan C(name)
for associated interfaces. If the value in the C(associated_interfaces) does not match with
the operational state of vlan on device it will result in failure.
version_added: "2.5"
delay:
description:
- Delay the play should wait to check for declarative intent params values.
default: 10
aggregate:
description: List of VLANs definitions.
purge:
description:
- Purge VLANs not defined in the I(aggregate) parameter.
default: no
type: bool
state:
description:
- State of the VLAN configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: vyos
"""
EXAMPLES = """
- name: Create vlan
vyos_vlan:
vlan_id: 100
name: vlan-100
interfaces: eth1
state: present
- name: Add interfaces to VLAN
vyos_vlan:
vlan_id: 100
interfaces:
- eth1
- eth2
- name: Configure virtual interface address
vyos_vlan:
vlan_id: 100
interfaces: eth1
address: 172.26.100.37/24
- name: vlan interface config + intent
vyos_vlan:
vlan_id: 100
interfaces: eth0
associated_interfaces:
- eth0
- name: vlan intent check
vyos_vlan:
vlan_id: 100
associated_interfaces:
- eth3
- eth4
- name: Delete vlan
vyos_vlan:
vlan_id: 100
interfaces: eth1
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set interfaces ethernet eth1 vif 100 description VLAN 100
- set interfaces ethernet eth1 vif 100 address 172.26.100.37/24
- delete interfaces ethernet eth1 vif 100
"""
import re
import time
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.vyos.vyos import load_config, run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def search_obj_in_list(vlan_id, lst):
    """Return every entry of *lst* whose 'vlan_id' equals *vlan_id*."""
    return [entry for entry in lst if entry['vlan_id'] == vlan_id]
def map_obj_to_commands(updates, module):
    """Build the list of VyOS CLI commands that move `have` towards `want`.

    updates -- tuple (want, have) of desired and current vlan dicts
    module  -- AnsibleModule (only the `purge` parameter is read)
    """
    commands = list()
    want, have = updates
    purge = module.params['purge']

    for w in want:
        vlan_id = w['vlan_id']
        name = w['name']
        address = w['address']
        state = w['state']
        interfaces = w['interfaces']

        obj_in_have = search_obj_in_list(vlan_id, have)

        if state == 'absent':
            # Delete the vif from every interface it currently exists on.
            if obj_in_have:
                for obj in obj_in_have:
                    for i in obj['interfaces']:
                        commands.append('delete interfaces ethernet {0} vif {1}'.format(i, vlan_id))

        elif state == 'present':
            # Only create commands when the vlan is not already present.
            if not obj_in_have:
                if w['interfaces'] and w['vlan_id']:
                    for i in w['interfaces']:
                        cmd = 'set interfaces ethernet {0} vif {1}'.format(i, vlan_id)
                        # NOTE(review): name and address are mutually
                        # exclusive here (elif) -- when both are supplied
                        # only the description is configured; confirm this
                        # is intentional.
                        if w['name']:
                            commands.append(cmd + ' description {}'.format(name))
                        elif w['address']:
                            commands.append(cmd + ' address {}'.format(address))
                        else:
                            commands.append(cmd)

    if purge:
        # Remove any vlan present on the device but absent from `want`.
        for h in have:
            obj_in_want = search_obj_in_list(h['vlan_id'], want)
            if not obj_in_want:
                for i in h['interfaces']:
                    commands.append('delete interfaces ethernet {0} vif {1}'.format(i, h['vlan_id']))

    return commands
def map_params_to_obj(module):
    """Normalize module parameters into the list of wanted vlan dicts.

    Supports both the `aggregate` form (list of vlan definitions, with
    per-item fallback to the top-level parameters) and the single-vlan
    form. `vlan_id` is always converted to a string for comparison with
    values parsed from the device.
    """
    obj = []
    aggregate = module.params.get('aggregate')
    if aggregate:
        for item in aggregate:
            # Fill keys missing from the item with the top-level params.
            for key in item:
                if item.get(key) is None:
                    item[key] = module.params[key]

            d = item.copy()

            if not d['vlan_id']:
                module.fail_json(msg='vlan_id is required')
            d['vlan_id'] = str(d['vlan_id'])

            # Re-run the required_one_of validation for each aggregate
            # item (uses a private AnsibleModule helper).
            module._check_required_one_of(module.required_one_of, item)
            obj.append(d)
    else:
        obj.append({
            'vlan_id': str(module.params['vlan_id']),
            'name': module.params['name'],
            'address': module.params['address'],
            'state': module.params['state'],
            'interfaces': module.params['interfaces'],
            'associated_interfaces': module.params['associated_interfaces']
        })
    return obj
def map_config_to_obj(module):
    """Parse `show interfaces` output into the list of existing vlan dicts.

    Each eth/eth.vlan row becomes one dict with keys `interfaces`,
    `vlan_id` (None for non-subinterface rows) and, when present,
    `address` and `name`.
    """
    objs = []
    interfaces = list()

    output = run_commands(module, 'show interfaces')
    # Skip the three table-header lines of the command output.
    lines = output[0].strip().splitlines()[3:]

    for l in lines:
        # Columns are separated by runs of two or more spaces.
        splitted_line = re.split(r'\s{2,}', l.strip())
        obj = {}

        eth = splitted_line[0].strip("'")
        if eth.startswith('eth'):
            obj['interfaces'] = []
            if '.' in eth:
                # 'eth1.100' -> interface eth1, vlan 100
                interface = eth.split('.')[0]
                obj['interfaces'].append(interface)
                obj['vlan_id'] = eth.split('.')[-1]
            else:
                obj['interfaces'].append(eth)
                obj['vlan_id'] = None

            # '-' in the address column means no address configured.
            if splitted_line[1].strip("'") != '-':
                obj['address'] = splitted_line[1].strip("'")

            # Column 3 (when present) holds the description/name.
            if len(splitted_line) > 3:
                obj['name'] = splitted_line[3].strip("'")
            obj['state'] = 'present'
            objs.append(obj)

    return objs
def check_declarative_intent_params(want, module, result):
    """Verify operational vlan state against the associated_interfaces intent.

    For every wanted vlan that declares C(associated_interfaces), gather
    the operational interface list of that vlan (after a one-time settle
    delay if the play changed configuration) and fail the module when a
    declared interface is not actually configured on the vlan.
    """
    have = None
    obj_interface = list()
    is_delay = False

    for w in want:
        if w.get('associated_interfaces') is None:
            continue

        # Give the device time to converge, once, and only if we changed
        # something during this run.
        if result['changed'] and not is_delay:
            time.sleep(module.params['delay'])
            is_delay = True

        if have is None:
            have = map_config_to_obj(module)
        obj_in_have = search_obj_in_list(w['vlan_id'], have)
        if obj_in_have:
            for obj in obj_in_have:
                obj_interface.extend(obj['interfaces'])

    for w in want:
        if w.get('associated_interfaces') is None:
            continue
        for i in w['associated_interfaces']:
            # Fail on the specific interface that is missing. The previous
            # check ignored `i` entirely and compared device-minus-intent
            # sets, so it failed on *extra* device interfaces while
            # reporting an unrelated interface name.
            if i not in obj_interface:
                module.fail_json(msg='Interface {0} not configured on vlan {1}'.format(i, w['vlan_id']))
def main():
    """ main entry point for module execution

    Builds the argument spec (single-vlan params plus the aggregate
    form), computes want/have, pushes the resulting commands and runs
    the declarative-intent check before exiting.
    """
    element_spec = dict(
        vlan_id=dict(type='int'),
        name=dict(),
        address=dict(),
        interfaces=dict(type='list'),
        associated_interfaces=dict(type='list'),
        delay=dict(default=10, type='int'),
        state=dict(default='present',
                   choices=['present', 'absent'])
    )

    aggregate_spec = deepcopy(element_spec)

    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)

    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
        purge=dict(default=False, type='bool')
    )

    argument_spec.update(element_spec)
    argument_spec.update(vyos_argument_spec)

    required_one_of = [['vlan_id', 'aggregate'],
                       ['aggregate', 'interfaces', 'associated_interfaces']]
    mutually_exclusive = [['vlan_id', 'aggregate']]
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=required_one_of,
                           mutually_exclusive=mutually_exclusive)

    warnings = list()
    result = {'changed': False}
    # NOTE(review): `warnings` is never populated above, so this branch is
    # dead boilerplate kept for consistency with other network modules.
    if warnings:
        result['warnings'] = warnings

    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands

    if commands:
        # Only commit the candidate config outside of check mode.
        commit = not module.check_mode
        load_config(module, commands, commit=commit)
        result['changed'] = True

    check_declarative_intent_params(want, module, result)

    module.exit_json(**result)
if __name__ == '__main__':
main()
|
DazWorrall/ansible | refs/heads/devel | lib/ansible/modules/network/cloudengine/ce_eth_trunk.py | 27 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_eth_trunk
version_added: "2.4"
short_description: Manages Eth-Trunk interfaces on HUAWEI CloudEngine switches.
description:
- Manages Eth-Trunk specific configuration parameters on HUAWEI CloudEngine switches.
author: QijunPan (@CloudEngine-Ansible)
notes:
- C(state=absent) removes the Eth-Trunk config and interface if it
already exists. If members to be removed are not explicitly
passed, all existing members (if any), are removed,
and Eth-Trunk removed.
- Members must be a list.
options:
trunk_id:
description:
- Eth-Trunk interface number.
The value is an integer.
The value range depends on the assign forward eth-trunk mode command.
When 256 is specified, the value ranges from 0 to 255.
When 512 is specified, the value ranges from 0 to 511.
When 1024 is specified, the value ranges from 0 to 1023.
required: true
mode:
description:
- Specifies the working mode of an Eth-Trunk interface.
required: false
default: null
choices: ['manual','lacp-dynamic','lacp-static']
min_links:
description:
- Specifies the minimum number of Eth-Trunk member links in the Up state.
The value is an integer ranging from 1 to the maximum number of interfaces
that can be added to a Eth-Trunk interface.
required: false
default: null
hash_type:
description:
- Hash algorithm used for load balancing among Eth-Trunk member interfaces.
required: false
default: null
choices: ['src-dst-ip', 'src-dst-mac', 'enhanced', 'dst-ip', 'dst-mac', 'src-ip', 'src-mac']
members:
description:
- List of interfaces that will be managed in a given Eth-Trunk.
The interface name must be full name.
required: false
default: null
force:
description:
- When true it forces Eth-Trunk members to match what is
declared in the members param. This can be used to remove
members.
required: false
default: false
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: eth_trunk module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Ensure Eth-Trunk100 is created, add two members, and set to mode lacp-static
ce_eth_trunk:
trunk_id: 100
members: ['10GE1/0/24','10GE1/0/25']
mode: 'lacp-static'
state: present
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"trunk_id": "100", "members": ['10GE1/0/24','10GE1/0/25'], "mode": "lacp-static"}
existing:
description: k/v pairs of existing Eth-Trunk
returned: always
type: dict
sample: {"trunk_id": "100", "hash_type": "mac", "members_detail": [
{"memberIfName": "10GE1/0/25", "memberIfState": "Down"}],
"min_links": "1", "mode": "manual"}
end_state:
description: k/v pairs of Eth-Trunk info after module execution
returned: always
type: dict
sample: {"trunk_id": "100", "hash_type": "mac", "members_detail": [
{"memberIfName": "10GE1/0/24", "memberIfState": "Down"},
{"memberIfName": "10GE1/0/25", "memberIfState": "Down"}],
"min_links": "1", "mode": "lacp-static"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["interface Eth-Trunk 100",
"mode lacp-static",
"interface 10GE1/0/25",
"eth-trunk 100"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_TRUNK = """
<filter type="subtree">
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<TrunkIfs>
<TrunkIf>
<ifName>Eth-Trunk%s</ifName>
<minUpNum></minUpNum>
<maxUpNum></maxUpNum>
<trunkType></trunkType>
<hashType></hashType>
<workMode></workMode>
<upMemberIfNum></upMemberIfNum>
<memberIfNum></memberIfNum>
<TrunkMemberIfs>
<TrunkMemberIf>
<memberIfName></memberIfName>
<memberIfState></memberIfState>
</TrunkMemberIf>
</TrunkMemberIfs>
</TrunkIf>
</TrunkIfs>
</ifmtrunk>
</filter>
"""
CE_NC_XML_BUILD_TRUNK_CFG = """
<config>
<ifmtrunk xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<TrunkIfs>%s</TrunkIfs>
</ifmtrunk>
</config>
"""
CE_NC_XML_DELETE_TRUNK = """
<TrunkIf operation="delete">
<ifName>Eth-Trunk%s</ifName>
</TrunkIf>
"""
CE_NC_XML_CREATE_TRUNK = """
<TrunkIf operation="merge">
<ifName>Eth-Trunk%s</ifName>
</TrunkIf>
"""
CE_NC_XML_MERGE_MINUPNUM = """
<TrunkIf operation="merge">
<ifName>Eth-Trunk%s</ifName>
<minUpNum>%s</minUpNum>
</TrunkIf>
"""
CE_NC_XML_MERGE_HASHTYPE = """
<TrunkIf operation="merge">
<ifName>Eth-Trunk%s</ifName>
<hashType>%s</hashType>
</TrunkIf>
"""
CE_NC_XML_MERGE_WORKMODE = """
<TrunkIf operation="merge">
<ifName>Eth-Trunk%s</ifName>
<workMode>%s</workMode>
</TrunkIf>
"""
CE_NC_XML_BUILD_MEMBER_CFG = """
<TrunkIf>
<ifName>Eth-Trunk%s</ifName>
<TrunkMemberIfs>%s</TrunkMemberIfs>
</TrunkIf>
"""
CE_NC_XML_MERGE_MEMBER = """
<TrunkMemberIf operation="merge">
<memberIfName>%s</memberIfName>
</TrunkMemberIf>
"""
CE_NC_XML_DELETE_MEMBER = """
<TrunkMemberIf operation="delete">
<memberIfName>%s</memberIfName>
</TrunkMemberIf>
"""
# Translation tables between the netconf XML values used by the device and
# the CLI keywords exposed by this module (trunk work mode and
# load-balance hash type), in both directions.
MODE_XML2CLI = {"Manual": "manual", "Dynamic": "lacp-dynamic", "Static": "lacp-static"}
MODE_CLI2XML = {"manual": "Manual", "lacp-dynamic": "Dynamic", "lacp-static": "Static"}
HASH_XML2CLI = {"IP": "src-dst-ip", "MAC": "src-dst-mac", "Enhanced": "enhanced",
                "Desip": "dst-ip", "Desmac": "dst-mac", "Sourceip": "src-ip", "Sourcemac": "src-mac"}
HASH_CLI2XML = {"src-dst-ip": "IP", "src-dst-mac": "MAC", "enhanced": "Enhanced",
                "dst-ip": "Desip", "dst-mac": "Desmac", "src-ip": "Sourceip", "src-mac": "Sourcemac"}
def get_interface_type(interface):
    """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...

    Returns the lowercase type string, or None when `interface` is None
    or does not match any known prefix.
    """
    if interface is None:
        return None

    # Ordered (prefix, type) table replacing a 28-line elif chain; the
    # order is preserved from the original chain (no two prefixes here
    # are a prefix of one another, so first-match is unambiguous).
    prefix_types = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'),
        ('METH', 'meth'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'),
        ('NVE', 'nve'),
        ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'),
        ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'),
        ('STACK-PORT', 'stack-port'),
        ('NULL', 'null'),
    )

    upper = interface.upper()  # hoisted: computed once instead of per branch
    for prefix, iftype in prefix_types:
        if upper.startswith(prefix):
            # Types are stored lowercase, so the original trailing
            # `.lower()` call was redundant and is dropped.
            return iftype
    return None
def mode_xml_to_cli_str(mode):
    """convert mode to cli format string ('' for an empty mode)"""
    return MODE_XML2CLI.get(mode) if mode else ""
def hash_type_xml_to_cli_str(hash_type):
    """convert trunk hash type netconf xml to cli format string ('' when empty)"""
    return HASH_XML2CLI.get(hash_type) if hash_type else ""
class EthTrunk(object):
    """
    Manages Eth-Trunk interfaces.

    One-shot worker: __init__ parses the module arguments, work() reads
    the current trunk state over netconf, applies the requested
    create/merge/delete operations, and exits the module with
    proposed/existing/end_state facts plus the equivalent CLI commands.
    """

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.__init_module__()

        # module input info
        self.trunk_id = self.module.params['trunk_id']
        self.mode = self.module.params['mode']
        self.min_links = self.module.params['min_links']
        self.hash_type = self.module.params['hash_type']
        self.members = self.module.params['members']
        self.state = self.module.params['state']
        self.force = self.module.params['force']

        # state
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

        # interface info (filled in by get_trunk_dict() during work())
        self.trunk_info = dict()

    def __init_module__(self):
        """ init module """
        self.module = AnsibleModule(
            argument_spec=self.spec, supports_check_mode=True)

    def netconf_set_config(self, xml_str, xml_name):
        """ netconf set config; fail the module if the device does not ack with <ok/> """
        recv_xml = set_nc_config(self.module, xml_str)
        if "<ok/>" not in recv_xml:
            self.module.fail_json(msg='Error: %s failed.' % xml_name)

    def get_trunk_dict(self, trunk_id):
        """ get one interface attributes dict."""
        trunk_info = dict()
        conf_str = CE_NC_GET_TRUNK % trunk_id
        recv_xml = get_nc_config(self.module, conf_str)

        # An empty <data/> reply means the trunk does not exist.
        if "<data/>" in recv_xml:
            return trunk_info

        # get trunk base info
        base = re.findall(
            r'.*<ifName>(.*)</ifName>.*\s*'
            r'<minUpNum>(.*)</minUpNum>.*\s*'
            r'<maxUpNum>(.*)</maxUpNum>.*\s*'
            r'<trunkType>(.*)</trunkType>.*\s*'
            r'<hashType>(.*)</hashType>.*\s*'
            r'<workMode>(.*)</workMode>.*\s*'
            r'<upMemberIfNum>(.*)</upMemberIfNum>.*\s*'
            r'<memberIfNum>(.*)</memberIfNum>.*', recv_xml)

        if base:
            # trunkId is derived from ifName ("Eth-Trunk100" -> "100").
            trunk_info = dict(ifName=base[0][0],
                              trunkId=base[0][0].lower().replace("eth-trunk", "").replace(" ", ""),
                              minUpNum=base[0][1],
                              maxUpNum=base[0][2],
                              trunkType=base[0][3],
                              hashType=base[0][4],
                              workMode=base[0][5],
                              upMemberIfNum=base[0][6],
                              memberIfNum=base[0][7])

        # get trunk member interface info
        member = re.findall(
            r'.*<memberIfName>(.*)</memberIfName>.*\s*'
            r'<memberIfState>(.*)</memberIfState>.*', recv_xml)
        trunk_info["TrunkMemberIfs"] = list()

        for mem in member:
            trunk_info["TrunkMemberIfs"].append(
                dict(memberIfName=mem[0], memberIfState=mem[1]))

        return trunk_info

    def is_member_exist(self, ifname):
        """is trunk member exist (case/space-insensitive interface name compare)"""
        if not self.trunk_info["TrunkMemberIfs"]:
            return False

        for mem in self.trunk_info["TrunkMemberIfs"]:
            if ifname.replace(" ", "").upper() == mem["memberIfName"].replace(" ", "").upper():
                return True

        return False

    def get_mode_xml_str(self):
        """trunk mode netconf xml fromat string"""
        return MODE_CLI2XML.get(self.mode)

    def get_hash_type_xml_str(self):
        """trunk hash type netconf xml format string"""
        return HASH_CLI2XML.get(self.hash_type)

    def create_eth_trunk(self):
        """Create Eth-Trunk interface and apply all requested attributes/members."""
        xml_str = CE_NC_XML_CREATE_TRUNK % self.trunk_id
        self.updates_cmd.append("interface Eth-Trunk %s" % self.trunk_id)

        if self.hash_type:
            self.updates_cmd.append("load-balance %s" % self.hash_type)
            xml_str += CE_NC_XML_MERGE_HASHTYPE % (self.trunk_id, self.get_hash_type_xml_str())
        if self.mode:
            self.updates_cmd.append("mode %s" % self.mode)
            xml_str += CE_NC_XML_MERGE_WORKMODE % (self.trunk_id, self.get_mode_xml_str())
        if self.min_links:
            self.updates_cmd.append("least active-linknumber %s" % self.min_links)
            xml_str += CE_NC_XML_MERGE_MINUPNUM % (self.trunk_id, self.min_links)

        if self.members:
            mem_xml = ""
            for mem in self.members:
                mem_xml += CE_NC_XML_MERGE_MEMBER % mem.upper()
                self.updates_cmd.append("interface %s" % mem)
                self.updates_cmd.append("eth-trunk %s" % self.trunk_id)
            xml_str += CE_NC_XML_BUILD_MEMBER_CFG % (self.trunk_id, mem_xml)

        cfg_xml = CE_NC_XML_BUILD_TRUNK_CFG % xml_str
        self.netconf_set_config(cfg_xml, "CREATE_TRUNK")
        self.changed = True

    def delete_eth_trunk(self):
        """Delete Eth-Trunk interface and remove all member"""
        if not self.trunk_info:
            return

        xml_str = ""
        mem_str = ""

        # Members must be removed before the trunk itself can be deleted.
        if self.trunk_info["TrunkMemberIfs"]:
            for mem in self.trunk_info["TrunkMemberIfs"]:
                mem_str += CE_NC_XML_DELETE_MEMBER % mem["memberIfName"]
                self.updates_cmd.append("interface %s" % mem["memberIfName"])
                self.updates_cmd.append("undo eth-trunk")
            if mem_str:
                xml_str += CE_NC_XML_BUILD_MEMBER_CFG % (self.trunk_id, mem_str)

        xml_str += CE_NC_XML_DELETE_TRUNK % self.trunk_id
        self.updates_cmd.append("undo interface Eth-Trunk %s" % self.trunk_id)
        cfg_xml = CE_NC_XML_BUILD_TRUNK_CFG % xml_str
        self.netconf_set_config(cfg_xml, "DELETE_TRUNK")
        self.changed = True

    def remove_member(self):
        """delete trunk member (only those in `members` that actually exist)"""
        if not self.members:
            return

        change = False
        mem_xml = ""
        xml_str = ""
        for mem in self.members:
            if self.is_member_exist(mem):
                mem_xml += CE_NC_XML_DELETE_MEMBER % mem.upper()
                self.updates_cmd.append("interface %s" % mem)
                self.updates_cmd.append("undo eth-trunk")
        if mem_xml:
            xml_str += CE_NC_XML_BUILD_MEMBER_CFG % (self.trunk_id, mem_xml)
            change = True

        if not change:
            return

        cfg_xml = CE_NC_XML_BUILD_TRUNK_CFG % xml_str
        self.netconf_set_config(cfg_xml, "REMOVE_TRUNK_MEMBER")
        self.changed = True

    def merge_eth_trunk(self):
        """Create or merge Eth-Trunk: update only attributes that differ."""
        change = False
        xml_str = ""
        self.updates_cmd.append("interface Eth-Trunk %s" % self.trunk_id)
        if self.hash_type and self.get_hash_type_xml_str() != self.trunk_info["hashType"]:
            self.updates_cmd.append("load-balance %s" %
                                    self.hash_type)
            xml_str += CE_NC_XML_MERGE_HASHTYPE % (
                self.trunk_id, self.get_hash_type_xml_str())
            change = True
        if self.min_links and self.min_links != self.trunk_info["minUpNum"]:
            self.updates_cmd.append(
                "least active-linknumber %s" % self.min_links)
            xml_str += CE_NC_XML_MERGE_MINUPNUM % (
                self.trunk_id, self.min_links)
            change = True
        if self.mode and self.get_mode_xml_str() != self.trunk_info["workMode"]:
            self.updates_cmd.append("mode %s" % self.mode)
            xml_str += CE_NC_XML_MERGE_WORKMODE % (
                self.trunk_id, self.get_mode_xml_str())
            change = True

        if not change:
            self.updates_cmd.pop()  # remove 'interface Eth-Trunk' command

        # deal force:
        # When true it forces Eth-Trunk members to match
        # what is declared in the members param.
        if self.force and self.trunk_info["TrunkMemberIfs"]:
            mem_xml = ""
            for mem in self.trunk_info["TrunkMemberIfs"]:
                if not self.members or mem["memberIfName"].replace(" ", "").upper() not in self.members:
                    mem_xml += CE_NC_XML_DELETE_MEMBER % mem["memberIfName"]
                    self.updates_cmd.append("interface %s" % mem["memberIfName"])
                    self.updates_cmd.append("undo eth-trunk")
            if mem_xml:
                xml_str += CE_NC_XML_BUILD_MEMBER_CFG % (self.trunk_id, mem_xml)
                change = True

        if self.members:
            mem_xml = ""
            for mem in self.members:
                # Only add members that are not already in the trunk.
                if not self.is_member_exist(mem):
                    mem_xml += CE_NC_XML_MERGE_MEMBER % mem.upper()
                    self.updates_cmd.append("interface %s" % mem)
                    self.updates_cmd.append("eth-trunk %s" % self.trunk_id)
            if mem_xml:
                xml_str += CE_NC_XML_BUILD_MEMBER_CFG % (
                    self.trunk_id, mem_xml)
                change = True

        if not change:
            return

        cfg_xml = CE_NC_XML_BUILD_TRUNK_CFG % xml_str
        self.netconf_set_config(cfg_xml, "MERGE_TRUNK")
        self.changed = True

    def check_params(self):
        """Check all input params"""
        # trunk_id check
        if not self.trunk_id.isdigit():
            self.module.fail_json(msg='The parameter of trunk_id is invalid.')

        # min_links check (passed as a string; must be numeric)
        if self.min_links and not self.min_links.isdigit():
            self.module.fail_json(msg='The parameter of min_links is invalid.')

        # members check and convert members to upper
        if self.members:
            for mem in self.members:
                if not get_interface_type(mem.replace(" ", "")):
                    self.module.fail_json(
                        msg='The parameter of members is invalid.')

            # Normalize in place so later comparisons are space/case-free.
            for mem_id in range(len(self.members)):
                self.members[mem_id] = self.members[mem_id].replace(" ", "").upper()

    def get_proposed(self):
        """get proposed info"""
        self.proposed["trunk_id"] = self.trunk_id
        self.proposed["mode"] = self.mode
        if self.min_links:
            self.proposed["min_links"] = self.min_links
        self.proposed["hash_type"] = self.hash_type
        if self.members:
            self.proposed["members"] = self.members
        self.proposed["state"] = self.state
        self.proposed["force"] = self.force

    def get_existing(self):
        """get existing info (device state before any change)"""
        if not self.trunk_info:
            return

        self.existing["trunk_id"] = self.trunk_info["trunkId"]
        self.existing["min_links"] = self.trunk_info["minUpNum"]
        self.existing["hash_type"] = hash_type_xml_to_cli_str(self.trunk_info["hashType"])
        self.existing["mode"] = mode_xml_to_cli_str(self.trunk_info["workMode"])
        self.existing["members_detail"] = self.trunk_info["TrunkMemberIfs"]

    def get_end_state(self):
        """get end state info (device state re-read after the change)"""
        trunk_info = self.get_trunk_dict(self.trunk_id)
        if not trunk_info:
            return

        self.end_state["trunk_id"] = trunk_info["trunkId"]
        self.end_state["min_links"] = trunk_info["minUpNum"]
        self.end_state["hash_type"] = hash_type_xml_to_cli_str(trunk_info["hashType"])
        self.end_state["mode"] = mode_xml_to_cli_str(trunk_info["workMode"])
        self.end_state["members_detail"] = trunk_info["TrunkMemberIfs"]

    def work(self):
        """worker: validate, read device state, apply change, report results"""
        self.check_params()
        self.trunk_info = self.get_trunk_dict(self.trunk_id)
        self.get_existing()
        self.get_proposed()

        # deal present or absent
        if self.state == "present":
            if not self.trunk_info:
                # create
                self.create_eth_trunk()
            else:
                # merge trunk
                self.merge_eth_trunk()
        else:
            if self.trunk_info:
                if not self.members:
                    # remove all members and delete trunk
                    self.delete_eth_trunk()
                else:
                    # remove some trunk members
                    self.remove_member()
            else:
                self.module.fail_json(msg='Error: Eth-Trunk does not exist.')

        self.get_end_state()
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()

        self.module.exit_json(**self.results)
def main():
    """Module main: assemble the argument spec and hand control to EthTrunk."""
    spec = {
        'trunk_id': dict(required=True),
        'mode': dict(required=False,
                     choices=['manual', 'lacp-dynamic', 'lacp-static'],
                     type='str'),
        'min_links': dict(required=False, type='str'),
        'hash_type': dict(required=False,
                          choices=['src-dst-ip', 'src-dst-mac', 'enhanced',
                                   'dst-ip', 'dst-mac', 'src-ip', 'src-mac'],
                          type='str'),
        'members': dict(required=False, default=None, type='list'),
        'force': dict(required=False, default=False, type='bool'),
        'state': dict(required=False, default='present',
                      choices=['present', 'absent']),
    }
    # Add the common CloudEngine connection arguments.
    spec.update(ce_argument_spec)
    EthTrunk(spec).work()
if __name__ == '__main__':
main()
|
byronmccollum/rpc-openstack | refs/heads/master | maas/plugins/service_api_local_check.py | 11 | #!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ipaddr
from maas_common import get_auth_ref
from maas_common import get_keystone_client
from maas_common import metric
from maas_common import metric_bool
from maas_common import print_output
from maas_common import status_ok
import requests
from requests import exceptions as exc
def check(args):
    """Probe a service API endpoint and emit MaaS metrics.

    Emits a boolean `<name>_api_local_status` metric, and when the endpoint
    responds successfully, a `<name>_api_local_response_time` metric in ms.

    Args:
        args: Parsed argparse namespace with `name`, `ip`, `port`, `path`,
            `auth`, `ssl` and `version` attributes.
    """
    headers = {'Content-type': 'application/json'}
    path_options = {}
    if args.auth:
        auth_ref = get_auth_ref()
        keystone = get_keystone_client(auth_ref)
        auth_token = keystone.auth_token
        project_id = keystone.project_id
        headers['auth_token'] = auth_token
        path_options['project_id'] = project_id
    scheme = 'https' if args.ssl else 'http'
    endpoint = '{scheme}://{ip}:{port}'.format(ip=args.ip, port=args.port,
                                               scheme=scheme)
    if args.version is not None:
        path_options['version'] = args.version
    # BUG FIX: named placeholders such as "{version}" and "{project_id}"
    # require keyword arguments. The previous call passed the dict as a
    # single positional argument, which raised KeyError for any path
    # containing a placeholder.
    path = args.path.format(**path_options)
    s = requests.Session()
    s.headers.update(headers)
    if path and not path.startswith('/'):
        url = '/'.join((endpoint, path))
    else:
        url = ''.join((endpoint, path))
    try:
        # verify=False: internal endpoints often use self-signed certs;
        # certificate validation is deliberately skipped.
        r = s.get(url, verify=False, timeout=10)
    except (exc.ConnectionError,
            exc.HTTPError,
            exc.Timeout):
        up = False
    else:
        up = True
    status_ok()
    metric_bool('{name}_api_local_status'.format(name=args.name), up)
    # Only report a response time for an actual 2xx/3xx answer.
    if up and r.ok:
        milliseconds = r.elapsed.total_seconds() * 1000
        metric('{name}_api_local_response_time'.format(name=args.name),
               'double',
               '%.3f' % milliseconds,
               'ms')
def main(args):
    """Run the API availability check with parsed command-line arguments."""
    check(args)
# Script entry point: parse CLI arguments and run the check inside the
# MaaS plugin output wrapper.
if __name__ == "__main__":
    with print_output():
        parser = argparse.ArgumentParser(description='Check service is up.')
        parser.add_argument('name', help='Service name.')
        parser.add_argument('ip', type=ipaddr.IPv4Address,
                            help='Service IP address.')
        parser.add_argument('port', type=int, help='Service port.')
        # Help text fixed: check() supplies the placeholder under the key
        # "project_id" (see path_options), not "tenant_id".
        parser.add_argument('--path', default='',
                            help='Service API path, this should include '
                                 'placeholders for the version "{version}" and'
                                 ' project ID "{project_id}" if required.')
        parser.add_argument('--auth', action='store_true', default=False,
                            help='Does this API check require auth?')
        parser.add_argument('--ssl', action='store_true', default=False,
                            help='Should SSL be used.')
        parser.add_argument('--version', help='Service API version.')
        args = parser.parse_args()
        main(args)
|
gdi2290/django | refs/heads/master | tests/proxy_model_inheritance/app1/models.py | 515 | # TODO: why can't I make this ..app2
from app2.models import NiceModel
class ProxyModel(NiceModel):
    """Proxy of ``NiceModel``: same database table, separate Python class."""
    class Meta:
        # proxy=True: no new table is created; only Python-level behavior
        # (managers, methods, ordering) may differ from the concrete model.
        proxy = True
|
storm-computers/odoo | refs/heads/9.0 | openerp/addons/test_limits/__init__.py | 2355 | # -*- coding: utf-8 -*-
import models
|
nightjean/Deep-Learning | refs/heads/master | tensorflow/python/ops/lookup_ops.py | 15 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
"""Lookup operations."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_lookup_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
# TODO(yleon): Remove this function.
@deprecated("2017-03-02", "Use `tf.tables_initializer` instead.")
def initialize_all_tables(name="init_all_tables"):
  """Deprecated alias for `tables_initializer`.

  Args:
    name: Optional name for the initialization op.
  Returns:
    An Op that initializes all tables of the default graph; a NoOp when
    no tables exist.
  """
  return tables_initializer(name=name)
def tables_initializer(name="init_all_tables"):
  """Returns an Op that initializes all tables of the default graph.

  Args:
    name: Optional name for the initialization op.
  Returns:
    An Op initializing every table registered in the
    `TABLE_INITIALIZERS` collection; a NoOp when the collection is empty.
  """
  table_init_ops = ops.get_collection(ops.GraphKeys.TABLE_INITIALIZERS)
  if not table_init_ops:
    return control_flow_ops.no_op(name=name)
  return control_flow_ops.group(*table_init_ops, name=name)
def _check_table_dtypes(table, key_dtype, value_dtype):
"""Check that the given key_dtype and value_dtype matches the table dtypes.
Args:
table: The table to check types against to.
key_dtype: The key data type to check.
value_dtype: The value data type to check.
Raises:
TypeError: when 'key_dtype' or 'value_dtype' doesn't match the table data
types.
"""
if key_dtype != table.key_dtype:
raise TypeError("Invalid key dtype, expected %s but got %s." %
(table.key_dtype, key_dtype))
if value_dtype != table.value_dtype:
raise TypeError("Invalid value dtype, expected %s but got %s." %
(table.value_dtype, value_dtype))
class LookupInterface(object):
  """Interface for lookup tables that persist across different steps."""
  def __init__(self, key_dtype, value_dtype, name):
    """Record the table's key/value dtypes and its name.

    Args:
      key_dtype: The table key type.
      value_dtype: The table value type.
      name: A name for the operation (optional).
    """
    self._key_dtype, self._value_dtype = (dtypes.as_dtype(key_dtype),
                                          dtypes.as_dtype(value_dtype))
    self._name = name
  @property
  def key_dtype(self):
    """Data type of the table keys."""
    return self._key_dtype
  @property
  def value_dtype(self):
    """Data type of the table values."""
    return self._value_dtype
  @property
  def name(self):
    """Name of the table."""
    return self._name
  @property
  def init(self):
    """Op that initializes the table; subclasses must provide it."""
    raise NotImplementedError
  def size(self, name=None):
    """Number of elements in the table; subclasses must provide it."""
    raise NotImplementedError
  def lookup(self, keys, name=None):
    """Map `keys` to their values; subclasses must provide it."""
    raise NotImplementedError
class InitializableLookupTableBase(LookupInterface):
  """Initializable lookup table interface.
  An initializable lookup table persists across different steps; it is
  populated once by its initializer op and is immutable afterwards.
  """
  def __init__(self, table_ref, default_value, initializer):
    """Construct a table object from a table reference.
    It requires a table initializer object (subclass of
    `TableInitializerBase`). It provides the table key and value types, as
    well as the op to initialize the table. The caller is responsible to
    execute the initialization op.
    Args:
      table_ref: The table reference, i.e. the output of the lookup table ops.
      default_value: The value to use if a key is missing in the table.
      initializer: The table initializer to use.
    """
    # The table name is the last path component of the table op's name.
    super(InitializableLookupTableBase,
          self).__init__(initializer.key_dtype, initializer.value_dtype,
                         table_ref.op.name.split("/")[-1])
    self._table_ref = table_ref
    # Coerce to the value dtype; merge_with enforces a scalar default and
    # raises ValueError for non-scalar shapes.
    self._default_value = ops.convert_to_tensor(
        default_value, dtype=self._value_dtype)
    self._default_value.get_shape().merge_with(tensor_shape.scalar())
    # Build the initialization op eagerly; exposed via the `init` property.
    self._init = initializer.initialize(self)
  @property
  def table_ref(self):
    """Get the underlying table reference."""
    return self._table_ref
  @property
  def default_value(self):
    """The default value of the table."""
    return self._default_value
  @property
  def init(self):
    """The table initialization op."""
    return self._init
  def size(self, name=None):
    """Compute the number of elements in this table.
    Args:
      name: A name for the operation (optional).
    Returns:
      A scalar tensor containing the number of elements in this table.
    """
    with ops.name_scope(name, "%s_Size" % self._name,
                        [self._table_ref]) as scope:
      # pylint: disable=protected-access
      return gen_lookup_ops._lookup_table_size_v2(self._table_ref, name=scope)
    # pylint: enable=protected-access
  def lookup(self, keys, name=None):
    """Looks up `keys` in a table, outputs the corresponding values.
    The `default_value` is used for keys not present in the table.
    Args:
      keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
      name: A name for the operation (optional).
    Returns:
      A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.
    Raises:
      TypeError: when `keys` or `default_value` doesn't match the table data
        types.
    """
    # For sparse input, look up only the values and later rebuild a
    # SparseTensor with the original indices and dense shape.
    key_tensor = keys
    if isinstance(keys, sparse_tensor.SparseTensor):
      key_tensor = keys.values
    if keys.dtype != self._key_dtype:
      raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                      (self._key_dtype, keys.dtype))
    with ops.name_scope(name, "%s_Lookup" % self._name,
                        (self._table_ref, key_tensor,
                         self._default_value)) as scope:
      # pylint: disable=protected-access
      values = gen_lookup_ops._lookup_table_find_v2(
          self._table_ref, key_tensor, self._default_value, name=scope)
      # pylint: enable=protected-access
    # The output has one value per input key, so it shares the key shape.
    values.set_shape(key_tensor.get_shape())
    if isinstance(keys, sparse_tensor.SparseTensor):
      return sparse_tensor.SparseTensor(keys.indices, values, keys.dense_shape)
    else:
      return values
class HashTable(InitializableLookupTableBase):
  """A generic hash table implementation.
  Example usage:
  ```python
  table = tf.contrib.lookup.HashTable(
      tf.contrib.lookup.KeyValueTensorInitializer(keys, values), -1)
  out = table.lookup(input_tensor).
  table.init.run()
  print out.eval()
  ```
  """
  def __init__(self, initializer, default_value, shared_name=None, name=None):
    """Creates a non-initialized `HashTable` object.
    Creates a table, the type of its keys and values are specified by the
    initializer.
    Before using the table you will have to initialize it. After initialization
    the table will be immutable.
    Args:
      initializer: The table initializer to use. See `HashTable` kernel for
        supported key and value types.
      default_value: The value to use if a key is missing in the table.
      shared_name: If non-empty, this table will be shared under
        the given name across multiple sessions.
      name: A name for the operation (optional).
    Returns:
      A `HashTable` object.
    """
    with ops.name_scope(name, "hash_table", (initializer,
                                             default_value)) as scope:
      # Only the table resource is created here; the base-class constructor
      # wires up the initializer (initializer.initialize(self)).
      # pylint: disable=protected-access
      table_ref = gen_lookup_ops._hash_table_v2(
          shared_name=shared_name,
          key_dtype=initializer.key_dtype,
          value_dtype=initializer.value_dtype,
          name=scope)
      # pylint: enable=protected-access
    super(HashTable, self).__init__(table_ref, default_value, initializer)
class TableInitializerBase(object):
  """Base class for lookup table initializers."""
  def __init__(self, key_dtype, value_dtype):
    """Record the expected key and value dtypes of the table to initialize.

    Args:
      key_dtype: Type of the table keys.
      value_dtype: Type of the table values.
    """
    self._key_dtype, self._value_dtype = (dtypes.as_dtype(key_dtype),
                                          dtypes.as_dtype(value_dtype))
  @property
  def key_dtype(self):
    """Expected dtype of the table keys."""
    return self._key_dtype
  @property
  def value_dtype(self):
    """Expected dtype of the table values."""
    return self._value_dtype
  def initialize(self, table):
    """Return the op that initializes `table`; subclasses must provide it."""
    raise NotImplementedError
class KeyValueTensorInitializer(TableInitializerBase):
  """Table initializers given `keys` and `values` tensors."""
  def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):
    """Constructs a table initializer object based on keys and values tensors.
    Args:
      keys: The tensor for the keys.
      values: The tensor for the values.
      key_dtype: The `keys` data type. Used when `keys` is a python array.
      value_dtype: The `values` data type. Used when `values` is a python array.
      name: A name for the operation (optional).
    """
    with ops.name_scope(name, "key_value_init", [keys, values]) as scope:
      self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name="keys")
      self._values = ops.convert_to_tensor(
          values, dtype=value_dtype, name="values")
      self._name = scope
    # Dtypes are inferred from the converted tensors, not the arguments.
    super(KeyValueTensorInitializer, self).__init__(self._keys.dtype,
                                                    self._values.dtype)
  def initialize(self, table):
    """Initializes the given `table` with `keys` and `values` tensors.
    Args:
      table: The table to initialize.
    Returns:
      The operation that initializes the table.
    Raises:
      TypeError: when the keys and values data types do not match the table
        key and value data types.
    """
    _check_table_dtypes(table, self._keys.dtype, self._values.dtype)
    with ops.name_scope(
        self._name, values=(table.table_ref, self._keys,
                            self._values)) as scope:
      # pylint: disable=protected-access
      init_op = gen_lookup_ops._initialize_table_v2(
          table.table_ref, self._keys, self._values, name=scope)
      # pylint: enable=protected-access
    # Register the op so `tf.tables_initializer()` runs it.
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    return init_op
class TextFileIndex(object):
  """Sentinel column indices understood by `TextFileInitializer`."""
  # Use the entire line content (expects dtype string).
  WHOLE_LINE = -2
  # Use the zero-based line number (expects dtype int64).
  LINE_NUMBER = -1
class TextFileInitializer(TableInitializerBase):
  """Table initializers from a text file.
  This initializer assigns one entry in the table for each line in the file.
  The key and value type of the table to initialize is given by `key_dtype` and
  `value_dtype`.
  The key and value content to get from each line is specified by
  the `key_index` and `value_index`.
  * `TextFileIndex.LINE_NUMBER` means use the line number starting from zero,
    expects data type int64.
  * `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data
    type string.
  * A value `>=0` means use the index (starting at zero) of the split line based
    on `delimiter`.
  For example if we have a file with the following content:
  ```
  emerson 10
  lake 20
  palmer 30
  ```
  The following snippet initializes a table with the first column as keys and
  second column as values:
  * `emerson -> 10`
  * `lake -> 20`
  * `palmer -> 30`
  ```python
  table = tf.contrib.lookup.HashTable(tf.contrib.lookup.TextFileInitializer(
      "test.txt", tf.string, 0, tf.int64, 1, delimiter=" "), -1)
  ...
  table.init.run()
  ```
  Similarly to initialize the whole line as keys and the line number as values.
  * `emerson 10 -> 0`
  * `lake 20 -> 1`
  * `palmer 30 -> 2`
  ```python
  table = tf.contrib.lookup.HashTable(tf.contrib.lookup.TextFileInitializer(
      "test.txt", tf.string, tf.contrib.lookup.TextFileIndex.WHOLE_LINE,
      tf.int64, tf.contrib.lookup.TextFileIndex.LINE_NUMBER, delimiter=" "), -1)
  ...
  table.init.run()
  ```
  """
  def __init__(self,
               filename,
               key_dtype,
               key_index,
               value_dtype,
               value_index,
               vocab_size=None,
               delimiter="\t",
               name=None):
    """Constructs a table initializer object to populate from a text file.
    It generates one key-value pair per line. The type of table key and
    value are specified by `key_dtype` and `value_dtype`, respectively.
    Similarly the content of the key and value are specified by the key_index
    and value_index.
    - TextFileIndex.LINE_NUMBER means use the line number starting from zero,
      expects data type int64.
    - TextFileIndex.WHOLE_LINE means use the whole line content, expects data
      type string.
    - A value >=0 means use the index (starting at zero) of the split line based
      on `delimiter`.
    Args:
      filename: The filename of the text file to be used for initialization.
        The path must be accessible from wherever the graph is initialized
        (eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_dtype: The `key` data type.
      key_index: the index that represents information of a line to get the
        table 'key' values from.
      value_dtype: The `value` data type.
      value_index: the index that represents information of a line to get the
        table 'value' values from.
      vocab_size: The number of elements in the file, if known.
      delimiter: The delimiter to separate fields in a line.
      name: A name for the operation (optional).
    Raises:
      ValueError: when the filename is empty, or when the table key and value
        data types do not match the expected data types.
    """
    # A tensor filename cannot be validated here; only reject empty strings.
    if not isinstance(filename, ops.Tensor) and not filename:
      raise ValueError("Filename required for %s." % name)
    key_dtype = dtypes.as_dtype(key_dtype)
    value_dtype = dtypes.as_dtype(value_dtype)
    # -2 (WHOLE_LINE) is the smallest valid sentinel index.
    if key_index < -2:
      raise ValueError("Invalid key index %s." % (key_index))
    if key_index == TextFileIndex.LINE_NUMBER and key_dtype != dtypes.int64:
      raise ValueError("Signature mismatch. Keys must be dtype %s, got %s." %
                       (dtypes.int64, key_dtype))
    # Whole-line keys allow integer dtypes too (vocab files of int tokens).
    if ((key_index == TextFileIndex.WHOLE_LINE) and
        (not key_dtype.is_integer) and (key_dtype != dtypes.string)):
      raise ValueError(
          "Signature mismatch. Keys must be integer or string, got %s." %
          key_dtype)
    if value_index < -2:
      raise ValueError("Invalid value index %s." % (value_index))
    if value_index == TextFileIndex.LINE_NUMBER and value_dtype != dtypes.int64:
      raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
                       (dtypes.int64, value_dtype))
    if value_index == TextFileIndex.WHOLE_LINE and value_dtype != dtypes.string:
      raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
                       (dtypes.string, value_dtype))
    if (vocab_size is not None) and (vocab_size <= 0):
      raise ValueError("Invalid vocab_size %s." % vocab_size)
    self._filename = filename
    self._key_index = key_index
    self._value_index = value_index
    self._vocab_size = vocab_size
    self._delimiter = delimiter
    self._name = name
    super(TextFileInitializer, self).__init__(key_dtype, value_dtype)
  def initialize(self, table):
    """Initializes the table from a text file.
    Args:
      table: The table to be initialized.
    Returns:
      The operation that initializes the table.
    Raises:
      TypeError: when the keys and values data types do not match the table
        key and value data types.
    """
    _check_table_dtypes(table, self.key_dtype, self.value_dtype)
    with ops.name_scope(self._name, "text_file_init",
                        (table.table_ref,)) as scope:
      filename = ops.convert_to_tensor(
          self._filename, dtypes.string, name="asset_filepath")
      # -1 tells the kernel that the vocabulary size is unknown.
      # pylint: disable=protected-access
      init_op = gen_lookup_ops._initialize_table_from_text_file_v2(
          table.table_ref,
          filename,
          self._key_index,
          self._value_index,
          -1 if self._vocab_size is None else self._vocab_size,
          self._delimiter,
          name=scope)
      # pylint: enable=protected-access
    # Register the init op for `tf.tables_initializer()` and track the
    # vocabulary file as a graph asset so it is exported with the model.
    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
    ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
    return init_op
class TextFileStringTableInitializer(TextFileInitializer):
  """Table initializer for `int64` IDs to string tables from a text file."""
  def __init__(self,
               filename,
               key_column_index=TextFileIndex.LINE_NUMBER,
               value_column_index=TextFileIndex.WHOLE_LINE,
               vocab_size=None,
               delimiter="\t",
               name="text_file_string_table_init"):
    """Constructs an initializer for an id-to-string table from a text file.

    The resulting table maps int64 keys to string values, one key-value
    pair per line of the file. Which part of a line becomes the key and
    which the value is controlled by `key_column_index` and
    `value_column_index`:
    - TextFileIndex.LINE_NUMBER: the zero-based line number (dtype int64).
    - TextFileIndex.WHOLE_LINE: the whole line content (dtype string).
    - A value >= 0: the field at that index of the line split on
      `delimiter`.

    Args:
      filename: The filename of the text file to be used for initialization.
        The path must be accessible from wherever the graph is initialized
        (eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_column_index: Which part of each line supplies the int64 keys.
        Defaults to the zero-based line number.
      value_column_index: Which part of each line supplies the string
        values. Defaults to the whole line content.
      vocab_size: The number of elements in the file, if known.
      delimiter: The delimiter to separate fields in a line.
      name: Optional name for the op.
    Raises:
      TypeError: when the filename is empty, or when the table key and value
        data types do not match the expected data types.
    """
    super(TextFileStringTableInitializer, self).__init__(
        filename=filename,
        key_dtype=dtypes.int64,
        key_index=key_column_index,
        value_dtype=dtypes.string,
        value_index=value_column_index,
        vocab_size=vocab_size,
        delimiter=delimiter,
        name=name)
class TextFileIdTableInitializer(TextFileInitializer):
  """Table initializer for string to `int64` IDs tables from a text file."""
  def __init__(self,
               filename,
               key_column_index=TextFileIndex.WHOLE_LINE,
               value_column_index=TextFileIndex.LINE_NUMBER,
               vocab_size=None,
               delimiter="\t",
               name="text_file_id_table_init",
               key_dtype=dtypes.string):
    """Constructs an initializer for a string-to-id table from a text file.

    The resulting table maps string (or integer, see `key_dtype`) keys to
    int64 values, one key-value pair per line of the file. Which part of a
    line becomes the key and which the value is controlled by
    `key_column_index` and `value_column_index`:
    - TextFileIndex.LINE_NUMBER: the zero-based line number (dtype int64).
    - TextFileIndex.WHOLE_LINE: the whole line content (dtype string).
    - A value >= 0: the field at that index of the line split on
      `delimiter`.

    Args:
      filename: The filename of the text file to be used for initialization.
        The path must be accessible from wherever the graph is initialized
        (eg. trainer or eval workers). The filename may be a scalar `Tensor`.
      key_column_index: Which part of each line supplies the keys. Defaults
        to the whole line content.
      value_column_index: Which part of each line supplies the int64
        values. Defaults to the zero-based line number.
      vocab_size: The number of elements in the file, if known.
      delimiter: The delimiter to separate fields in a line.
      name: Optional name for the op.
      key_dtype: The `key` data type.
    Raises:
      TypeError: when the filename is empty, or when the table key and value
        data types do not match the expected data types.
    """
    super(TextFileIdTableInitializer, self).__init__(
        filename=filename,
        key_dtype=key_dtype,
        key_index=key_column_index,
        value_dtype=dtypes.int64,
        value_index=value_column_index,
        vocab_size=vocab_size,
        delimiter=delimiter,
        name=name)
class HasherSpec(collections.namedtuple("HasherSpec", ["hasher", "key"])):
  """A structure for the spec of the hashing function to use for hash buckets.
  `hasher` is the name of the hashing function to use (eg. "fasthash",
  "stronghash").
  `key` is optional and specify the key to use for the hash function if
  supported, currently only used by a strong hash.
  Fields:
    hasher: The hasher name to use.
    key: The key to be used by the hashing function, if required.
  """
  # No per-instance __dict__: instances stay lightweight immutable tuples.
  __slots__ = ()
# Default hasher spec: keyless fast hash for out-of-vocabulary bucketing.
FastHashSpec = HasherSpec("fasthash", None)  # pylint: disable=invalid-name
class StrongHashSpec(HasherSpec):
  """A `HasherSpec` carrying the key of a strong keyed hash.

  The strong hash requires a `key`, which is a list of 2 unsigned integer
  numbers. These should be non-zero; random numbers generated from random.org
  would be a fine choice.
  Fields:
    key: The key to be used by the keyed hashing function.
  """
  __slots__ = ()
  def __new__(cls, key):
    # Validate shape first, then element types, before building the tuple.
    if len(key) != 2:
      raise ValueError("key must have size 2, got %s." % len(key))
    if not all(isinstance(part, compat.integral_types) for part in key):
      raise TypeError("Invalid key %s. Must be unsigned integer values." % key)
    return super(cls, StrongHashSpec).__new__(cls, "stronghash", key)
def _as_string(tensor):
  """Return `tensor` unchanged when already a string tensor, else cast it."""
  if tensor.dtype.base_dtype == dtypes.string:
    return tensor
  return string_ops.as_string(tensor)
class IdTableWithHashBuckets(LookupInterface):
  """String to Id table wrapper that assigns out-of-vocabulary keys to buckets.
  For example, if an instance of `IdTableWithHashBuckets` is initialized with a
  string-to-id table that maps:
  - emerson -> 0
  - lake -> 1
  - palmer -> 2
  The `IdTableWithHashBuckets` object will performs the following mapping:
  - emerson -> 0
  - lake -> 1
  - palmer -> 2
  - <other term> -> bucket id between 3 and 3 + num_oov_buckets, calculated by:
    hash(<term>) % num_oov_buckets + vocab_size
  If input_tensor is ["emerson", "lake", "palmer", "king", "crimson"],
  the lookup result is [0, 1, 2, 4, 7]
  If `table` is None, only out-of-vocabulary buckets are used.
  Example usage:
  ```python
  num_oov_buckets = 3
  input_tensor = tf.constant(["emerson", "lake", "palmer", "king", "crimnson"])
  table = tf.IdTableWithHashBuckets(
      tf.HashTable(tf.TextFileIdTableInitializer(filename), default_value),
      num_oov_buckets)
  out = table.lookup(input_tensor).
  table.init.run()
  print out.eval()
  ```
  The hash function used for generating out-of-vocabulary buckets ID is handled
  by `hasher_spec`.
  """
  def __init__(self,
               table,
               num_oov_buckets,
               hasher_spec=FastHashSpec,
               name=None,
               key_dtype=None):
    """Construct a `IdTableWithHashBuckets` object.
    Args:
      table: Table that maps `tf.string` or `tf.int64` keys to `tf.int64` ids.
      num_oov_buckets: Number of buckets to use for out-of-vocabulary keys.
      hasher_spec: A `HasherSpec` to specify the hash function to use for
        assignation of out-of-vocabulary buckets  (optional).
      name: A name for the operation (optional).
      key_dtype: Data type of keys passed to `lookup`. Defaults to
        `table.key_dtype` if `table` is specified, otherwise `tf.string`.
        Must be string or integer, and must be castable to `table.key_dtype`.
    Raises:
      ValueError: when `table` in None and `num_oov_buckets` is not positive.
      TypeError: when `hasher_spec` is invalid.
    """
    # If a name ends with a '/' it is a "name scope", remove all trailing '/'
    # characters to use as table name.
    if name:
      name = name.rstrip("/")
    if table:
      if key_dtype is None:
        key_dtype = table.key_dtype
      supported_table_key_dtypes = (dtypes.int64, dtypes.string)
      if table.key_dtype not in supported_table_key_dtypes:
        raise TypeError("Invalid key dtype, expected one of %s, but got %s." %
                        (supported_table_key_dtypes, key_dtype))
      # The lookup dtype may differ from the table dtype (e.g. int32 keys
      # cast to an int64 table) but must stay in the same integer/string
      # family.
      if table.key_dtype.is_integer != key_dtype.is_integer:
        raise TypeError("Invalid key dtype, expected %s but got %s." %
                        ("integer" if key_dtype.is_integer else "non-integer",
                         table.key_dtype))
      if table.value_dtype != dtypes.int64:
        raise TypeError("Invalid value dtype, expected %s but got %s." %
                        (dtypes.int64, table.value_dtype))
      self._table = table
      name = name or self._table.name
    else:
      # No vocabulary table: every key is hashed into an OOV bucket.
      if num_oov_buckets <= 0:
        raise ValueError("oov_buckets must be > 0 if no table is supplied.")
      key_dtype = dtypes.string if key_dtype is None else key_dtype
      self._table = None
      name = name or "hash_bucket"
    if (not key_dtype.is_integer) and (dtypes.string != key_dtype):
      raise TypeError(
          "Invalid key_dtype, expected integer or string, got %s." % key_dtype)
    self._num_oov_buckets = num_oov_buckets
    if not isinstance(hasher_spec, HasherSpec):
      raise TypeError(
          "hasher_spec must be of type HasherSpec, got %s" % hasher_spec)
    self._hasher_spec = hasher_spec
    super(IdTableWithHashBuckets, self).__init__(key_dtype, dtypes.int64,
                                                 name.split("/")[-1])
  @property
  def init(self):
    """The table initialization op."""
    if self._table:
      return self._table.init
    # Hash-only tables need no initialization; return a NoOp.
    with ops.name_scope(None, "init"):
      return control_flow_ops.no_op()
  def size(self, name=None):
    """Compute the number of elements in this table."""
    with ops.name_scope(name, "%s_Size" % self.name) as scope:
      if self._table:
        tsize = self._table.size(scope)
      else:
        tsize = ops.convert_to_tensor(0, dtype=dtypes.int64)
      return tsize + self._num_oov_buckets
  def _get_string_to_hash_bucket_fn(self, hasher_spec):
    """Returns the string_to_hash_bucket op to use based on `hasher_spec`."""
    if not isinstance(hasher_spec, HasherSpec):
      raise TypeError("hasher_spec must be of type HasherSpec %s" % hasher_spec)
    if hasher_spec.hasher == "fasthash":
      return string_ops.string_to_hash_bucket_fast
    if hasher_spec.hasher == "legacy":
      return string_ops.string_to_hash_bucket
    if hasher_spec.hasher == "stronghash":
      return functools.partial(
          string_ops.string_to_hash_bucket_strong, key=hasher_spec.key)
    raise ValueError("Unknown hasher %s" % hasher_spec.hasher)
  def lookup(self, keys, name=None):
    """Looks up `keys` in the table, outputs the corresponding values.
    It assigns out-of-vocabulary keys to buckets based in their hashes.
    Args:
      keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
      name: Optional name for the op.
    Returns:
      A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.
    Raises:
      TypeError: when `keys` doesn't match the table key data type.
    """
    if keys.dtype != self._key_dtype:
      raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
                      (self._key_dtype, keys.dtype))
    values = keys
    if isinstance(keys, sparse_tensor.SparseTensor):
      values = keys.values
    if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
      values = math_ops.to_int64(values)
    if self._num_oov_buckets == 0:
      ids = self._table.lookup(values, name=name)
    else:
      # TODO(yleon): Consider moving this functionality to its own kernel.
      with ops.name_scope(name, "%s_Lookup" % self.name) as scope:
        str_to_hash_bucket = self._get_string_to_hash_bucket_fn(
            self._hasher_spec)
        # OOV keys hash into `num_oov_buckets` buckets placed after the
        # vocabulary ids (offset by the table size below).
        buckets = str_to_hash_bucket(
            _as_string(values),
            num_buckets=self._num_oov_buckets,
            name="hash_bucket")
        if self._table:
          ids = self._table.lookup(values)
          buckets = math_ops.add(buckets, self._table.size())
          # Keep the table id where found; otherwise fall back to the
          # hashed bucket id.
          is_id_non_default = math_ops.not_equal(ids, self._table.default_value)
          ids = array_ops.where(is_id_non_default, ids, buckets, name=scope)
        else:
          ids = buckets
    if isinstance(keys, sparse_tensor.SparseTensor):
      return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
    return ids
def index_table_from_file(vocabulary_file=None,
                          num_oov_buckets=0,
                          vocab_size=None,
                          default_value=-1,
                          hasher_spec=FastHashSpec,
                          key_dtype=dtypes.string,
                          name=None):
  """Returns a lookup table that converts a string tensor into int64 IDs.
  This operation constructs a lookup table to convert tensor of strings into
  int64 IDs. The mapping can be initialized from a vocabulary file specified in
  `vocabulary_file`, where the whole line is the key and the zero-based line
  number is the ID.
  Any lookup of an out-of-vocabulary token will return a bucket ID based on its
  hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
  `default_value`.
  The bucket ID range is `[vocabulary size, vocabulary size + num_oov_buckets]`.
  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.
  Sample Usages:
  If we have a vocabulary file "test.txt" with the following content:
  ```
  emerson
  lake
  palmer
  ```
  ```python
  features = tf.constant(["emerson", "lake", "and", "palmer"])
  table = tf.contrib.lookup.index_table_from_file(
      vocabulary_file="test.txt", num_oov_buckets=1)
  ids = table.lookup(features)
  ...
  tf.tables_initializer().run()
  ids.eval()  ==> [0, 1, 3, 2]  # where 3 is the out-of-vocabulary bucket
  ```
  Args:
    vocabulary_file: The vocabulary filename.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    vocab_size: Number of the elements in the vocabulary, if known.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignation of out-of-vocabulary buckets.
    key_dtype: The `key` data type.
    name: A name for this op (optional).
  Returns:
    The lookup table to map a `key_dtype` `Tensor` to index `int64` `Tensor`.
  Raises:
    ValueError: If `vocabulary_file` is not set.
    ValueError: If `num_oov_buckets` is negative or `vocab_size` is not greater
      than zero.
  """
  if not vocabulary_file:
    raise ValueError("vocabulary_file must be specified.")
  if num_oov_buckets < 0:
    raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
                     % num_oov_buckets)
  if vocab_size is not None and vocab_size < 1:
    raise ValueError("vocab_size must be greater than 0, got %d." % vocab_size)
  if (not key_dtype.is_integer) and (dtypes.string != key_dtype.base_dtype):
    raise TypeError("Only integer and string keys are supported.")
  with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
    table = None
    shared_name = ""
    with ops.name_scope(None, "hash_table") as hash_table_scope:
      # The shared_name encodes the file and column layout so identical
      # tables are shared across sessions.
      if vocab_size:
        # Keep the shared_name:
        # <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
        shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
                                                  TextFileIndex.WHOLE_LINE,
                                                  TextFileIndex.LINE_NUMBER)
      else:
        # Keep the shared_name
        # <table_type>_<filename>_<key_index>_<value_index>
        shared_name = "hash_table_%s_%s_%s" % (vocabulary_file,
                                               TextFileIndex.WHOLE_LINE,
                                               TextFileIndex.LINE_NUMBER)
      # Integer lookup keys are stored as int64 in the underlying table.
      init = TextFileIdTableInitializer(
          vocabulary_file,
          vocab_size=vocab_size,
          key_dtype=dtypes.int64 if key_dtype.is_integer else key_dtype,
          name="table_init")
      table = HashTable(
          init, default_value, shared_name=shared_name, name=hash_table_scope)
    # Wrap the vocabulary table with OOV hashing only when requested.
    if num_oov_buckets:
      table = IdTableWithHashBuckets(
          table,
          num_oov_buckets=num_oov_buckets,
          hasher_spec=hasher_spec,
          name=feat_to_id_scope,
          key_dtype=key_dtype)
    return table
def index_table_from_tensor(vocabulary_list,
                            num_oov_buckets=0,
                            default_value=-1,
                            hasher_spec=FastHashSpec,
                            dtype=dtypes.string,
                            name=None):
  """Returns a lookup table that converts a string tensor into int64 IDs.

  This operation constructs a lookup table to convert a tensor of strings
  into int64 IDs. The mapping can be initialized from a string
  `vocabulary_list` 1-D tensor where each element is a key and the
  corresponding index within the tensor is the value.

  Any lookup of an out-of-vocabulary token will return a bucket ID based on
  its hash if `num_oov_buckets` is greater than zero. Otherwise it is
  assigned the `default_value`.
  The bucket ID range is
  `[vocabulary size, vocabulary size + num_oov_buckets]`.

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Elements in `vocabulary_list` cannot have duplicates, otherwise when
  executing the table initializer op, it will throw a
  `FailedPreconditionError`.

  Sample Usages:

  ```python
  vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
  table = tf.contrib.lookup.index_table_from_tensor(
      vocabulary_list=vocabulary_list, num_oov_buckets=1, default_value=-1)
  features = tf.constant(["emerson", "lake", "and", "palmer"])
  ids = table.lookup(features)
  ...
  tf.tables_initializer().run()
  ids.eval()  ==> [0, 1, 4, 2]
  ```

  Args:
    vocabulary_list: A 1-D `Tensor` that specifies the mapping of keys to
      indices. The type of this object must be castable to `dtype`.
    num_oov_buckets: The number of out-of-vocabulary buckets.
    default_value: The value to use for out-of-vocabulary feature values.
      Defaults to -1.
    hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignment of out-of-vocabulary buckets.
    dtype: The type of values passed to `lookup`. Only string and integers are
      supported.
    name: A name for this op (optional).

  Returns:
    The lookup table to map an input `Tensor` to index `int64` `Tensor`.

  Raises:
    ValueError: If `vocabulary_list` is invalid.
    ValueError: If `num_oov_buckets` is negative.
  """
  if vocabulary_list is None:
    raise ValueError("vocabulary_list must be specified.")
  if num_oov_buckets < 0:
    raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
                     % num_oov_buckets)
  if (not dtype.is_integer) and (dtypes.string != dtype.base_dtype):
    raise TypeError("Only integer and string keys are supported.")
  with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
    keys = ops.convert_to_tensor(vocabulary_list)
    # Reject mixing of integer and non-integer vocabularies/dtypes early,
    # before any ops are added to the graph.
    if keys.dtype.is_integer != dtype.is_integer:
      raise ValueError("Expected %s, got %s." %
                       ("integer"
                        if dtype.is_integer else "non-integer", keys.dtype))
    if (not dtype.is_integer) and (keys.dtype.base_dtype != dtype):
      raise ValueError("Expected %s, got %s." % (dtype, keys.dtype))
    num_elements = array_ops.size(keys)
    # Each key's value is simply its position in the vocabulary tensor.
    values = math_ops.to_int64(math_ops.range(num_elements))
    shared_name = ""
    with ops.name_scope(None, "hash_table") as hash_table_scope:
      # Integer keys are normalized to int64 so the table has a single
      # canonical integer key type.
      table_keys = math_ops.to_int64(keys) if keys.dtype.is_integer else keys
      init = KeyValueTensorInitializer(
          table_keys,
          values,
          table_keys.dtype.base_dtype,
          dtypes.int64,
          name="table_init")
      table = HashTable(
          init, default_value, shared_name=shared_name, name=hash_table_scope)
    if num_oov_buckets:
      # Wrap the table so out-of-vocabulary keys hash into the extra bucket
      # range instead of all collapsing onto default_value.
      table = IdTableWithHashBuckets(
          table,
          num_oov_buckets=num_oov_buckets,
          hasher_spec=hasher_spec,
          name=feat_to_id_scope,
          key_dtype=dtype)
    return table
def index_to_string_table_from_file(vocabulary_file,
                                    vocab_size=None,
                                    default_value="UNK",
                                    name=None):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The table is initialized from a vocabulary file specified in
  `vocabulary_file`, where the whole line is the value and the
  zero-based line number is the index.

  Any input which does not have a corresponding index in the vocabulary file
  (an out-of-vocabulary entry) is assigned the `default_value`.

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Sample Usages:

  If we have a vocabulary file "test.txt" with the following content:

  ```
  emerson
  lake
  palmer
  ```

  ```python
  indices = tf.constant([1, 5], tf.int64)
  table = tf.contrib.lookup.index_to_string_table_from_file(
      vocabulary_file="test.txt", default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.tables_initializer().run()
  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    vocabulary_file: The vocabulary filename.
    vocab_size: Number of the elements in the vocabulary, if known.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The lookup table mapping an index `int64` `Tensor` to the associated
    string values.

  Raises:
    ValueError: when `vocabulary_file` is empty.
    ValueError: when `vocab_size` is invalid.
  """
  if not vocabulary_file:
    raise ValueError("vocabulary_file must be specified.")
  if vocab_size is not None and vocab_size < 1:
    raise ValueError("vocab_size must be greater than 0, got %d." % vocab_size)
  with ops.name_scope(name, "index_to_string") as scope:
    shared_name = ""
    if vocab_size:
      # Keep a shared_name
      # <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
      shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
                                                TextFileIndex.LINE_NUMBER,
                                                TextFileIndex.WHOLE_LINE)
    else:
      # Keep a shared_name <table_type>_<filename>_<key_index>_<value_index>
      shared_name = "hash_table_%s_%s_%s" % (vocabulary_file,
                                             TextFileIndex.LINE_NUMBER,
                                             TextFileIndex.WHOLE_LINE)
    init = TextFileStringTableInitializer(
        vocabulary_file, vocab_size=vocab_size, name="table_init")
    # TODO(yleon): Use a more efficient structure.
    return HashTable(init, default_value, shared_name=shared_name, name=scope)
def index_to_string_table_from_tensor(vocabulary_list,
                                      default_value="UNK",
                                      name=None):
  """Returns a lookup table that maps a `Tensor` of indices into strings.

  This operation constructs a lookup table to map int64 indices into string
  values. The mapping is initialized from a string `vocabulary_list` 1-D
  `Tensor` where each element is a value and the corresponding index within
  the tensor is the key.

  Any input which does not have a corresponding index in `vocabulary_list`
  (an out-of-vocabulary entry) is assigned the `default_value`.

  The underlying table must be initialized by calling
  `tf.tables_initializer.run()` or `table.init.run()` once.

  Elements in `vocabulary_list` cannot have duplicates, otherwise when
  executing the table initializer op, it will throw a
  `FailedPreconditionError`.

  Sample Usages:

  ```python
  vocabulary_list = tf.constant(["emerson", "lake", "palmer"])
  indices = tf.constant([1, 5], tf.int64)
  table = tf.contrib.lookup.index_to_string_table_from_tensor(
      vocabulary_list, default_value="UNKNOWN")
  values = table.lookup(indices)
  ...
  tf.tables_initializer().run()
  values.eval() ==> ["lake", "UNKNOWN"]
  ```

  Args:
    vocabulary_list: A 1-D string `Tensor` that specifies the strings to map
      from indices.
    default_value: The value to use for out-of-vocabulary indices.
    name: A name for this op (optional).

  Returns:
    The lookup table mapping an index `int64` `Tensor` to the associated
    string values.

  Raises:
    ValueError: when `vocabulary_list` is not set.
  """
  if vocabulary_list is None:
    raise ValueError("vocabulary_list must be specified.")
  with ops.name_scope(name, "index_to_string") as scope:
    vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string)
    num_elements = array_ops.size(vocabulary_list)
    # Keys are the positions 0..n-1; values are the vocabulary strings.
    keys = math_ops.to_int64(math_ops.range(num_elements))
    shared_name = ""
    init = KeyValueTensorInitializer(
        keys, vocabulary_list, dtypes.int64, dtypes.string, name="table_init")
    # TODO(yleon): Use a more efficient structure.
    return HashTable(init, default_value, shared_name=shared_name, name=scope)
# Lookup-table ops are table queries/mutations rather than differentiable
# computations, so register every op (both V1 and V2 variants) as having
# no gradient.
ops.NotDifferentiable("LookupTableFind")
ops.NotDifferentiable("LookupTableFindV2")
ops.NotDifferentiable("LookupTableInsert")
ops.NotDifferentiable("LookupTableInsertV2")
ops.NotDifferentiable("LookupTableSize")
ops.NotDifferentiable("LookupTableSizeV2")
ops.NotDifferentiable("HashTable")
ops.NotDifferentiable("HashTableV2")
ops.NotDifferentiable("InitializeTable")
ops.NotDifferentiable("InitializeTableV2")
ops.NotDifferentiable("InitializeTableFromTextFile")
ops.NotDifferentiable("InitializeTableFromTextFileV2")
ops.NotDifferentiable("MutableDenseHashTable")
ops.NotDifferentiable("MutableDenseHashTableV2")
ops.NotDifferentiable("MutableHashTable")
ops.NotDifferentiable("MutableHashTableV2")
ops.NotDifferentiable("MutableHashTableOfTensors")
ops.NotDifferentiable("MutableHashTableOfTensorsV2")
|
azureplus/hue | refs/heads/master | apps/proxy/src/proxy/urls.py | 33 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
# URL conf for the proxy app: /<host>/<port><path> is captured and handed
# to views.proxy (presumably to forward the request to <host>:<port> --
# confirm against the view implementation).
# NOTE(review): ``patterns`` with a string view name is the legacy Django
# URL style (removed upstream in Django 1.10) -- fine for the Django
# version this tree pins, but worth revisiting on upgrade.
urlpatterns = patterns('proxy',
  # Prefix the names of your views with the app name.
  url(r'^(?P<host>[^/]+)/(?P<port>\d+)(?P<path>/.*)$', 'views.proxy'),
)
|
BorisJeremic/Real-ESSI-Examples | refs/heads/master | analytic_solution/test_cases/Contact/Dynamic_Shear_Behaviour/Frictional_SDOF_With_Damping/c_t_1/NonLinHardSoftShear/compare_txt.py | 637 | #!/usr/bin/python
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
# (everything up to the "test_cases" directory is the test-suite root)
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# analytic_solution = sys.argv[1]
# numeric_result = sys.argv[2]
analytic_solution = 'analytic_solution.txt'
numeric_result = 'numeric_result.txt'
# Each file is expected to hold a single scalar: the float() conversions
# below would raise on anything multi-valued.
analytic_sol = np.loadtxt(analytic_solution)
numeric_res = np.loadtxt(numeric_result)
abs_error = abs(analytic_sol - numeric_res)
rel_error = abs_error/analytic_sol
analytic_sol = float(analytic_sol)
numeric_res = float(numeric_res)
rel_error = float(rel_error)
# print the results (Python 2 print statements; head*() helpers come from
# mycolor_fun and provide the colored prefixes)
case_flag=1
print headrun() , "-----------Testing results-----------------"
print headstep() ,'{0} {1} {2} '.format('analytic_solution ','numeric_result ','error[%]')
# NOTE(review): the column header says "error[%]" but rel_error is the raw
# fraction abs_error/analytic_sol, never multiplied by 100 -- confirm
# whether the label or the value is the intended one.
print headOK() ,'{0:+e} {1:+e} {2:+0.2f} '.format(analytic_sol, numeric_res, rel_error )
if(case_flag==1):
    print headOKCASE(),"-----------Done this case!-----------------"
# legacy backup
# find . -name 'element.fei' -exec bash -c 'mv $0 ${0/element.fei/add_element.include}' {} \;
# find . -name 'constraint.fei' -exec bash -c 'mv $0 ${0/constraint.fei/add_constraint.include}' {} \;
# find . -name 'node.fei' -exec bash -c 'mv $0 ${0/node.fei/add_node.include}' {} \;
# find . -name 'add_node.fei' -exec bash -c 'mv $0 ${0/add_node.fei/add_node.include}' {} \;
# find . -name 'elementLT.fei' -exec bash -c 'mv $0 ${0/elementLT.fei/add_elementLT.include}' {} \;
# sed -i "s/node\.fei/add_node.include/" main.fei
# sed -i "s/add_node\.fei/add_node.include/" main.fei
# sed -i "s/element\.fei/add_element.include/" main.fei
# sed -i "s/elementLT\.fei/add_elementLT.include/" main.fei
# sed -i "s/constraint\.fei/add_constraint.include/" main.fei
# find . -name '*_bak.h5.feioutput' -exec bash -c 'mv $0 ${0/\_bak.h5.feioutput/\_original\.h5.feioutput}' {} \;
|
khagler/boto | refs/heads/develop | scripts/rebuild_endpoints.py | 79 | import json
from pyquery import PyQuery as pq
import requests
class FetchError(Exception):
    """Raised when the remote endpoints document cannot be retrieved."""
def fetch_endpoints():
    """Download the canonical regions XML published with the AWS Java SDK.

    Returns:
        The raw XML document as text.

    Raises:
        FetchError: if the HTTP response status is not 200.
    """
    # We utilize what the Java SDK publishes as a baseline.
    resp = requests.get('https://raw2.github.com/aws/aws-sdk-java/master/src/main/resources/etc/regions.xml')
    if int(resp.status_code) != 200:
        # Bug fix: ``requests.Response`` exposes ``status_code`` and ``text``.
        # The original referenced the non-existent ``resp.status`` and
        # ``resp.body``, so the error path raised AttributeError instead of
        # the intended FetchError.
        raise FetchError("Failed to fetch the endpoints. Got {0}: {1}".format(
            resp.status_code,
            resp.text
        ))
    return resp.text
def parse_xml(raw_xml):
    """Parse *raw_xml* into a PyQuery document using the XML parser."""
    return pq(raw_xml, parser='xml')
def build_data(doc):
    """Build a ``{service_name: {region_name: hostname}}`` mapping.

    Args:
        doc: A PyQuery document of the regions XML (see ``parse_xml``).

    Returns:
        A plain nested dict suitable for JSON serialization.
    """
    data = {}
    # Run through all the regions. These have all the data we need.
    for region_elem in doc('Regions').find('Region'):
        region = pq(region_elem, parser='xml')
        region_name = region.find('Name').text()
        for endp in region.find('Endpoint'):
            # NOTE(review): iterating a PyQuery result yields raw lxml
            # elements, so ``.find(...).text`` below are lxml APIs (attribute
            # access), unlike the PyQuery ``.text()`` call above -- confirm
            # against the pyquery version pinned by this repo.
            service_name = endp.find('ServiceName').text
            endpoint = endp.find('Hostname').text
            data.setdefault(service_name, {})
            data[service_name][region_name] = endpoint
    return data
def main():
    """Fetch, parse, and print the service/region endpoint map as JSON."""
    endpoints = build_data(parse_xml(fetch_endpoints()))
    print(json.dumps(endpoints, indent=4, sort_keys=True))
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
raildo/nova | refs/heads/master | nova/api/openstack/compute/deferred_delete.py | 26 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The deferred instance delete extension."""
import webob
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
# Extension alias used for both the policy authorizer and the API alias.
ALIAS = 'os-deferred-delete'
# Policy-check callable scoped to this extension; invoked at the top of
# each controller action below.
authorize = extensions.os_compute_authorizer(ALIAS)
class DeferredDeleteController(wsgi.Controller):
    """Controller providing the ``restore`` and ``forceDelete`` server
    actions for instances pending deferred deletion."""

    def __init__(self, *args, **kwargs):
        super(DeferredDeleteController, self).__init__(*args, **kwargs)
        # Policy is enforced here via ``authorize``, so the compute API is
        # told to skip its own (duplicate) policy checks.
        self.compute_api = compute.API(skip_policy_check=True)

    @wsgi.response(202)
    @extensions.expected_errors((404, 409, 403))
    @wsgi.action('restore')
    def _restore(self, req, id, body):
        """Restore a previously deleted instance.

        Compute-layer failures are mapped onto the HTTP codes declared in
        ``expected_errors``: unknown cell -> 404, quota exceeded -> 403,
        invalid instance state -> 409.
        """
        context = req.environ["nova.context"]
        authorize(context)
        instance = common.get_instance(self.compute_api, context, id)
        try:
            self.compute_api.restore(context, instance)
        except exception.InstanceUnknownCell as error:
            raise webob.exc.HTTPNotFound(explanation=error.format_message())
        except exception.QuotaError as error:
            raise webob.exc.HTTPForbidden(explanation=error.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                                                                  'restore',
                                                                  id)

    @wsgi.response(202)
    @extensions.expected_errors((404, 409))
    @wsgi.action('forceDelete')
    def _force_delete(self, req, id, body):
        """Force delete of instance before deferred cleanup.

        A locked instance is reported as 409 Conflict.
        """
        context = req.environ["nova.context"]
        authorize(context)
        instance = common.get_instance(self.compute_api, context, id)
        try:
            self.compute_api.force_delete(context, instance)
        except exception.InstanceIsLocked as e:
            raise webob.exc.HTTPConflict(explanation=e.format_message())
class DeferredDelete(extensions.V21APIExtensionBase):
    """Instance deferred delete API extension (v2.1)."""

    name = "DeferredDelete"
    alias = "os-deferred-delete"
    version = 1

    def get_controller_extensions(self):
        """Attach the deferred-delete actions to the ``servers`` resource."""
        controller = DeferredDeleteController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]

    def get_resources(self):
        """This extension adds no top-level resources, only server actions."""
        return []
|
marcel-dancak/QGIS | refs/heads/master | python/plugins/processing/algs/grass7/ext/i_albedo.py | 4 | # -*- coding: utf-8 -*-
"""
***************************************************************************
i_albedo.py
-----------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .i import verifyRasterNum
def checkParameterValuesBeforeExecuting(alg, parameters, context):
    """Validate the raster-count precondition for the selected i.albedo mode.

    Flag precedence mirrors the original checks: -m (MODIS, 7 bands), then
    -n (NOAA AVHRR, 2 bands), then -l/-a (Landsat / ASTER, 6 bands).
    """
    def flag_is_set(flag_name):
        # Shorthand for querying one of the algorithm's boolean switches.
        return alg.parameterAsBoolean(parameters, flag_name, context)

    if flag_is_set('-m'):
        expected_rasters = 7
    elif flag_is_set('-n'):
        expected_rasters = 2
    elif flag_is_set('-l') or flag_is_set('-a'):
        expected_rasters = 6
    else:
        # No mode flag chosen: nothing to validate.
        return True, None
    return verifyRasterNum(alg, parameters, context, 'input', expected_rasters)
|
procrasti/electrum | refs/heads/master | plugins/greenaddress_instant/__init__.py | 24 | from electrum.i18n import _
# Plugin display name.
fullname = 'GreenAddress instant'
# One-line description, wrapped in the i18n ``_()`` helper imported above.
description = _("Allows validating if your transactions have instant confirmations by GreenAddress")
# GUIs this plugin can be loaded in.
available_for = ['qt']
cristiana214/cristianachavez214-cristianachavez | refs/heads/master | python/src/Lib/encodings/cp850.py | 593 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP850.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP850 codec driven by the module's charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode *input* through ``encoding_map``; returns (bytes, length)."""
        encoded = codecs.charmap_encode(input, errors, encoding_map)
        return encoded

    def decode(self, input, errors='strict'):
        """Decode *input* through ``decoding_table``; returns (str, length)."""
        decoded = codecs.charmap_decode(input, errors, decoding_table)
        return decoded
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP850 encoder; a charmap codec carries no buffered state."""

    def encode(self, input, final=False):
        # charmap_encode returns (output, consumed); only the bytes are needed.
        output, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return output
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental CP850 decoder; a charmap codec carries no buffered state."""

    def decode(self, input, final=False):
        # charmap_decode returns (output, consumed); only the text is needed.
        output, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return output
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for CP850; all behaviour is inherited from ``Codec``."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for CP850; all behaviour is inherited from ``Codec``."""
### encodings module API
def getregentry():
    """Return the ``codecs.CodecInfo`` registry entry for cp850."""
    # Codec is stateless, so a single instance can serve both directions.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp850',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x00f0, # LATIN SMALL LETTER ETH
0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH
0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00d5: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x00fe, # LATIN SMALL LETTER THORN
0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2017, # DOUBLE LOW LINE
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd7' # 0x009e -> MULTIPLICATION SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\xae' # 0x00a9 -> REGISTERED SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xa9' # 0x00b8 -> COPYRIGHT SIGN
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0x00bd -> CENT SIGN
u'\xa5' # 0x00be -> YEN SIGN
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
u'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\xf0' # 0x00d0 -> LATIN SMALL LETTER ETH
u'\xd0' # 0x00d1 -> LATIN CAPITAL LETTER ETH
u'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\u0131' # 0x00d5 -> LATIN SMALL LETTER DOTLESS I
u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\xa6' # 0x00dd -> BROKEN BAR
u'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\xfe' # 0x00e7 -> LATIN SMALL LETTER THORN
u'\xde' # 0x00e8 -> LATIN CAPITAL LETTER THORN
u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
u'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xaf' # 0x00ee -> MACRON
u'\xb4' # 0x00ef -> ACUTE ACCENT
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2017' # 0x00f2 -> DOUBLE LOW LINE
u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0x00f4 -> PILCROW SIGN
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\xb8' # 0x00f7 -> CEDILLA
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\xa8' # 0x00f9 -> DIAERESIS
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {}
# The first 128 code points (ASCII) encode to themselves.
for _i in range(0x80):
    encoding_map[_i] = _i
del _i
# Non-ASCII half: inverse of the decoding table above -- maps each Unicode
# ordinal this code page can represent to its single code-page byte value.
encoding_map.update({
    0x00a0: 0x00ff, 0x00a1: 0x00ad, 0x00a2: 0x00bd, 0x00a3: 0x009c,
    0x00a4: 0x00cf, 0x00a5: 0x00be, 0x00a6: 0x00dd, 0x00a7: 0x00f5,
    0x00a8: 0x00f9, 0x00a9: 0x00b8, 0x00aa: 0x00a6, 0x00ab: 0x00ae,
    0x00ac: 0x00aa, 0x00ad: 0x00f0, 0x00ae: 0x00a9, 0x00af: 0x00ee,
    0x00b0: 0x00f8, 0x00b1: 0x00f1, 0x00b2: 0x00fd, 0x00b3: 0x00fc,
    0x00b4: 0x00ef, 0x00b5: 0x00e6, 0x00b6: 0x00f4, 0x00b7: 0x00fa,
    0x00b8: 0x00f7, 0x00b9: 0x00fb, 0x00ba: 0x00a7, 0x00bb: 0x00af,
    0x00bc: 0x00ac, 0x00bd: 0x00ab, 0x00be: 0x00f3, 0x00bf: 0x00a8,
    0x00c0: 0x00b7, 0x00c1: 0x00b5, 0x00c2: 0x00b6, 0x00c3: 0x00c7,
    0x00c4: 0x008e, 0x00c5: 0x008f, 0x00c6: 0x0092, 0x00c7: 0x0080,
    0x00c8: 0x00d4, 0x00c9: 0x0090, 0x00ca: 0x00d2, 0x00cb: 0x00d3,
    0x00cc: 0x00de, 0x00cd: 0x00d6, 0x00ce: 0x00d7, 0x00cf: 0x00d8,
    0x00d0: 0x00d1, 0x00d1: 0x00a5, 0x00d2: 0x00e3, 0x00d3: 0x00e0,
    0x00d4: 0x00e2, 0x00d5: 0x00e5, 0x00d6: 0x0099, 0x00d7: 0x009e,
    0x00d8: 0x009d, 0x00d9: 0x00eb, 0x00da: 0x00e9, 0x00db: 0x00ea,
    0x00dc: 0x009a, 0x00dd: 0x00ed, 0x00de: 0x00e8, 0x00df: 0x00e1,
    0x00e0: 0x0085, 0x00e1: 0x00a0, 0x00e2: 0x0083, 0x00e3: 0x00c6,
    0x00e4: 0x0084, 0x00e5: 0x0086, 0x00e6: 0x0091, 0x00e7: 0x0087,
    0x00e8: 0x008a, 0x00e9: 0x0082, 0x00ea: 0x0088, 0x00eb: 0x0089,
    0x00ec: 0x008d, 0x00ed: 0x00a1, 0x00ee: 0x008c, 0x00ef: 0x008b,
    0x00f0: 0x00d0, 0x00f1: 0x00a4, 0x00f2: 0x0095, 0x00f3: 0x00a2,
    0x00f4: 0x0093, 0x00f5: 0x00e4, 0x00f6: 0x0094, 0x00f7: 0x00f6,
    0x00f8: 0x009b, 0x00f9: 0x0097, 0x00fa: 0x00a3, 0x00fb: 0x0096,
    0x00fc: 0x0081, 0x00fd: 0x00ec, 0x00fe: 0x00e7, 0x00ff: 0x0098,
    # Latin extras
    0x0131: 0x00d5, 0x0192: 0x009f, 0x2017: 0x00f2,
    # Box-drawing characters
    0x2500: 0x00c4, 0x2502: 0x00b3, 0x250c: 0x00da, 0x2510: 0x00bf,
    0x2514: 0x00c0, 0x2518: 0x00d9, 0x251c: 0x00c3, 0x2524: 0x00b4,
    0x252c: 0x00c2, 0x2534: 0x00c1, 0x253c: 0x00c5,
    0x2550: 0x00cd, 0x2551: 0x00ba, 0x2554: 0x00c9, 0x2557: 0x00bb,
    0x255a: 0x00c8, 0x255d: 0x00bc, 0x2560: 0x00cc, 0x2563: 0x00b9,
    0x2566: 0x00cb, 0x2569: 0x00ca, 0x256c: 0x00ce,
    # Blocks, shades and the black square
    0x2580: 0x00df, 0x2584: 0x00dc, 0x2588: 0x00db,
    0x2591: 0x00b0, 0x2592: 0x00b1, 0x2593: 0x00b2, 0x25a0: 0x00fe,
})
|
mancoast/CPythonPyc_test | refs/heads/master | cpython/200_regex_tests.py | 12 |
# Regex test suite and benchmark suite v1.5a2
# Due to the use of r"aw" strings, this file will
# only work with Python 1.5 or higher.
# The 3 possible outcomes for each pattern
# The 3 possible outcomes for each pattern, enumerated 0..2.
SUCCEED, FAIL, SYNTAX_ERROR = range(3)
# Benchmark suite (needs expansion)
#
# The benchmark suite does not test correctness, just speed. The
# first element of each tuple is the regex pattern; the second is a
# string to match it against. The benchmarking code will embed the
# second string inside several sizes of padding, to test how regex
# matching performs on large strings.
benchmarks = [
    ('Python', 'Python'), # Simple text literal
    ('.*Python', 'Python'), # Bad text literal
    ('.*Python.*', 'Python'), # Worse text literal
    ('.*\\(Python\\)', 'Python'), # Bad text literal with grouping
    # NOTE(review): in the old "regex" syntax groups are spelled \( \),
    # so the leading '(' below is a *literal* paren -- this pattern is
    # "(Python" or "Perl" or "Tcl". Presumably a stray character; compare
    # the grouped variant on the next line. TODO confirm against upstream.
    ('(Python\\|Perl\\|Tcl', 'Perl'), # Alternation
    ('\\(Python\\|Perl\\|Tcl\\)', 'Perl'), # Grouped alternation
    ('\\(Python\\)\\1', 'PythonPython'), # Backreference
    # ('\\([0a-z][a-z]*,\\)+', 'a5,b7,c9,'), # Disable the fastmap optimization
    ('\\([a-z][a-z0-9]*,\\)+', 'a5,b7,c9,') # A few sets
]
# Test suite (for verifying correctness)
#
# The test suite is a list of 5- or 3-tuples. The 5 parts of a
# complete tuple are:
# element 0: a string containing the pattern
# 1: the string to match against the pattern
# 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
# 3: a string that will be eval()'ed to produce a test string.
# This is an arbitrary Python expression; the available
# variables are "found" (the whole match), and "g1", "g2", ...
# up to "g10" contain the contents of each group, or the
# string 'None' if the group wasn't given a value.
# 4: The expected result of evaluating the expression.
# If the two don't match, an error is reported.
#
# If the regex isn't expected to work, the latter two elements can be omitted.
tests = [
    # NOTE: the word-boundary assertions '\<' and '\>' (old "regex" module
    # syntax) are written with doubled backslashes below. '\<' is not a
    # recognized Python escape, so '\<a' and '\\<a' denote the same string,
    # but the single-backslash spelling raises an "invalid escape sequence"
    # warning on modern Python (and is slated to become a syntax error).
    ('abc', 'abc', SUCCEED, 'found', 'abc'),
    ('abc', 'xbc', FAIL),
    ('abc', 'axc', FAIL),
    ('abc', 'abx', FAIL),
    ('abc', 'xabcy', SUCCEED, 'found', 'abc'),
    ('abc', 'ababc', SUCCEED, 'found', 'abc'),
    ('ab*c', 'abc', SUCCEED, 'found', 'abc'),
    ('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
    ('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
    ('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
    ('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
    ('ab+bc', 'abc', FAIL),
    ('ab+bc', 'abq', FAIL),
    ('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
    ('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
    ('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
    ('ab?bc', 'abbbbc', FAIL),
    ('ab?c', 'abc', SUCCEED, 'found', 'abc'),
    ('^abc$', 'abc', SUCCEED, 'found', 'abc'),
    ('^abc$', 'abcc', FAIL),
    ('^abc', 'abcc', SUCCEED, 'found', 'abc'),
    ('^abc$', 'aabc', FAIL),
    ('abc$', 'aabc', SUCCEED, 'found', 'abc'),
    ('^', 'abc', SUCCEED, 'found+"-"', '-'),
    ('$', 'abc', SUCCEED, 'found+"-"', '-'),
    ('a.c', 'abc', SUCCEED, 'found', 'abc'),
    ('a.c', 'axc', SUCCEED, 'found', 'axc'),
    ('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
    ('a.*c', 'axyzd', FAIL),
    ('a[bc]d', 'abc', FAIL),
    ('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
    ('a[b-d]e', 'abd', FAIL),
    ('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
    ('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
    ('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
    ('a[b-]', 'a-', SUCCEED, 'found', 'a-'),
    ('a[]b', '-', SYNTAX_ERROR),
    ('a[', '-', SYNTAX_ERROR),
    ('a\\', '-', SYNTAX_ERROR),
    ('abc\\)', '-', SYNTAX_ERROR),
    ('\\(abc', '-', SYNTAX_ERROR),
    ('a]', 'a]', SUCCEED, 'found', 'a]'),
    ('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
    ('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
    ('a[^bc]d', 'abd', FAIL),
    ('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
    ('a[^-b]c', 'a-c', FAIL),
    ('a[^]b]c', 'a]c', FAIL),
    ('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
    ('\\ba\\b', 'a-', SUCCEED, '"-"', '-'),
    ('\\ba\\b', '-a', SUCCEED, '"-"', '-'),
    ('\\ba\\b', '-a-', SUCCEED, '"-"', '-'),
    ('\\by\\b', 'xy', FAIL),
    ('\\by\\b', 'yz', FAIL),
    ('\\by\\b', 'xyz', FAIL),
    ('ab\\|cd', 'abc', SUCCEED, 'found', 'ab'),
    ('ab\\|cd', 'abcd', SUCCEED, 'found', 'ab'),
    ('\\(\\)ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
    ('$b', 'b', FAIL),
    ('a(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-None'),
    ('a(*b', 'ab', SUCCEED, 'found', 'ab'),
    ('a(*b', 'a((b', SUCCEED, 'found', 'a((b'),
    ('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
    ('\\(\\(a\\)\\)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
    ('\\(a\\)b\\(c\\)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
    ('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
    ('\\(a+\\|b\\)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
    ('\\(a+\\|b\\)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
    ('\\(a+\\|b\\)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
    ('\\)\\(', '-', SYNTAX_ERROR),
    ('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
    ('abc', '', FAIL),
    ('a*', '', SUCCEED, 'found', ''),
    ('a\\|b\\|c\\|d\\|e', 'e', SUCCEED, 'found', 'e'),
    ('\\(a\\|b\\|c\\|d\\|e\\)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
    ('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
    ('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
    ('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
    ('\\(ab\\|cd\\)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
    ('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
    ('^\\(ab\\|cd\\)e', 'abcde', FAIL, 'xg1y', 'xy'),
    ('\\(abc\\|\\)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
    ('\\(a\\|b\\)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
    ('\\(ab\\|ab*\\)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
    ('a\\([bc]*\\)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
    ('a\\([bc]*\\)\\(c*d\\)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
    ('a\\([bc]+\\)\\(c*d\\)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
    ('a\\([bc]*\\)\\(c+d\\)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
    ('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
    ('a[bcd]+dcdcde', 'adcdcde', FAIL),
    ('\\(ab\\|a\\)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
    ('\\(\\(a\\)\\(b\\)c\\)\\(d\\)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
    ('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
    ('^a\\(bc+\\|b[eh]\\)g\\|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
    ('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
    ('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
    ('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'effg', FAIL),
    ('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'bcdd', FAIL),
    ('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
    ('\\(\\(\\(\\(\\(\\(\\(\\(\\(a\\)\\)\\)\\)\\)\\)\\)\\)\\)', 'a', SUCCEED, 'found', 'a'),
    ('multiple words of text', 'uh-uh', FAIL),
    ('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
    ('\\(.*\\)c\\(.*\\)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
    ('(\\(.*\\), \\(.*\\))', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
    ('[k]', 'ab', FAIL),
    ('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
    ('\\(abc\\)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
    ('\\([a-c]*\\)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
    ('^\\(.+\\)?B', 'AB', SUCCEED, 'g1', 'A'),
    ('\\(a+\\).\\1$', 'aaaaa', SUCCEED, 'found+"-"+g1', 'aaaaa-aa'),
    ('^\\(a+\\).\\1$', 'aaaa', FAIL),
    ('\\(abc\\)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
    ('\\([a-c]+\\)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
    ('\\(a\\)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
    ('\\(a+\\)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
    ('\\(a+\\)+\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
    ('\\(a\\).+\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
    ('\\(a\\)ba*\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
    ('\\(aa\\|a\\)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
    ('\\(a\\|aa\\)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
    ('\\(a+\\)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
    ('\\([abc]*\\)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
    ('\\(a\\)\\(b\\)c\\|ab', 'ab', SUCCEED, 'found+"-"+g1+"-"+g2', 'ab-None-None'),
    ('\\(a\\)+x', 'aaax', SUCCEED, 'found+"-"+g1', 'aaax-a'),
    ('\\([ac]\\)+x', 'aacx', SUCCEED, 'found+"-"+g1', 'aacx-c'),
    ('\\([^/]*/\\)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED, 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
    ('\\([^.]*\\)\\.\\([^:]*\\):[T ]+\\(.*\\)', 'track1.title:TBlah blah blah', SUCCEED, 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
    ('\\([^N]*N\\)+', 'abNNxyzN', SUCCEED, 'found+"-"+g1', 'abNNxyzN-xyzN'),
    ('\\([^N]*N\\)+', 'abNNxyz', SUCCEED, 'found+"-"+g1', 'abNN-N'),
    ('\\([abc]*\\)x', 'abcx', SUCCEED, 'found+"-"+g1', 'abcx-abc'),
    ('\\([abc]*\\)x', 'abc', FAIL),
    ('\\([xyz]*\\)x', 'abcx', SUCCEED, 'found+"-"+g1', 'x-'),
    ('\\(a\\)+b\\|aac', 'aac', SUCCEED, 'found+"-"+g1', 'aac-None'),
    ('\\<a', 'a', SUCCEED, 'found', 'a'),
    ('\\<a', '!', FAIL),
    ('a\\<b', 'ab', FAIL),
    ('a\\>', 'ab', FAIL),
    ('a\\>', 'a!', SUCCEED, 'found', 'a'),
    ('a\\>', 'a', SUCCEED, 'found', 'a'),
]
|
debasishm89/OpenXMolar | refs/heads/master | ExtDepLibs/autoit/autoit.py | 1 | # -*- coding: utf-8 -*-
__author__ = 'Jace Xu'
import ctypes
import os
import platform
from ctypes.wintypes import *
from functools import wraps
# Pick the AutoItX DLL variant matching this interpreter's bitness.
dll = "AutoItX3.dll"
bit, _ = platform.architecture()  # bitness of the running Python build
if "(x86)" in os.environ['PROGRAMFILES'] and bit == "64bit":
    # if 64bit version of python within 64bit version of Windows,
    # load AutoItX3_x64.dll
    # NOTE(review): %PROGRAMFILES% contains "(x86)" only when the *process*
    # is 32-bit on 64-bit Windows, while platform.architecture() reports the
    # Python build itself -- the two conditions look mutually exclusive, so
    # this branch may never fire for a 64-bit interpreter. TODO confirm.
    dll = "AutoItX3_x64.dll"
# The DLL is expected in a "lib" directory shipped next to this module.
dll_path = os.path.join(os.path.dirname(__file__), "lib", dll)
if not os.path.exists(dll_path):
    raise IOError("Cannot load AutoItX from path: %s" % dll_path)
AUTO_IT = ctypes.windll.LoadLibrary(dll_path)  # module-wide AutoItX handle
class AutoItError(Exception):
    """Raised when an AutoItX call reports failure (see AutoItAPI.check)."""
    pass
def error():
    """Return the error state of the most recent AutoItX call (AU3_error)."""
    return AUTO_IT.AU3_error()
class AutoItAPI(object):
    """Wraps AutoItX calls with uniform error checking.

    :meth:`check` is a decorator factory; its ``mark`` argument selects
    which post-call checks run on the wrapped AutoItX call:

    * flag 1 - inspect :func:`error` (AU3_error) after the call
    * flag 2 - compare the return value against ``unexpected_ret``
    """

    def __init__(self):
        self.msg = {}

    @staticmethod
    def _has_error():
        """True when the last AutoItX call set the error flag to 1."""
        return error() == 1

    @staticmethod
    def _has_unexpected_ret(ret, unexpected):
        """True when *ret* is one of the *unexpected* values."""
        return ret in unexpected

    @staticmethod
    def _parser(x, y):
        # Kept for backward compatibility: the greedy decomposition step
        # formerly combined with reduce() to split ``mark`` into flags.
        if x["num"] >= y:
            x["flags"].append(y)
            x["num"] -= y
        return x

    def check(self, mark=0, err_msg="", **kwds):
        """
        :param mark:
            0 - do not need check return value or error()
            1 - check error()
            2 - check return value
        :param err_msg: message used for the raised AutoItError
        :param unexpected_ret: (keyword) tuple of return values treated as
            failure when flag 2 is set; defaults to ``(0,)``.
        """
        unexpected_ret = kwds.get("unexpected_ret", (0,))

        def _check(fn):
            @wraps(fn)
            def wrapper(*args, **kwargs):
                ret = fn(*args, **kwargs)
                # Greedy decomposition of ``mark`` (2 first, then 1) --
                # same result as the old ``reduce(self._parser, ...)``
                # expression, but without the ``reduce`` builtin, which
                # does not exist in Python 3 and was never imported here.
                remaining = mark
                check_ret = remaining >= 2
                if check_ret:
                    remaining -= 2
                check_err = remaining >= 1
                if check_err and self._has_error():
                    raise AutoItError(err_msg)
                if check_ret and self._has_unexpected_ret(ret, unexpected_ret):
                    raise AutoItError(err_msg)
                return ret
            return wrapper
        return _check
api = AutoItAPI()  # shared checker whose .check() decorates the wrappers below
@api.check()
def auto_it_set_option(option, param):
    """
    Changes the operation of various AutoIt functions/parameters
    :param option: The option to change
    :param param: The parameter (varies by option); must be an int, as it
        is passed to the DLL wrapped in a ctypes INT.
    :return: the option's previous value, as reported by AutoItX
    """
    pre_value = AUTO_IT.AU3_AutoItSetOption(LPCWSTR(option), INT(param))
    return pre_value
class Properties(object):
    """
    Window show-state constants available in AutoItX; the names and values
    match the Win32 ShowWindow() SW_* constants.
    """
    SW_HIDE = 0
    SW_MAXIMIZE = 3
    SW_MINIMIZE = 6
    SW_RESTORE = 9
    SW_SHOW = 5
    SW_SHOWDEFAULT = 10
    SW_SHOWMAXIMIZED = 3  # same value as SW_MAXIMIZE, per the Win32 header
    SW_SHOWMINIMIZED = 2
    SW_SHOWMINNOACTIVE = 7
    SW_SHOWNA = 8
    SW_SHOWNOACTIVATE = 4
    SW_SHOWNORMAL = 1
class _Options(object):
def __init__(self):
self._caret_coord_mode = 1
self._mouse_click_delay = 10
self._mouse_click_down_delay = 10
self._mouse_click_drag_delay = 250
self._mouse_coord_mode = 1
self._pixel_coord_mode = 1
self._send_attach_mode = 0
self._send_capslock_mode = 1
self._send_key_delay = 5
self._send_key_down_delay = 10
self._win_detect_hidden_text = 0
self._win_search_children = 0
self._win_text_match_mode = 1
self._win_title_match_mode = 1
self._win_wait_delay = 250
@property
def caret_coord_mode(self):
return self._caret_coord_mode
@caret_coord_mode.setter
def caret_coord_mode(self, value):
auto_it_set_option("CaretCoordMode", value)
self._caret_coord_mode = value
@property
def mouse_click_delay(self):
return self._mouse_click_delay
@mouse_click_delay.setter
def mouse_click_delay(self, value):
auto_it_set_option("MouseClickDelay", value)
self._mouse_click_delay = value
@property
def mouse_click_down_delay(self):
return self._mouse_click_down_delay
@mouse_click_down_delay.setter
def mouse_click_down_delay(self, value):
auto_it_set_option("MouseClickDownDelay", value)
self._mouse_click_down_delay = value
@property
def mouse_click_drag_delay(self):
return self._mouse_click_drag_delay
@mouse_click_drag_delay.setter
def mouse_click_drag_delay(self, value):
auto_it_set_option("MouseClickDragDelay", value)
self._mouse_click_drag_delay = value
@property
def mouse_coord_mode(self):
return self._mouse_coord_mode
@mouse_coord_mode.setter
def mouse_coord_mode(self, value):
auto_it_set_option("MouseCoordMode", value)
self._mouse_coord_mode = value
@property
def pixel_coord_mode(self):
return self._pixel_coord_mode
@pixel_coord_mode.setter
def pixel_coord_mode(self, value):
auto_it_set_option("PixelCoordMode", value)
self._pixel_coord_mode = value
@property
def send_attach_mode(self):
return self._send_attach_mode
@send_attach_mode.setter
def send_attach_mode(self, value):
auto_it_set_option("SendAttachMode", INT(value))
self._send_attach_mode = value
@property
def send_capslock_mode(self):
return self._send_capslock_mode
@send_capslock_mode.setter
def send_capslock_mode(self, value):
auto_it_set_option("SendCapslockMode", value)
self._send_capslock_mode = value
@property
def send_key_delay(self):
return self._send_key_delay
@send_key_delay.setter
def send_key_delay(self, value):
auto_it_set_option("SendKeyDelay", value)
self._send_key_delay = value
@property
def send_key_down_delay(self):
return self._send_key_down_delay
@send_key_down_delay.setter
def send_key_down_delay(self, value):
auto_it_set_option("SendKeyDownDelay", value)
self._send_key_down_delay = value
@property
def win_detect_hidden_text(self):
return self._win_detect_hidden_text
@win_detect_hidden_text.setter
def win_detect_hidden_text(self, value):
auto_it_set_option("WinDetectHiddenText", value)
self._win_detect_hidden_text = value
@property
def win_search_children(self):
return self._win_search_children
@win_search_children.setter
def win_search_children(self, value):
auto_it_set_option("WinSearchChildren", value)
self._win_search_children = value
@property
def win_text_match_mode(self):
return self._win_text_match_mode
@win_text_match_mode.setter
def win_text_match_mode(self, value):
auto_it_set_option("WinTextMatchMode", value)
self._win_text_match_mode = value
@property
def win_title_match_mode(self):
return self._win_title_match_mode
@win_title_match_mode.setter
def win_title_match_mode(self, value):
auto_it_set_option("WinTitleMatchMode", value)
self._win_title_match_mode = value
@property
def win_wait_delay(self):
return self._win_wait_delay
@win_wait_delay.setter
def win_wait_delay(self, value):
auto_it_set_option("WinWaitDelay", value)
self._win_wait_delay = value
class Commands(object):
    """
    Command-name strings, presumably passed to AutoItX control functions
    (e.g. AU3_ControlCommand / list-view / tree-view commands); the call
    sites are outside this module chunk -- verify against them.
    """
    # Generic control commands
    is_visible = "IsVisible"
    is_enabled = "IsEnabled"
    show_drop_down = "ShowDropDown"
    hide_drop_down = "HideDropDown"
    add_string = "AddString"
    del_string = "DelString"
    find_string = "FindString"
    set_current_selection = "SetCurrentSelection"
    is_checked = "IsChecked"
    check = "Check"
    un_check = "UnCheck"  # NOTE(review): near-duplicate of `uncheck` below
    get_current_line = "GetCurrentLine"
    get_current_col = "GetCurrentCol"
    get_current_selection = "GetCurrentSelection"
    get_line_count = "GetLineCount"
    get_line = "GetLine"
    get_selected = "GetSelected"
    edit_paste = "EditPaste"
    current_tab = "CurrentTab"
    tab_right = "TabRight"
    tab_left = "TabLeft"
    # List-view commands
    de_select = "DeSelect"
    find_item = "FindItem"
    get_item_count = "GetItemCount"
    get_selected_count = "GetSelectedCount"
    get_sub_item_count = "GetSubItemCount"
    get_text = "GetText"
    is_selected = "IsSelected"
    select = "Select"
    select_all = "SelectAll"
    select_clear = "SelectClear"
    select_invert = "SelectInvert"
    view_change = "View"
    # Tree-view commands
    collapse = "Collapse"
    exists = "Exists"
    expand = "Expand"
    uncheck = "Uncheck"
options = _Options()  # shared option cache used by callers of this module
properties = Properties  # module-level aliases for the constant holders
commands = Commands
INTDEFAULT = -2147483647  # "not specified" sentinel (-2**31 + 1)
@api.check(1, err_msg="clipboard is empty or contains a non-text entry")
def clip_get(buf_size=256):
    """
    Fetch the current clipboard text.
    :param buf_size: maximum number of characters fetched; longer
        clipboard content is presumably truncated to this size -- TODO confirm
    :return: the clipboard text with trailing whitespace stripped
    """
    clip = ctypes.create_unicode_buffer(buf_size)
    AUTO_IT.AU3_ClipGet(clip, INT(buf_size))
    return clip.value.rstrip()
@api.check(2, err_msg="Write text to clipboard failed")
def clip_put(value):
    """
    Put *value* on the Windows clipboard as text.
    A zero return from AutoItX is converted into AutoItError by the
    api.check decorator.
    :param value: text to store
    :return: the AutoItX return code
    """
    return AUTO_IT.AU3_ClipPut(LPCWSTR(value))
def is_admin():
    """
    Ask AutoItX whether the current process has administrator rights.
    :return: the AU3_IsAdmin return code (non-zero for admin)
    """
    return AUTO_IT.AU3_IsAdmin()
def drive_map_add(device, share, flag=0, user="", pwd="", buf_size=256):
    """
    Map a network share to a drive letter.
    :param device: drive to assign, e.g. "X:"
    :param share: remote share to connect to
    :param flag: 0 = default
                 1 = Persistent mapping
                 8 = Show authentication dialog if required
    :param user: optional user name for the connection
    :param pwd: optional password for the connection
    :param buf_size: size of the buffer receiving the assigned drive name
    :return: the assigned drive name reported by AutoItX, whitespace-stripped
    :raises AutoItError: when AutoItX flags a failure; the message comes
        from the error-code table below.
    """
    result = ctypes.create_unicode_buffer(buf_size)
    err_code = {
        1: "Undefined / Other error",
        2: "Access to the remote share was denied",
        3: "The device is already assigned",
        4: "Invalid device name",
        5: "Invalid remote share",
        6: "Invalid password"
    }
    AUTO_IT.AU3_DriveMapAdd(
        LPCWSTR(device), LPCWSTR(share), INT(flag), LPCWSTR(user),
        LPCWSTR(pwd), result, INT(buf_size))
    # Read the error state exactly once: the original code called error()
    # twice, and the value used for the message could in principle differ
    # from the one that gated the raise.
    err = error()
    if err:
        raise AutoItError(err_code.get(err, None))
    return result.value.rstrip()
@api.check(2, err_msg="the disconnection was unsuccessful")
def drive_map_del(device):
    """
    Disconnect a network drive (e.g. one created by drive_map_add()).
    :param device: the drive letter to disconnect, e.g. "X:"
    :return: the AutoItX return code; 0 raises AutoItError via api.check
    """
    ret = AUTO_IT.AU3_DriveMapDel(LPCWSTR(device))
    return ret
@api.check(1, err_msg="get the details of a mapped drive failed")
def drive_map_get(device, buf_size=256):
    """
    Retrieve the remote share a mapped drive points at.
    :param device: the mapped drive letter, e.g. "X:"
    :param buf_size: size of the buffer receiving the mapping string
    :return: the mapping reported by AutoItX, whitespace-stripped
    """
    mapping = ctypes.create_unicode_buffer(buf_size)
    AUTO_IT.AU3_DriveMapGet(LPCWSTR(device), mapping, INT(buf_size))
    return mapping.value.rstrip()
def mouse_click(button="left", x=INTDEFAULT, y=INTDEFAULT, clicks=1, speed=-1):
    """
    Perform one or more mouse clicks.
    :param button: "left", "right" or "middle"
    :param x: screen x; INTDEFAULT presumably means "current cursor
        position" -- TODO confirm against AutoItX docs
    :param y: screen y (same sentinel convention)
    :param clicks: number of clicks to send
    :param speed: cursor move speed; -1 selects AutoIt's default
    :return: the AutoItX return code
    """
    ret = AUTO_IT.AU3_MouseClick(
        LPCWSTR(button), INT(x), INT(y), INT(clicks), INT(speed)
    )
    return ret
def mouse_click_drag(x1, y1, x2, y2, button="left", speed=-1):
    """
    Press a mouse button at (x1, y1), drag to (x2, y2), then release.
    :param x1: drag start x (screen coordinates)
    :param y1: drag start y
    :param x2: drag end x
    :param y2: drag end y
    :param button: "left", "right" or "middle"
    :param speed: cursor move speed; -1 selects AutoIt's default
    :return: the AutoItX return code
    """
    ret = AUTO_IT.AU3_MouseClickDrag(
        LPCWSTR(button), INT(x1), INT(y1), INT(x2), INT(y2), INT(speed)
    )
    return ret
def mouse_down(button="left"):
    """
    Press and hold a mouse button; release it with mouse_up().
    :param button: "left", "right" or "middle"
    :return: None
    """
    AUTO_IT.AU3_MouseDown(LPCWSTR(button))
def mouse_get_cursor():
    """
    Return the id of the cursor shape currently displayed (AutoItX's
    MouseGetCursor numbering).
    """
    return AUTO_IT.AU3_MouseGetCursor()
def mouse_get_pos():
    """Return the current mouse position.

    :return: ``(x, y)`` tuple of screen coordinates.
    """
    position = POINT()
    AUTO_IT.AU3_MouseGetPos(ctypes.byref(position))
    return position.x, position.y
def mouse_move(x, y, speed=-1):
    """Move the mouse cursor to the given screen coordinates.

    :param x: horizontal coordinate.
    :param y: vertical coordinate.
    :param speed: movement speed; -1 = AutoIt default.
    :return: result of ``AU3_MouseMove``.
    """
    return AUTO_IT.AU3_MouseMove(INT(x), INT(y), INT(speed))
def mouse_up(button="left"):
    """Release a mouse button previously pressed with :func:`mouse_down`.

    :param button: mouse button name, e.g. ``"left"``.
    :return: None -- the underlying call's result is discarded.
    """
    AUTO_IT.AU3_MouseUp(LPCWSTR(button))
@api.check(1, err_msg="the direction is not recognized")
def mouse_wheel(direction, clicks=-1):
    """Scroll the mouse wheel.

    :param direction: "up" or "down"
    :param clicks: number of wheel notches; -1 = AutoIt default.
    :return: None -- failure is surfaced by the ``api.check`` decorator.
    """
    AUTO_IT.AU3_MouseWheel(LPCWSTR(direction), INT(clicks))
def opt(option, value):
    """Shorthand alias for :func:`auto_it_set_option`.

    :param option: option name to set.
    :param value: value to assign to the option.
    :return: whatever :func:`auto_it_set_option` returns.
    """
    return auto_it_set_option(option, value)
def pixel_checksum(left, top, right, bottom, step=1):
    """Compute a checksum over the pixels of a screen rectangle.

    :param left: left edge of the region.
    :param top: top edge of the region.
    :param right: right edge of the region.
    :param bottom: bottom edge of the region.
    :param step: sample every *step* pixels.
    :return: integer checksum from ``AU3_PixelChecksum``.
    """
    region = RECT(left, top, right, bottom)
    return AUTO_IT.AU3_PixelChecksum(ctypes.byref(region), INT(step))
@api.check(2, unexpected_ret=(-1,), err_msg="invalid coordinates")
def pixel_get_color(x, y):
    """Return the colour of the pixel at the given screen coordinates.

    :param x: horizontal coordinate.
    :param y: vertical coordinate.
    :return: pixel colour value (-1 is rejected by the decorator).
    """
    return AUTO_IT.AU3_PixelGetColor(INT(x), INT(y))
@api.check(1, err_msg="color is not found")
def pixel_search(left, top, right, bottom, col, var=1, step=1):
    """Search a screen rectangle for a pixel of the given colour.

    :param left: left edge of the search region.
    :param top: top edge of the search region.
    :param right: right edge of the search region.
    :param bottom: bottom edge of the search region.
    :param col: colour value to look for.
    :param var: allowed colour variation.
    :param step: sample every *step* pixels.
    :return: ``(x, y)`` of the match (the decorator raises when not found).
    """
    hit = POINT()
    region = RECT(left, top, right, bottom)
    AUTO_IT.AU3_PixelSearch(
        ctypes.byref(region), INT(col), INT(var), INT(step), ctypes.byref(hit)
    )
    return hit.x, hit.y
def send(send_text, mode=0):
    """Sends simulated keystrokes to the active window.

    :param send_text: keys to send.
    :param mode: Changes how "keys" is processed:
        flag = 0 (default), Text contains special characters like + and ! to
        indicate SHIFT and ALT key presses.
        flag = 1, keys are sent raw.
    :return: None -- the underlying call's result is discarded.
    """
    AUTO_IT.AU3_Send(LPCWSTR(send_text), INT(mode))
def tooltip(tip, x=INTDEFAULT, y=INTDEFAULT):
    """Display a tooltip with the given text.

    :param tip: text to show.
    :param x: horizontal coordinate (INTDEFAULT = AutoIt default placement).
    :param y: vertical coordinate (INTDEFAULT = AutoIt default placement).
    :return: None -- the underlying call's result is discarded.
    """
    AUTO_IT.AU3_ToolTip(LPCWSTR(tip), INT(x), INT(y))
|
robovm/robovm-studio | refs/heads/master | python/testData/refactoring/changeSignature/addPositionalParam.before.py | 415 | def bar(a, b):
pass
bar(1, 2) |
simongoffin/my_odoo_tutorial | refs/heads/master | addons/mass_mailing/wizard/mail_compose_message.py | 33 | # -*- coding: utf-8 -*-
from openerp.osv import osv, fields
class MailComposeMessage(osv.TransientModel):
    """Add concept of mass mailing campaign to the mail.compose.message wizard
    """
    _inherit = 'mail.compose.message'
    _columns = {
        # Optional campaign grouping several mass mailings
        'mass_mailing_campaign_id': fields.many2one(
            'mail.mass_mailing.campaign', 'Mass Mailing Campaign',
        ),
        # Existing mass mailing this wizard run should be attached to
        'mass_mailing_id': fields.many2one(
            'mail.mass_mailing', 'Mass Mailing'
        ),
        # Name used to create a new mass mailing on the fly (see below)
        'mass_mailing_name': fields.char('Mass Mailing'),
        'mailing_list_ids': fields.many2many(
            'mail.mass_mailing.list', string='Mailing List'
        ),
    }
    def get_mail_values(self, cr, uid, wizard, res_ids, context=None):
        """ Override method that generated the mail content by creating the
        mail.mail.statistics values in the o2m of mail_mail, when doing pure
        email mass mailing.

        :param cr: database cursor
        :param uid: current user id
        :param wizard: mail.compose.message browse record being processed
        :param res_ids: ids of the records the mailing is rendered for
        :param context: optional context dict, passed through to the ORM
        :return: dict mapping res_id -> mail values (parent result,
                 enriched with mailing/statistics keys in mass-mail mode)
        """
        res = super(MailComposeMessage, self).get_mail_values(cr, uid, wizard, res_ids, context=context)
        # use only for allowed models in mass mailing
        if wizard.composition_mode == 'mass_mail' and \
                (wizard.mass_mailing_name or wizard.mass_mailing_id) and \
                wizard.model in [item[0] for item in self.pool['mail.mass_mailing']._get_mailing_model(cr, uid, context=context)]:
            mass_mailing = wizard.mass_mailing_id
            if not mass_mailing:
                # no existing mailing chosen: create one, directly in 'done'
                # state, from the wizard's name/template/domain
                mass_mailing_id = self.pool['mail.mass_mailing'].create(
                    cr, uid, {
                        'mass_mailing_campaign_id': wizard.mass_mailing_campaign_id and wizard.mass_mailing_campaign_id.id or False,
                        'name': wizard.mass_mailing_name,
                        'template_id': wizard.template_id and wizard.template_id.id or False,
                        'state': 'done',
                        'mailing_type': wizard.model,
                        'mailing_domain': wizard.active_domain,
                    }, context=context)
                mass_mailing = self.pool['mail.mass_mailing'].browse(cr, uid, mass_mailing_id, context=context)
            for res_id in res_ids:
                res[res_id].update({
                    'mailing_id': mass_mailing.id,
                    # one statistics record is created per outgoing email
                    'statistics_ids': [(0, 0, {
                        'model': wizard.model,
                        'res_id': res_id,
                        'mass_mailing_id': mass_mailing.id,
                    })],
                    # email-mode: keep original message for routing
                    'notification': mass_mailing.reply_to_mode == 'thread',
                    'auto_delete': True,
                })
        return res
|
lhidalgo42/DTSA | refs/heads/master | Libreria/Python/tests/test_ieee.py | 5 | #! /usr/bin/python
"""
test_ieee.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
Tests the XBee (IEEE 802.15.4) implementation class for XBee API compliance
"""
import unittest, sys, traceback
from xbee.tests.Fake import FakeDevice, FakeReadDevice
from xbee.ieee import XBee
from xbee.python2to3 import byteToInt, intToByte, stringToBytes
class InitXBee(unittest.TestCase):
    """
    Shared fixture: each test gets an XBee bound to no serial device.
    """
    def setUp(self):
        """
        Build the XBee object under test (no port attached).
        """
        self.xbee = XBee(None)
class TestBuildCommand(InitXBee):
    """
    _build_command should properly build a command packet
    """
    def test_build_at_data_mismatch(self):
        """
        Omitting required fields from an "at" command must raise.
        """
        try:
            self.xbee._build_command("at")
        except KeyError:
            return  # expected failure mode
        self.fail(
            "An exception was not raised with improper data supplied"
        )

    def test_build_at_data_len_mismatch(self):
        """
        Fields of the wrong byte length must be rejected.
        """
        try:
            self.xbee._build_command("at", frame_id="AB", command="MY")
        except ValueError:
            return  # expected failure mode
        self.fail(
            "An exception was not raised with improper data length"
        )

    def test_build_at(self):
        """
        A well-formed "at" command with an explicit frame id must
        serialize to the documented byte layout.
        """
        packet = self.xbee._build_command(
            "at",
            frame_id=intToByte(43),
            command=stringToBytes("MY")
        )
        # 0x08 = AT command api id, '+' == chr(43) frame id, then "MY"
        self.assertEqual(packet, b'\x08+MY')

    def test_build_at_with_default(self):
        """
        With no frame id given, the default of \x00 must be used.
        """
        packet = self.xbee._build_command("at", command=stringToBytes("MY"))
        self.assertEqual(packet, b'\x08\x00MY')
class TestSplitResponse(InitXBee):
    """
    _split_response should properly split a response packet
    """
    def test_unrecognized_response(self):
        """
        An unknown response id byte must raise KeyError.
        """
        packet = b'\x23\x00\x00\x00'
        try:
            self.xbee._split_response(packet)
        except KeyError:
            return  # expected
        self.fail()

    def test_transmit_packet_received(self):
        """
        A valid *transmit* id seen where a response is expected should
        raise CommandFrameException (hints the device is in command mode).
        """
        from xbee.base import CommandFrameException
        packet = b'\x01\x00\x00\x00'
        try:
            self.xbee._split_response(packet)
        except CommandFrameException:
            return  # expected
        self.fail()

    def test_bad_data_long(self):
        """
        Payloads longer than the layout allows must raise ValueError.
        """
        self.assertRaises(
            ValueError, self.xbee._split_response, b'\x8a\x00\x00\x00'
        )

    def test_bad_data_short(self):
        """
        Truncated payloads must raise ValueError.
        """
        self.assertRaises(ValueError, self.xbee._split_response, b'\x8a')

    def test_split_status_response(self):
        """
        A status response must split into its id and status fields.
        """
        parsed = self.xbee._split_response(b'\x8a\x01')
        self.assertEqual(parsed, {'id': 'status', 'status': b'\x01'})

    def test_split_short_at_response(self):
        """
        An at_response without parameter data must parse cleanly.
        """
        parsed = self.xbee._split_response(b'\x88DMY\x01')
        self.assertEqual(parsed, {'id': 'at_response',
                                  'frame_id': b'D',
                                  'command': b'MY',
                                  'status': b'\x01'})

    def test_split_at_resp_with_param(self):
        """
        An at_response carrying parameter data must expose it.
        """
        parsed = self.xbee._split_response(b'\x88DMY\x01ABCDEF')
        self.assertEqual(parsed, {'id': 'at_response',
                                  'frame_id': b'D',
                                  'command': b'MY',
                                  'status': b'\x01',
                                  'parameter': b'ABCDEF'})

    def test_generalized_packet_parsing(self):
        """
        Per-field "parsing" hooks in the protocol definition must be
        applied to the raw fields.
        """
        # Temporarily install a hook that rewrites the parameter field
        self.xbee.api_responses[b"\x88"]["parsing"] = \
            [("parameter", lambda xbee, original: b"GHIJKL")]
        parsed = self.xbee._split_response(b'\x88DMY\x01ABCDEF')
        expected = {'id': 'at_response',
                    'frame_id': b'D',
                    'command': b'MY',
                    'status': b'\x01',
                    'parameter': b'GHIJKL'}
        # Remove the temporary hook before asserting
        del(self.xbee.api_responses[b"\x88"]["parsing"])
        self.assertEqual(parsed, expected)
class TestParseIOData(InitXBee):
    """
    XBee class should properly parse IO data received from an XBee
    device
    """
    def test_parse_single_dio(self):
        """
        One all-digital sample: every enabled DIO line reads high.
        """
        # One sample, ADC disabled and DIO8 enabled, DIO 0-7 enabled
        header = b'\x01\x01\xFF'
        # First 7 bits ignored, DIO8 high, DIO 0-7 high
        packet = header + b'\x01\xFF'
        expected = [{("dio-%d" % pin): True for pin in range(9)}]
        self.assertEqual(self.xbee._parse_samples(packet), expected)

    def test_parse_single_dio_again(self):
        """
        One all-digital sample: odd DIO lines high, even lines low.
        """
        # One sample, ADC disabled and DIO8 enabled, DIO 0-7 enabled
        header = b'\x01\x01\xFF'
        # First 7 bits ignored, DIO8 low, DIO 0-7 alternating
        packet = header + b'\x00\xAA'
        expected = [{("dio-%d" % pin): pin % 2 == 1 for pin in range(9)}]
        self.assertEqual(self.xbee._parse_samples(packet), expected)

    def test_parse_single_dio_subset(self):
        """
        Only the enabled subset of DIO lines may appear in the result.
        """
        # One sample, ADC disabled, DIO 1,3,5,7 enabled
        header = b'\x01\x00\xAA'
        # First 7 bits ignored, DIO8 low, DIO 0-7 alternating
        packet = header + b'\x00\xAA'
        expected = [{("dio-%d" % pin): True for pin in (1, 3, 5, 7)}]
        self.assertEqual(self.xbee._parse_samples(packet), expected)

    def test_parse_single_dio_subset_again(self):
        """
        A single enabled line yields a single-key sample dict.
        """
        # One sample, ADC disabled, DIO 0 enabled
        header = b'\x01\x00\x01'
        # First 7 bits ignored, DIO8 low, DIO 0-7 alternating
        packet = header + b'\x00\xAA'
        self.assertEqual(self.xbee._parse_samples(packet), [{'dio-0': False}])

    def test_parse_multiple_dio_subset(self):
        """
        Two samples of one DIO line must come back in arrival order.
        """
        # Two samples, ADC disabled, DIO 0 enabled
        header = b'\x02\x00\x01'
        # First 7 bits ignored, DIO8 low, DIO 0-7 alternating
        packet = header + b'\x00\xAA' + b'\x00\x01'
        expected = [{'dio-0': False}, {'dio-0': True}]
        self.assertEqual(self.xbee._parse_samples(packet), expected)

    def test_parse_multiple_dio(self):
        """
        Three all-digital samples: all-high, alternating, all-low.
        """
        # Three samples, ADC disabled and DIO8 enabled, DIO 0-7 enabled
        header = b'\x03\x01\xFF'
        packet = header + b'\x01\xFF' + b'\x00\xAA' + b'\x00\x00'
        all_high = {("dio-%d" % pin): True for pin in range(9)}
        alternating = {("dio-%d" % pin): pin % 2 == 1 for pin in range(9)}
        all_low = {("dio-%d" % pin): False for pin in range(9)}
        self.assertEqual(self.xbee._parse_samples(packet),
                         [all_high, alternating, all_low])

    def test_parse_multiple_adc_subset(self):
        """
        Two samples of two ADC channels must keep channel order.
        """
        # Two samples, ADC 0,1 enabled, DIO disabled
        header = b'\x02\x06\x00'
        # No dio data; ADC0=0, ADC1=255, then ADC0=5, ADC1=7
        packet = header + b'\x00\x00' + b'\x00\xFF' + b'\x00\x05' + b'\x00\x07'
        expected = [{'adc-0': 0, 'adc-1': 255}, {'adc-0': 5, 'adc-1': 7}]
        self.assertEqual(self.xbee._parse_samples(packet), expected)

    def test_parse_single_dio_adc_subset(self):
        """
        Mixed digital + analog sample data parse into one dict.
        """
        # One sample, ADC 0 enabled, DIO 1,3,5,7 enabled
        header = b'\x01\x02\xAA'
        # First 7 bits ignored, DIO 0-7 alternating, ADC0 value of 255
        packet = header + b'\x00\xAA\x00\xFF'
        expected = {("dio-%d" % pin): True for pin in (1, 3, 5, 7)}
        expected['adc-0'] = 255
        self.assertEqual(self.xbee._parse_samples(packet), [expected])
class TestWriteToDevice(unittest.TestCase):
    """
    XBee class should properly write binary data in a valid API
    frame to a given serial device, including a valid command packet.
    """
    def test_send_at_command(self):
        """
        send() must emit a complete API frame for a parameterless AT
        command.
        """
        fake_port = FakeDevice()
        radio = XBee(fake_port)
        radio.send('at', frame_id=stringToBytes('A'),
                   command=stringToBytes('MY'))
        # Full frame: start delimiter, length, payload, checksum
        self.assertEqual(fake_port.data, b'\x7E\x00\x04\x08AMY\x10')

    def test_send_at_command_with_param(self):
        """
        send() must emit a complete API frame for an AT command that
        carries parameter bytes.
        """
        fake_port = FakeDevice()
        radio = XBee(fake_port)
        radio.send(
            'at',
            frame_id=stringToBytes('A'),
            command=stringToBytes('MY'),
            parameter=b'\x00\x00'
        )
        self.assertEqual(fake_port.data, b'\x7E\x00\x06\x08AMY\x00\x00\x10')
class TestSendShorthand(unittest.TestCase):
    """
    Tests shorthand for sending commands to an XBee provided by
    XBee.__getattr__
    """
    def setUp(self):
        """
        Prepare a fake device to write frames to.
        """
        self.ser = FakeDevice()
        self.xbee = XBee(self.ser)

    def test_send_at_command(self):
        """
        The .at() shorthand must emit the same frame as send('at', ...).
        """
        self.xbee.at(frame_id=stringToBytes('A'), command=stringToBytes('MY'))
        self.assertEqual(self.ser.data, b'\x7E\x00\x04\x08AMY\x10')

    def test_send_at_command_with_param(self):
        """
        The shorthand must forward parameter bytes unchanged.
        """
        self.xbee.at(frame_id=stringToBytes('A'), command=stringToBytes('MY'),
                     parameter=b'\x00\x00')
        self.assertEqual(self.ser.data, b'\x7E\x00\x06\x08AMY\x00\x00\x10')

    def test_shorthand_disabled(self):
        """
        With shorthand=False, undefined attributes raise AttributeError.
        """
        self.xbee = XBee(self.ser, shorthand=False)
        try:
            self.xbee.at
        except AttributeError:
            pass
        else:
            self.fail("Specified shorthand command should not exist")
class TestReadFromDevice(unittest.TestCase):
    """
    XBee class should properly read and parse binary data from a serial
    port device.
    """
    def test_read_at(self):
        """
        A parameterless AT response frame must parse into its fields.
        """
        radio = XBee(FakeReadDevice(b'\x7E\x00\x05\x88DMY\x01\x8c'))
        frame = radio.wait_read_frame()
        self.assertEqual(frame, {'id': 'at_response',
                                 'frame_id': b'D',
                                 'command': b'MY',
                                 'status': b'\x01'})

    def test_read_at_params(self):
        """
        An AT response with a parameter must expose the parameter bytes.
        """
        radio = XBee(FakeReadDevice(
            b'\x7E\x00\x08\x88DMY\x01\x00\x00\x00\x8c'
        ))
        frame = radio.wait_read_frame()
        self.assertEqual(frame, {'id': 'at_response',
                                 'frame_id': b'D',
                                 'command': b'MY',
                                 'status': b'\x01',
                                 'parameter': b'\x00\x00\x00'})

    def test_read_io_data(self):
        """
        XBee class should properly read and parse incoming IO data
        """
        # IO payload: one sample, ADC 0 enabled, DIO 1,3,5,7 enabled;
        # DIO 0-7 alternating, ADC0 value of 255
        io_payload = b'\x01\x02\xAA' + b'\x00\xAA\x00\xFF'
        # RX frame envelope: api id, source address, RSSI, options
        rx_io_resp = b'\x83\x00\x01\x28\x00'
        radio = XBee(FakeReadDevice(
            b'\x7E\x00\x0C' + rx_io_resp + io_payload + b'\xfd'
        ))
        frame = radio.wait_read_frame()
        samples = [{'dio-1': True,
                    'dio-3': True,
                    'dio-5': True,
                    'dio-7': True,
                    'adc-0': 255}]
        self.assertEqual(frame, {'id': 'rx_io_data',
                                 'source_addr': b'\x00\x01',
                                 'rssi': b'\x28',
                                 'options': b'\x00',
                                 'samples': samples})

    def test_read_empty_string(self):
        """
        Reading an empty string must not cause a crash

        Occasionally, the serial port fails to read properly, and returns
        an empty string. In this event, we must not crash.
        """
        class BadReadDevice(FakeReadDevice):
            # Simulates the faulty read: returns '' exactly once, at
            # bad_read_index, and behaves like FakeReadDevice otherwise.
            def __init__(self, bad_read_index, data):
                self.read_id = 0
                self.bad_read_index = bad_read_index
                super(BadReadDevice, self).__init__(data)

            def inWaiting(self):
                return 1

            def read(self):
                current = self.read_id
                self.read_id += 1
                if current == self.bad_read_index:
                    return ''
                return super(BadReadDevice, self).read()

        radio = XBee(BadReadDevice(1, b'\x7E\x00\x05\x88DMY\x01\x8c'))
        try:
            radio.wait_read_frame()
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.fail("".join(traceback.format_exception(exc_type, exc_value, exc_traceback)))

    def test_read_at_params_in_escaped_mode(self):
        """
        In escaped API mode, escape sequences must be undone on read.
        """
        radio = XBee(FakeReadDevice(b'~\x00\t\x88DMY\x01}^}]}1}3m'),
                     escaped=True)
        frame = radio.wait_read_frame()
        self.assertEqual(frame, {'id': 'at_response',
                                 'frame_id': b'D',
                                 'command': b'MY',
                                 'status': b'\x01',
                                 'parameter': b'\x7E\x7D\x11\x13'})
if __name__ == '__main__':
    # Allow running this test module directly: python test_ieee.py
    unittest.main()
|
aileron/dokku | refs/heads/master | tests/apps/python-flask/hello.py | 236 | import os
from flask import Flask
# WSGI application object picked up by the deployment (dokku test app).
app = Flask(__name__)
@app.route('/')
def hello():
    """Root endpoint: return a fixed marker string used by smoke tests."""
    return 'python/flask'
|
Walt280/cosmos | refs/heads/master | code/graph-algorithms/hamiltonian_cycle/hamiltonian_cycle.py | 3 | # Part of Cosmos by OpenGenus Foundation
# Python program for solution of
# hamiltonian cycle problem
class Graph():
    """Undirected graph on a V x V adjacency matrix with a backtracking
    Hamiltonian-cycle solver (Python 2 syntax: uses print statements)."""
    def __init__(self, vertices):
        # adjacency matrix, initially all zeros; callers assign .graph directly
        self.graph = [[0 for column in range(vertices)]\
                            for row in range(vertices)]
        self.V = vertices
    ''' Check if this vertex is an adjacent vertex
        of the previously added vertex and is not
        included in the path earlier '''
    def isSafe(self, v, pos, path):
        # Check if current vertex and last vertex
        # in path are adjacent
        if self.graph[ path[pos-1] ][v] == 0:
            return False
        # Check if current vertex not already in path
        for vertex in path:
            if vertex == v:
                return False
        return True
    # A recursive utility function to solve
    # hamiltonian cycle problem
    def hamCycleUtil(self, path, pos):
        # base case: if all vertices are
        # included in the path
        if pos == self.V:
            # Last vertex must be adjacent to the
            # first vertex in path to make a cyle
            if self.graph[ path[pos-1] ][ path[0] ] == 1:
                return True
            else:
                return False
        # Try different vertices as a next candidate
        # in Hamiltonian Cycle. We don't try for 0 as
        # we included 0 as starting point in in hamCycle()
        for v in range(1,self.V):
            if self.isSafe(v, pos, path) == True:
                path[pos] = v
                if self.hamCycleUtil(path, pos+1) == True:
                    return True
                # Remove current vertex if it doesn't
                # lead to a solution
                path[pos] = -1
        return False
    def hamCycle(self):
        # path[i] = i-th vertex on the cycle; -1 means "not assigned yet"
        path = [-1] * self.V
        ''' Let us put vertex 0 as the first vertex
            in the path. If there is a Hamiltonian Cycle,
            then the path can be started from any point
            of the cycle as the graph is undirected '''
        path[0] = 0
        if self.hamCycleUtil(path,1) == False:
            print "Solution does not exist\n"
            return False
        self.printSolution(path)
        return True
    def printSolution(self, path):
        # Echo the cycle, repeating the start vertex to close it
        print "Solution Exists: Following is one Hamiltonian Cycle"
        for vertex in path:
            print vertex,
        print path[0], "\n"
# Driver Code
''' Let us create the following graph
(0)--(1)--(2)
| / \ |
| / \ |
| / \ |
(3)-------(4) '''
g1 = Graph(5)
g1.graph = [ [0, 1, 0, 1, 0], [1, 0, 1, 1, 1],
[0, 1, 0, 0, 1,],[1, 1, 0, 0, 1],
[0, 1, 1, 1, 0], ]
# Print the solution
g1.hamCycle();
''' Let us create the following graph
(0)--(1)--(2)
| / \ |
| / \ |
| / \ |
(3) (4) '''
g2 = Graph(5)
g2.graph = [ [0, 1, 0, 1, 0], [1, 0, 1, 1, 1],
[0, 1, 0, 0, 1,], [1, 1, 0, 0, 0],
[0, 1, 1, 0, 0], ]
# Print the solution
g2.hamCycle();
|
yawnosnorous/python-for-android | refs/heads/master | python3-alpha/extra_modules/pyxmpp2/expdict.py | 46 | #
# (C) Copyright 2003-2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Dictionary with item expiration."""
__docformat__ = "restructuredtext en"
import time
import threading
import logging
logger = logging.getLogger("pyxmpp2.expdict")

# Sentinel distinguishing "no default supplied" from an explicit None default.
_NO_DEFAULT = object()

class ExpiringDictionary(dict):
    """An extension to standard Python dictionary objects which implements item
    expiration.

    Each item in ExpiringDictionary has its expiration time assigned, after
    which the item is removed from the mapping.

    :Ivariables:
        - `_timeouts`: a dictionary with timeout values and timeout callback for
          stored objects.
        - `_default_timeout`: the default timeout value (in seconds from now).
        - `_lock`: access synchronization lock.
    :Types:
        - `_timeouts`: `dict`
        - `_default_timeout`: `float`
        - `_lock`: :std:`threading.RLock`"""
    __slots__ = ['_timeouts', '_default_timeout', '_lock']
    def __init__(self, default_timeout = 300.0):
        """Initialize an `ExpiringDictionary` object.

        :Parameters:
            - `default_timeout`: default timeout value (in seconds) for stored
              objects.
        :Types:
            - `default_timeout`: `float`
        """
        dict.__init__(self)
        self._timeouts = {}
        self._default_timeout = default_timeout
        self._lock = threading.RLock()
    def __delitem__(self, key):
        with self._lock:
            logger.debug("expdict.__delitem__({0!r})".format(key))
            del self._timeouts[key]
            return dict.__delitem__(self, key)
    def __getitem__(self, key):
        with self._lock:
            logger.debug("expdict.__getitem__({0!r})".format(key))
            # may remove the item (raising KeyError below) if it has expired
            self._expire_item(key)
            return dict.__getitem__(self, key)
    def pop(self, key, default = _NO_DEFAULT):
        """Remove `key` and return its value, like `dict.pop`.

        Bug fix: the original implementation raised `KeyError` for a
        missing or just-expired key even when a `default` was supplied,
        because `_expire_item` and the `_timeouts` deletion ran unguarded.
        Now a missing/expired key returns `default` when one is given and
        raises `KeyError` otherwise, matching `dict.pop` semantics.
        """
        with self._lock:
            try:
                self._expire_item(key)
                del self._timeouts[key]
            except KeyError:
                # key absent, or it expired just now inside _expire_item
                if default is not _NO_DEFAULT:
                    return default
                raise
            if default is not _NO_DEFAULT:
                return dict.pop(self, key, default)
            else:
                return dict.pop(self, key)
    def __setitem__(self, key, value):
        logger.debug("expdict.__setitem__({0!r}, {1!r})".format(key, value))
        return self.set_item(key, value)
    def set_item(self, key, value, timeout = None, timeout_callback = None):
        """Set item of the dictionary.

        :Parameters:
            - `key`: the key.
            - `value`: the object to store.
            - `timeout`: timeout value for the object (in seconds from now).
            - `timeout_callback`: function to be called when the item expires.
              The callback should accept none, one (the key) or two (the key
              and the value) arguments.
        :Types:
            - `key`: any hashable value
            - `value`: any python object
            - `timeout`: `int`
            - `timeout_callback`: callable
        """
        with self._lock:
            logger.debug("expdict.__setitem__({0!r}, {1!r}, {2!r}, {3!r})"
                                    .format(key, value, timeout, timeout_callback))
            if not timeout:
                timeout = self._default_timeout
            self._timeouts[key] = (time.time() + timeout, timeout_callback)
            return dict.__setitem__(self, key, value)
    def expire(self):
        """Do the expiration of dictionary items.

        Remove items that expired by now from the dictionary.

        :Return: time, in seconds, when the next item expires or `None`
        :returntype: `float`
        """
        with self._lock:
            logger.debug("expdict.expire. timeouts: {0!r}"
                                                    .format(self._timeouts))
            next_timeout = None
            # iterate over a snapshot: _expire_item mutates _timeouts
            for k in list(self._timeouts.keys()):
                ret = self._expire_item(k)
                if ret is not None:
                    if next_timeout is None:
                        next_timeout = ret
                    else:
                        next_timeout = min(next_timeout, ret)
            return next_timeout
    def clear(self):
        with self._lock:
            self._timeouts.clear()
            dict.clear(self)
    def _expire_item(self, key):
        """Do the expiration of a dictionary item.

        Remove the item if it has expired by now, invoking its callback
        (with two, one or zero arguments, whichever the callable accepts).
        Raises `KeyError` if `key` has no timeout entry.

        :Parameters:
            - `key`: key to the object.
        :Types:
            - `key`: any hashable value
        :Return: `None` if the item expired, otherwise the number of
            seconds until it does.
        """
        (timeout, callback) = self._timeouts[key]
        now = time.time()
        if timeout <= now:
            item = dict.pop(self, key)
            del self._timeouts[key]
            if callback:
                try:
                    callback(key, item)
                except TypeError:
                    try:
                        callback(key)
                    except TypeError:
                        callback()
            return None
        else:
            return timeout - now
# vi: sts=4 et sw=4
|
jimga150/HealthNet | refs/heads/master | HealthNet/core/urls.py | 1 | from django.conf.urls import url
from django.core.urlresolvers import reverse_lazy
from . import views
# Route table for the core app; each route name is referenced elsewhere
# (templates / reverse lookups), so names must stay unchanged.
urlpatterns = [
    url(r'^register/', views.register_patient_page, name='register'),
    url(r'^patient/', views.patient_landing, name='patient'),
    url(r'^patients/', views.view_patients, name='view patients'),
    url(r'^patientss/(?P<patient_id>\d+)/$', views.admitPatient, name='admit patients'),
    url(r'^update_self/', views.editownprofile, name='update'),
    # url(r'^update_user/', views.editownpatientprofile, name='update user'),
    url(r'^update_med_info/(?P<patient_id>[0-9]+)/',
        views.EditPatientMediInfo.as_view(success_url=reverse_lazy('view patients')), name='update medical info for'),
    url(r'^view_med_info/(?P<patient_id>[0-9]+)/',
        views.ViewPatientMediInfo.as_view(), name='view medical info for'),
    url(r'^profile/', views.profile, name='profile'),
    url(r'^export/', views.download, name='download'),
    url(r'^email/', views.email, name='email_info'),
    url(r'^landing/', views.landing, name='landing'),
    # Role-specific landing pages
    url(r'^nurse/', views.nurse_landing, name='nurse'),
    url(r'^doctor/', views.doctor_landing, name='doctor'),
    url(r'^hadmin/', views.admin_landing, name='admin'),
    url(r'^new_hospital/', views.NewHospital.as_view(), name='create new hospital'),
    url(r'^login/$', views.user_login, name='login'),
    url(r'^tests/', views.patient_tests, name='tests'), # Not used any more, redirects to testResults
    url(r'^logout/', views.user_logout, name='logout'),
    url(r'^logs/', views.logs, name='logs'),
    url(r'^upload_patient/', views.upload_patient_info, name='upload patient'),
    # Per-role registration pages
    url(r'^staffregister/', views.registerStaff, name='staffregister'),
    url(r'^adminregister/', views.register_admin_page, name='adminregister'),
    url(r'^doctorregister/', views.register_doctor_page, name='doctorregister'),
    url(r'^nurseregister/', views.register_nurse_page, name='nurseregister'),
    url(r'^swag/', views.swag, name='swag'),
    url(r'^$', views.main, name='main')
]
|
ieuan1630-cmis/ieuan1630-cmis-cs2 | refs/heads/master | functions2.py | 1 | #import:
from math import pi
from math import sqrt
import str_manip
#define functions:
#basic maths functions
def add(x, y):
    """Return the sum (or concatenation) of x and y."""
    total = x + y
    return total
def sub(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
def mul(x, y):
    """Return the product of x and y (or sequence repetition)."""
    product = x * y
    return product
def div(x, y):
    """Return x divided by y as a float (y is coerced via ``+ 0.0``)."""
    denominator = y + 0.0
    return x / denominator
#complex functions
def secs_to_hrs(secs):
    """Convert a duration from seconds to hours (float)."""
    hours = secs / 3600.0
    return hours
def circle_area(r):
    """Return the area of a circle with radius r (pi * r**2)."""
    squared_radius = r ** 2
    return pi * squared_radius
def sphere_vol(r):
    """Return the volume of a sphere with radius r ((4/3) * pi * r**3)."""
    coefficient = (4 / 3.0) * pi
    return coefficient * r ** 3
def avg_two_vols(r1, r2):
    """Return the mean of the volumes of two spheres with radii r1 and r2."""
    combined = sphere_vol(r1) + sphere_vol(r2)
    return combined / 2.0
def tri_area(s1, s2, s3):
    """Return the area of a triangle with side lengths s1, s2, s3.

    Uses Heron's formula: sqrt(s * (s-s1) * (s-s2) * (s-s3)) where s is
    the semi-perimeter.
    """
    semi = (s1 + s2 + s3) / 2.0
    return sqrt(semi * (semi - s1) * (semi - s2) * (semi - s3))
def right_align(txt):
    """Right-align txt on an 80-character-wide screen."""
    padding = " " * (80 - len(txt))
    return padding + txt
def centre(txt):
    """Centre txt on an 80-character-wide screen.

    Fix: use floor division so the pad width stays an integer. The
    original ``len(txt)/2`` relied on Python 2 integer division; under
    Python 3 it yields a float and ``" " * float`` raises TypeError.
    ``//`` behaves identically on Python 2, so the output is unchanged.
    """
    return " " * (40 - (len(txt) // 2)) + txt
def msg_box(txt):
    """Return txt wrapped in an ASCII-art message box."""
    border = "+" + ((len(txt) + 4) * "-") + "+"
    middle = "| " + txt + " |"
    return border + "\n" + middle + "\n" + border
#storing function calls (2 per function) in variables:
#list of functions needing to be called twice each: add2, sub2, mul2, div2, secs_to_hrs2, circle_area2, sphere_vol2, avg_two_vols2, tri_area2, right_align, centre, msg_box
# Demo report: one long string exercising every function defined above
# (Python 2 file -- note the print statement below).
output = "There are " + str(mul(24, 3600)) + " seconds in a day. Which means there are " + str(secs_to_hrs(mul(24, 3600))) + " hours in a day. If you have 6000 seconds to complete an exam, you have " + str(secs_to_hrs(6000)) + " hours to complete it. Taking that there are 5 weekdays and 2 weekend days, there are " + str(add(5, 2)) + " days in a week. Also, " + str(add(7, 7)) + " days in a fortnight. Given 365 days in a year there are " + str(div(365, 7)) + " weeks in a year. Given 41 days passed this year, " + str(div(41, 7)) + " weeks have passed; and there are " + str(sub(365, 41)) + " days left this year. And " + str(sub(100, 41)) + " days until the 100th day of the year." + "\n" + "\n" + "Given a circle's radius as 5, that circle's area is " + str(circle_area(5)) + " units squared; if this circle was instead a sphere its volume would be " + str(sphere_vol(5)) + " units cubed. If our circle's radius is 6 units, this yields an area of " + str(circle_area(6)) + " units squared; a sphere with the same radius has a volume of " + str(sphere_vol(6)) + " units cubed. The average of the volumes of spheres with these two radii is " + str(avg_two_vols(5, 6)) + " units cubed. The average of the volumes of shperes with raddi 7 and 8 respectively is " + str(avg_two_vols(7, 8)) + " units cubed." + "\n" + "\n" + "Moving briefly on to triangles, given side lengths 3, 4, 5 - the area of this right triangle is " + str(tri_area(3, 4, 5)) + " units squared. This function also works for non right triangles such as one with side lenghts 6, 7, 8 - this hypothetical triangle's area would be " + str(tri_area(6, 7, 8)) + " units squared." + "\n" + "\n" + centre("Hey") + "\n" + right_align("You") + centre("This text is centred") + "\n" + right_align("This text is right aligned") + msg_box("Here's a message box") + "\n" + msg_box("and another")
#print output:
print str_manip.msg_box(output)
|
Immortalin/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/lib2to3/tests/data/py2_test_grammar.py | 285 | # Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
# NOTE: When you run this test as a script from the command line, you
# get warnings about certain hex/oct constants. Since those are
# issued by the parser, you can't suppress them by adding a
# filterwarnings() call to this module. Therefore, to shut up the
# regression test, the filterwarnings() call has been added to
# regrtest.py.
from test.test_support import run_unittest, check_syntax_error
import unittest
import sys
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
def testBackslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
self.assertEquals(x, 2, 'backslash for line continuation')
# Backslash does not means continuation in comments :\
x = 0
self.assertEquals(x, 0, 'backslash ending comment')
def testPlainIntegers(self):
self.assertEquals(0xff, 255)
self.assertEquals(0377, 255)
self.assertEquals(2147483647, 017777777777)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxint
if maxint == 2147483647:
self.assertEquals(-2147483647-1, -020000000000)
# XXX -2147483648
self.assert_(037777777777 > 0)
self.assert_(0xffffffff > 0)
for s in '2147483648', '040000000000', '0x100000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxint == 9223372036854775807:
self.assertEquals(-9223372036854775807-1, -01000000000000000000000)
self.assert_(01777777777777777777777 > 0)
self.assert_(0xffffffffffffffff > 0)
for s in '9223372036854775808', '02000000000000000000000', \
'0x10000000000000000':
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
else:
self.fail('Weird maxint value %r' % maxint)
def testLongIntegers(self):
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
def testFloats(self):
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
def testStringLiterals(self):
x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
self.assert_(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
self.assert_(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
self.assertEquals(x, y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''
self.assertEquals(x, y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"
self.assertEquals(x, y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'
self.assertEquals(x, y)
class GrammarTests(unittest.TestCase):
# single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
# file_input: (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
def testEvalInput(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
def testFuncdef(self):
### 'def' NAME parameters ':' suite
### parameters: '(' [varargslist] ')'
### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
### | ('**'|'*' '*') NAME)
### | fpdef ['=' test] (',' fpdef ['=' test])* [',']
### fpdef: NAME | '(' fplist ')'
### fplist: fpdef (',' fpdef)* [',']
### arglist: (argument ',')* (argument | *' test [',' '**' test] | '**' test)
### argument: [test '='] test # Really [keyword '='] test
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
def f4(two, (compound, (argument, list))): pass
def f5((compound, first), two): pass
self.assertEquals(f2.func_code.co_varnames, ('one_argument',))
self.assertEquals(f3.func_code.co_varnames, ('two', 'arguments'))
if sys.platform.startswith('java'):
self.assertEquals(f4.func_code.co_varnames,
('two', '(compound, (argument, list))', 'compound', 'argument',
'list',))
self.assertEquals(f5.func_code.co_varnames,
('(compound, first)', 'two', 'compound', 'first'))
else:
self.assertEquals(f4.func_code.co_varnames,
('two', '.1', 'compound', 'argument', 'list'))
self.assertEquals(f5.func_code.co_varnames,
('.0', 'two', 'compound', 'first'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
def v3(a, (b, c), *rest): return a, b, c, rest
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
f4(1, (2, (3, 4)))
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
v3(1,(2,3))
v3(1,(2,3),4)
v3(1,(2,3),4,5,6,7,8,9,0)
# ceval unpacks the formal arguments into the first argcount names;
# thus, the names nested inside tuples must appear after these names.
if sys.platform.startswith('java'):
self.assertEquals(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
else:
self.assertEquals(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
self.assertEquals(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
def d31v((x)): pass
d31v(1)
def d32v((x,)): pass
d32v((1,))
# keyword arguments after *arglist
def f(*args, **kwargs):
return args, kwargs
self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
def testLambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEquals(l1(), 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0L]]
self.assertEquals(l3(), [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
self.assertEquals(l4(), 1)
l5 = lambda x, y, z=2: x + y + z
self.assertEquals(l5(1, 2), 5)
self.assertEquals(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
check_syntax_error(self, "lambda (None,): None")
### stmt: simple_stmt | compound_stmt
# Tested below
def testSimpleStmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
# verify statements that end with semi-colons
x = 1; pass; del x;
foo()
### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
# Tested below
def testExprStmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
def testPrintStmt(self):
# 'print' (test ',')* [test]
import StringIO
# Can't test printing to real stdout without comparing output
# which is not available in unittest.
save_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
print 1, 2, 3
print 1, 2, 3,
print
print 0 or 1, 0 or 1,
print 0 or 1
# 'print' '>>' test ','
print >> sys.stdout, 1, 2, 3
print >> sys.stdout, 1, 2, 3,
print >> sys.stdout
print >> sys.stdout, 0 or 1, 0 or 1,
print >> sys.stdout, 0 or 1
# test printing to an instance
class Gulp:
def write(self, msg): pass
gulp = Gulp()
print >> gulp, 1, 2, 3
print >> gulp, 1, 2, 3,
print >> gulp
print >> gulp, 0 or 1, 0 or 1,
print >> gulp, 0 or 1
# test print >> None
def driver():
oldstdout = sys.stdout
sys.stdout = Gulp()
try:
tellme(Gulp())
tellme()
finally:
sys.stdout = oldstdout
# we should see this once
def tellme(file=sys.stdout):
print >> file, 'hello world'
driver()
# we should not see this at all
def tellme(file=None):
print >> file, 'goodbye universe'
driver()
self.assertEqual(sys.stdout.getvalue(), '''\
1 2 3
1 2 3
1 1 1
1 2 3
1 2 3
1 1 1
hello world
''')
sys.stdout = save_stdout
# syntax errors
check_syntax_error(self, 'print ,')
check_syntax_error(self, 'print >> x,')
def testDelStmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
xyz = x, y, z
del abc
del x, y, (z, xyz)
def testPassStmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
def testBreakStmt(self):
# 'break'
while 1: break
def testContinueStmt(self):
# 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
if msg != "ok":
self.fail(msg)
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "ok"
if msg != "ok":
self.fail(msg)
def test_break_continue_loop(self):
# This test warrants an explanation. It is a test specifically for SF bugs
# #463359 and #462937. The bug is that a 'break' statement executed or
# exception raised inside a try/except inside a loop, *after* a continue
# statement has been executed in that loop, will cause the wrong number of
# arguments to be popped off the stack and the instruction pointer reset to
# a very small number (usually 0.) Because of this, the following test
# *must* written as a function, and the tracking vars *must* be function
# arguments with default values. Otherwise, the test will loop and loop.
def test_inner(extra_burning_oil = 1, count=0):
big_hippo = 2
while big_hippo:
count += 1
try:
if extra_burning_oil and big_hippo == 1:
extra_burning_oil -= 1
break
big_hippo -= 1
continue
except:
raise
if count > 2 or big_hippo <> 1:
self.fail("continue then break in try/except in loop broken!")
test_inner()
def testReturn(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
check_syntax_error(self, "class foo:return 1")
def testYield(self):
check_syntax_error(self, "class foo:yield 1")
def testRaise(self):
# 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
def testImport(self):
# 'import' dotted_as_names
import sys
import time, sys
# 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
from time import time
from time import (time)
# not testable inside a function, but already done at top of the module
# from sys import *
from sys import path, argv
from sys import (path, argv)
from sys import (path, argv,)
def testGlobal(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
def testExec(self):
# 'exec' expr ['in' expr [',' expr]]
z = None
del z
exec 'z=1+1\n'
if z != 2: self.fail('exec \'z=1+1\'\\n')
del z
exec 'z=1+1'
if z != 2: self.fail('exec \'z=1+1\'')
z = None
del z
import types
if hasattr(types, "UnicodeType"):
exec r"""if 1:
exec u'z=1+1\n'
if z != 2: self.fail('exec u\'z=1+1\'\\n')
del z
exec u'z=1+1'
if z != 2: self.fail('exec u\'z=1+1\'')"""
g = {}
exec 'z = 1' in g
if g.has_key('__builtins__'): del g['__builtins__']
if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
g = {}
l = {}
import warnings
warnings.filterwarnings("ignore", "global statement", module="<string>")
exec 'global a; a = 1; b = 2' in g, l
if g.has_key('__builtins__'): del g['__builtins__']
if l.has_key('__builtins__'): del l['__builtins__']
if (g, l) != ({'a':1}, {'b':2}):
self.fail('exec ... in g (%s), l (%s)' %(g,l))
def testAssert(self):
# assert_stmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
try:
assert 0, "msg"
except AssertionError, e:
self.assertEquals(e.args[0], "msg")
else:
if __debug__:
self.fail("AssertionError not raised by assert 0")
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
def testIf(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
def testWhile(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
# Issue1920: "while 0" is optimized away,
# ensure that the "else" clause is still present.
x = 0
while 0:
x = 1
else:
x = 2
self.assertEquals(x, 2)
def testFor(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285:
self.fail('for over growing sequence')
result = []
for x, in [(1,), (2,), (3,)]:
result.append(x)
self.assertEqual(result, [1, 2, 3])
def testTry(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr [('as' | ',') expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError, msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError), msg: pass
try: pass
finally: pass
def testSuite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
def testTest(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
def testComparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 <> 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
def testBinaryMaskOps(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
def testShiftOps(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
def testAdditiveOps(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
def testMultiplicativeOps(self):
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
def testUnaryOps(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
def testSelectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
# A rough test of SF bug 1333982. http://python.org/sf/1333982
# The testing here is fairly incomplete.
# Test cases should include: commas with 1 and 2 colons
d = {}
d[1] = 1
d[1,] = 2
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
L.sort()
self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
def testAtoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
### dictmaker: test ':' test (',' test ':' test)* [',']
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = `x`
x = `1 or 2 or 3`
self.assertEqual(`1,2`, '(1, 2)')
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
def testClassdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
def class_decorator(x):
x.decorated = True
return x
@class_decorator
class G:
pass
self.assertEqual(G.decorated, True)
def testListcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
self.assertEqual([(i, s) for i in nums for s in strs],
[(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
(2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
(4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
[(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
(3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
(5, 'Banana'), (5, 'Coconut')])
self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
return [None < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
def test_nested_front():
self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
[[1, 2], [3, 4], [5, 6]])
test_nested_front()
check_syntax_error(self, "[i, s for i in nums for s in strs]")
check_syntax_error(self, "[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
x = [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
def testGenexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(g.next(), [x for x in range(10)])
try:
g.next()
self.fail('should produce StopIteration exception')
except StopIteration:
pass
a = 1
try:
g = (a for d in a)
g.next()
self.fail('should produce TypeError')
except TypeError:
pass
self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
a = [x for x in range(10)]
b = (x for x in (y for y in a))
self.assertEqual(sum(b), sum([x for x in range(10)]))
self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
def testComprehensionSpecials(self):
# test for outmost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
# This should hold, since we're only precomputing outmost iterable.
x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
x = 5; t = True;
self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
# Grammar allows multiple adjacent 'if's in listcomps and genexps,
# even though it's silly. Make sure it works (ifelse broke this.)
self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
# verify unpacking single element tuples in listcomp/genexp.
self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
def test_with_statement(self):
class manager(object):
def __enter__(self):
return (1, 2)
def __exit__(self, *args):
pass
with manager():
pass
with manager() as x:
pass
with manager() as (x, y):
pass
with manager(), manager():
pass
with manager() as x, manager() as y:
pass
with manager() as x, manager():
pass
def testIfElseExpr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
print x
return ret
self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
self.assertEqual((5 and 6 if 0 else 1), 1)
self.assertEqual(((5 and 6) if 0 else 1), 1)
self.assertEqual((5 and (6 if 1 else 1)), 6)
self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
self.assertEqual((not 5 if 1 else 1), False)
self.assertEqual((not 5 if 0 else 1), 1)
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_main():
run_unittest(TokenTests, GrammarTests)
if __name__ == '__main__':
test_main()
|
keithlee/shakeAppPyDev | refs/heads/master | djangoappengine/db/base.py | 48 | from ..boot import DATA_ROOT
from ..utils import appid, on_production_server
from .creation import DatabaseCreation
from .stubs import stub_manager
from django.db.backends.util import format_number
from djangotoolbox.db.base import NonrelDatabaseFeatures, \
NonrelDatabaseOperations, NonrelDatabaseWrapper, NonrelDatabaseClient, \
NonrelDatabaseValidation, NonrelDatabaseIntrospection
from google.appengine.ext.db.metadata import get_kinds, get_namespaces
from google.appengine.api.datastore import Query, Delete
from google.appengine.api.namespace_manager import set_namespace
import logging
import os
import shutil
# Default on-disk locations for the dev-server datastore stub files, all
# rooted under the project's DATA_ROOT. Each entry can be overridden via
# the database settings dict (see get_datastore_paths below).
DATASTORE_PATHS = {
    'datastore_path': os.path.join(DATA_ROOT, 'datastore'),
    'blobstore_path': os.path.join(DATA_ROOT, 'blobstore'),
    #'rdbms_sqlite_path': os.path.join(DATA_ROOT, 'rdbms'),
    'prospective_search_path': os.path.join(DATA_ROOT, 'prospective-search'),
}
def get_datastore_paths(options):
    """Return the stub file paths, preferring values given in *options*.

    Every key of DATASTORE_PATHS is looked up in ``options`` (typically a
    database settings dict); keys absent from ``options`` fall back to the
    DATA_ROOT-based defaults.
    """
    return dict((key, options.get(key, default))
                for key, default in DATASTORE_PATHS.items())
def destroy_datastore(paths):
    """Destroys the appengine datastore at the specified paths."""
    # Each value may be a file or a directory created by the dev-server
    # stubs; remove whichever form exists. Empty/None entries are skipped.
    for path in paths.values():
        if not path:
            continue
        try:
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
        except OSError, error:
            # errno 2 (ENOENT) means the path is already gone -- fine.
            # Anything else is logged but deliberately not re-raised:
            # clearing the datastore is best-effort.
            if error.errno != 2:
                logging.error("Failed to clear datastore: %s" % error)
class DatabaseFeatures(NonrelDatabaseFeatures):
    # Capability flags for the datastore backend: 0 is a valid primary
    # key value, and dict values can be stored natively.
    allows_primary_key_0 = True
    supports_dicts = True
class DatabaseOperations(NonrelDatabaseOperations):
    # SQL compiler module living next to this backend package.
    compiler_module = __name__.rsplit('.', 1)[0] + '.compiler'
    # Fallback precision when a DecimalField declares no max_digits.
    DEFAULT_MAX_DIGITS = 16
    def value_to_db_decimal(self, value, max_digits, decimal_places):
        """Convert a Decimal to its datastore string representation.

        The integer part is zero-padded up to ``max_digits - decimal_places``
        so that equal-width strings compare in numeric order; the sign is
        re-attached at the end. Returns None for a None value.
        """
        if value is None:
            return None
        # Work on the absolute value; remember the sign for the end.
        if value.is_signed():
            sign = u'-'
            value = abs(value)
        else:
            sign = u''
        if max_digits is None:
            max_digits = self.DEFAULT_MAX_DIGITS
        if decimal_places is None:
            value = unicode(value)
        else:
            value = format_number(value, max_digits, decimal_places)
        decimal_places = decimal_places or 0
        # n = number of digits before the decimal point (whole length
        # when there is no point at all).
        n = value.find('.')
        if n < 0:
            n = len(value)
        if n < max_digits - decimal_places:
            value = u"0" * (max_digits - decimal_places - n) + value
        return sign + value
    def sql_flush(self, style, tables, sequences):
        # Nonrel backend: wipe the datastore directly and hand Django an
        # empty SQL statement list (there is no SQL to execute).
        self.connection.flush()
        return []
class DatabaseClient(NonrelDatabaseClient):
    # No interactive shell client exists for the datastore.
    pass
class DatabaseValidation(NonrelDatabaseValidation):
    # Nothing datastore-specific beyond the nonrel base validation.
    pass
class DatabaseIntrospection(NonrelDatabaseIntrospection):
    def table_names(self):
        """Returns a list of names of all tables that exist in the database."""
        # Kinds are exposed via the datastore's __kind__ metadata
        # entities; the kind name is the metadata entity key's name.
        return [kind.key().name() for kind in Query(kind='__kind__').Run()]
class DatabaseWrapper(NonrelDatabaseWrapper):
    """Django database wrapper for the App Engine datastore backend."""
    def __init__(self, *args, **kwds):
        super(DatabaseWrapper, self).__init__(*args, **kwds)
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.validation = DatabaseValidation(self)
        self.introspection = DatabaseIntrospection(self)
        # Remote-api configuration, overridable per-database in settings.
        options = self.settings_dict
        self.remote_app_id = options.get('REMOTE_APP_ID', appid)
        self.domain = options.get('DOMAIN', 'appspot.com')
        self.remote_api_path = options.get('REMOTE_API_PATH', None)
        self.secure_remote_api = options.get('SECURE_REMOTE_API', True)
        remote = options.get('REMOTE', False)
        if on_production_server:
            # Never use remote stubs when already running on App Engine.
            remote = False
        if remote:
            stub_manager.setup_remote_stubs(self)
        else:
            stub_manager.setup_stubs(self)
    def flush(self):
        """Helper function to remove the current datastore and re-open the stubs"""
        if stub_manager.active_stubs == 'remote':
            # Flushing over remote-api wipes the *production* datastore,
            # so require the user to retype a random 4-letter code first.
            import random
            import string
            code = ''.join([random.choice(string.ascii_letters) for x in range(4)])
            print '\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
            print "Warning! You're about to delete the *production* datastore!"
            print 'Only models defined in your INSTALLED_APPS can be removed!'
            print 'If you want to clear the whole datastore you have to use the ' \
                  'datastore viewer in the dashboard. Also, in order to delete all '\
                  'unneeded indexes you have to run appcfg.py vacuum_indexes.'
            print 'In order to proceed you have to enter the following code:'
            print code
            response = raw_input('Repeat: ')
            if code == response:
                print 'Deleting...'
                delete_all_entities()
                print "Datastore flushed! Please check your dashboard's " \
                      'datastore viewer for any remaining entities and remove ' \
                      'all unneeded indexes with appcfg.py vacuum_indexes.'
            else:
                print 'Aborting'
                exit()
        elif stub_manager.active_stubs == 'test':
            # Unit-test run: recycle the in-memory test stubs.
            stub_manager.deactivate_test_stubs()
            stub_manager.activate_test_stubs()
        else:
            # Local dev server: delete the stub files on disk and set up
            # fresh local stubs.
            destroy_datastore(get_datastore_paths(self.settings_dict))
            stub_manager.setup_local_stubs(self)
def delete_all_entities():
    """Deletes every entity in every namespace, skipping internal kinds.

    Kinds whose names start with '__' are datastore metadata/statistics
    and must not be removed.
    """
    for namespace in get_namespaces():
        set_namespace(namespace)
        for kind in get_kinds():
            if kind.startswith('__'):
                continue
            # Delete keys in batches of 200 until the kind is empty.
            while True:
                data = Query(kind=kind, keys_only=True).Get(200)
                if not data:
                    break
                Delete(data)
|
merc-devel/merc | refs/heads/master | merc/features/rfc1459/lusers.py | 1 | from merc import feature
from merc import message
class LUsersFeature(feature.Feature):
  # Dotted module path; identifies this feature to merc's registry.
  NAME = __name__
# Module-level hook used by merc's feature loader to install the feature.
install = LUsersFeature.install
class LUserUnknown(message.Reply):
  """Numeric 253 (RPL_LUSERUNKNOWN): number of unknown connections."""
  NAME = "253"
  FORCE_TRAILING = True  # the reason is emitted as the trailing parameter
  MIN_ARITY = 2
  def __init__(self, num_unknown, reason="unknown connections", *args):
    # *args absorbs any extra params when the reply is parsed; ignored.
    self.num_unknown = num_unknown
    self.reason = reason
  def as_reply_params(self):
    # Parameters sent after the numeric and the target nickname.
    return [self.num_unknown, self.reason]
class LUserChannels(message.Reply):
  """Numeric 254 (RPL_LUSERCHANNELS): number of channels formed."""
  NAME = "254"
  FORCE_TRAILING = True  # the reason is emitted as the trailing parameter
  MIN_ARITY = 2
  def __init__(self, num_channels, reason="channels formed", *args):
    # *args absorbs any extra params when the reply is parsed; ignored.
    self.num_channels = num_channels
    self.reason = reason
  def as_reply_params(self):
    # Parameters sent after the numeric and the target nickname.
    return [self.num_channels, self.reason]
class LUserMe(message.Reply):
  """Numeric 255 (RPL_LUSERME): free-form client/server count line."""
  NAME = "255"
  FORCE_TRAILING = True  # the reason is emitted as the trailing parameter
  MIN_ARITY = 1
  def __init__(self, reason, *args):
    # *args absorbs any extra params when the reply is parsed; ignored.
    self.reason = reason
  def as_reply_params(self):
    return [self.reason]
@LUsersFeature.register_user_command
class LUsers(message.Command):
  """LUSERS command: sends the server's user/channel/server statistics."""
  NAME = "LUSERS"
  MIN_ARITY = 0
  @message.Command.requires_registration
  def handle_for(self, app, user, prefix):
    # The user/oper count numerics are produced by hooks so that other
    # features can contribute them.
    for hook_name in ("server.luser.user", "server.luser.oper"):
      app.run_hooks(hook_name, user)
    user.send_reply(LUserUnknown(str(0)))
    user.send_reply(LUserChannels(str(app.channels.count())))
    # Count clients attached to this server and directly-linked servers.
    local_name = app.network.local.name
    num_clients = sum(u.server_name == local_name for u in app.users.all())
    num_servers = len(list(app.network.neighbors()))
    user.send_reply(LUserMe(
        "I have {} clients and {} servers".format(num_clients, num_servers)))
@LUsersFeature.hook("user.welcome")
def send_lusers_on_welcome(app, user):
  # Replay an LUSERS command on the user's behalf so the statistics are
  # delivered automatically right after registration.
  user.on_message(app, user.hostmask, LUsers())
|
yencarnacion/jaikuengine | refs/heads/master | .google_appengine/lib/django-1.3/tests/regressiontests/defer_regress/tests.py | 53 | from operator import attrgetter
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.backends.db import SessionStore
from django.db import connection
from django.db.models.loading import cache
from django.test import TestCase
from models import ResolveThis, Item, RelatedItem, Child, Leaf
class DeferRegressionTest(TestCase):
    """Regression tests for QuerySet.defer()/only() covering tickets
    #10695, #10710, #10733, #10738, #11936 and #12163."""

    def test_basic(self):
        # Deferred fields should really be deferred and not accidentally use
        # the field's default value just because they aren't passed to __init__
        Item.objects.create(name="first", value=42)
        obj = Item.objects.only("name", "other_value").get(name="first")

        # Accessing "name" doesn't trigger a new database query. Accessing
        # "value" or "text" should.
        def test():
            self.assertEqual(obj.name, "first")
            self.assertEqual(obj.other_value, 0)
        self.assertNumQueries(0, test)

        def test():
            self.assertEqual(obj.value, 42)
        self.assertNumQueries(1, test)

        def test():
            self.assertEqual(obj.text, "xyzzy")
        self.assertNumQueries(1, test)

        # The deferred field is now cached on the instance, so a second
        # access must not query again.
        def test():
            self.assertEqual(obj.text, "xyzzy")
        self.assertNumQueries(0, test)

        # Regression test for #10695. Make sure different instances don't
        # inadvertently share data in the deferred descriptor objects.
        i = Item.objects.create(name="no I'm first", value=37)
        items = Item.objects.only("value").order_by("-value")
        self.assertEqual(items[0].name, "first")
        self.assertEqual(items[1].name, "no I'm first")

        # Deferring a foreign key keeps the raw id accessible and still
        # resolves the related object on demand.
        RelatedItem.objects.create(item=i)
        r = RelatedItem.objects.defer("item").get()
        self.assertEqual(r.item_id, i.id)
        self.assertEqual(r.item, i)

        # Some further checks for select_related() and inherited model
        # behaviour (regression for #10710).
        c1 = Child.objects.create(name="c1", value=42)
        c2 = Child.objects.create(name="c2", value=37)
        Leaf.objects.create(name="l1", child=c1, second_child=c2)

        obj = Leaf.objects.only("name", "child").select_related()[0]
        self.assertEqual(obj.child.name, "c1")

        self.assertQuerysetEqual(
            Leaf.objects.select_related().only("child__name", "second_child__name"), [
                "l1",
            ],
            attrgetter("name")
        )

        # Models instances with deferred fields should still return the same
        # content types as their non-deferred versions (bug #10738).
        ctype = ContentType.objects.get_for_model
        c1 = ctype(Item.objects.all()[0])
        c2 = ctype(Item.objects.defer("name")[0])
        c3 = ctype(Item.objects.only("name")[0])
        self.assertTrue(c1 is c2 is c3)

        # Regression for #10733 - only() can be used on a model with two
        # foreign keys.
        results = Leaf.objects.only("name", "child", "second_child").select_related()
        self.assertEqual(results[0].child.name, "c1")
        self.assertEqual(results[0].second_child.name, "c2")

        results = Leaf.objects.only("name", "child", "second_child", "child__name", "second_child__name").select_related()
        self.assertEqual(results[0].child.name, "c1")
        self.assertEqual(results[0].second_child.name, "c2")

        # Test for #12163 - Pickling error saving session with unsaved model
        # instances.
        SESSION_KEY = '2b1189a188b44ad18c35e1baac6ceead'

        item = Item()
        item._deferred = False
        s = SessionStore(SESSION_KEY)
        s.clear()
        s["item"] = item
        s.save()

        # Re-load the session and make sure the round-tripped instance is
        # not marked as deferred.
        s = SessionStore(SESSION_KEY)
        s.modified = True
        s.save()

        i2 = s["item"]
        self.assertFalse(i2._deferred)

        # Regression for #11936 - loading.get_models should not return deferred
        # models by default.
        klasses = sorted(
            cache.get_models(cache.get_app("defer_regress")),
            key=lambda klass: klass.__name__
        )
        self.assertEqual(
            klasses, [
                Child,
                Item,
                Leaf,
                RelatedItem,
                ResolveThis,
            ]
        )

        # With include_deferred=True every auto-generated deferred proxy
        # class is returned as well.
        klasses = sorted(
            map(
                attrgetter("__name__"),
                cache.get_models(
                    cache.get_app("defer_regress"), include_deferred=True
                ),
            )
        )
        self.assertEqual(
            klasses, [
                "Child",
                "Child_Deferred_value",
                "Item",
                "Item_Deferred_name",
                "Item_Deferred_name_other_value_text",
                "Item_Deferred_name_other_value_value",
                "Item_Deferred_other_value_text_value",
                "Item_Deferred_text_value",
                "Leaf",
                "Leaf_Deferred_child_id_second_child_id_value",
                "Leaf_Deferred_name_value",
                "Leaf_Deferred_second_child_value",
                "Leaf_Deferred_value",
                "RelatedItem",
                "RelatedItem_Deferred_",
                "RelatedItem_Deferred_item_id",
                "ResolveThis",
            ]
        )

    def test_resolve_columns(self):
        # Deferring a column must still resolve the remaining columns
        # correctly (the created instance itself is intentionally unused).
        rt = ResolveThis.objects.create(num=5.0, name='Foobar')
        qs = ResolveThis.objects.defer('num')
        self.assertEqual(1, qs.count())
        self.assertEqual('Foobar', qs[0].name)
|
aequitas/home-assistant | refs/heads/dev | homeassistant/components/rainmachine/__init__.py | 2 | """Support for RainMachine devices."""
import asyncio
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_BINARY_SENSORS, CONF_IP_ADDRESS, CONF_PASSWORD,
CONF_PORT, CONF_SCAN_INTERVAL, CONF_SENSORS, CONF_SSL,
CONF_MONITORED_CONDITIONS, CONF_SWITCHES)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.service import verify_domain_control
from .config_flow import configured_instances
from .const import (
DATA_CLIENT, DEFAULT_PORT, DEFAULT_SCAN_INTERVAL, DEFAULT_SSL, DOMAIN,
PROVISION_SETTINGS, RESTRICTIONS_CURRENT, RESTRICTIONS_UNIVERSAL)
_LOGGER = logging.getLogger(__name__)
DATA_LISTENER = 'listener'
PROGRAM_UPDATE_TOPIC = '{0}_program_update'.format(DOMAIN)
SENSOR_UPDATE_TOPIC = '{0}_data_update'.format(DOMAIN)
ZONE_UPDATE_TOPIC = '{0}_zone_update'.format(DOMAIN)
CONF_CONTROLLERS = 'controllers'
CONF_PROGRAM_ID = 'program_id'
CONF_SECONDS = 'seconds'
CONF_ZONE_ID = 'zone_id'
CONF_ZONE_RUN_TIME = 'zone_run_time'
DEFAULT_ATTRIBUTION = 'Data provided by Green Electronics LLC'
DEFAULT_ICON = 'mdi:water'
DEFAULT_ZONE_RUN = 60 * 10
TYPE_FLOW_SENSOR = 'flow_sensor'
TYPE_FLOW_SENSOR_CLICK_M3 = 'flow_sensor_clicks_cubic_meter'
TYPE_FLOW_SENSOR_CONSUMED_LITERS = 'flow_sensor_consumed_liters'
TYPE_FLOW_SENSOR_START_INDEX = 'flow_sensor_start_index'
TYPE_FLOW_SENSOR_WATERING_CLICKS = 'flow_sensor_watering_clicks'
TYPE_FREEZE = 'freeze'
TYPE_FREEZE_PROTECTION = 'freeze_protection'
TYPE_FREEZE_TEMP = 'freeze_protect_temp'
TYPE_HOT_DAYS = 'extra_water_on_hot_days'
TYPE_HOURLY = 'hourly'
TYPE_MONTH = 'month'
TYPE_RAINDELAY = 'raindelay'
TYPE_RAINSENSOR = 'rainsensor'
TYPE_WEEKDAY = 'weekday'
BINARY_SENSORS = {
TYPE_FLOW_SENSOR: ('Flow Sensor', 'mdi:water-pump'),
TYPE_FREEZE: ('Freeze Restrictions', 'mdi:cancel'),
TYPE_FREEZE_PROTECTION: ('Freeze Protection', 'mdi:weather-snowy'),
TYPE_HOT_DAYS: ('Extra Water on Hot Days', 'mdi:thermometer-lines'),
TYPE_HOURLY: ('Hourly Restrictions', 'mdi:cancel'),
TYPE_MONTH: ('Month Restrictions', 'mdi:cancel'),
TYPE_RAINDELAY: ('Rain Delay Restrictions', 'mdi:cancel'),
TYPE_RAINSENSOR: ('Rain Sensor Restrictions', 'mdi:cancel'),
TYPE_WEEKDAY: ('Weekday Restrictions', 'mdi:cancel'),
}
SENSORS = {
TYPE_FLOW_SENSOR_CLICK_M3: (
'Flow Sensor Clicks', 'mdi:water-pump', 'clicks/m^3'),
TYPE_FLOW_SENSOR_CONSUMED_LITERS: (
'Flow Sensor Consumed Liters', 'mdi:water-pump', 'liter'),
TYPE_FLOW_SENSOR_START_INDEX: (
'Flow Sensor Start Index', 'mdi:water-pump', None),
TYPE_FLOW_SENSOR_WATERING_CLICKS: (
'Flow Sensor Clicks', 'mdi:water-pump', 'clicks'),
TYPE_FREEZE_TEMP: ('Freeze Protect Temperature', 'mdi:thermometer', '°C'),
}
BINARY_SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSORS)):
vol.All(cv.ensure_list, [vol.In(BINARY_SENSORS)])
})
SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)):
vol.All(cv.ensure_list, [vol.In(SENSORS)])
})
SERVICE_ALTER_PROGRAM = vol.Schema({
vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_ALTER_ZONE = vol.Schema({
vol.Required(CONF_ZONE_ID): cv.positive_int,
})
SERVICE_PAUSE_WATERING = vol.Schema({
vol.Required(CONF_SECONDS): cv.positive_int,
})
SERVICE_START_PROGRAM_SCHEMA = vol.Schema({
vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_START_ZONE_SCHEMA = vol.Schema({
vol.Required(CONF_ZONE_ID): cv.positive_int,
vol.Optional(CONF_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN):
cv.positive_int,
})
SERVICE_STOP_PROGRAM_SCHEMA = vol.Schema({
vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_STOP_ZONE_SCHEMA = vol.Schema({
vol.Required(CONF_ZONE_ID): cv.positive_int,
})
SWITCH_SCHEMA = vol.Schema({vol.Optional(CONF_ZONE_RUN_TIME): cv.positive_int})
CONTROLLER_SCHEMA = vol.Schema({
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL):
cv.time_period,
vol.Optional(CONF_BINARY_SENSORS, default={}): BINARY_SENSOR_SCHEMA,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
vol.Optional(CONF_SWITCHES, default={}): SWITCH_SCHEMA,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_CONTROLLERS):
vol.All(cv.ensure_list, [CONTROLLER_SCHEMA]),
}),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up the RainMachine component from YAML configuration.

    Initializes the per-domain storage and, for every YAML-configured
    controller not already set up, starts an import config flow.
    """
    domain_data = hass.data[DOMAIN] = {}
    domain_data[DATA_CLIENT] = {}
    domain_data[DATA_LISTENER] = {}

    if DOMAIN not in config:
        # Nothing configured via YAML; config entries handle everything.
        return True

    for controller in config[DOMAIN][CONF_CONTROLLERS]:
        # Skip controllers that already have a config entry.
        if controller[CONF_IP_ADDRESS] in configured_instances(hass):
            continue
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN,
                context={'source': SOURCE_IMPORT},
                data=controller,
            )
        )

    return True
async def async_setup_entry(hass, config_entry):
    """Set up RainMachine as config entry."""
    from regenmaschine import login
    from regenmaschine.errors import RainMachineError

    _verify_domain_control = verify_domain_control(hass, DOMAIN)
    websession = aiohttp_client.async_get_clientsession(hass)

    try:
        # Authenticate against the controller and do a first data refresh;
        # any failure defers setup via ConfigEntryNotReady (HA retries).
        client = await login(
            config_entry.data[CONF_IP_ADDRESS],
            config_entry.data[CONF_PASSWORD],
            websession,
            port=config_entry.data[CONF_PORT],
            ssl=config_entry.data[CONF_SSL])
        rainmachine = RainMachine(
            client,
            # Fall back to all known conditions when none were configured.
            config_entry.data.get(CONF_BINARY_SENSORS, {}).get(
                CONF_MONITORED_CONDITIONS, list(BINARY_SENSORS)),
            config_entry.data.get(CONF_SENSORS, {}).get(
                CONF_MONITORED_CONDITIONS, list(SENSORS)),
            config_entry.data.get(CONF_ZONE_RUN_TIME, DEFAULT_ZONE_RUN))
        await rainmachine.async_update()
    except RainMachineError as err:
        _LOGGER.error('An error occurred: %s', err)
        raise ConfigEntryNotReady

    hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = rainmachine

    # Forward setup to each platform that exposes RainMachine entities.
    for component in ('binary_sensor', 'sensor', 'switch'):
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(
                config_entry, component))

    async def refresh(event_time):
        """Refresh RainMachine sensor data."""
        _LOGGER.debug('Updating RainMachine sensor data')
        await rainmachine.async_update()
        async_dispatcher_send(hass, SENSOR_UPDATE_TOPIC)

    # Poll on the configured interval; keep the unsubscribe callback so
    # async_unload_entry can cancel it.
    hass.data[DOMAIN][DATA_LISTENER][
        config_entry.entry_id] = async_track_time_interval(
            hass,
            refresh,
            timedelta(seconds=config_entry.data[CONF_SCAN_INTERVAL]))

    # Service handlers below are guarded so only callers with access to
    # this domain's config entries may invoke them.
    @_verify_domain_control
    async def disable_program(call):
        """Disable a program."""
        await rainmachine.client.programs.disable(
            call.data[CONF_PROGRAM_ID])
        async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)

    @_verify_domain_control
    async def disable_zone(call):
        """Disable a zone."""
        await rainmachine.client.zones.disable(call.data[CONF_ZONE_ID])
        async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)

    @_verify_domain_control
    async def enable_program(call):
        """Enable a program."""
        await rainmachine.client.programs.enable(call.data[CONF_PROGRAM_ID])
        async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)

    @_verify_domain_control
    async def enable_zone(call):
        """Enable a zone."""
        await rainmachine.client.zones.enable(call.data[CONF_ZONE_ID])
        async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)

    @_verify_domain_control
    async def pause_watering(call):
        """Pause watering for a set number of seconds."""
        await rainmachine.client.watering.pause_all(call.data[CONF_SECONDS])
        async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)

    @_verify_domain_control
    async def start_program(call):
        """Start a particular program."""
        await rainmachine.client.programs.start(call.data[CONF_PROGRAM_ID])
        async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)

    @_verify_domain_control
    async def start_zone(call):
        """Start a particular zone for a certain amount of time."""
        await rainmachine.client.zones.start(
            call.data[CONF_ZONE_ID], call.data[CONF_ZONE_RUN_TIME])
        async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)

    @_verify_domain_control
    async def stop_all(call):
        """Stop all watering."""
        await rainmachine.client.watering.stop_all()
        async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)

    @_verify_domain_control
    async def stop_program(call):
        """Stop a program."""
        await rainmachine.client.programs.stop(call.data[CONF_PROGRAM_ID])
        async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)

    @_verify_domain_control
    async def stop_zone(call):
        """Stop a zone."""
        await rainmachine.client.zones.stop(call.data[CONF_ZONE_ID])
        async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)

    @_verify_domain_control
    async def unpause_watering(call):
        """Unpause watering."""
        await rainmachine.client.watering.unpause_all()
        async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)

    # Register every service with its validation schema.
    for service, method, schema in [
            ('disable_program', disable_program, SERVICE_ALTER_PROGRAM),
            ('disable_zone', disable_zone, SERVICE_ALTER_ZONE),
            ('enable_program', enable_program, SERVICE_ALTER_PROGRAM),
            ('enable_zone', enable_zone, SERVICE_ALTER_ZONE),
            ('pause_watering', pause_watering, SERVICE_PAUSE_WATERING),
            ('start_program', start_program, SERVICE_START_PROGRAM_SCHEMA),
            ('start_zone', start_zone, SERVICE_START_ZONE_SCHEMA),
            ('stop_all', stop_all, {}),
            ('stop_program', stop_program, SERVICE_STOP_PROGRAM_SCHEMA),
            ('stop_zone', stop_zone, SERVICE_STOP_ZONE_SCHEMA),
            ('unpause_watering', unpause_watering, {}),
    ]:
        hass.services.async_register(DOMAIN, service, method, schema=schema)

    return True
async def async_unload_entry(hass, config_entry):
    """Unload a RainMachine config entry."""
    # (Docstring previously said "OpenUV" -- a copy/paste slip from
    # another integration.)
    hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)

    # Cancel the periodic refresh registered in async_setup_entry.
    remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(
        config_entry.entry_id)
    remove_listener()

    # Tear down every platform that was forwarded during setup.
    for component in ('binary_sensor', 'sensor', 'switch'):
        await hass.config_entries.async_forward_entry_unload(
            config_entry, component)

    return True
class RainMachine:
    """Define a generic RainMachine object.

    Wraps the regenmaschine client and caches the controller data that
    the sensor/binary-sensor platforms read from ``self.data``.
    """

    def __init__(
            self, client, binary_sensor_conditions, sensor_conditions,
            default_zone_runtime):
        """Initialize."""
        self.binary_sensor_conditions = binary_sensor_conditions
        self.client = client
        # Maps API operation name -> latest response payload.
        self.data = {}
        self.default_zone_runtime = default_zone_runtime
        self.device_mac = self.client.mac
        self.sensor_conditions = sensor_conditions

    async def async_update(self):
        """Update sensor/binary sensor data."""
        from regenmaschine.errors import RainMachineError

        # Only hit the API endpoints that a monitored condition needs.
        tasks = {}
        if (TYPE_FLOW_SENSOR in self.binary_sensor_conditions
                or any(c in self.sensor_conditions
                       for c in (TYPE_FLOW_SENSOR_CLICK_M3,
                                 TYPE_FLOW_SENSOR_CONSUMED_LITERS,
                                 TYPE_FLOW_SENSOR_START_INDEX,
                                 TYPE_FLOW_SENSOR_WATERING_CLICKS))):
            tasks[PROVISION_SETTINGS] = self.client.provisioning.settings()
        if any(c in self.binary_sensor_conditions
               for c in (TYPE_FREEZE, TYPE_HOURLY, TYPE_MONTH, TYPE_RAINDELAY,
                         TYPE_RAINSENSOR, TYPE_WEEKDAY)):
            tasks[RESTRICTIONS_CURRENT] = self.client.restrictions.current()
        if (any(c in self.binary_sensor_conditions
                for c in (TYPE_FREEZE_PROTECTION, TYPE_HOT_DAYS))
                or TYPE_FREEZE_TEMP in self.sensor_conditions):
            tasks[RESTRICTIONS_UNIVERSAL] = (
                self.client.restrictions.universal())

        # Run all requests concurrently; a failure of one endpoint must not
        # prevent the others from updating (return_exceptions=True).
        results = await asyncio.gather(*tasks.values(), return_exceptions=True)
        for operation, result in zip(tasks, results):
            if isinstance(result, RainMachineError):
                _LOGGER.error(
                    'There was an error while updating %s: %s', operation,
                    result)
                continue
            self.data[operation] = result
class RainMachineEntity(Entity):
    """Define a generic RainMachine entity."""

    def __init__(self, rainmachine):
        """Initialize."""
        # Extra state attributes shared by all RainMachine entities.
        self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
        # Dispatcher unsubscribe callbacks, invoked on removal.
        self._dispatcher_handlers = []
        self._name = None
        self.rainmachine = rainmachine

    @property
    def device_info(self):
        """Return device registry information for this entity."""
        return {
            'identifiers': {
                (DOMAIN, self.rainmachine.client.mac)
            },
            'name': self.rainmachine.client.name,
            'manufacturer': 'RainMachine',
            'model': 'Version {0} (API: {1})'.format(
                self.rainmachine.client.hardware_version,
                self.rainmachine.client.api_version),
            'sw_version': self.rainmachine.client.software_version,
        }

    @property
    def device_state_attributes(self) -> dict:
        """Return the state attributes."""
        return self._attrs

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    async def async_will_remove_from_hass(self):
        """Disconnect dispatcher listener when removed."""
        for handler in self._dispatcher_handlers:
            handler()
|
sharph/lifx-python | refs/heads/master | lights_on.py | 1 | #!/usr/bin/env python3
import lifx
lifx.set_power(lifx.BCAST, True)
|
subramani95/neutron | refs/heads/master | neutron/tests/unit/test_post_mortem_debug.py | 3 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from six import moves
from neutron.tests import base
from neutron.tests import post_mortem_debug
class TestTesttoolsExceptionHandler(base.BaseTestCase):
    def test_exception_handler(self):
        try:
            # Deliberately fail so a real exc_info triple is captured.
            self.assertTrue(False)
        except Exception:
            exc_info = sys.exc_info()
        # Stub out printing, pdb and traceback filtering so only the
        # handler's wiring is exercised.
        with mock.patch('traceback.print_exception') as mock_print_exception:
            with mock.patch('pdb.post_mortem') as mock_post_mortem:
                with mock.patch.object(post_mortem_debug,
                                       'get_ignored_traceback',
                                       return_value=mock.Mock()):
                    post_mortem_debug.exception_handler(exc_info)

        # traceback will become post_mortem_debug.FilteredTraceback
        filtered_exc_info = (exc_info[0], exc_info[1], mock.ANY)
        mock_print_exception.assert_called_once_with(*filtered_exc_info)
        mock_post_mortem.assert_called_once_with(mock.ANY)
class TestFilteredTraceback(base.BaseTestCase):
    def test_filter_traceback(self):
        # Two-element mock traceback chain: tb1 -> tb2 -> None.
        tb1 = mock.Mock()
        tb2 = mock.Mock()
        tb1.tb_next = tb2
        tb2.tb_next = None
        # Filtering at tb2 should expose tb1's attributes but cut the chain.
        ftb1 = post_mortem_debug.FilteredTraceback(tb1, tb2)
        for attr in ['lasti', 'lineno', 'frame']:
            attr_name = 'tb_%s' % attr
            self.assertEqual(getattr(tb1, attr_name, None),
                             getattr(ftb1, attr_name, None))
        self.assertIsNone(ftb1.tb_next)
class TestGetIgnoredTraceback(base.BaseTestCase):
    def _test_get_ignored_traceback(self, ignored_bit_array, expected):
        """Build a mock traceback chain whose frames are marked ignored (1)
        or not (0) per ignored_bit_array, then check which traceback
        get_ignored_traceback returns (expected is an index or None)."""
        root_tb = mock.Mock()

        # Build the linked chain of mock tracebacks.
        tb = root_tb
        tracebacks = [tb]
        for x in moves.xrange(len(ignored_bit_array) - 1):
            tb.tb_next = mock.Mock()
            tb = tb.tb_next
            tracebacks.append(tb)
        tb.tb_next = None

        # Mark each frame: a '__unittest' global flags it as ignorable.
        tb = root_tb
        for ignored in ignored_bit_array:
            if ignored:
                tb.tb_frame.f_globals = ['__unittest']
            else:
                tb.tb_frame.f_globals = []
            tb = tb.tb_next

        actual = post_mortem_debug.get_ignored_traceback(root_tb)
        if expected is not None:
            expected = tracebacks[expected]
        self.assertEqual(actual, expected)

    def test_no_ignored_tracebacks(self):
        self._test_get_ignored_traceback([0, 0, 0], None)

    def test_single_member_trailing_chain(self):
        self._test_get_ignored_traceback([0, 0, 1], 2)

    def test_two_member_trailing_chain(self):
        self._test_get_ignored_traceback([0, 1, 1], 1)

    def test_first_traceback_ignored(self):
        self._test_get_ignored_traceback([1, 0, 0], None)

    def test_middle_traceback_ignored(self):
        self._test_get_ignored_traceback([0, 1, 0], None)
|
tectronics/marioai | refs/heads/master | src/amico/python/agents/forwardjumpingagent.py | 21 | # -*- coding: utf-8 -*-
__author__ = "Sergey Karakovskiy, sergey at idsia fullstop ch"
__date__ = "$August 26, 2010 1:33:34 PM$"
from marioagent import MarioAgent
class ForwardJumpingAgent(MarioAgent):
    """ In fact the Python twin of the
        corresponding Java ForwardJumpingAgent.

    Always runs right at full speed and jumps whenever possible.
    """
    # Current 6-element action vector sent to the simulator.
    action = None
    actionStr = None
    # Indices into the action vector for the jump and speed keys.
    KEY_JUMP = 3
    KEY_SPEED = 4
    #levelScene = None
    mayMarioJump = None
    isMarioOnGround = None
    marioFloats = None
    enemiesFloats = None
    isEpisodeOver = False
    marioState = None

    def getName(self):
        return self.agentName

    def reset(self):
        # action[1] is "right", so the agent always moves right at speed.
        self.action = [0, 0, 0, 0, 0, 0]
        self.action[1] = 1
        self.action[self.KEY_SPEED] = 1
        self.isEpisodeOver = False

    def __init__(self):
        """Constructor"""
        self.reset()
        self.actionStr = ""
        self.agentName = "Python Forward Jumping Agent"

    def getAction(self):
        """ Possible analysis of current observation and sending an action back
        """
        #print "M: mayJump: %s, onGround: %s, level[11,12]: %d, level[11,13]: %d, jc: %d" % (self.mayMarioJump, self.isMarioOnGround, self.levelScene[11,12], self.levelScene[11,13], self.trueJumpCounter)
        if (self.isEpisodeOver):
            return (1, 1, 1, 1, 1, 1)
        # Press jump while allowed or while airborne (keeps the jump high).
        self.action[self.KEY_SPEED] = self.action[self.KEY_JUMP] = self.mayMarioJump or not self.isMarioOnGround;
        t = tuple(self.action)
        return t

    def integrateObservation(self, squashedObservation, squashedEnemies, marioPos, enemiesPos, marioState):
        """This method stores the observation inside the agent"""
        #print "Py: got observation::: squashedObservation: \n", squashedObservation
        #print "Py: got observation::: squashedEnemies: \n", squashedEnemies
        #print "Py: got observation::: marioPos: \n", marioPos
        #print "Py: got observation::: enemiesPos: \n", enemiesPos
        #print "Py: got observation::: marioState: \n", marioState
        #a = numpy.array(squashedObservation)
        #row = 19
        #col = 19
        #a.resize((row,col))
        #print "\n a== \n", a
        #levelScene = a
        #enemiesObservation = numpy.array(squashedEnemies)
        #enemiesObservation.resize((row,col))
        self.marioFloats = marioPos
        self.enemiesFloats = enemiesPos
        # NOTE(review): indices follow the AmiCo marioState layout --
        # [3] "may jump", [2] "on ground", [1] mode -- confirm against the
        # Java-side protocol.
        self.mayMarioJump = marioState[3]
        self.isMarioOnGround = marioState[2]
        #self.levelScene = levelScene
        self.marioState = marioState[1]
        #self.printLevelScene()

    def setObservationDetails(self, rfWidth, rfHeight, egoRow, egoCol):
        # Remember the receptive-field geometry and Mario's cell within it.
        self.receptiveFieldWidth = rfWidth
        self.receptiveFieldHeight = rfHeight
        self.marioEgoRow = egoRow;
        self.marioEgoCol = egoCol;
|
JTarball/docker-django-polymer-starter-kit | refs/heads/master | docker/app/app/backend/apps/_archive/blog_old__/urls.py | 8 | """
URLconf for 'blog' application
"""
from django.conf.urls import patterns, url
from .views import PostListView, PostUserListView, PostYearListView, PostCategoryListView, PostDetailView
urlpatterns = patterns('',
    # Post listings: all posts, by author, by year, by category.
    url(r'^$', PostListView.as_view(), name="list"),
    # NOTE(review): the three list patterns below have no trailing '$'
    # anchor, so they also match longer URLs -- confirm this is intended.
    url(r'^user/(?P<username>[\w-]+)/', PostUserListView.as_view(), name="list_user"),
    url(r'^year/(?P<year>[\w-]+)/', PostYearListView.as_view(), name="list_year"),
    url(r'^category/(?P<category>[\w-]+)/', PostCategoryListView.as_view(), name="list_category"),
    # Single post detail, looked up by slug.
    url(r'^post/(?P<slug>[\w-]+)/$', PostDetailView.as_view(), name="detail")
)
|
jollyroger/debian-buildbot | refs/heads/master | buildbot/monkeypatches/bug4520.py | 3 | # coding=utf-8
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.python import log
from twisted.spread import pb
def patch():
    """Monkeypatch twisted.spread.pb so exceptions raised by a remote PB
    call can be thrown into an inlineCallbacks generator.

    See http://twistedmatrix.com/trac/ticket/4520.
    """
    log.msg("Applying patch for http://twistedmatrix.com/trac/ticket/4520")
    pb.RemoteError = RemoteError
    pb.CopiedFailure.throwExceptionIntoGenerator = \
        CopiedFailure_throwExceptionIntoGenerator

    old_getStateToCopy = pb.CopyableFailure.getStateToCopy

    def getStateToCopy(self):
        # Stringify the exception instance so the failure state can be
        # serialized across the wire.
        state = old_getStateToCopy(self)
        state['value'] = str(self.value)  # Exception instance
        return state

    # Bug fix: the replacement above was defined but never installed, so
    # the 'value' stringification patch silently did nothing.
    pb.CopyableFailure.getStateToCopy = getStateToCopy
#
# Everything below this line was taken from Twisted, except as annotated. See
# http://twistedmatrix.com/trac/changeset/32211
#
# Merge copiedfailure-stringexc-4520
#
# Author: sirgolan, Koblaid, glyph
# Reviewer: exarkun, glyph
# Fixes: #4520
#
# Allow inlineCallbacks and exceptions raised from a twisted.spread remote
# call to work together. A new RemoteError exception will be raised into
# the generator when a yielded Deferred fails with a remote PB failure.
class RemoteError(Exception):
    """Raised into an inlineCallbacks generator when a yielded Deferred
    fails with a remote PB failure (Twisted ticket #4520).

    Carries the remote exception's type name, value and traceback as
    strings, since the original objects cannot cross the wire.
    """

    def __init__(self, remoteType, value, remoteTraceback):
        super(RemoteError, self).__init__(value)
        self.remoteType = remoteType
        self.remoteTraceback = remoteTraceback
def CopiedFailure_throwExceptionIntoGenerator(self, g):
    # Replacement for CopiedFailure.throwExceptionIntoGenerator: throw a
    # RemoteError carrying the remote type/value/traceback strings into the
    # generator, instead of the original (unserializable) exception.
    return g.throw(RemoteError(self.type, self.value, self.traceback))
|
Lokke/eden | refs/heads/master | modules/tests/core/core_dataTable.py | 26 | # -*- coding: utf-8 -*-
__all__ = ["dt_filter",
"dt_row_cnt",
"dt_data",
"dt_data_item",
"dt_find",
"dt_links",
"dt_action",
]
# @ToDo: There are performance issues
# - need to profile and find out in which functions are the bottlenecks
import time
from gluon import current
# -----------------------------------------------------------------------------
def convert_repr_number(number):
    """
    Convert a displayed number string back to a float.

    Assumptions (inherited from the dataTable formatting):
        * the string may contain a thousand separator,
        * it may contain a decimal point,
        * if it has a thousand separator it also has a decimal point.

    Returns False when the string does not look like a valid number
    representation (i.e. more than two distinct non-digit characters
    besides a leading minus sign).
    """
    thousand_sep = ""   # first non-digit seen (assumed thousand separator)
    decimal_sep = ""    # second non-digit seen (assumed decimal point)
    integer_part = "0"  # digits seen before the first separator
    fraction_part = ""  # digits seen after the first separator
    for ch in number:
        if ch.isdigit():
            if thousand_sep == "":
                integer_part += ch
            else:
                fraction_part += ch
        elif ch == "-" and integer_part == "0":
            # Leading minus sign.
            integer_part = "-0"
        elif thousand_sep == "":
            thousand_sep = ch
        elif decimal_sep == "":
            decimal_sep = ch
            fraction_part += "."
        else:
            # A third separator: not a valid number representation.
            return False
    if decimal_sep == "":
        # At most one separator seen: treat it as the decimal point.
        return float("%s.%s" % (integer_part, fraction_part))
    return float("%s%s" % (integer_part, fraction_part))
# -----------------------------------------------------------------------------
def dt_filter(reporter,
              search_string=" ",
              forceClear=True,
              quiet=True):
    """
    Filter the dataTable by typing search_string into its search box.

    When forceClear is True the filter is first reset (via a recursive
    call with the default single-space search). Returns False if the
    table's "processing" indicator does not disappear within the sleep
    limit, True otherwise.
    """
    if forceClear:
        # Clear any previous filter first; bail out if that already fails.
        if not dt_filter(reporter,
                         forceClear=False,
                         quiet=quiet):
            return False
    config = current.test_config
    browser = config.browser

    sleep_limit = 10
    elem = browser.find_element_by_css_selector('label > input[type="text"]')
    elem.clear()
    elem.send_keys(search_string)

    time.sleep(1)  # give time for the list_processing element to appear
    waiting_elem = browser.find_element_by_id("datatable_processing")
    sleep_time = 0
    # Wait (up to sleep_limit seconds) for the processing overlay to hide.
    while (waiting_elem.value_of_css_property("visibility") == "visible"):
        time.sleep(1)
        sleep_time += 1
        if sleep_time > sleep_limit:
            if not quiet:
                reporter("DataTable filter didn't respond within %d seconds" % sleep_limit)
            return False
    return True
# -----------------------------------------------------------------------------
def dt_row_cnt(reporter,
               check=(),
               quiet=True,
               utObj=None):
    """
    Return the rows that are being displayed and the total rows in the
    dataTable, parsed from the "Showing X to Y of Z entries" summary.

    @param reporter: callable used to report messages
    @param check: optional expected values to assert against -- either a
                  3-tuple (start, end, length) or a 4-tuple
                  (start, end, length, filtered)
    @param quiet: if False, report the raw summary text
    @param utObj: optional unittest.TestCase whose assertEqual is used;
                  falls back to a plain assert when None
    @return: (start, end, length) or (start, end, length, filtered)
    """
    config = current.test_config
    browser = config.browser

    elem = browser.find_element_by_id("datatable_info")
    details = elem.text
    if not quiet:
        reporter(details)

    # Summary format: "Showing 1 to 10 of 42 entries" optionally followed
    # by "(filtered from 99 total entries)".
    words = details.split()
    start = int(words[1])
    end = int(words[3])
    length = int(words[5])
    filtered = None
    if len(words) > 10:
        filtered = int(words[9])
    if check != ():
        if len(check) == 3:
            expected = "Showing %d to %d of %d entries" % check
            actual = "Showing %d to %d of %d entries" % (start, end, length)
            msg = "Expected result of '%s' doesn't equal '%s'" % (expected, actual)
            if utObj is not None:
                # Bug fix: previously this compared a boolean against the
                # message string (assertEqual(cond, msg)), which could never
                # be equal; compare the tuples and pass msg as the message.
                utObj.assertEqual((start, end, length), check, msg)
            else:
                assert (start, end, length) == check, msg
        elif len(check) == 4:
            expected = "Showing %d to %d of %d entries (filtered from %d total entries)" % check
            if filtered:
                actual = "Showing %d to %d of %d entries (filtered from %d total entries)" % (start, end, length, filtered)
            else:
                actual = "Showing %d to %d of %d entries" % (start, end, length)
            msg = "Expected result of '%s' doesn't equal '%s'" % (expected, actual)
            if utObj is not None:
                # Bug fix: same boolean-vs-message problem as above, and the
                # compared tuple was missing the filtered count.
                utObj.assertEqual((start, end, length, filtered), check, msg)
            else:
                assert (start, end, length, filtered) == check, msg
    if len(words) > 10:
        return (start, end, length, filtered)
    else:
        return (start, end, length)
# -----------------------------------------------------------------------------
def dt_data(row_list=None, add_header=False):
    """
    Return the text of the rows currently displayed in the dataTable.

    Rows are the lines of the table container that start with "Detail";
    the leading "Detail" prefix is stripped from each. The line preceding
    the first data row is remembered as the header.

    @param row_list: optional list of row indices to keep (None = all)
    @param add_header: if True, prepend the header line to the result
    """
    browser = current.test_config.browser

    container = browser.find_element_by_id("table-container")
    lines = container.text.splitlines()

    records = []
    row_index = 0
    previous_line = ""
    header = ""
    for line in lines:
        if line.startswith("Detail"):
            # The line just before the first data row is the column header.
            header = previous_line
            line = line[8:]
            if row_list is None or row_index in row_list:
                records.append(line)
            row_index += 1
        else:
            previous_line = line

    if add_header:
        return [header] + records
    return records
# -----------------------------------------------------------------------------
def dt_data_item(row=1,
                 column=1,
                 tableID="datatable",
                 ):
    """
    Return the text found in the given cell of the dataTable, or False
    if the cell cannot be located.

    @param row: 1-based row index within the table body
    @param column: 1-based column index
    @param tableID: the HTML id of the table
    """
    config = current.test_config
    browser = config.browser

    td = ".//*[@id='%s']/tbody/tr[%s]/td[%s]" % (tableID, row, column)
    try:
        elem = browser.find_element_by_xpath(td)
        return elem.text
    except Exception:
        # Narrowed from a bare except: so SystemExit/KeyboardInterrupt are
        # no longer swallowed; any lookup failure still returns False.
        return False
# -----------------------------------------------------------------------------
def dt_find(search="",
            row=None,
            column=None,
            cellList=None,
            tableID="datatable",
            first=False,
            ):
    """
    Find the cells where search is found in the dataTable
    search:   the string to search for. If you pass in a number (int, float)
              then the function will attempt to convert all text values to
              a float for comparison by using the convert_repr_number helper
              function
    row:      The row or list of rows to search along
    column:   The column or list of columns to search down
    cellList: This is a list of cells which may be returned from a previous
              call, these cells will be searched again for the search string.
              However if a row or column value is also provided then for
              each cell in cellList the column or row will be offset.
              For example cellList = [(3,1)] and column = 5, means rather
              than looking in cell (3,1) the function will look in cell (3,5)
    tableID:  The HTML id of the table
    first:    Stop on the first match, or find all matches

    Example of use (test url: /inv/warehouse/n/inv_item
                    {where n is the warehouse id}
                   ):
        match = dt_find("Plastic Sheets")
        if match:
            if not dt_find(4200, cellList=match, column=5, first=True):
                assert 0, "Unable to find 4200 Plastic Sheets"
        else:
            assert 0, "Unable to find any Plastic Sheets"
    """
    config = current.test_config
    browser = config.browser

    def find_match(search, tableID, r, c):
        # Return (r, c) on a match, False on a lookup error.
        # NOTE(review): falls through with an implicit None (also falsy)
        # when the cell exists but does not match.
        td = ".//*[@id='%s']/tbody/tr[%s]/td[%s]" % (tableID, r, c)
        try:
            elem = browser.find_element_by_xpath(td)
            text = elem.text
            # Numeric searches compare against the parsed representation.
            if isinstance(search, (int, float)):
                text = convert_repr_number(text)
            if text == search:
                return (r, c)
        except:
            return False

    result = []
    if cellList:
        # Re-check previously found cells, optionally offsetting each
        # cell's row or column with the row/column arguments.
        for cell in cellList:
            if row:
                r = row
            else:
                r = cell[0]
            if column:
                c = column
            else:
                c = cell[1]
            found = find_match(search, tableID, r, c)
            if found:
                result.append(found)
                if first:
                    return result
    else:
        # Calculate the rows that need to be navigated along to find the search string
        colList = []
        rowList = []
        if row == None:
            # Probe tr[1], tr[2], ... until the xpath lookup fails.
            r = 1
            while True:
                tr = ".//*[@id='%s']/tbody/tr[%s]" % (tableID, r)
                try:
                    elem = browser.find_element_by_xpath(tr)
                    rowList.append(r)
                    r += 1
                except:
                    break
        elif isinstance(row, int):
            rowList = [row]
        else:
            rowList = row
        # Calculate the columns that need to be navigated down to find the search string
        if column == None:
            # Probe the first row's cells to count the table's columns.
            c = 1
            while True:
                td = ".//*[@id='%s']/tbody/tr[1]/td[%s]" % (tableID, c)
                try:
                    elem = browser.find_element_by_xpath(td)
                    colList.append(c)
                    c += 1
                except:
                    break
        elif isinstance(column, int):
            colList = [column]
        else:
            colList = column
        # Now try and find a match
        for r in rowList:
            for c in colList:
                found = find_match(search, tableID, r, c)
                if found:
                    result.append(found)
                    if first:
                        return result
    return result
# -----------------------------------------------------------------------------
def dt_links(reporter,
             row=1,
             tableID="datatable",
             quiet=True
             ):
    """ Returns a list of links in the given row of the dataTable.

    Each entry is a [column, link_text] pair. Columns and anchors are
    probed with increasing indices until an xpath lookup fails.
    """
    config = current.test_config
    browser = config.browser

    links = []
    # loop through each column
    column = 1
    while True:
        td = ".//*[@id='%s']/tbody/tr[%s]/td[%s]" % (tableID, row, column)
        try:
            elem = browser.find_element_by_xpath(td)
        except:
            # No such column: we've walked past the end of the row.
            break
        # loop through looking for links in the cell
        cnt = 1
        while True:
            link = ".//*[@id='%s']/tbody/tr[%s]/td[%s]/a[%s]" % (tableID, row, column, cnt)
            try:
                elem = browser.find_element_by_xpath(link)
            except:
                # No more anchors in this cell.
                break
            cnt += 1
            if not quiet:
                reporter("%2d) %s" % (column, elem.text))
            links.append([column, elem.text])
        column += 1
    return links
# -----------------------------------------------------------------------------
def dt_action(row = 1,
action = None,
column = 1,
tableID = "datatable",
):
""" click the action button in the dataTable """
config = current.test_config
browser = config.browser
# What looks like a fairly fragile xpath, but it should work unless DataTable changes
if action:
button = ".//*[@id='%s']/tbody/tr[%s]/td[%s]/a[contains(text(),'%s')]" % (tableID, row, column, action)
else:
button = ".//*[@id='%s']/tbody/tr[%s]/td[%s]/a" % (tableID, row, column)
giveup = 0.0
sleeptime = 0.2
while giveup < 10.0:
try:
element = browser.find_element_by_xpath(button)
url = element.get_attribute("href")
if url:
browser.get(url)
return True
except Exception as inst:
print "%s with %s" % (type(inst), button)
time.sleep(sleeptime)
giveup += sleeptime
return False
# END ========================================================================= |
anthonydillon/horizon | refs/heads/master | openstack_dashboard/dashboards/project/loadbalancers/tabs.py | 30 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.loadbalancers import tables
class PoolsTab(tabs.TableTab):
    """Tab listing the tenant's load balancer pools."""
    table_classes = (tables.PoolsTable,)
    name = _("Pools")
    slug = "pools"
    template_name = "horizon/common/_detail_table.html"

    def get_poolstable_data(self):
        """Return the tenant's pools, attaching each VIP's floating IP."""
        pools = []
        try:
            request = self.tab_group.request
            pools = api.lbaas.pool_list(
                request, tenant_id=self.request.user.tenant_id)
            fips = None
            for pool in pools:
                vip = getattr(pool, "vip", None)
                if not vip:
                    continue
                # Fetch the floating IP list lazily, at most once.
                if not fips:
                    fips = api.network.tenant_floating_ip_list(request)
                matches = [fip for fip in fips
                           if fip.port_id == vip.port_id]
                if matches:
                    vip.fip = matches[0]
        except Exception:
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve pools list.'))
        return pools
class MembersTab(tabs.TableTab):
    """Tab listing the tenant's pool members."""
    table_classes = (tables.MembersTable,)
    name = _("Members")
    slug = "members"
    template_name = "horizon/common/_detail_table.html"

    def get_memberstable_data(self):
        """Return the tenant's members, or [] with an error on failure."""
        members = []
        try:
            members = api.lbaas.member_list(
                self.tab_group.request,
                tenant_id=self.request.user.tenant_id)
        except Exception:
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve member list.'))
        return members
class MonitorsTab(tabs.TableTab):
    """Tab listing the tenant's health monitors."""
    table_classes = (tables.MonitorsTable,)
    name = _("Monitors")
    slug = "monitors"
    template_name = "horizon/common/_detail_table.html"

    def get_monitorstable_data(self):
        """Return the tenant's health monitors, or [] with an error."""
        monitors = []
        try:
            monitors = api.lbaas.pool_health_monitor_list(
                self.tab_group.request,
                tenant_id=self.request.user.tenant_id)
        except Exception:
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve monitor list.'))
        return monitors
class LoadBalancerTabs(tabs.TabGroup):
    """Top-level tab group (pools, members, monitors) for the panel."""
    slug = "lbtabs"
    tabs = (PoolsTab, MembersTab, MonitorsTab)
    # sticky: keep the selected tab active across page loads.
    sticky = True
class PoolDetailsTab(tabs.Tab):
    """Detail tab rendering a single pool."""
    name = _("Pool Details")
    slug = "pooldetails"
    template_name = "project/loadbalancers/_pool_details.html"

    def get_context_data(self, request):
        """Expose the pool supplied via the tab group kwargs."""
        return {'pool': self.tab_group.kwargs['pool']}
class VipDetailsTab(tabs.Tab):
    """Detail tab rendering a single VIP, including any mapped floating IP."""
    name = _("VIP Details")
    slug = "vipdetails"
    template_name = "project/loadbalancers/_vip_details.html"

    def get_context_data(self, request):
        # Fetch the VIP by id and attach its floating IP when one exists.
        vid = self.tab_group.kwargs['vip_id']
        vip = []
        try:
            vip = api.lbaas.vip_get(request, vid)
            fips = api.network.tenant_floating_ip_list(self.tab_group.request)
            # NOTE(review): this compares against vip.port.id, while
            # PoolsTab uses vip.port_id -- presumably vip_get returns a
            # full port object here; confirm against the lbaas API wrapper.
            vip_fip = [fip for fip in fips
                       if fip.port_id == vip.port.id]
            if vip_fip:
                vip.fip = vip_fip[0]
        except Exception:
            # On failure the template receives the initial empty list.
            exceptions.handle(self.tab_group.request,
                              _('Unable to retrieve VIP details.'))
        return {'vip': vip}
class MemberDetailsTab(tabs.Tab):
    """Detail tab rendering a single pool member."""
    name = _("Member Details")
    slug = "memberdetails"
    template_name = "project/loadbalancers/_member_details.html"

    def get_context_data(self, request):
        """Expose the member supplied via the tab group kwargs."""
        return {'member': self.tab_group.kwargs['member']}
class MonitorDetailsTab(tabs.Tab):
    """Detail tab rendering a single health monitor."""
    name = _("Monitor Details")
    slug = "monitordetails"
    template_name = "project/loadbalancers/_monitor_details.html"

    def get_context_data(self, request):
        """Expose the monitor supplied via the tab group kwargs."""
        return {'monitor': self.tab_group.kwargs['monitor']}
class PoolDetailsTabs(tabs.TabGroup):
    """Tab group for the pool detail page."""
    slug = "pooltabs"
    tabs = (PoolDetailsTab,)
class VipDetailsTabs(tabs.TabGroup):
    """Tab group for the VIP detail page."""
    slug = "viptabs"
    tabs = (VipDetailsTab,)
class MemberDetailsTabs(tabs.TabGroup):
    """Tab group for the member detail page."""
    slug = "membertabs"
    tabs = (MemberDetailsTab,)
class MonitorDetailsTabs(tabs.TabGroup):
    """Tab group for the monitor detail page."""
    slug = "monitortabs"
    tabs = (MonitorDetailsTab,)
|
Javiercerna/MissionPlanner | refs/heads/master | Lib/site-packages/scipy/fftpack/_fftpack.py | 53 | import sys
if sys.platform == 'cli':
    # IronPython (.NET): load the precompiled _fftpack assembly and
    # re-export its names so the usual fftpack import path keeps working.
    import clr
    clr.AddReference("_fftpack")
    from scipy__fftpack___fftpack import *
|
KrzysztofStachanczyk/Sensors-WWW-website | refs/heads/master | www/env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/constants.py | 3007 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
|
salguarnieri/intellij-community | refs/heads/master | python/testData/refactoring/extractsuperclass/importNotBroken.before.py | 80 | from shared import SharedClass
class Source(SharedClass):
    # Fixture for the extract-superclass refactoring test; presumably the
    # SharedClass import must survive the refactoring -- see the test name.
    pass
IshankGulati/scikit-learn | refs/heads/master | sklearn/neighbors/tests/test_kde.py | 26 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
    """Naive O(n*m) reference density estimate of `kernel` at each row of Y
    over all rows of X, with bandwidth h."""
    # Pairwise Euclidean distances, shape (len(Y), len(X)).
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
    if kernel == 'gaussian':
        contrib = np.exp(-0.5 * (d * d) / (h * h))
    elif kernel == 'tophat':
        contrib = (d < h)
    elif kernel == 'epanechnikov':
        contrib = (1.0 - (d * d) / (h * h)) * (d < h)
    elif kernel == 'exponential':
        contrib = np.exp(-d / h)
    elif kernel == 'linear':
        contrib = (1 - d / h) * (d < h)
    elif kernel == 'cosine':
        contrib = np.cos(0.5 * np.pi * d / h) * (d < h)
    else:
        raise ValueError('kernel not recognized')
    return norm * contrib.sum(-1)
def check_results(kernel, bandwidth, atol, rtol, X, Y, dens_true):
    """Fit a KernelDensity on X and compare its density at Y to dens_true."""
    kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
                        atol=atol, rtol=rtol)
    kde.fit(X)
    log_dens = kde.score_samples(Y)
    tol = max(1E-7, rtol)
    assert_allclose(np.exp(log_dens), dens_true, atol=atol, rtol=tol)
    # score() is the total log-likelihood, so exp(score) is the product.
    assert_allclose(np.exp(kde.score(Y)), np.prod(dens_true),
                    atol=atol, rtol=tol)
def test_kernel_density(n_samples=100, n_features=3):
    """Yield reference-vs-KernelDensity checks over kernels and tolerances."""
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    Y = rng.randn(n_samples, n_features)

    kernels = ['gaussian', 'tophat', 'epanechnikov',
               'exponential', 'linear', 'cosine']
    for kernel in kernels:
        for bandwidth in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    # breadth_first does not change the yielded args; the
                    # check is simply emitted twice (kept as-is).
                    for breadth_first in (True, False):
                        yield (check_results, kernel, bandwidth, atol, rtol,
                               X, Y, dens_true)
def test_kernel_density_sampling(n_samples=100, n_features=3):
    """Check sample() shape/range for supported kernels and that
    unsupported kernels raise NotImplementedError."""
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    bandwidth = 0.2
    for kernel in ['gaussian', 'tophat']:
        # draw a sample of the same shape as the training data
        kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
        samp = kde.sample(100)
        assert_equal(X.shape, samp.shape)
        # check that samples are in the right range
        nbrs = NearestNeighbors(n_neighbors=1).fit(X)
        dist, ind = nbrs.kneighbors(X, return_distance=True)
        if kernel == 'tophat':
            assert np.all(dist < bandwidth)
        elif kernel == 'gaussian':
            # 5 standard deviations is safe for 100 samples, but there's a
            # very small chance this test could fail.
            assert np.all(dist < 5 * bandwidth)
    # check unsupported kernels
    for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
        kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
        assert_raises(NotImplementedError, kde.sample, 100)
    # non-regression test: used to return a scalar
    X = rng.randn(4, 1)
    kde = KernelDensity(kernel="gaussian").fit(X)
    assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
    """Smoke-test every algorithm/metric pairing; invalid pairs must raise."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)  # 2 features required for haversine dist.
    Y = rng.randn(10, 2)
    metrics = ['euclidean', 'minkowski', 'manhattan',
               'chebyshev', 'haversine']
    for algorithm in ['auto', 'ball_tree', 'kd_tree']:
        for metric in metrics:
            unsupported = (algorithm == 'kd_tree'
                           and metric not in KDTree.valid_metrics)
            if unsupported:
                assert_raises(ValueError, KernelDensity,
                              algorithm=algorithm, metric=metric)
            else:
                kde = KernelDensity(algorithm=algorithm, metric=metric)
                kde.fit(X)
                y_dens = kde.score_samples(Y)
                assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
    # Placeholder: the scoring comparison below is not implemented yet.
    pass
    #FIXME
    #np.random.seed(0)
    #X = np.random.random((n_samples, n_features))
    #Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
    """Invalid constructor arguments must raise ValueError."""
    bad_kwargs = [
        dict(algorithm='blah'),
        dict(bandwidth=0),
        dict(kernel='blah'),
        dict(metric='blah'),
        dict(algorithm='kd_tree', metric='blah'),
    ]
    for kwargs in bad_kwargs:
        assert_raises(ValueError, KernelDensity, **kwargs)
def test_kde_pipeline_gridsearch():
    """KernelDensity must work inside a Pipeline under GridSearchCV."""
    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    pipeline = make_pipeline(
        StandardScaler(with_mean=False, with_std=False),
        KernelDensity(kernel="gaussian"))
    grid = {'kerneldensity__bandwidth': [0.001, 0.01, 0.1, 1, 10]}
    search = GridSearchCV(pipeline, param_grid=grid, cv=5)
    search.fit(X)
    assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
|
capravictoriae/Space-Mission-Design-and-Operations-EE585x-MOOC-Python- | refs/heads/master | Unit2/P2.1_A.py | 1 | iss_orbit_apogee = 417 * 1000 # Km to m
iss_orbit_perigee = 401 * 1000  # km converted to m

# Earth constants.
earth_mu = 3.986e14  # standard gravitational parameter, m^3 s^-2
earth_r = 6.378e6    # mean radius, m

# Gravitational acceleration at perigee altitude: g = mu / r^2.
dist = earth_r + iss_orbit_perigee
grav = earth_mu / dist**2
print(grav)
reflectometry/osrefl | refs/heads/master | osrefl/viewers/view.py | 1 | # Copyright (C) 2008 University of Maryland
# All rights reserved.
# See LICENSE.txt for details.
# Author: Christopher Metting
#Starting Date:6/5/2009
from numpy import size,array,shape,indices, searchsorted, linspace
from numpy import log, log10, abs, min, max, nonzero,isnan
from .zoom_colorbar import *
import sys,copy
from numpy import roll
def feature_plot(unit,size,delta_xyz):
    '''
    Render three-dimensional contour plots of the voxelized feature
    (like spy_on, but a 3-D rendering rather than z slices).

    unit: 3-D array of voxel values for the feature.
    size: unused in this function -- TODO confirm and drop at call sites.
    delta_xyz: real-space step of the voxel grid along each axis.
    '''
    from enthought.mayavi import mlab
    # Roll the data one voxel along x and y and zero all boundary planes,
    # apparently so that the contour surfaces close at the cell edges.
    dumbie_unit = roll(unit,1,axis = 0)
    dumbie_unit = roll(dumbie_unit,1,axis = 1)
    dumbie_unit[:,0,:] = 0.0
    dumbie_unit[:,-1,:] = 0.0
    dumbie_unit[0,:,:] = 0.0
    dumbie_unit[-1,:,:] = 0.0
    # Real-space coordinate grids for each voxel index.
    xyz = indices((unit.shape), 'd')
    xyz[0] *= delta_xyz[0]
    xyz[1] *= delta_xyz[1]
    xyz[2] *= delta_xyz[2]
    feature_size = shape(unit)  # NOTE(review): unused local
    # Figure 0: real-space scaling.
    mlab.figure(0)
    s = mlab.contour3d(xyz[0],xyz[1],xyz[2],dumbie_unit,opacity=.07,contours = 20)
    # Figure 1: same data with the z axis exaggerated 10x.
    mlab.figure(1)
    t = mlab.contour3d(xyz[0],xyz[1],xyz[2]*10,dumbie_unit,opacity=.07,contours = 20)
    # Figure 2: raw index-space view.
    mlab.figure(2)
    u = mlab.contour3d(dumbie_unit,opacity=.05,contours = 20)
    mlab.show()
    return
def intensity_plot(intensity,mins,maxs, header = None, bar = True,
                   vmin = None, vmax = None):
    '''
    Plot a qx-vs-qz scattering intensity map on a log10 colour scale.

    Works for both resolution-corrected and uncorrected maps. The lowest
    non-zero intensity is added across the map so log10 of zero is never
    taken. NOTE: NaNs in `intensity` are zeroed in place.

    mins, maxs: axis bounds; element [0] is qx, element [-1] is qz.
    vmin, vmax: colour limits in log10 units; by default the top 15
                decades below the maximum are shown.
    bar: accepted for interface parity; a zoomable colorbar is always
         attached.
    '''
    from pylab import imshow,colorbar,show, title, xlabel, ylabel
    plotxmin = mins[0]
    plotxmax = maxs[0]
    plotzmin = mins[-1]
    plotzmax = maxs[-1]
    print min(intensity)
    if vmax == None:
        vmax = max(log10(intensity))
    print max(log(intensity))
    if vmin == None:
        # Default window: 15 decades below the maximum.
        vmin = max(log10(intensity)) - 15.0
    intensity[isnan(intensity)] = 0.0
    # Smallest non-zero magnitude, used as an offset to avoid log10(0).
    if size(abs(intensity[nonzero(intensity.real)])) == 0:
        lower_lim = 0.0
    else:
        lower_lim = min(abs(intensity[nonzero(intensity.real)]))
    plot_extent = (plotxmin,plotxmax,plotzmin,plotzmax)
    graph = imshow(log10(abs(intensity.T+lower_lim)),aspect='auto',
                   interpolation='nearest',extent=plot_extent,origin='lower',
                   vmin = vmin, vmax = vmax)
    zoom_colorbar(graph)
    title(str(header))
    xlabel('qx(A^-1)')
    ylabel('qz(A^-1)')
    return graph
def linear_plot(intensity,mins,maxs, header = None, bar = True,
             vmin = None, vmax = None):
    '''
    Plot a scattering intensity map on a linear (non-log) colour scale.

    The smallest non-zero intensity is added everywhere so the image has
    no exact zeros. vmin/vmax/bar are accepted for interface parity with
    intensity_plot but are not used here.
    '''
    from pylab import imshow, colorbar, title, xlabel, ylabel
    extent = (mins[0], maxs[0], mins[-1], maxs[-1])
    offset = min(intensity[nonzero(intensity.real)])
    graph = imshow(abs(intensity.T + offset), aspect='auto',
                   interpolation='nearest',
                   extent=extent, origin='lower')
    colorbar()
    title(str(header))
    xlabel('qz(A^-1)')
    ylabel('qx(A^-1)')
    return graph
def qz_slice(intensity,mins,maxs,q_slice = 0.0,second_intensity = None):
'''
This takes a qz slice from the uncorrected intensity plot and, if the
resolution corrected plot exists, will also show the qz slice for that data
'''
from pylab import semilogy,plot, legend, title, xlabel, ylabel
print shape(intensity)
qz_array = linspace(mins[2],maxs[2],shape(intensity)[1])
z_position = searchsorted(qz_array,q_slice)
graph = plot(log10(intensity[z_position,:].real),
xdata = qz_array,label = 'Uncorrected')
if (second_intensity != None):
plot(log10(second_intensity[z_position,:].real),
xdata = qz_array,label= 'Corrected' )
legend()
title('Qz Slice at '+ str(q_slice))
xlabel('qz(A^-1)')
ylabel('Normalized Intensity')
return graph
def data_compare(intensity_one,intensity_two,mins,maxs):
    '''
    Show two intensity maps on a common log colour scale, plus their
    relative difference, as three stacked subplots.

    NOTE: both input arrays are modified in place (NaNs zeroed and a
    small offset added so log() is finite).
    '''
    from pylab import imshow,colorbar,show, title, xlabel, ylabel,subplot
    plotxmin = mins[0]
    plotxmax = maxs[0]
    plotzmin = mins[-1]
    plotzmax = maxs[-1]
    intensity_one[isnan(intensity_one)] = 0.0
    # BUG FIX: mask NaNs in the second array using its own NaN locations
    # (previously this reused isnan(intensity_one)).
    intensity_two[isnan(intensity_two)] = 0.0
    # Offset both maps by half the smallest positive value of the first
    # map; the shared offset keeps the colour scales comparable.
    # NOTE(review): the second line also keys off intensity_one --
    # presumably intentional for comparability; confirm.
    intensity_one += min(intensity_one[(intensity_one)>0])/2
    intensity_two += min(intensity_one[(intensity_one)>0])/2
    vmin = min(log(intensity_one))
    vmax = max(log(intensity_one))
    plot_extent = (plotxmin,plotxmax,plotzmin,plotzmax)
    subplot(311)
    imshow(log(abs(intensity_one.T)),
           aspect='auto',interpolation='nearest',
           extent=plot_extent,origin='lower',
           vmin = vmin, vmax = vmax)
    colorbar()
    subplot(312)
    imshow(log(abs(intensity_two.T)),
           aspect='auto',interpolation='nearest',
           extent=plot_extent,origin='lower',
           vmin = vmin, vmax = vmax)
    colorbar()
    subplot(313)
    imshow((abs(intensity_one - intensity_two)/intensity_one).T,aspect='auto',
           interpolation='nearest',extent=plot_extent,origin='lower',
           vmin = vmin, vmax = vmax)
    colorbar()
def test():
    '''
    this test was used to fix the plotters
    '''
    # Minimal smoke test: slice a small hand-built 5x5 intensity map.
    intensity = array([[1,2,3,4,5],[5,4,3,2,1],[1,5,2,4,3],[5,1,4,2,3],[5,1,4,2,3]])
    mins = array([-3,-3,-3])
    maxs = array([3,3,3])
    qz_slice(intensity,mins,maxs)
if __name__=="__main__":test()
|
AnotherIvan/calibre | refs/heads/master | src/calibre/ebooks/html/__init__.py | 16 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
def tostring(root, strip_comments=False, pretty_print=False):
    '''
    Serialize processed XHTML.

    Stamps XHTML/xlink namespace declarations onto the root, marks every
    <svg> element with the SVG namespace, and returns the UTF-8
    serialization prefixed with an XML declaration. With strip_comments,
    XML comments are removed from the output.
    '''
    from lxml.etree import tostring as _tostring
    root.set('xmlns', 'http://www.w3.org/1999/xhtml')
    root.set('{http://www.w3.org/1999/xhtml}xlink', 'http://www.w3.org/1999/xlink')
    for elem in root.iter():
        tag = elem.tag
        if hasattr(tag, 'rpartition') and tag.rpartition('}')[-1].lower() == 'svg':
            elem.set('xmlns', 'http://www.w3.org/2000/svg')
    serialized = _tostring(root, encoding='utf-8', pretty_print=pretty_print)
    if strip_comments:
        serialized = re.sub(r'(?s)<!--.*?-->', '', serialized)
    return '<?xml version="1.0" encoding="utf-8" ?>\n' + serialized
|
wenhuizhang/swift | refs/heads/master | test/unit/common/middleware/test_tempurl.py | 2 | # Copyright (c) 2011-2014 Greg Holt
# Copyright (c) 2012-2013 Peter Portante
# Copyright (c) 2012 Iryoung Jeong
# Copyright (c) 2012 Michael Barton
# Copyright (c) 2013 Alex Gaynor
# Copyright (c) 2013 Chuck Thier
# Copyright (c) 2013 David Goetz
# Copyright (c) 2015 Donagh McCabe
# Copyright (c) 2013 Greg Lange
# Copyright (c) 2013 John Dickinson
# Copyright (c) 2013 Kun Huang
# Copyright (c) 2013 Richard Hawkins
# Copyright (c) 2013 Samuel Merritt
# Copyright (c) 2013 Shri Javadekar
# Copyright (c) 2013 Tong Li
# Copyright (c) 2013 ZhiQiang Fan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hmac
import unittest
from hashlib import sha1
from time import time
from swift.common.middleware import tempauth, tempurl
from swift.common.swob import Request, Response, HeaderKeyDict
from swift.common import utils
class FakeApp(object):
    """Minimal WSGI app double that replays canned (status, headers, body)
    responses and records the last request it received."""

    def __init__(self, status_headers_body_iter=None):
        self.calls = 0
        self.request = None
        if status_headers_body_iter:
            self.status_headers_body_iter = status_headers_body_iter
        else:
            # Default: a single 404 carrying a few probe headers.
            default = ('404 Not Found', {
                'x-test-header-one-a': 'value1',
                'x-test-header-two-a': 'value2',
                'x-test-header-two-b': 'value3'}, '')
            self.status_headers_body_iter = iter([default])

    def __call__(self, env, start_response):
        self.calls += 1
        self.request = Request.blank('', environ=env)
        # Honour any authorize callback installed by middleware under test.
        if 'swift.authorize' in env:
            resp = env['swift.authorize'](self.request)
            if resp:
                return resp(env, start_response)
        status, headers, body = next(self.status_headers_body_iter)
        return Response(status=status, headers=headers,
                        body=body)(env, start_response)
class TestTempURL(unittest.TestCase):
    def setUp(self):
        # Middleware stack under test: tempurl -> tempauth -> FakeApp.
        self.app = FakeApp()
        self.auth = tempauth.filter_factory({'reseller_prefix': ''})(self.app)
        self.tempurl = tempurl.filter_factory({})(self.auth)
def _make_request(self, path, environ=None, keys=(), **kwargs):
if environ is None:
environ = {}
_junk, account, _junk, _junk = utils.split_path(path, 2, 4)
self._fake_cache_environ(environ, account, keys)
req = Request.blank(path, environ=environ, **kwargs)
return req
def _fake_cache_environ(self, environ, account, keys):
"""
Fake out the caching layer for get_account_info(). Injects account data
into environ such that keys are the tempurl keys, if set.
"""
meta = {'swash': 'buckle'}
for idx, key in enumerate(keys):
meta_name = 'Temp-URL-key' + (("-%d" % (idx + 1) if idx else ""))
if key:
meta[meta_name] = key
environ['swift.account/' + account] = {
'status': 204,
'container_count': '0',
'total_object_count': '0',
'bytes': '0',
'meta': meta}
container_cache_key = 'swift.container/' + account + '/c'
environ.setdefault(container_cache_key, {'meta': {}})
    def test_passthrough(self):
        """Requests without tempurl params fall through to auth (401)."""
        resp = self._make_request('/v1/a/c/o').get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' not in resp.body)
    def test_allow_options(self):
        """OPTIONS requests pass through even with bogus tempurl params."""
        self.app.status_headers_body_iter = iter([('200 Ok', {}, '')])
        resp = self._make_request(
            '/v1/a/c/o?temp_url_sig=abcde&temp_url_expires=12345',
            environ={'REQUEST_METHOD': 'OPTIONS'}).get_response(self.tempurl)
        self.assertEquals(resp.status_int, 200)
def assert_valid_sig(self, expires, path, keys, sig, environ=None):
if not environ:
environ = {}
environ['QUERY_STRING'] = 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)
req = self._make_request(path, keys=keys, environ=environ)
self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.headers['content-disposition'],
'attachment; filename="o"; ' + "filename*=UTF-8''o")
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_get_valid(self):
        """A correctly signed GET is accepted."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        self.assert_valid_sig(expires, path, [key], sig)
    def test_get_valid_key2(self):
        """Either of two stored account keys validates the signature."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key1 = 'abc123'
        key2 = 'def456'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig1 = hmac.new(key1, hmac_body, sha1).hexdigest()
        sig2 = hmac.new(key2, hmac_body, sha1).hexdigest()
        for sig in (sig1, sig2):
            self.assert_valid_sig(expires, path, [key1, key2], sig)
    def test_get_valid_container_keys(self):
        """Container-level tempurl keys also validate signatures."""
        environ = {}
        # Add two static container keys
        container_keys = ['me', 'other']
        meta = {}
        for idx, key in enumerate(container_keys):
            meta_name = 'Temp-URL-key' + (("-%d" % (idx + 1) if idx else ""))
            if key:
                meta[meta_name] = key
        environ['swift.container/a/c'] = {'meta': meta}
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key1 = 'me'
        key2 = 'other'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig1 = hmac.new(key1, hmac_body, sha1).hexdigest()
        sig2 = hmac.new(key2, hmac_body, sha1).hexdigest()
        account_keys = []
        for sig in (sig1, sig2):
            self.assert_valid_sig(expires, path, account_keys, sig, environ)
    def test_get_valid_with_filename(self):
        """filename= query param overrides the disposition filename."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path, keys=[key], environ={
            'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
            'filename=bob%%20%%22killer%%22.txt' % (sig, expires)})
        self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.headers['content-disposition'],
                          'attachment; filename="bob %22killer%22.txt"; ' +
                          "filename*=UTF-8''bob%20%22killer%22.txt")
        self.assertEquals(req.environ['swift.authorize_override'], True)
        self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_head_valid(self):
        """A correctly signed HEAD is accepted."""
        method = 'HEAD'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path, keys=[key], environ={
            'REQUEST_METHOD': 'HEAD',
            'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s'
            % (sig, expires)})
        self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 200)
    def test_get_valid_with_filename_and_inline(self):
        """inline= together with filename= yields a bare inline disposition."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path, keys=[key], environ={
            'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
            'filename=bob%%20%%22killer%%22.txt&inline=' % (sig, expires)})
        self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.headers['content-disposition'], 'inline')
        self.assertEquals(req.environ['swift.authorize_override'], True)
        self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_get_valid_with_inline(self):
        """inline= alone yields an inline disposition."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path, keys=[key], environ={
            'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
            'inline=' % (sig, expires)})
        self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.headers['content-disposition'], 'inline')
        self.assertEquals(req.environ['swift.authorize_override'], True)
        self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_obj_odd_chars(self):
        """CR/LF in the object name are percent-encoded in the disposition."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/a\r\nb'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path, keys=[key], environ={
            'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
                sig, expires)})
        self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.headers['content-disposition'],
                          'attachment; filename="a%0D%0Ab"; ' +
                          "filename*=UTF-8''a%0D%0Ab")
        self.assertEquals(req.environ['swift.authorize_override'], True)
        self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_obj_odd_chars_in_content_disposition_metadata(self):
        """Newlines in stored content-disposition metadata are sanitized."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path, keys=[key], environ={
            'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
                sig, expires)})
        headers = [('Content-Disposition', 'attachment; filename="fu\nbar"')]
        self.tempurl.app = FakeApp(iter([('200 Ok', headers, '123')]))
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.headers['content-disposition'],
                          'attachment; filename="fu%0Abar"')
        self.assertEquals(req.environ['swift.authorize_override'], True)
        self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_obj_trailing_slash(self):
        """A trailing slash in the object name still yields filename "o"."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o/'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path, keys=[key], environ={
            'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
                sig, expires)})
        self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(resp.headers['content-disposition'],
                          'attachment; filename="o"; ' +
                          "filename*=UTF-8''o")
        self.assertEquals(req.environ['swift.authorize_override'], True)
        self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_filename_trailing_slash(self):
        """An explicit filename= with a trailing slash is preserved as-is."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(path, keys=[key], environ={
            'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
            'filename=/i/want/this/just/as/it/is/' % (sig, expires)})
        self.tempurl.app = FakeApp(iter([('200 Ok', (), '123')]))
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 200)
        self.assertEquals(
            resp.headers['content-disposition'],
            'attachment; filename="/i/want/this/just/as/it/is/"; ' +
            "filename*=UTF-8''/i/want/this/just/as/it/is/")
        self.assertEquals(req.environ['swift.authorize_override'], True)
        self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_get_valid_but_404(self):
        """A valid signature still returns the backend 404, no disposition."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(
            path, keys=[key],
            environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
                sig, expires)})
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 404)
        self.assertFalse('content-disposition' in resp.headers)
        self.assertEquals(req.environ['swift.authorize_override'], True)
        self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_put_not_allowed_by_get(self):
        """A GET-scoped signature cannot authorize a PUT."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(
            path, keys=[key],
            environ={'REQUEST_METHOD': 'PUT',
                     'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
                         sig, expires)})
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
        self.assertTrue('Www-Authenticate' in resp.headers)
    def test_put_valid(self):
        """A correctly signed PUT is accepted (backend still 404s)."""
        method = 'PUT'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(
            path, keys=[key],
            environ={'REQUEST_METHOD': 'PUT',
                     'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
                         sig, expires)})
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 404)
        self.assertEquals(req.environ['swift.authorize_override'], True)
        self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_get_not_allowed_by_put(self):
        """A PUT-scoped signature cannot authorize a GET."""
        method = 'PUT'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(
            path, keys=[key],
            environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
                sig, expires)})
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
        self.assertTrue('Www-Authenticate' in resp.headers)
    def test_missing_sig(self):
        """Omitting temp_url_sig is rejected with 401."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        # Signature computed but deliberately not sent.
        hmac.new(key, hmac_body, sha1).hexdigest()
        req = self._make_request(
            path, keys=[key],
            environ={'QUERY_STRING': 'temp_url_expires=%s' % expires})
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
        self.assertTrue('Www-Authenticate' in resp.headers)
def test_missing_expires(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s' % sig})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers)
def test_bad_path(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers)
    def test_no_key(self):
        """With no keys configured for the account, every temp URL is
        rejected, even though the signature was made with a plausible key."""
        method = 'GET'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        # keys=[] simulates an account with no X-Account-Meta-Temp-URL-Key.
        req = self._make_request(
            path, keys=[],
            environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
                sig, expires)})
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        self.assertTrue('Temp URL invalid' in resp.body)
        self.assertTrue('Www-Authenticate' in resp.headers)
def test_head_allowed_by_get(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_head_allowed_by_put(self):
method = 'PUT'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
def test_head_allowed_by_post(self):
method = 'POST'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'HEAD',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertEquals(req.environ['swift.authorize_override'], True)
self.assertEquals(req.environ['REMOTE_USER'], '.wsgi.tempurl')
    def test_head_otherwise_not_allowed(self):
        """HEAD is only accepted when the signature itself validates;
        an invalidated signature still yields 401 for HEAD."""
        method = 'PUT'
        expires = int(time() + 86400)
        path = '/v1/a/c/o'
        key = 'abc'
        hmac_body = '%s\n%s\n%s' % (method, expires, path)
        sig = hmac.new(key, hmac_body, sha1).hexdigest()
        # Deliberately fudge expires to show HEADs aren't just automatically
        # allowed.
        expires += 1
        req = self._make_request(
            path, keys=[key],
            environ={'REQUEST_METHOD': 'HEAD',
                     'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
                         sig, expires)})
        resp = req.get_response(self.tempurl)
        self.assertEquals(resp.status_int, 401)
        # Only the auth challenge header is asserted here; HEAD responses
        # are not expected to carry the error body.
        self.assertTrue('Www-Authenticate' in resp.headers)
def test_post_when_forbidden_by_config(self):
self.tempurl.methods.remove('POST')
method = 'POST'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'POST',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers)
def test_delete_when_forbidden_by_config(self):
self.tempurl.methods.remove('DELETE')
method = 'DELETE'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'DELETE',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers)
def test_delete_allowed(self):
method = 'DELETE'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'DELETE',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
def test_unknown_not_allowed(self):
method = 'UNKNOWN'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'REQUEST_METHOD': 'UNKNOWN',
'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers)
def test_changed_path_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path + '2', keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers)
def test_changed_sig_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
if sig[-1] != '0':
sig = sig[:-1] + '0'
else:
sig = sig[:-1] + '1'
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers)
def test_changed_expires_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires + 1)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers)
def test_different_key_invalid(self):
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key + '2'],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers)
def test_removed_incoming_header(self):
self.tempurl = tempurl.filter_factory({
'incoming_remove_headers': 'x-remove-this'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
headers={'x-remove-this': 'value'},
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertTrue('x-remove-this' not in self.app.request.headers)
def test_removed_incoming_headers_match(self):
self.tempurl = tempurl.filter_factory({
'incoming_remove_headers': 'x-remove-this-*',
'incoming_allow_headers': 'x-remove-this-except-this'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
headers={'x-remove-this-one': 'value1',
'x-remove-this-except-this': 'value2'},
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertTrue('x-remove-this-one' not in self.app.request.headers)
self.assertEquals(
self.app.request.headers['x-remove-this-except-this'], 'value2')
def test_allow_trumps_incoming_header_conflict(self):
self.tempurl = tempurl.filter_factory({
'incoming_remove_headers': 'x-conflict-header',
'incoming_allow_headers': 'x-conflict-header'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
headers={'x-conflict-header': 'value'},
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertTrue('x-conflict-header' in self.app.request.headers)
def test_allow_trumps_incoming_header_startswith_conflict(self):
self.tempurl = tempurl.filter_factory({
'incoming_remove_headers': 'x-conflict-header-*',
'incoming_allow_headers': 'x-conflict-header-*'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
headers={'x-conflict-header-test': 'value'},
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertTrue('x-conflict-header-test' in self.app.request.headers)
def test_removed_outgoing_header(self):
self.tempurl = tempurl.filter_factory({
'outgoing_remove_headers': 'x-test-header-one-a'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertTrue('x-test-header-one-a' not in resp.headers)
self.assertEquals(resp.headers['x-test-header-two-a'], 'value2')
def test_removed_outgoing_headers_match(self):
self.tempurl = tempurl.filter_factory({
'outgoing_remove_headers': 'x-test-header-two-*',
'outgoing_allow_headers': 'x-test-header-two-b'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 404)
self.assertEquals(resp.headers['x-test-header-one-a'], 'value1')
self.assertTrue('x-test-header-two-a' not in resp.headers)
self.assertEquals(resp.headers['x-test-header-two-b'], 'value3')
def test_allow_trumps_outgoing_header_conflict(self):
self.tempurl = tempurl.filter_factory({
'outgoing_remove_headers': 'x-conflict-header',
'outgoing_allow_headers': 'x-conflict-header'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
headers={},
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', {
'X-Conflict-Header': 'value'}, '123')]))
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 200)
self.assertTrue('x-conflict-header' in resp.headers)
self.assertEqual(resp.headers['x-conflict-header'], 'value')
def test_allow_trumps_outgoing_header_startswith_conflict(self):
self.tempurl = tempurl.filter_factory({
'outgoing_remove_headers': 'x-conflict-header-*',
'outgoing_allow_headers': 'x-conflict-header-*'})(self.auth)
method = 'GET'
expires = int(time() + 86400)
path = '/v1/a/c/o'
key = 'abc'
hmac_body = '%s\n%s\n%s' % (method, expires, path)
sig = hmac.new(key, hmac_body, sha1).hexdigest()
req = self._make_request(
path, keys=[key],
headers={},
environ={'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
sig, expires)})
self.tempurl.app = FakeApp(iter([('200 Ok', {
'X-Conflict-Header-Test': 'value'}, '123')]))
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 200)
self.assertTrue('x-conflict-header-test' in resp.headers)
self.assertEqual(resp.headers['x-conflict-header-test'], 'value')
def test_get_account(self):
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'HEAD', 'PATH_INFO': '/v1/a/c/o'}), 'a')
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/o'}), 'a')
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'PUT', 'PATH_INFO': '/v1/a/c/o'}), 'a')
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'POST', 'PATH_INFO': '/v1/a/c/o'}), 'a')
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'DELETE', 'PATH_INFO': '/v1/a/c/o'}), 'a')
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'UNKNOWN', 'PATH_INFO': '/v1/a/c/o'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c//////'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c///o///'}), 'a')
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a//o'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1//c/o'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '//a/c/o'}), None)
self.assertEquals(self.tempurl._get_account({
'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v2/a/c/o'}), None)
def test_get_temp_url_info(self):
s = 'f5d5051bddf5df7e27c628818738334f'
e = int(time() + 86400)
self.assertEquals(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
s, e)}),
(s, e, None, None))
self.assertEquals(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=bobisyouruncle' % (s, e)}),
(s, e, 'bobisyouruncle', None))
self.assertEquals(
self.tempurl._get_temp_url_info({}),
(None, None, None, None))
self.assertEquals(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_expires=%s' % e}),
(None, e, None, None))
self.assertEquals(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s' % s}),
(s, None, None, None))
self.assertEquals(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=bad' % (
s)}),
(s, 0, None, None))
self.assertEquals(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'inline=' % (s, e)}),
(s, e, None, True))
self.assertEquals(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s&'
'filename=bobisyouruncle&inline=' % (s, e)}),
(s, e, 'bobisyouruncle', True))
e = int(time() - 1)
self.assertEquals(
self.tempurl._get_temp_url_info(
{'QUERY_STRING': 'temp_url_sig=%s&temp_url_expires=%s' % (
s, e)}),
(s, 0, None, None))
    def test_get_hmacs(self):
        """_get_hmacs produces one digest per key; the optional
        request_method argument overrides the environ's method (the HEAD
        request signed "as GET" yields the same digest as a real GET)."""
        self.assertEquals(
            self.tempurl._get_hmacs(
                {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/v1/a/c/o'},
                1, ['abc']),
            ['026d7f7cc25256450423c7ad03fc9f5ffc1dab6d'])
        self.assertEquals(
            self.tempurl._get_hmacs(
                {'REQUEST_METHOD': 'HEAD', 'PATH_INFO': '/v1/a/c/o'},
                1, ['abc'], request_method='GET'),
            ['026d7f7cc25256450423c7ad03fc9f5ffc1dab6d'])
def test_invalid(self):
def _start_response(status, headers, exc_info=None):
self.assertTrue(status, '401 Unauthorized')
self.assertTrue('Temp URL invalid' in ''.join(
self.tempurl._invalid({'REQUEST_METHOD': 'GET'},
_start_response)))
self.assertEquals('', ''.join(
self.tempurl._invalid({'REQUEST_METHOD': 'HEAD'},
_start_response)))
def test_auth_scheme_value(self):
# Passthrough
environ = {}
resp = self._make_request('/v1/a/c/o', environ=environ).get_response(
self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' not in resp.body)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertTrue('swift.auth_scheme' not in environ)
# Rejected by TempURL
req = self._make_request('/v1/a/c/o', keys=['abc'],
environ={'REQUEST_METHOD': 'PUT',
'QUERY_STRING':
'temp_url_sig=dummy&temp_url_expires=1234'})
resp = req.get_response(self.tempurl)
self.assertEquals(resp.status_int, 401)
self.assertTrue('Temp URL invalid' in resp.body)
self.assert_('Www-Authenticate' in resp.headers)
def test_clean_incoming_headers(self):
irh = ''
iah = ''
env = {'HTTP_TEST_HEADER': 'value'}
tempurl.TempURL(
None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah}
)._clean_incoming_headers(env)
self.assertTrue('HTTP_TEST_HEADER' in env)
irh = 'test-header'
iah = ''
env = {'HTTP_TEST_HEADER': 'value'}
tempurl.TempURL(
None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah}
)._clean_incoming_headers(env)
self.assertTrue('HTTP_TEST_HEADER' not in env)
irh = 'test-header-*'
iah = ''
env = {'HTTP_TEST_HEADER_ONE': 'value',
'HTTP_TEST_HEADER_TWO': 'value'}
tempurl.TempURL(
None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah}
)._clean_incoming_headers(env)
self.assertTrue('HTTP_TEST_HEADER_ONE' not in env)
self.assertTrue('HTTP_TEST_HEADER_TWO' not in env)
irh = 'test-header-*'
iah = 'test-header-two'
env = {'HTTP_TEST_HEADER_ONE': 'value',
'HTTP_TEST_HEADER_TWO': 'value'}
tempurl.TempURL(
None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah}
)._clean_incoming_headers(env)
self.assertTrue('HTTP_TEST_HEADER_ONE' not in env)
self.assertTrue('HTTP_TEST_HEADER_TWO' in env)
irh = 'test-header-* test-other-header'
iah = 'test-header-two test-header-yes-*'
env = {'HTTP_TEST_HEADER_ONE': 'value',
'HTTP_TEST_HEADER_TWO': 'value',
'HTTP_TEST_OTHER_HEADER': 'value',
'HTTP_TEST_HEADER_YES': 'value',
'HTTP_TEST_HEADER_YES_THIS': 'value'}
tempurl.TempURL(
None, {'incoming_remove_headers': irh,
'incoming_allow_headers': iah}
)._clean_incoming_headers(env)
self.assertTrue('HTTP_TEST_HEADER_ONE' not in env)
self.assertTrue('HTTP_TEST_HEADER_TWO' in env)
self.assertTrue('HTTP_TEST_OTHER_HEADER' not in env)
self.assertTrue('HTTP_TEST_HEADER_YES' not in env)
self.assertTrue('HTTP_TEST_HEADER_YES_THIS' in env)
def test_clean_outgoing_headers(self):
orh = ''
oah = ''
hdrs = {'test-header': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(
None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.items()))
self.assertTrue('test-header' in hdrs)
orh = 'test-header'
oah = ''
hdrs = {'test-header': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(
None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.items()))
self.assertTrue('test-header' not in hdrs)
orh = 'test-header-*'
oah = ''
hdrs = {'test-header-one': 'value',
'test-header-two': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(
None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.items()))
self.assertTrue('test-header-one' not in hdrs)
self.assertTrue('test-header-two' not in hdrs)
orh = 'test-header-*'
oah = 'test-header-two'
hdrs = {'test-header-one': 'value',
'test-header-two': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(
None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.items()))
self.assertTrue('test-header-one' not in hdrs)
self.assertTrue('test-header-two' in hdrs)
orh = 'test-header-* test-other-header'
oah = 'test-header-two test-header-yes-*'
hdrs = {'test-header-one': 'value',
'test-header-two': 'value',
'test-other-header': 'value',
'test-header-yes': 'value',
'test-header-yes-this': 'value'}
hdrs = HeaderKeyDict(tempurl.TempURL(
None,
{'outgoing_remove_headers': orh, 'outgoing_allow_headers': oah}
)._clean_outgoing_headers(hdrs.items()))
self.assertTrue('test-header-one' not in hdrs)
self.assertTrue('test-header-two' in hdrs)
self.assertTrue('test-other-header' not in hdrs)
self.assertTrue('test-header-yes' not in hdrs)
self.assertTrue('test-header-yes-this' in hdrs)
def test_unicode_metadata_value(self):
meta = {"temp-url-key": "test", "temp-url-key-2": u"test2"}
results = tempurl.get_tempurl_keys_from_metadata(meta)
for str_value in results:
self.assertTrue(isinstance(str_value, str))
class TestSwiftInfo(unittest.TestCase):
    """Verify that the tempurl filter registers itself in the /info data."""

    def setUp(self):
        # Reset the module-global registries so each test starts clean.
        utils._swift_info = {}
        utils._swift_admin_info = {}

    def test_registered_defaults(self):
        """Default construction advertises the default method set."""
        tempurl.filter_factory({})
        swift_info = utils.get_swift_info()
        # assertIn reports the actual dict on failure, unlike
        # assertTrue('tempurl' in swift_info).
        self.assertIn('tempurl', swift_info)
        self.assertEqual(set(swift_info['tempurl']['methods']),
                         set(('GET', 'HEAD', 'PUT', 'POST', 'DELETE')))

    def test_non_default_methods(self):
        """A configured 'methods' string overrides the advertised set."""
        tempurl.filter_factory({'methods': 'GET HEAD PUT DELETE BREW'})
        swift_info = utils.get_swift_info()
        self.assertIn('tempurl', swift_info)
        self.assertEqual(set(swift_info['tempurl']['methods']),
                         set(('GET', 'HEAD', 'PUT', 'DELETE', 'BREW')))
if __name__ == '__main__':
unittest.main()
|
hatwar/buyback-frappe | refs/heads/master | frappe/templates/pages/message.py | 40 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import strip_html_tags
no_cache = 1
no_sitemap = 1
def get_context(context):
    """Assemble the message-page template context from any message state
    previously stashed on ``frappe.local``."""
    out = {}
    local = frappe.local
    if hasattr(local, "message"):
        out["header"] = local.message_title
        out["title"] = strip_html_tags(local.message_title)
        out["message"] = local.message
    if hasattr(local, "message_success"):
        out["success"] = local.message_success
    return out
|
jc0n/scrapy | refs/heads/master | scrapy/mail.py | 11 | """
Mail sending helpers
See documentation in docs/topics/email.rst
"""
import logging
from six.moves import cStringIO as StringIO
import six
from email.utils import COMMASPACE, formatdate
from six.moves.email_mime_multipart import MIMEMultipart
from six.moves.email_mime_text import MIMEText
from six.moves.email_mime_base import MIMEBase
if six.PY2:
from email.MIMENonMultipart import MIMENonMultipart
from email import Encoders
else:
from email.mime.nonmultipart import MIMENonMultipart
from email import encoders as Encoders
from twisted.internet import defer, reactor, ssl
logger = logging.getLogger(__name__)
class MailSender(object):
    """Send email, optionally with attachments, via Twisted SMTP.

    Instances are normally built from Scrapy settings with ``from_settings``.
    ``send()`` returns a Deferred for the delivery (or ``None`` in debug
    mode, where nothing is actually transmitted).
    """
    def __init__(self, smtphost='localhost', mailfrom='scrapy@localhost',
            smtpuser=None, smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False):
        # Connection / authentication parameters for the SMTP server.
        self.smtphost = smtphost
        self.smtpport = smtpport
        self.smtpuser = smtpuser
        self.smtppass = smtppass
        self.smtptls = smtptls
        self.smtpssl = smtpssl
        self.mailfrom = mailfrom
        # When True, send() only logs the message instead of delivering it.
        self.debug = debug
    @classmethod
    def from_settings(cls, settings):
        # Alternate constructor: read connection parameters from the
        # MAIL_* Scrapy settings.
        return cls(settings['MAIL_HOST'], settings['MAIL_FROM'], settings['MAIL_USER'],
                   settings['MAIL_PASS'], settings.getint('MAIL_PORT'),
                   settings.getbool('MAIL_TLS'), settings.getbool('MAIL_SSL'))
    def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', charset=None, _callback=None):
        """Compose a MIME message and hand it off for asynchronous delivery.

        ``to``/``cc`` are lists of addresses; ``attachs`` is an iterable of
        ``(name, mimetype, file-like)`` triples; ``_callback`` is a hook
        invoked with the built message (used by tests). Returns a Deferred,
        or ``None`` when in debug mode.
        """
        # Use a multipart container only when there are attachments.
        if attachs:
            msg = MIMEMultipart()
        else:
            msg = MIMENonMultipart(*mimetype.split('/', 1))
        msg['From'] = self.mailfrom
        msg['To'] = COMMASPACE.join(to)
        msg['Date'] = formatdate(localtime=True)
        msg['Subject'] = subject
        # Actual SMTP recipients: To plus any Cc addresses.
        rcpts = to[:]
        if cc:
            rcpts.extend(cc)
            msg['Cc'] = COMMASPACE.join(cc)
        if charset:
            msg.set_charset(charset)
        if attachs:
            # Body first, then each attachment base64-encoded.
            msg.attach(MIMEText(body, 'plain', charset or 'us-ascii'))
            for attach_name, mimetype, f in attachs:
                part = MIMEBase(*mimetype.split('/'))
                part.set_payload(f.read())
                Encoders.encode_base64(part)
                part.add_header('Content-Disposition', 'attachment; filename="%s"' \
                    % attach_name)
                msg.attach(part)
        else:
            msg.set_payload(body)
        if _callback:
            _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)
        if self.debug:
            # Debug mode: log and bail out before any network activity.
            logger.debug('Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
                'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
                {'mailto': to, 'mailcc': cc, 'mailsubject': subject,
                'mailattachs': len(attachs)})
            return
        dfd = self._sendmail(rcpts, msg.as_string())
        dfd.addCallbacks(self._sent_ok, self._sent_failed,
            callbackArgs=[to, cc, subject, len(attachs)],
            errbackArgs=[to, cc, subject, len(attachs)])
        # Returning the deferred from a 'before shutdown' trigger makes the
        # reactor wait for the pending delivery before shutting down.
        reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)
        return dfd
    def _sent_ok(self, result, to, cc, subject, nattachs):
        # Success callback: just log the delivery.
        logger.info('Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
                    'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
                    {'mailto': to, 'mailcc': cc, 'mailsubject': subject,
                     'mailattachs': nattachs})
    def _sent_failed(self, failure, to, cc, subject, nattachs):
        # Errback: log the reason carried by the Twisted Failure object.
        errstr = str(failure.value)
        logger.error('Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '
                     'Subject="%(mailsubject)s" Attachs=%(mailattachs)d'
                     '- %(mailerr)s',
                     {'mailto': to, 'mailcc': cc, 'mailsubject': subject,
                      'mailattachs': nattachs, 'mailerr': errstr})
    def _sendmail(self, to_addrs, msg):
        # Import twisted.mail here because it is not available in python3
        from twisted.mail.smtp import ESMTPSenderFactory
        msg = StringIO(msg)
        d = defer.Deferred()
        factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, \
            to_addrs, msg, d, heloFallback=True, requireAuthentication=False, \
            requireTransportSecurity=self.smtptls)
        factory.noisy = False
        # SSL wraps the whole connection; TLS (above) is negotiated in-band.
        if self.smtpssl:
            reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())
        else:
            reactor.connectTCP(self.smtphost, self.smtpport, factory)
        return d
|
meabsence/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/idlelib/textView.py | 45 | """Simple text browser for IDLE
"""
from tkinter import *
import tkinter.messagebox as tkMessageBox
class TextViewer(Toplevel):
    """A simple text viewer dialog for IDLE
    """
    def __init__(self, parent, title, text):
        """Show the given text in a scrollable window with a 'close' button

        The dialog is modal: it grabs input and blocks in wait_window()
        until dismissed.
        """
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        # Fixed 625x500 geometry, offset slightly from the parent window.
        self.geometry("=%dx%d+%d+%d" % (625, 500,
                                        parent.winfo_rootx() + 10,
                                        parent.winfo_rooty() + 10))
        #elguavas - config placeholders til config stuff completed
        self.bg = '#ffffff'  # text background color
        self.fg = '#000000'  # text foreground color
        self.CreateWidgets()
        self.title(title)
        # Modal behaviour: tie to the parent, grab input, and route the
        # window-manager close button through Ok().
        self.transient(parent)
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.Ok)
        self.parent = parent
        self.textView.focus_set()
        #key bindings for this dialog
        self.bind('<Return>',self.Ok) #dismiss dialog
        self.bind('<Escape>',self.Ok) #dismiss dialog
        self.textView.insert(0.0, text)
        # Make the widget read-only after inserting the contents.
        self.textView.config(state=DISABLED)
        self.wait_window()
    def CreateWidgets(self):
        """Build the scrollable text area and the Close button."""
        frameText = Frame(self, relief=SUNKEN, height=700)
        frameButtons = Frame(self)
        self.buttonOk = Button(frameButtons, text='Close',
                               command=self.Ok, takefocus=FALSE)
        self.scrollbarView = Scrollbar(frameText, orient=VERTICAL,
                                       takefocus=FALSE, highlightthickness=0)
        self.textView = Text(frameText, wrap=WORD, highlightthickness=0,
                             fg=self.fg, bg=self.bg)
        # Wire the scrollbar and the text widget to each other.
        self.scrollbarView.config(command=self.textView.yview)
        self.textView.config(yscrollcommand=self.scrollbarView.set)
        self.buttonOk.pack()
        self.scrollbarView.pack(side=RIGHT,fill=Y)
        self.textView.pack(side=LEFT,expand=TRUE,fill=BOTH)
        frameButtons.pack(side=BOTTOM,fill=X)
        frameText.pack(side=TOP,expand=TRUE,fill=BOTH)
    def Ok(self, event=None):
        """Dismiss the dialog (bound to Return/Escape and window close)."""
        self.destroy()
def view_text(parent, title, text):
    """Open a modal TextViewer dialog displaying *text*."""
    TextViewer(parent, title, text)
def view_file(parent, title, filename, encoding=None):
    """Read *filename* and display its contents in a TextViewer dialog.

    On read failure an error box is shown and None is returned. The
    redundant function-local ``import tkinter.messagebox`` has been
    dropped — the module already imports it as ``tkMessageBox`` at the top.
    """
    try:
        with open(filename, 'r', encoding=encoding) as file:
            contents = file.read()
    except IOError:
        tkMessageBox.showerror(title='File Load Error',
                               message='Unable to load file %r .' % filename,
                               parent=parent)
    else:
        return view_text(parent, title, contents)
if __name__ == '__main__':
    # Manual test of the dialog classes.
    root = Tk()
    root.title('textView test')
    filename = './textView.py'
    # Bug fix: the old code called the Python 2 builtin file(), which no
    # longer exists in Python 3 (this module imports tkinter, so it runs
    # under Python 3). Use open() in a context manager instead.
    with open(filename, 'r') as f:
        text = f.read()
    btn1 = Button(root, text='view_text',
                  command=lambda: view_text(root, 'view_text', text))
    btn1.pack(side=LEFT)
    btn2 = Button(root, text='view_file',
                  command=lambda: view_file(root, 'view_file', filename))
    btn2.pack(side=LEFT)
    close = Button(root, text='Close', command=root.destroy)
    close.pack(side=RIGHT)
    root.mainloop()
|
Gordon01/vanilla | refs/heads/master | update-translations.py | 7 | #!/usr/bin/python
# Fetch the translations from http://crowdin.net/project/vanilla-music and save
# them to res/ in the current directory. Removes files that contain no
# translations and removes incomplete plurals (they cause crashes).
#
# This script does not force crowdin to rebuild the translation package. That
# should be done through the website before running this script.
#
# Requires python-lxml.
# urlopen moved between Python 2 and 3; import whichever is available.
try:
    # python 3
    from urllib.request import urlopen
except ImportError:
    # python 2
    from urllib2 import urlopen
from lxml import etree
from zipfile import ZipFile
import io
import os
# Download the whole translation bundle and open the archive in memory.
data = urlopen('http://crowdin.net/download/project/vanilla-music.zip')
ar = ZipFile(io.BytesIO(data.read()))
# remove_blank_text lets pretty_print re-indent the XML cleanly on output.
parser = etree.XMLParser(remove_blank_text=True)
for name in ar.namelist():
    # ignore directories
    if not name.endswith('translatable.xml'):
        continue
    doc = etree.parse(ar.open(name), parser)
    # remove plurals without "other" quantity (they cause crashes)
    for e in doc.xpath("//plurals[not(item/@quantity='other')]"):
        e.getparent().remove(e)
    # ignore languages with no translations
    if len(doc.getroot()) == 0:
        continue
    # make some translations more general
    lang = name.split('/')[0]  # archive paths start with the locale code
    if lang == 'es-ES':
        lang = 'es'
    # The Android convention seems to be to put pt-PT in values-pt-rPT and
    # put pt-BR in values-pt. But since we have no pt-BR translation yet,
    # I'm just putting pt-PT in values-pt for now. Hopefully this doesn't
    # cause any issues.
    elif lang == 'pt-PT':
        lang = 'pt'
    # create dir if needed (assume res/ exists already)
    path = 'res/values-' + lang
    if not (os.path.isdir(path)):
        os.mkdir(path)
    # save result
    with io.open(path + "/translatable.xml", "w") as file:
        file.write(etree.tostring(doc, encoding='unicode', pretty_print=True, doctype='<?xml version="1.0" encoding="utf-8"?>'))
|
bwall/bamfdetect | refs/heads/master | BAMF_Detect/modules/cythosia.py | 1 | from common import Modules, data_strings_wide, load_yara_rules, PEParseModule, ModuleMetadata
class Cythosia(PEParseModule):
    """Detection module for the Cythosia DDoS bot."""

    def __init__(self):
        meta = ModuleMetadata(
            module_name="cythosia",
            bot_name="Cythosia",
            description="DDoS Bot",
            authors=["Brian Wallace (@botnet_hunter)"],
            version="1.0.0",
            date="March 21, 2015",
            references=[]
        )
        PEParseModule.__init__(self, meta)
        self.yara_rules = None

    def _generate_yara_rules(self):
        """Load the module's yara ruleset once and cache it."""
        if self.yara_rules is None:
            self.yara_rules = load_yara_rules("cythosia.yara")
        return self.yara_rules

    def get_bot_information(self, file_data):
        """Collect candidate C2 URIs from the sample's wide strings."""
        c2s = {s for s in data_strings_wide(file_data)
               if s.startswith("http://") and s != "http://"}
        results = {}
        if c2s:
            results["c2s"] = [{"c2_uri": c2} for c2 in c2s]
        return results
Modules.list.append(Cythosia()) |
olexiim/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/modulestore/store_utilities.py | 19 | import re
import logging
from collections import namedtuple
import uuid
def _prefix_only_url_replace_regex(pattern):
    """
    Match urls in quotes pulling out the fields from pattern
    """
    # Builds a verbose-mode regex that matches `pattern` wrapped in
    # (optionally backslash-escaped) single or double quotes; the closing
    # quote must be the same character as the opening one.
    # NOTE: the `ur` string prefix makes this module Python 2 only.
    return re.compile(ur"""
        (?x) # flags=re.VERBOSE
        (?P<quote>\\?['"]) # the opening quotes
        {}
        (?P=quote) # the first matching closing quote
        """.format(pattern))
def rewrite_nonportable_content_links(source_course_id, dest_course_id, text):
    """
    rewrite any non-portable links to (->) relative links:
         /c4x/<org>/<course>/asset/<name> -> /static/<name>
         /jump_to/i4x://<org>/<course>/<category>/<name> -> /jump_to_id/<id>
    """
    # Substitution callbacks: both preserve the original surrounding quote
    # character and replace only the path portion of the match.
    def portable_asset_link_subtitution(match):
        quote = match.group('quote')
        block_id = match.group('block_id')
        return quote + '/static/' + block_id + quote

    def portable_jump_to_link_substitution(match):
        quote = match.group('quote')
        rest = match.group('block_id')
        return quote + '/jump_to_id/' + rest + quote

    # if something blows up, log the error and continue
    # create a serialized template for what the id will look like in the source_course but with
    # the block_id as a regex pattern
    # A random hex placeholder is serialized through the real key class and
    # then swapped for a named capture group, so the pattern exactly matches
    # whatever string form the key serializes to.
    placeholder_id = uuid.uuid4().hex
    asset_block_pattern = unicode(source_course_id.make_asset_key('asset', placeholder_id))
    asset_block_pattern = asset_block_pattern.replace(placeholder_id, r'(?P<block_id>.*?)')
    try:
        text = _prefix_only_url_replace_regex(asset_block_pattern).sub(portable_asset_link_subtitution, text)
    except Exception as exc:  # pylint: disable=broad-except
        logging.warning("Error producing regex substitution %r for text = %r.\n\nError msg = %s", asset_block_pattern, text, str(exc))

    # Same trick for usage keys: a placeholder category and id are replaced
    # by capture groups before building the /jump_to/ pattern.
    placeholder_category = 'cat_{}'.format(uuid.uuid4().hex)
    usage_block_pattern = unicode(source_course_id.make_usage_key(placeholder_category, placeholder_id))
    usage_block_pattern = usage_block_pattern.replace(placeholder_category, r'(?P<category>[^/+@]+)')
    usage_block_pattern = usage_block_pattern.replace(placeholder_id, r'(?P<block_id>.*?)')
    jump_to_link_base = ur'/courses/{course_key_string}/jump_to/{usage_key_string}'.format(
        course_key_string=unicode(source_course_id), usage_key_string=usage_block_pattern
    )
    try:
        text = _prefix_only_url_replace_regex(jump_to_link_base).sub(portable_jump_to_link_substitution, text)
    except Exception as exc:  # pylint: disable=broad-except
        logging.warning("Error producing regex substitution %r for text = %r.\n\nError msg = %s", jump_to_link_base, text, str(exc))

    # Also, there commonly is a set of link URL's used in the format:
    # /courses/<org>/<course>/<name> which will be broken if migrated to a different course_id
    # so let's rewrite those, but the target will also be non-portable,
    #
    # Note: we only need to do this if we are changing course-id's
    #
    if source_course_id != dest_course_id:
        try:
            generic_courseware_link_base = u'/courses/{}/'.format(source_course_id.to_deprecated_string())
            text = re.sub(_prefix_only_url_replace_regex(generic_courseware_link_base), portable_asset_link_subtitution, text)
        except Exception as exc:  # pylint: disable=broad-except
            logging.warning("Error producing regex substitution %r for text = %r.\n\nError msg = %s", source_course_id, text, str(exc))

    return text
# The namedtuple type is defined once at module level instead of being
# re-created on every call: it is cheaper and ensures the nodes from
# different calls share one type.
_DraftNode = namedtuple(
    'draft_node',
    ['module', 'location', 'url', 'parent_location', 'parent_url', 'index']
)


def draft_node_constructor(module, url, parent_url, location=None, parent_location=None, index=None):
    """
    Constructs a draft_node namedtuple with defaults.

    Note the field order of the resulting tuple is
    (module, location, url, parent_location, parent_url, index).
    """
    return _DraftNode(module, location, url, parent_location, parent_url, index)
def get_draft_subtree_roots(draft_nodes):
    """
    Takes a list of draft_nodes, which are namedtuples, each of which identify
    itself and its parent.

    If a draft_node is in `draft_nodes`, then we expect for all its children
    should be in `draft_nodes` as well. Since `_import_draft` is recursive,
    we only want to import the roots of any draft subtrees contained in
    `draft_nodes`.

    This generator yields those roots.
    """
    # Use a set for O(1) membership tests (was a list, giving O(n^2) overall).
    urls = set(draft_node.url for draft_node in draft_nodes)
    for draft_node in draft_nodes:
        # A node whose parent is not in the collection is a subtree root.
        if draft_node.parent_url not in urls:
            yield draft_node
|
urda/mrbutler-bot | refs/heads/master | tests/unit/mrb/test_versioning.py | 1 | """
Copyright 2017 Peter Urda
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from unittest import TestCase
from unittest.mock import MagicMock
from discord import (
Message,
User,
)
from mrb import versioning
from mrb_common.commanding import (
CommandResult,
ResponseType,
)
class TestGetVersion(TestCase):
    def test_type(self):
        """Verify that a type of string is returned"""
        version = versioning.get_version()
        self.assertIsInstance(version, str)

    def test_non_zero_length(self):
        """Verify that a non-zero length string is returned"""
        version = versioning.get_version()
        self.assertGreater(len(version), 0)
class TestGetVersionCommand(TestCase):
    def setUp(self):
        """Build a fake user and a mocked message authored by that user."""
        self.user = User(
            name='TestUser',
            id='TestUserId',
            discriminator='0000',
            avatar='',
            bot=False,
        )
        self.mock_message = MagicMock(spec=Message)  # type: Message
        self.mock_message.author = self.user

    def test_type(self):
        """Verify that the correct result type is returned"""
        result = versioning.get_version_command(self.mock_message)
        self.assertIsInstance(result, CommandResult)

    def test_response_content_user_mention(self):
        """Verify that the version command mentions the user"""
        content = versioning.get_version_command(self.mock_message).content
        self.assertIn(self.user.mention, content)

    def test_response_content_version_value(self):
        """Verify that the version command contains the version"""
        content = versioning.get_version_command(self.mock_message).content
        self.assertIn(versioning.get_version(), content)

    def test_response_success(self):
        """Verify that the response is always successful"""
        result = versioning.get_version_command(self.mock_message)
        self.assertTrue(result.success)

    def test_response_type(self):
        """Verify that the version command is a channel response type"""
        result = versioning.get_version_command(self.mock_message)
        self.assertEqual(result.response_type, ResponseType.ChannelMessage)
|
dferguso/IGT4SAR | refs/heads/master | ElevationDifferenceModel_Example.py | 1 | #-------------------------------------------------------------------------------
# Name: ElevationDifferenceModel_Example
# Purpose: This is not an actual script but examples of the calculations that
# would be performed inside of the Raster Calculator to complete the Elevation
# Model as described in Koester's Lost Person Behavior.
#
# Author: Don Ferguson
#
# Created: 06/12/2012
# Copyright: (c) Don Ferguson 2012
# Licence:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The GNU General Public License can be found at
# <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
#####
# Worked example for an IPP (Initial Planning Point) elevation of 640.
# Each Con(...) expression below is meant to be pasted into the ArcGIS
# Raster Calculator (this file is reference material, not runnable Python):
# it scores DEM cells by their elevation change relative to the IPP.
# IPP Elevation = 640
# Same Elevation
Con((((640-10) <= "14 Base_Data_Group\Elevation\crsf_dem") & ("14 Base_Data_Group\Elevation\crsf_dem" <= (640+10))),16,0)
# Down
Con((((640-40) <= "14 Base_Data_Group\Elevation\crsf_dem") & ("14 Base_Data_Group\Elevation\crsf_dem" < (640-10))),36,0)
Con((((640-86) <= "14 Base_Data_Group\Elevation\crsf_dem") & ("14 Base_Data_Group\Elevation\crsf_dem" < (640-40))),36,0)
Con((((640-203) <= "14 Base_Data_Group\Elevation\crsf_dem") & ("14 Base_Data_Group\Elevation\crsf_dem" < (640-86))),36,0)
# Up
Con((((640+10) < "14 Base_Data_Group\Elevation\crsf_dem") & ("14 Base_Data_Group\Elevation\crsf_dem" <= (640+48))),33,0)
Con((((640+48) < "14 Base_Data_Group\Elevation\crsf_dem") & ("14 Base_Data_Group\Elevation\crsf_dem" <= (640+100))),33,0)
Con((((640+100) < "14 Base_Data_Group\Elevation\crsf_dem") & ("14 Base_Data_Group\Elevation\crsf_dem" <= (640+370))),33,0)
# Overall up - 32%
Con((((640+10) < "14 Base_Data_Group\Elevation\crsf_dem") & ("14 Base_Data_Group\Elevation\crsf_dem" <= (640+1905))),42,0)
# Overall down - 52%
Con((((640-1607) <= "14 Base_Data_Group\Elevation\crsf_dem") & ("14 Base_Data_Group\Elevation\crsf_dem" < (640-20))),55,0)
##### Land Cover - Find Location
# Nested IsNull() checks overlay the mobility layers in priority order
# (roads first, then linear features, water, drainage, base land cover).
Con(IsNull("road_find"),Con(IsNull("linear_find"),Con(IsNull("Water_Reclass"),Con(IsNull("drain_find"),"NLCD_Clipped",5),11),2),1)
Con(IsNull("Models\Mobility\road_find"),Con(IsNull("Models\Mobility\linear_find"),Con(IsNull("Models\Mobility\Water_Reclass"),Con(IsNull("Models\Mobility\highslopeA"),Con(IsNull("Models\Mobility\drain_find"),"Models\Mobility\Veggie_Impd",45),99),80),33),1) |
Lukasa/zmusic-ng | refs/heads/master | backend/zmusic/picard/util/mimetype.py | 3 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2009 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
# Map from mime type to the preferred file extension.
MIME_TYPE_EXTENSION_MAP = {
    'image/jpeg': '.jpg',
    'image/png': '.png',
    'image/gif': '.gif',
    'image/tiff': '.tiff',
}

# Reverse lookup table: file extension -> mime type.
# Built with a generator expression instead of a throwaway list of tuples.
EXTENSION_MIME_TYPE_MAP = dict((ext, mime) for mime, ext in MIME_TYPE_EXTENSION_MAP.items())
def get_from_data(data, filename=None, default=None):
    """Tries to determine the mime type from the given data.

    Recognizes the magic-number prefixes of JPEG, PNG, GIF and TIFF
    (both big- and little-endian). If none match and *filename* is given,
    falls back to the file name extension; otherwise returns *default*.
    """
    if data.startswith('\xff\xd8\xff'):
        return 'image/jpeg'
    elif data.startswith('\x89PNG\x0d\x0a\x1a\x0a'):
        return 'image/png'
    elif data.startswith(('GIF87a', 'GIF89a')):
        # str.startswith accepts a tuple of prefixes; one call per format.
        return 'image/gif'
    elif data.startswith(('MM\x00*', 'II*\x00')):
        return 'image/tiff'
    elif filename:
        return get_from_filename(filename, default)
    else:
        return default
def get_from_filename(filename, default=None):
    """Tries to determine the mime type from the given filename."""
    extension = os.path.splitext(os.path.basename(filename))[1]
    return EXTENSION_MIME_TYPE_MAP.get(extension, default)
def get_extension(mimetype, default=None):
    """Returns the file extension for a given mime type."""
    try:
        return MIME_TYPE_EXTENSION_MAP[mimetype]
    except KeyError:
        return default
Jessicamiejiu/a-journey-to-python-jessicamiejiu | refs/heads/master | _src/om2pyItem/scaffold/modules/__init__.py | 107 | # -*- coding: utf-8 -*-
import sys
#sys.path.append("..")
|
proxysh/Safejumper-for-Mac | refs/heads/master | buildmac/Resources/env/lib/python2.7/site-packages/pyasn1/compat/__init__.py | 3653 | # This file is necessary to make this directory a package.
|
shail2810/nova | refs/heads/master | nova/objects/instance_group.py | 16 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
                    base.NovaObjectDictCompat):
    """Versioned object representing a server (instance) group."""

    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: Use list/dict helpers for policies, metadetails, members
    # Version 1.3: Make uuid a non-None real string
    # Version 1.4: Add add_members()
    # Version 1.5: Add get_hosts()
    # Version 1.6: Add get_by_name()
    # Version 1.7: Deprecate metadetails
    # Version 1.8: Add count_members_by_user()
    # Version 1.9: Add get_by_instance_uuid()
    VERSION = '1.9'

    fields = {
        'id': fields.IntegerField(),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'name': fields.StringField(nullable=True),
        'policies': fields.ListOfStringsField(nullable=True),
        'members': fields.ListOfStringsField(nullable=True),
        }

    def obj_make_compatible(self, primitive, target_version):
        """Downgrade the serialized form for older clients (< 1.7)."""
        target_version = utils.convert_version_to_tuple(target_version)
        if target_version < (1, 7):
            # NOTE(danms): Before 1.7, we had an always-empty
            # metadetails property
            primitive['metadetails'] = {}

    @staticmethod
    def _from_db_object(context, instance_group, db_inst):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        # Most of the field names match right now, so be quick
        for field in instance_group.fields:
            if field == 'deleted':
                # The DB soft-delete convention stores the row id in the
                # 'deleted' column; translate that to a boolean here.
                instance_group.deleted = db_inst['deleted'] == db_inst['id']
            else:
                instance_group[field] = db_inst[field]
        instance_group._context = context
        instance_group.obj_reset_changes()
        return instance_group

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Load a group by its UUID; raises if the group does not exist."""
        db_inst = db.instance_group_get(context, uuid)
        return cls._from_db_object(context, cls(), db_inst)

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Load a group by name within the caller's project."""
        # TODO(russellb) We need to get the group by name here.  There's no
        # db.api method for this yet.  Come back and optimize this by
        # adding a new query by name.  This is unnecessarily expensive if a
        # tenant has lots of groups.
        igs = objects.InstanceGroupList.get_by_project_id(context,
                                                          context.project_id)
        for ig in igs:
            if ig.name == name:
                return ig

        raise exception.InstanceGroupNotFound(group_uuid=name)

    @base.remotable_classmethod
    def get_by_instance_uuid(cls, context, instance_uuid):
        """Load the group that the given instance is a member of."""
        db_inst = db.instance_group_get_by_instance(context, instance_uuid)
        return cls._from_db_object(context, cls(), db_inst)

    @classmethod
    def get_by_hint(cls, context, hint):
        """Load a group from a scheduler hint: a UUID or a group name."""
        if uuidutils.is_uuid_like(hint):
            return cls.get_by_uuid(context, hint)
        else:
            return cls.get_by_name(context, hint)

    @base.remotable
    def save(self):
        """Save updates to this instance group."""
        updates = self.obj_get_changes()
        if not updates:
            return

        payload = dict(updates)
        payload['server_group_id'] = self.uuid

        db.instance_group_update(self._context, self.uuid, updates)
        # Re-read the row so derived/DB-side values are reflected locally.
        db_inst = db.instance_group_get(self._context, self.uuid)
        self._from_db_object(self._context, self, db_inst)
        compute_utils.notify_about_server_group_update(self._context,
                                                       "update", payload)

    @base.remotable
    def refresh(self):
        """Refreshes the instance group."""
        current = self.__class__.get_by_uuid(self._context, self.uuid)
        for field in self.fields:
            if self.obj_attr_is_set(field) and self[field] != current[field]:
                self[field] = current[field]
        self.obj_reset_changes()

    @base.remotable
    def create(self):
        """Persist this group to the database and emit a notification."""
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        payload = dict(updates)
        # id is assigned by the DB; policies/members are stored separately.
        updates.pop('id', None)
        policies = updates.pop('policies', None)
        members = updates.pop('members', None)

        db_inst = db.instance_group_create(self._context, updates,
                                           policies=policies,
                                           members=members)
        self._from_db_object(self._context, self, db_inst)
        payload['server_group_id'] = self.uuid
        compute_utils.notify_about_server_group_update(self._context,
                                                       "create", payload)

    @base.remotable
    def destroy(self):
        """Delete this group from the database and emit a notification."""
        payload = {'server_group_id': self.uuid}
        db.instance_group_delete(self._context, self.uuid)
        self.obj_reset_changes()
        compute_utils.notify_about_server_group_update(self._context,
                                                       "delete", payload)

    @base.remotable_classmethod
    def add_members(cls, context, group_uuid, instance_uuids):
        """Add the given instances to the group; returns the member list."""
        payload = {'server_group_id': group_uuid,
                   'instance_uuids': instance_uuids}
        members = db.instance_group_members_add(context, group_uuid,
                                                instance_uuids)
        compute_utils.notify_about_server_group_update(context,
                                                       "addmember", payload)
        return list(members)

    @base.remotable
    def get_hosts(self, exclude=None):
        """Get a list of hosts for non-deleted instances in the group

        This method allows you to get a list of the hosts where instances in
        this group are currently running.  There's also an option to exclude
        certain instance UUIDs from this calculation.
        """
        filter_uuids = self.members
        if exclude:
            filter_uuids = set(filter_uuids) - set(exclude)
        filters = {'uuid': filter_uuids, 'deleted': False}
        instances = objects.InstanceList.get_by_filters(self._context,
                                                        filters=filters)
        # Deduplicate: several members may run on the same host.
        return list(set([instance.host for instance in instances
                         if instance.host]))

    @base.remotable
    def count_members_by_user(self, user_id):
        """Count the number of instances in a group belonging to a user."""
        filter_uuids = self.members
        filters = {'uuid': filter_uuids, 'user_id': user_id, 'deleted': False}
        instances = objects.InstanceList.get_by_filters(self._context,
                                                        filters=filters)
        return len(instances)
@base.NovaObjectRegistry.register
class InstanceGroupList(base.ObjectListBase, base.NovaObject):
    """Versioned list object holding InstanceGroup instances."""

    # Version 1.0: Initial version
    #              InstanceGroup <= version 1.3
    # Version 1.1: InstanceGroup <= version 1.4
    # Version 1.2: InstanceGroup <= version 1.5
    # Version 1.3: InstanceGroup <= version 1.6
    # Version 1.4: InstanceGroup <= version 1.7
    # Version 1.5: InstanceGroup <= version 1.8
    # Version 1.6: InstanceGroup <= version 1.9
    VERSION = '1.6'

    fields = {
        'objects': fields.ListOfObjectsField('InstanceGroup'),
        }

    # NOTE(danms): InstanceGroup was at 1.3 before we added this
    # Maps each list version to the highest child object version it carries.
    obj_relationships = {
        'objects': [('1.0', '1.3'), ('1.1', '1.4'), ('1.2', '1.5'),
                    ('1.3', '1.6'), ('1.4', '1.7'), ('1.5', '1.8'),
                    ('1.6', '1.9')],
    }

    @base.remotable_classmethod
    def get_by_project_id(cls, context, project_id):
        """Return all groups owned by the given project."""
        groups = db.instance_group_get_all_by_project_id(context, project_id)
        return base.obj_make_list(context, cls(context), objects.InstanceGroup,
                                  groups)

    @base.remotable_classmethod
    def get_all(cls, context):
        """Return every group in the database."""
        groups = db.instance_group_get_all(context)
        return base.obj_make_list(context, cls(context), objects.InstanceGroup,
                                  groups)
|
acshan/odoo | refs/heads/8.0 | openerp/workflow/__init__.py | 378 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.workflow.service import WorkflowService
# The new API is in openerp.workflow.workflow_service
# OLD API of the Workflow
def clear_cache(cr, uid):
    """Invalidate the cached workflow definitions for the current database."""
    dbname = cr.dbname
    WorkflowService.clear_cache(dbname)
def trg_write(uid, res_type, res_id, cr):
    """
    Reevaluates the specified workflow instance. Thus if any condition for
    a transition have been changed in the backend, then running ``trg_write``
    will move the workflow over that transition.

    :param res_type: the model name
    :param res_id: the model instance id the workflow belongs to
    :param cr: a database cursor
    """
    workflow = WorkflowService.new(cr, uid, res_type, res_id)
    return workflow.write()
def trg_trigger(uid, res_type, res_id, cr):
    """
    Activate a trigger.

    If a workflow instance is waiting for a trigger from another model, then
    this trigger can be activated if its conditions are met.

    :param res_type: the model name
    :param res_id: the model instance id the workflow belongs to
    :param cr: a database cursor
    """
    workflow = WorkflowService.new(cr, uid, res_type, res_id)
    return workflow.trigger()
def trg_delete(uid, res_type, res_id, cr):
    """
    Delete a workflow instance

    :param res_type: the model name
    :param res_id: the model instance id the workflow belongs to
    :param cr: a database cursor
    """
    workflow = WorkflowService.new(cr, uid, res_type, res_id)
    return workflow.delete()
def trg_create(uid, res_type, res_id, cr):
    """
    Create a new workflow instance

    :param res_type: the model name
    :param res_id: the model instance id to own the created worfklow instance
    :param cr: a database cursor
    """
    workflow = WorkflowService.new(cr, uid, res_type, res_id)
    return workflow.create()
def trg_validate(uid, res_type, res_id, signal, cr):
    """
    Fire a signal on a given workflow instance

    :param res_type: the model name
    :param res_id: the model instance id the workflow belongs to
    :signal: the signal name to be fired
    :param cr: a database cursor
    """
    assert isinstance(signal, basestring)
    workflow = WorkflowService.new(cr, uid, res_type, res_id)
    return workflow.validate(signal)
def trg_redirect(uid, res_type, res_id, new_rid, cr):
    """
    Re-bind a workflow instance to another instance of the same model.

    Make all workitems which are waiting for a (subflow) workflow instance
    for the old resource point to the (first active) workflow instance for
    the new resource.

    :param res_type: the model name
    :param res_id: the model instance id the workflow belongs to
    :param new_rid: the model instance id to own the worfklow instance
    :param cr: a database cursor
    """
    assert isinstance(new_rid, (long, int))
    workflow = WorkflowService.new(cr, uid, res_type, res_id)
    return workflow.redirect(new_rid)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unreal666/outwiker | refs/heads/master | plugins/updatenotifier/updatenotifier/updatecontroller.py | 2 | # -*- coding: utf-8 -*-
import datetime
import logging
import threading
import os.path
import json
import shutil
import html
import wx
import outwiker.core
from outwiker.gui.longprocessrunner import LongProcessRunner
from outwiker.core.commands import getCurrentVersion, MessageBox, setStatusText
from outwiker.core.version import Version
from outwiker.core.system import getPluginsDirList
import outwiker.core.packageversion as pv
from outwiker.utilites.textfile import readTextFile
from .updatedialog import UpdateDialog
from .updatesconfig import UpdatesConfig
from .versionlist import VersionList
from .i18n import get_
from .contentgenerator import ContentGenerator
from .updateplugin import UpdatePlugin
from .loaders import NormalLoader
# The event occurs after finish checking latest versions.
# The parameters:
#     appInfoDict - dictionary. Key - plugin name or special id,
#         value - AppInfo instance.
#     silenceMode - True if the thread was run in the silence mode.
# NewEvent() returns the event class and the binder id used with Bind().
UpdateVersionsEvent, EVT_UPDATE_VERSIONS = wx.lib.newevent.NewEvent()

# Module-level logger for this plugin.
logger = logging.getLogger('updatenotifier')
class UpdateController(object):
"""
Controller for updates checking and show information.
"""
    def __init__(self, application, plugin):
        '''
        application - instance of the ApplicationParams class.
        plugin - the UpdateNotifier plugin instance (its pluginPath is used
            to locate the bundled data/ directory).
        '''
        global _
        _ = get_()

        join = os.path.join

        self._application = application
        self._plugin = plugin
        self._config = UpdatesConfig(self._application.config)
        self._dataPath = join(plugin.pluginPath, u'data')
        self._updateTemplatePath = join(self._dataPath, u'update.html')
        self.__deletedPlugins = {}
        self.vl = VersionList()  # to load app info from the internet.

        # Special string id for stable and unstable OutWiker URL
        self._OUTWIKER_STABLE_KEY = u'OUTWIKER_STABLE'
        self._OUTWIKER_UNSTABLE_KEY = u'OUTWIKER_UNSTABLE'

        # Dictionary. Key - plugin name or special string id,
        # Value - URL to XML file with versions information.
        self._updateUrls = {}
        self._updateUrls[self._OUTWIKER_STABLE_KEY] = u'https://jenyay.net/uploads/Soft/Outwiker/versions.xml'
        self._updateUrls[self._OUTWIKER_UNSTABLE_KEY] = u'https://jenyay.net/uploads/Outwiker/Unstable/versions.xml'

        # update dialog instance
        self._dialog = None

        # The thread to check updates in the silence mode (after program start)
        self._silenceThread = None

        # Results of the background check arrive as a wx event.
        if self._application.mainWindow is not None:
            self._application.mainWindow.Bind(
                EVT_UPDATE_VERSIONS,
                handler=self._onVersionUpdate)
def checkForUpdatesSilence(self):
"""
Execute the silence update checking.
"""
setStatusText(_(u"Check for new versions..."))
if (self._silenceThread is None or not self._silenceThread.isAlive()):
self._silenceThread = threading.Thread(
None,
self._threadFunc,
args=(self._updateUrls.copy(), True)
)
self._silenceThread.start()
    def checkForUpdates(self):
        """
        Execute updates checking and show dialog with the results.
        """
        setStatusText(_(u"Check for new versions..."))

        # Run the check under a progress dialog; results are delivered
        # asynchronously via the EVT_UPDATE_VERSIONS event.
        progressRunner = LongProcessRunner(
            self._threadFunc,
            self._application.mainWindow,
            dialogTitle=u"UpdateNotifier",
            dialogText=_(u"Check for new versions..."))

        progressRunner.run(self._updateUrls.copy(),
                           silenceMode=False)
    def createHTMLContent(
            self, appInfoDict, updatedAppInfo, installerInfoDict):
        """Render the HTML shown in the update dialog.

        appInfoDict - AppInfo for OutWiker itself (stable/unstable).
        updatedAppInfo - AppInfo for the installed plugins.
        installerInfoDict - AppInfo for plugins available but not installed.
        """
        currentVersion = getCurrentVersion()
        currentVersionsDict = self._getCurrentVersionsDict()
        # Keep only OutWiker entries that are newer than the installed app.
        appInfoDict = self.filterUpdatedApps(currentVersionsDict, appInfoDict)
        # Split plugin info into "has an update" and "already up to date".
        updateAppInfo = self.filterUpdatedApps(
            currentVersionsDict, updatedAppInfo)
        installedAppInfo = {
            x: y for x,
            y in updatedAppInfo.items() if x not in updateAppInfo}
        template = readTextFile(self._updateTemplatePath)
        # Values and translated strings exposed to the HTML template.
        templateData = {
            u'outwiker_current_version': currentVersion,
            u'outwikerAppInfo': appInfoDict,
            u'updatedAppInfo': updateAppInfo,
            u'installedAppInfo': installedAppInfo,
            u'otherAppInfo': installerInfoDict,
            u'currentVersionsDict': currentVersionsDict,
            u'str_outwiker_current_version': _(u'Installed OutWiker version'),
            u'str_outwiker_latest_stable_version': _(u'Latest stable OutWiker version'),
            u'str_outwiker_latest_unstable_version': _(u'Latest unstable OutWiker version'),
            u'str_version_history': _(u'Version history'),
            u'str_more_info': _(u'More info'),
            u'str_update': _(u'Update'),
            u'str_install': _(u'Install'),
            u'str_uninstall': _(u'Uninstall'),
            u'data_path': self._dataPath,
            u'escape': html.escape,
        }
        contentGenerator = ContentGenerator(template)
        HTMLContent = contentGenerator.render(templateData)
        return HTMLContent
@staticmethod
def filterUpdatedApps(currentVersionsDict, latestAppInfoDict):
"""
Return dictionary with the AppInfo for updated apps only.
currentVersionsDict - dictionary with apps versions.
Key - plugin name or special id,
value - version number string.
latestAppInfoDict - dictionary with AppInfo instances.
Key - plugin name or special id,
value - instance of the AppInfo.
"""
updatedPlugins = {}
for app_name, version_str in currentVersionsDict.items():
if app_name not in latestAppInfoDict:
continue
latestAppInfo = latestAppInfoDict[app_name]
try:
currentPluginVersion = Version.parse(version_str)
except ValueError:
continue
latestVersion = latestAppInfo.currentVersion
if (latestVersion is not None and
latestVersion > currentPluginVersion):
updatedPlugins[app_name] = latestAppInfo
return updatedPlugins
    def _getPluginsUpdateUrls(self):
        '''
        Collect the update URLs declared by the loaded plugins.

        Return dict which key is plugin name, value is updatesUrl.
        Plugins whose info cannot be read or parsed are skipped with a
        warning.
        '''
        getInfo = self._application.plugins.getInfo
        result = {}
        plugin_names = self._application.plugins.loadedPlugins

        for name in plugin_names:
            try:
                # Prefer the current UI language, fall back to English.
                appInfo = getInfo(name, [_(u'__updateLang'), u'en'])
            except IOError:
                logger.warning(u"Can't read {} Info".format(name))
                continue
            except ValueError:
                logger.warning(u"Invalid format {}".format(name))
                continue
            result[name] = appInfo.updatesUrl
        return result
def _getUpdatedAppInfo(self, latestVersionsDict):
'''
Get AppInfo instances for updated apps (plugins and OutWiker) only.
latestVersionsDict - dictionary. Key - plugin name or special id,
value - instance of the AppInfo class.
Return dictionary with the AppInfo instances for updated apps.
'''
currentVersionsDict = self._getCurrentVersionsDict()
updatedAppInfo = self.filterUpdatedApps(
currentVersionsDict, latestVersionsDict)
return updatedAppInfo
    def _getCurrentVersionsDict(self):
        '''
        Return dictionary with apps versions. Key - plugin name or special id,
        value - string with version.
        '''
        currentVersion = getCurrentVersion()
        # NOTE(review): self.get_plugin() is defined elsewhere in this class;
        # assumed to return the loaded plugin object for the name — confirm.
        currentVersionsDict = {plugin: self.get_plugin(plugin).version
                               for plugin
                               in self._application.plugins.loadedPlugins}
        # Both OutWiker channel keys map to the same installed version.
        currentVersionsDict[self._OUTWIKER_STABLE_KEY] = str(currentVersion)
        currentVersionsDict[self._OUTWIKER_UNSTABLE_KEY] = str(currentVersion)
        return currentVersionsDict
    def _showUpdates(self, appInfoDict, updatedAppInfo, installerInfoDict):
        '''
        Show dialog with update information.
        '''
        setStatusText(u"")

        HTMLContent = self.createHTMLContent(
            appInfoDict, updatedAppInfo, installerInfoDict)

        with UpdateDialog(self._application.mainWindow) as updateDialog:
            # Keep a reference so update_plugin() can refresh / close it.
            self._dialog = updateDialog
            updateDialog.setContent(HTMLContent, self._dataPath)
            updateDialog.ShowModal()
def _onVersionUpdate(self, event):
'''
Event handler for EVT_UPDATE_VERSIONS.
'''
setStatusText(u"")
self._touchLastUpdateDate()
updatedAppInfo = self._getUpdatedAppInfo(event.plugInfoDict)
if event.silenceMode:
if updatedAppInfo:
self._showUpdates(
event.appInfoDict,
event.plugInfoDict,
event.installerInfoDict)
else:
self._showUpdates(
event.appInfoDict,
event.plugInfoDict,
event.installerInfoDict)
    def _threadFunc(self, updateUrls, silenceMode):
        """
        Thread function for silence updates checking.
        Get info data from the updates Urls

        :param:
            updateUrls - dict which key is plugin name or other ID,
                value is update url
            silenceMode - True or False
        :raise:
            EVT_UPDATE_VERSIONS event
        """
        # get AppInfo for OutWiker itself (stable / unstable channels)
        appInfoDict = self.vl.loadAppInfo(updateUrls)

        # get update URLs from installed plugins
        plugInfoDict = self.vl.loadAppInfo(self._getPluginsUpdateUrls())

        # get update URLs from plugins.json and remove installed.
        installerInfoDict = {x: y for x, y
                             in self._getUrlsForInstaller().items()
                             if x not in self._application.plugins.loadedPlugins}
        installerInfoDict = self.vl.loadAppInfo(installerInfoDict)

        # Deliver the collected results to the GUI thread via a wx event.
        event = UpdateVersionsEvent(appInfoDict=appInfoDict,
                                    plugInfoDict=plugInfoDict,
                                    installerInfoDict=installerInfoDict,
                                    silenceMode=silenceMode)

        if self._application.mainWindow:
            wx.PostEvent(self._application.mainWindow, event)
def _getUrlsForInstaller(self):
    """
    Download plugins.json from URL and deserialize it to dict.
    :return:
        dictionary {<plugin name>: <url to plugin.xml file>},
        empty on download failure
    """
    json_url = r"https://jenyay.net/uploads/Outwiker/Plugins/plugins.json"
    raw = NormalLoader().load(json_url)
    if not raw:
        return {}
    # Cache the parsed repository for later install_plugin() calls.
    self._installerPlugins = json.loads(raw)
    return {entry['name']: entry['url']
            for entry in self._installerPlugins.values()}
def _touchLastUpdateDate(self):
'''
Save latest updates checking time.
'''
self._config.lastUpdate = datetime.datetime.today()
def update_plugin(self, name):
    """
    Update plugin to latest version by name.
    :param name: name of an already-installed plugin
    :return: True if plugin was installed, otherwise False
    """
    appInfoDict = self.vl.loadAppInfo(self._getPluginsUpdateUrls())
    # get link to latest version
    appInfo = appInfoDict.get(name)
    if appInfo:
        url = self.vl.getDownlodUrl(appInfo)
        if not url:
            MessageBox(_(u"The download link was not found in plugin description. Please update plugin manually"),
                       u"UpdateNotifier")
            return False
    else:
        # No update description available for this plugin at all.
        MessageBox(_(u"Plugin was NOT updated. Please update plugin manually"),
                   u"UpdateNotifier")
        return False
    plugin = self.get_plugin(name)
    logger.info(
        'update_plugin: {url} {path}'.format(
            url=url, path=plugin.pluginPath))
    # Download and unpack the new version over the existing install.
    rez = UpdatePlugin().update(url, plugin.pluginPath)
    if rez:
        self._application.plugins.reload(name)
        self._updateDialog()
    else:
        MessageBox(
            _(u"Plugin was NOT updated. Please update plugin manually"),
            u"UpdateNotifier")
    return rez
def _updateDialog(self):
    """
    Refresh the currently opened update dialog: close it and restart
    the updates check, which reopens it with fresh content.
    """
    dialog = self._dialog
    if dialog and dialog.IsModal():
        dialog.EndModal(wx.ID_OK)
        self.checkForUpdates()
def install_plugin(self, name):
    """
    Install plugin by name.
    :param name: plugin name as listed in plugins.json
    :return: True if plugin was installed, otherwise False
        (falls through with None if the name is unknown)
    """
    getAppInfo = self.vl.getAppInfoFromUrl
    getDownlodUrl = self.vl.getDownlodUrl
    # _installerPlugins is filled by _getUrlsForInstaller().
    plugin_info = self._installerPlugins.get(name, None)
    if plugin_info:
        appInfo = getAppInfo(plugin_info["url"])
        if not appInfo or not appInfo.versionsList:
            MessageBox(_(u"The plugin description can't be downloaded. Please install plugin manually"),
                       u"UpdateNotifier")
            return False
        # Refuse plugins built against a newer OutWiker API than ours.
        api_required_version = appInfo.requirements.api_version
        if pv.checkVersionAny(outwiker.core.__version__,
                              api_required_version) != 0:
            MessageBox(_(u"The plugin required newer version of OutWiker. Please update OutWiker"),
                       u"UpdateNotifier")
            return False
        # get link to latest version
        url = getDownlodUrl(appInfo)
        if not url:
            MessageBox(_(u"The download link was not found in plugin description. Please install plugin manually"),
                       u"UpdateNotifier")
            return False
        # getPluginsDirList()[0] is the folder next to the executable;
        # further folders (if any) follow it.  If the plugin was removed
        # earlier in this session, reuse its previous location (Python
        # cannot un-import the old module).
        pluginPath = self.__deletedPlugins.get(
            name,
            os.path.join(getPluginsDirList()[-1], name.lower()))
        logger.info(
            'install_plugin: {url} {path}'.format(
                url=url, path=pluginPath))
        rez = UpdatePlugin().update(url, pluginPath)
        if rez:
            self._application.plugins.load([os.path.dirname(pluginPath)])
            self._updateDialog()
        else:
            MessageBox(
                _(u"Plugin was NOT Installed. Please update plugin manually"),
                u"UpdateNotifier")
        return rez
def get_plugin(self, name):
    """
    Retrieve Plugin object from app.plugins.
    :param name:
        plugin name
    :return:
        The object with Plugin interface or None if not loaded
    """
    loaded = self._application.plugins.loadedPlugins
    return loaded.get(name)
def uninstall_plugin(self, name):
    """
    Remove plugin from application._plugins and delete the plugin folder
    from disk.
    :param name: plugin name
    :return:
        True if plugin was uninstalled successfully, otherwise False
    """
    def del_msg(function, path, excinfo):
        "Error handler for shutil.rmtree"
        MessageBox(_("Plugin's folder can't be deleted. Please delete the following path: \n {}").format(path))
    rez = True
    plugin_path = self.get_plugin(name).pluginPath
    logger.info(
        'uninstall_plugin: {name} {path}'.format(
            name=name, path=plugin_path))
    # remove plugin from applications._plugins
    rez = rez and self._application.plugins.remove(name)
    logger.info('uninstall_plugin: remove plugin {}'.format(rez))
    # remove plugin folder or remove symbolic link to it.
    if rez:
        if os.path.islink(plugin_path):
            os.unlink(plugin_path)
        else:
            # BUGFIX: del_msg was defined but unused; an identical
            # inline lambda duplicated its body here. Use the handler.
            shutil.rmtree(plugin_path, onerror=del_msg)
        # Python can't unimport file, so save the deleted plugin.
        # If user re-installs it we just install it in same directory.
        self.__deletedPlugins[name] = plugin_path
        # reopen dialog
        if name != self._plugin.name:
            self._updateDialog()
        else:  # if UpdateNotifier was deleted, just close dialog
            self._dialog.EndModal(wx.ID_OK)
    return rez
|
jondong/crosswalk | refs/heads/master | app/tools/android/make_apk.py | 4 | #!/usr/bin/env python
# Copyright (c) 2013, 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=F0401
import atexit
import json
import optparse
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
# get xwalk absolute path so we can run this script from any location
xwalk_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(xwalk_dir)
from app_info import AppInfo
from customize import VerifyPackageName, CustomizeAll, \
ParseParameterForCompressor, CustomizeManifest
from extension_manager import GetExtensionList, GetExtensionStatus
from handle_permissions import permission_mapping_table
from util import CleanDir, GetVersion, RunCommand, \
CreateAndCopyDir, GetBuildDir
from manifest_json_parser import HandlePermissionList
from manifest_json_parser import ManifestJsonParser
# File name of the Crosswalk native runtime library, and of the placeholder
# library shipped in its place when LZMA compression is enabled.
NATIVE_LIBRARY = 'libxwalkcore.so'
DUMMY_LIBRARY = 'libxwalkdummy.so'
# Directory names of the two Android library projects bundled with this tool.
EMBEDDED_LIBRARY = 'xwalk_core_library'
SHARED_LIBRARY = 'xwalk_shared_library'
# FIXME(rakuco): Only ALL_ARCHITECTURES should exist. We keep these two
# separate lists because SUPPORTED_ARCHITECTURES contains the architectures
# for which we provide official Crosswalk downloads. We do not want to
# prevent users from providing APKs for other architectures if they build
# Crosswalk themselves though.
SUPPORTED_ARCHITECTURES = (
    'arm',
    'x86',
)
ALL_ARCHITECTURES = (
    'arm',
    'arm64',
    'x86',
    'x86_64',
)
def ConvertArchNameToArchFolder(arch):
  """Return the Android ABI folder name for *arch*, or None if unknown."""
  folders = {
      'arm': 'armeabi-v7a',
      'arm64': 'arm64-v8a',
      'x86': 'x86',
      'x86_64': 'x86_64',
  }
  return folders.get(arch)
def ConvertArchNameToArchValue(arch):
  """Return the ELF e_machine value for *arch*, or None if unknown.

  The processor types represented in the ELF file header follow
  http://www.sco.com/developers/gabi/latest/ch4.eheader.html
  """
  arch_dict = {
      'x86': 3,       # EM_386
      # BUGFIX: was 50, which is EM_IA_64 (Itanium) in the gABI table;
      # EM_X86_64 is 62, so x86_64 extension validation always failed.
      'x86_64': 62,   # EM_X86_64
      'arm': 40,      # EM_ARM
      'arm64': 183    # EM_AARCH64
  }
  return arch_dict.get(arch, None)
def AddExeExtensions(name):
  """Expand *name* with every PATHEXT extension; the bare name comes last."""
  raw = os.environ.get('PATHEXT', '').lower()
  candidates = [name + ext for ext in raw.split(os.pathsep) if ext]
  candidates.append(name)
  return candidates
def Which(name):
  """Searches PATH for executable files with the given name, also taking
  PATHEXT into account. Returns the first existing match, or None if no
  matches are found."""
  for directory in os.environ.get('PATH', '').split(os.pathsep):
    for candidate in AddExeExtensions(name):
      full_path = os.path.join(directory, candidate)
      if os.path.isfile(full_path) and os.access(full_path, os.X_OK):
        return full_path
  return None
def GetAndroidApiLevel(android_path):
  """Get highest Android target level installed.
  Returns -1 if no targets have been found.
  """
  listing = RunCommand([android_path, 'list', 'target', '-c'])
  levels = [int(m) for m in re.findall(r'android-(\d+)', listing)]
  return max(levels) if levels else -1
def GetNativeLibraryArchValueInELFHeader(path):
  """Return the e_machine (CPU architecture) value from an ELF file.

  The 16-bit architecture value sits at offset 0x12 of the ELF header.
  """
  # BUGFIX: the file handle was previously never closed; the with-block
  # guarantees it is released even if read() raises.
  with open(path, 'rb') as library:
    header = library.read(20)
  return struct.unpack_from('H', header, 18)[0]
def CheckValidationOfExpectedLibraryArch(path, expected_arch):
  """Exit(10) if any native extension under *path* was built for a CPU
  architecture other than *expected_arch*."""
  # Hoisted out of the loop: the expected e_machine value is invariant.
  expected_value = ConvertArchNameToArchValue(expected_arch)
  for extension_path in FindNativeExtensionFiles(path):
    # Idiom fix: use != instead of 'not ... ==' (and drop stray ';').
    if GetNativeLibraryArchValueInELFHeader(extension_path) != expected_value:
      print('Invalid CPU arch of %s, %s is expected.' % (extension_path,
                                                         expected_arch))
      sys.exit(10)
def ContainsNativeLibrary(path):
  """True if *path* holds an uncompressed libxwalkcore.so."""
  candidate = os.path.join(path, NATIVE_LIBRARY)
  return os.path.isfile(candidate)
def ContainsCompressedLibrary(path):
  """True if *path* holds an LZMA-compressed libxwalkcore.so."""
  candidate = os.path.join(path, NATIVE_LIBRARY + ".lzma")
  return os.path.isfile(candidate)
def ParseManifest(options):
  """Fill unset fields of *options* from the app's manifest.json.

  Command-line values win for name/version/xwalk_apk_url; most other
  fields are overwritten whenever the manifest provides them. Exits with
  code 9 when the manifest defines no launch path at all.
  Returns the ManifestJsonParser for further queries by the caller.
  """
  parser = ManifestJsonParser(os.path.expanduser(options.manifest))
  if not options.name:
    options.name = parser.GetAppName()
  if not options.app_version:
    options.app_version = parser.GetVersion()
  if parser.GetDescription():
    options.description = parser.GetDescription()
  if parser.GetPermissions():
    options.permissions = parser.GetPermissions()
  # A launch point is mandatory: either a remote URL or a local path.
  if parser.GetAppUrl():
    options.app_url = parser.GetAppUrl()
  elif parser.GetAppLocalPath():
    options.app_local_path = parser.GetAppLocalPath()
  else:
    print('Error: there is no app launch path defined in manifest.json.')
    sys.exit(9)
  if not options.xwalk_apk_url and parser.GetXWalkApkUrl():
    options.xwalk_apk_url = parser.GetXWalkApkUrl()
  # Icons are only meaningful relative to an app root.
  options.icon_dict = {}
  if parser.GetAppRoot():
    options.app_root = parser.GetAppRoot()
    options.icon_dict = parser.GetIcons()
  if parser.GetOrientation():
    options.orientation = parser.GetOrientation()
  # Tri-state: anything other than 'true'/'false' leaves the option as-is.
  if parser.GetFullScreenFlag().lower() == 'true':
    options.fullscreen = True
  elif parser.GetFullScreenFlag().lower() == 'false':
    options.fullscreen = False
  return parser
def ParseXPK(options, out_dir):
  """Unpack the XPK archive into *out_dir* and point options.manifest at
  the extracted manifest.json.

  Exits with code 7 if --manifest was also given (XPK wins), or code 8
  if the archive contains no manifest.
  """
  cmd = ['python', os.path.join(xwalk_dir, 'parse_xpk.py'),
         '--file=%s' % os.path.expanduser(options.xpk),
         '--out=%s' % out_dir]
  RunCommand(cmd)
  if options.manifest:
    print ('Use the manifest from XPK by default '
           'when "--xpk" option is specified, and '
           'the "--manifest" option would be ignored.')
    sys.exit(7)
  if os.path.isfile(os.path.join(out_dir, 'manifest.json')):
    options.manifest = os.path.join(out_dir, 'manifest.json')
  else:
    print('XPK doesn\'t contain manifest file.')
    sys.exit(8)
def FindExtensionJars(root_path):
  ''' Find all .jar files for external extensions.

  Each extension lives in <root_path>/<name>/ and its jar must be named
  <name>.jar; anything else is ignored.
  '''
  jars = []
  if not os.path.exists(root_path):
    return jars
  for entry in os.listdir(root_path):
    entry_dir = os.path.join(root_path, entry)
    if not os.path.isdir(entry_dir):
      continue
    jar = os.path.join(entry_dir, os.path.basename(entry) + '.jar')
    if os.path.isfile(jar):
      jars.append(jar)
  return jars
def MakeCodeBaseFromAppVersion(app_version):
  """
  Generates a string suitable for an Android versionCode from a version
  number. The returned string is appended to the ABI prefix digit to form
  the android:versionCode attribute of the Android manifest file.

  |app_version| must be a string with the format "ab.cd.efg", all digits
  but 'a' being optional. Returns None when the format is invalid.
  """
  if re.match(r'\d{1,2}(\.\d{1,2}(\.\d{1,3})?)?$', app_version) is None:
    return None
  parts = [int(piece) for piece in app_version.split('.')]
  while len(parts) < 3:   # pad missing minor/patch with zeros
    parts.append(0)
  return '%02d%02d%03d' % (parts[0], parts[1], parts[2])
def MakeVersionCode(options, app_version):
  """
  Returns a number in a format suitable for Android's android:versionCode
  manifest attribute.

  If the --app-versionCode option is given it is returned unchanged.
  Otherwise an 8-digit code is built: one ABI prefix digit followed by a
  7-digit base taken from --app-versionCodeBase or derived from
  --app-version (or its JSON manifest counterpart, "xwalk_version"), as
  recommended by the multiple-APK publishing guidelines linked below.
  """
  if options.app_versionCode:
    return options.app_versionCode
  # The android:versionCode we build follows the recommendations from
  # https://software.intel.com/en-us/blogs/2012/11/12/how-to-publish-your-apps-on-google-play-for-x86-based-android-devices-using
  abi_number = {
      'arm': 2,
      'arm64': 3,
      'x86': 6,
      'x86_64': 7,
  }.get(options.arch, 0)
  if options.app_versionCodeBase is not None:
    if len(str(options.app_versionCodeBase)) > 7:
      print('Error: --app-versionCodeBase must have 7 digits or less.')
      sys.exit(12)
    version_code_base = options.app_versionCodeBase
  else:
    version_code_base = MakeCodeBaseFromAppVersion(app_version)
    if version_code_base is None:
      print('Error: Cannot create a valid android:versionCode from version '
            'number "%s". Valid version numbers must follow the format '
            '"ab.cd.efg", where only \'a\' is mandatory. For example, "1", '
            '"3.45" and "12.3.976" are all valid version numbers. If you use '
            'a different versioning scheme, please either "--app-versionCode" '
            'or "--app-versionCodeBase" to manually provide the '
            'android:versionCode number that your APK will use.' % app_version)
      sys.exit(12)
  return '%d%07d' % (abi_number, int(version_code_base))
def GetExtensionBinaryPathList():
  """Collect the binary directories of all enabled local extensions.

  Scans ./extensions/<name>/build.json for a "binary_path" entry and
  returns the list of existing directories. Exits with code 1 when an
  enabled extension lacks a binary_path or the path does not exist.
  """
  local_extension_list = []
  extensions_path = os.path.join(os.getcwd(), "extensions")
  exist_extension_list = GetExtensionList(extensions_path)
  for item in exist_extension_list:
    build_json_path = os.path.join(extensions_path, item, "build.json")
    with open(build_json_path) as fd:
      data = json.load(fd)
    # Disabled extensions are skipped entirely.
    if not GetExtensionStatus(item, extensions_path):
      continue
    else:
      if data.get("binary_path", False):
        extension_binary_path = os.path.join(extensions_path,
                                             item,
                                             data["binary_path"])
      else:
        print("The extension \"%s\" doesn't exists." % item)
        sys.exit(1)
      if os.path.isdir(extension_binary_path):
        local_extension_list.append(extension_binary_path)
      else:
        print("The extension \"%s\" doesn't exists." % item)
        sys.exit(1)
  return local_extension_list
def Customize(options, app_info, manifest):
  """Transfer command-line options into *app_info* and run CustomizeAll.

  Also appends every local extension's binary path to options.extensions
  (os.pathsep-separated) before customization.
  """
  app_info.package = options.package
  app_info.app_name = options.name
  # 'org.xwalk.my_first_app' => 'MyFirstApp'
  android_name = options.package.split('.')[-1].split('_')
  app_info.android_name = ''.join([i.capitalize() for i in android_name if i])
  if options.app_version:
    app_info.app_version = options.app_version
  if options.app_root:
    app_info.app_root = os.path.expanduser(options.app_root)
  # Flag-style fields hold the literal command-line switch when enabled.
  if options.enable_remote_debugging:
    app_info.remote_debugging = '--enable-remote-debugging'
  if options.use_animatable_view:
    app_info.use_animatable_view = '--use-animatable-view'
  if options.fullscreen:
    app_info.fullscreen_flag = '-f'
  if options.orientation:
    app_info.orientation = options.orientation
  if options.icon:
    app_info.icon = '%s' % os.path.expanduser(options.icon)
  if options.xwalk_apk_url:
    app_info.xwalk_apk_url = options.xwalk_apk_url
  if options.mode:
    app_info.mode = options.mode
  # Add local extensions to extension list.
  extension_binary_path_list = GetExtensionBinaryPathList()
  if len(extension_binary_path_list) > 0:
    if options.extensions is None:
      options.extensions = ""
    else:
      options.extensions += os.pathsep
    for item in extension_binary_path_list:
      options.extensions += item
      options.extensions += os.pathsep
    # trim final path separator
    options.extensions = options.extensions[0:-1]
  CustomizeAll(app_info, options.description, options.icon_dict,
               options.permissions, options.app_url, options.app_local_path,
               options.keep_screen_on, options.extensions, manifest,
               options.xwalk_command_line, options.compressor)
def CleanCompressedLibrary(library_path, arch):
  """Delete a stale libxwalkcore.so.<arch> left over from a prior build."""
  leftover = os.path.join(library_path, NATIVE_LIBRARY + '.' + arch)
  if os.path.isfile(leftover):
    os.remove(leftover)
def CleanNativeLibrary(library_path, arch):
  """Remove the per-arch native library directory if it exists."""
  target = os.path.join(library_path, arch)
  if os.path.isdir(target):
    shutil.rmtree(target)
def CopyCompressedLibrary(native_path, library_path, raw_path, arch):
  """Stage a compressed build: the dummy .so goes to libs/<arch> and the
  .lzma payload goes into res/raw for runtime decompression."""
  arch_path = os.path.join(library_path, arch)
  dummy_library = os.path.join(native_path, DUMMY_LIBRARY)
  if os.path.isfile(dummy_library):
    if not os.path.isdir(arch_path):
      os.mkdir(arch_path)
    shutil.copy(dummy_library, arch_path)
  compressed = os.path.join(native_path, NATIVE_LIBRARY + '.lzma')
  shutil.copy(compressed, raw_path)
def CopyNativeLibrary(native_path, library_path, raw_path, arch):
  """Copy the whole native_libs/<arch> tree into the core library's libs/.
  raw_path is unused; kept for signature parity with CopyCompressedLibrary."""
  destination = os.path.join(library_path, arch)
  shutil.copytree(native_path, destination)
def CopyNativeExtensionFile(suffix, src_path, dest_path):
  """Copy every file ending in *suffix* from src_path into dest_path.
  A missing src_path is silently ignored."""
  if not os.path.exists(src_path):
    return
  for entry in os.listdir(src_path):
    if entry.endswith(suffix):
      shutil.copy(os.path.join(src_path, entry), dest_path)
def FindNativeExtensionFiles(path):
  ''' Find all .so files for native external extensions. '''
  if not os.path.exists(path):
    return []
  found = []
  for entry in os.listdir(path):
    candidate = os.path.join(path, entry)
    if candidate.endswith('.so') and os.path.isfile(candidate):
      found.append(candidate)
  return found
def Execution(options, app_info):
  """Update the Android project, stage native libraries and build the APK.

  Expects the project to be staged under GetBuildDir(app_info.android_name)
  by MakeSharedApk/MakeEmbeddedApk. Exits the process on unrecoverable
  errors (missing keystore alias, invalid arch, missing library, ant
  failure) with distinct exit codes.
  """
  # Now we've got correct app_version and correct ABI value,
  # start to generate suitable versionCode.
  app_info.app_versionCode = MakeVersionCode(options, app_info.app_version)
  # Write generated versionCode into AndroidManifest.xml. Later, if we have
  # other customization, we can put them together into CustomizeManifest().
  CustomizeManifest(app_info)
  name = app_info.android_name
  arch_string = (' (' + options.arch + ')' if options.arch else '')
  print('\nStarting application build' + arch_string)
  app_dir = GetBuildDir(name)
  android_path = Which('android')
  api_level = GetAndroidApiLevel(android_path)
  target_string = 'android-%d' % api_level
  print (' * Checking keystore for signing')
  if options.keystore_path:
    key_store = os.path.expanduser(options.keystore_path)
    if options.keystore_alias:
      key_alias = options.keystore_alias
    else:
      print('Please provide an alias name of the developer key.')
      sys.exit(6)
    if options.keystore_passcode:
      key_code = options.keystore_passcode
    else:
      key_code = None
    if options.keystore_alias_passcode:
      key_alias_code = options.keystore_alias_passcode
    else:
      key_alias_code = None
  else:
    # Fall back to the bundled debug keystore.
    print(' No keystore provided for signing. Using xwalk\'s keystore '
          'for debugging.\n Please use a valid keystore when '
          'distributing to the app market.')
    key_store = os.path.join(xwalk_dir, 'xwalk-debug.keystore')
    key_alias = 'xwalkdebugkey'
    key_code = 'xwalkdebug'
    key_alias_code = 'xwalkdebug'
  # Update android project for app and xwalk_core_library.
  update_project_cmd = [android_path, 'update', 'project',
                        '--path', app_dir,
                        '--target', target_string,
                        '--name', name]
  if options.mode == 'embedded':
    print(' * Updating project with xwalk_core_library')
    RunCommand([android_path, 'update', 'lib-project',
                '--path', os.path.join(app_dir, EMBEDDED_LIBRARY),
                '--target', target_string])
    update_project_cmd.extend(['-l', EMBEDDED_LIBRARY])
  elif options.mode == 'shared' or options.mode == 'download':
    print(' * Updating project with xwalk_shared_library')
    RunCommand([android_path, 'update', 'lib-project',
                '--path', os.path.join(app_dir, SHARED_LIBRARY),
                '--target', target_string])
    update_project_cmd.extend(['-l', SHARED_LIBRARY])
  else:
    print(' * Updating project')
  RunCommand(update_project_cmd)
  # Enable proguard.
  if options.mode == 'embedded' and options.enable_proguard:
    print(' * Enabling proguard config files')
    # Enable proguard in project.properties.
    if not os.path.exists(os.path.join(app_dir, 'project.properties')):
      print('Error, project.properties file not found!')
      sys.exit(14)
    # BUGFIX: previously used the Python 2-only file() builtin and never
    # closed the handle; open() inside a with-block fixes both.
    with open(os.path.join(app_dir, 'project.properties'), 'a') as file_prop:
      file_prop.write('proguard.config=${sdk.dir}/tools/proguard/'
                      'proguard-android.txt:proguard-xwalk.txt')
    # Add proguard cfg file.
    if not os.path.exists(os.path.join(app_dir, 'proguard-xwalk.txt')):
      print('Error, proguard config file for Crosswalk not found!')
      sys.exit(14)
  # Check whether external extensions are included.
  print(' * Checking for external extensions')
  extensions_string = 'xwalk-extensions'
  extensions_dir = os.path.join(app_dir, extensions_string)
  external_extension_jars = FindExtensionJars(extensions_dir)
  for external_extension_jar in external_extension_jars:
    shutil.copyfile(external_extension_jar,
                    os.path.join(app_dir, 'libs',
                                 os.path.basename(external_extension_jar)))
  if options.mode == 'embedded':
    print (' * Copying native libraries for %s' % options.arch)
    # Remove existing native libraries in xwalk_core_library, they are
    # probably for the last execution to make apk for another CPU arch.
    # And then copy the native libraries for the specified arch into
    # xwalk_core_library.
    arch = ConvertArchNameToArchFolder(options.arch)
    if not arch:
      print ('Invalid CPU arch: %s.' % arch)
      sys.exit(10)
    native_path = os.path.join(app_dir, 'native_libs', arch)
    library_path = os.path.join(app_dir, EMBEDDED_LIBRARY, 'libs')
    raw_path = os.path.join(app_dir, EMBEDDED_LIBRARY, 'res', 'raw')
    if options.enable_lzma:
      contains_library = ContainsCompressedLibrary
      clean_library = CleanCompressedLibrary
      copy_library = CopyCompressedLibrary
    else:
      contains_library = ContainsNativeLibrary
      clean_library = CleanNativeLibrary
      copy_library = CopyNativeLibrary
    # cleanup previous build's library first.
    for dir_name in os.listdir(library_path):
      clean_library(library_path, dir_name)
    if options.native_extensions:
      CheckValidationOfExpectedLibraryArch(options.native_extensions,
                                           options.arch)
      CopyNativeExtensionFile('.so',
                              os.path.join(options.native_extensions, arch),
                              native_path)
    if contains_library(native_path):
      copy_library(native_path, library_path, raw_path, arch)
    else:
      print('No %s native library has been found for creating a Crosswalk '
            'embedded APK.' % arch)
      sys.exit(10)
  else:
    # Shared/download modes: stage native extensions for every ABI.
    if options.native_extensions:
      for arch_name in ALL_ARCHITECTURES:
        arch = ConvertArchNameToArchFolder(arch_name)
        extension_path = os.path.join(app_dir, SHARED_LIBRARY, 'libs', arch)
        library_path = os.path.join(options.native_extensions, arch)
        CheckValidationOfExpectedLibraryArch(library_path, arch_name)
        os.mkdir(extension_path)
        CopyNativeExtensionFile('.so',
                                os.path.join(options.native_extensions, arch),
                                extension_path)
  if options.project_only:
    print (' (Skipping apk package creation)')
    return
  # Build the APK.
  if options.mode == 'embedded':
    print(' * Building Android apk package with Crosswalk embedded' +
          arch_string)
  else:
    print(' * Building Android apk package')
  ant_path = Which('ant')
  ant_cmd = [ant_path, 'release', '-f', os.path.join(app_dir, 'build.xml')]
  ant_cmd.extend(['-Dkey.store=%s' % os.path.abspath(key_store)])
  ant_cmd.extend(['-Dkey.alias=%s' % key_alias])
  if key_code:
    ant_cmd.extend(['-Dkey.store.password=%s' % key_code])
  if key_alias_code:
    ant_cmd.extend(['-Dkey.alias.password=%s' % key_alias_code])
  # Keep VCS metadata and editor junk out of the packaged assets.
  ignore_properties = "!.svn:!.git:.*:!CVS:!thumbs.db:!picasa.ini:!*.scc:*~"
  ant_cmd.extend(['-Daapt.ignore.assets=%s' % ignore_properties])
  cmd_display = ' '.join([str(item) for item in ant_cmd])
  if options.verbose:
    print('Executing:\n %s\n' % cmd_display)
  else:
    ant_cmd.extend(['-quiet'])
  ant_result = subprocess.call(ant_cmd)
  if ant_result != 0:
    print('Command "%s" exited with non-zero exit code %d'
          % (cmd_display, ant_result))
    sys.exit(ant_result)
  # Copy the signed APK to the target directory with a versioned name.
  src_file = os.path.join(app_dir, 'bin', '%s-release.apk' % name)
  package_name = name
  if options.app_version:
    package_name += ('_' + options.app_version)
  if options.mode == 'shared' or options.mode == 'download':
    dst_file = os.path.join(options.target_dir, '%s.apk' % package_name)
  elif options.mode == 'embedded':
    dst_file = os.path.join(options.target_dir,
                            '%s_%s.apk' % (package_name, options.arch))
  shutil.copyfile(src_file, dst_file)
  print(' (Location: %s)' % dst_file)
  # Copy proguard dumping files.
  if options.mode == 'embedded' and options.enable_proguard \
      and not options.project_dir:
    proguard_dir = os.path.join(app_dir, 'bin/proguard/')
    if os.path.exists(proguard_dir):
      for afile in os.listdir(proguard_dir):
        if afile.endswith('.txt'):
          shutil.copy(os.path.join(proguard_dir, afile), xwalk_dir)
    else:
      print('Warning:Cannot find proguard dumping directory!')
def PrintPackageInfo(options, name, packaged_archs):
  """Summarize the produced APKs and list officially supported
  architectures that were not packaged."""
  package_name_version = os.path.join(options.target_dir, name)
  if options.app_version:
    package_name_version += '_' + options.app_version
  if not packaged_archs:
    print ('\nA non-platform specific APK for the web application "%s" was '
           'generated successfully at:\n %s.apk.\n'
           % (name, package_name_version))
    if options.mode == 'shared':
      print ('It requires a shared Crosswalk Runtime to be present.')
    return
  print('\nApplication APKs were created for the following architectures:')
  for arch in sorted(packaged_archs):
    print(' * %s' % arch)
  missing_architectures = set(SUPPORTED_ARCHITECTURES) - set(packaged_archs)
  if missing_architectures:
    print('Consider building for the following architectures as well:')
    for arch in sorted(missing_architectures):
      print(' * %s' % arch)
    print ('If you submit this application to an application store, please '
           'submit packages for all architectures. Instructions for submitting '
           'multiple APKs to the Google Play Store are available here:')
    print ('https://software.intel.com/en-us/html5/articles/submitting'
           '-multiple-crosswalk-apk-to-google-play-store')
def CheckSystemRequirements():
  ''' Check for android, ant, template dir '''
  sys.stdout.write('Checking system requirements...')
  sys.stdout.flush()
  # check android install
  android_path = Which('android')
  if android_path is None:
    print('failed\nThe "android" binary could not be found. Check your Android '
          'SDK installation and your PATH environment variable.')
    sys.exit(1)
  if GetAndroidApiLevel(android_path) < 21:
    print('failed\nPlease install Android API level (>=21) first.')
    sys.exit(3)
  # Check ant install
  if Which('ant') is None:
    print('failed\nAnt could not be found. Please make sure it is installed.')
    sys.exit(4)
  print('ok')
def MakeCompressedLibrary(lib_dir):
  """LZMA-compress libxwalkcore.so under *lib_dir* in place; always True."""
  # 'lzma -f' replaces the library with libxwalkcore.so.lzma.
  RunCommand(['lzma', '-f', os.path.join(lib_dir, NATIVE_LIBRARY)])
  return True
def MakeNativeLibrary(lib_dir):
  """Decompress libxwalkcore.so.lzma under *lib_dir* in place; always True."""
  RunCommand(['lzma', '-d', os.path.join(lib_dir, NATIVE_LIBRARY + '.lzma')])
  return True
def MakeSharedApk(options, app_info, app_dir):
  """Stage xwalk_shared_library inside the app folder, then build."""
  shutil.copytree(os.path.join(xwalk_dir, SHARED_LIBRARY),
                  os.path.join(app_dir, SHARED_LIBRARY))
  Execution(options, app_info)
def MakeEmbeddedApk(options, app_info, app_dir, packaged_archs):
  """Build one embedded-mode APK per requested (or available) architecture.

  Successfully packaged arch names are appended to *packaged_archs*.
  Exits with code 13 when no APK could be produced at all.
  """
  # Copy xwalk_core_library into app folder and move the native libraries out.
  # When making apk for specified CPU arch, will only include the
  # corresponding native library by copying it back into xwalk_core_library.
  target_library_path = os.path.join(app_dir, EMBEDDED_LIBRARY)
  shutil.copytree(os.path.join(xwalk_dir, EMBEDDED_LIBRARY),
                  target_library_path)
  library_path = os.path.join(target_library_path, 'libs')
  native_path = os.path.join(app_dir, 'native_libs')
  os.makedirs(native_path)
  available_archs = []
  # With LZMA enabled we look for (or create) compressed libraries instead.
  if options.enable_lzma:
    contains_library = ContainsCompressedLibrary
    make_library = MakeCompressedLibrary
  else:
    contains_library = ContainsNativeLibrary
    make_library = MakeNativeLibrary
  # make_library() converts the library in place when the desired form is
  # missing; it returns True, so qualifying dirs are always moved.
  for dir_name in os.listdir(library_path):
    lib_dir = os.path.join(library_path, dir_name)
    if os.path.isdir(lib_dir) and \
        (contains_library(lib_dir) or make_library(lib_dir)):
      shutil.move(lib_dir, os.path.join(native_path, dir_name))
      available_archs.append(dir_name)
  if options.arch:
    Execution(options, app_info)
    packaged_archs.append(options.arch)
  else:
    # If the arch option is unspecified, all of available platform APKs
    # will be generated.
    for arch in ALL_ARCHITECTURES:
      if ConvertArchNameToArchFolder(arch) in available_archs:
        options.arch = arch
        Execution(options, app_info)
        packaged_archs.append(options.arch)
      else:
        print('Warning: failed to create package for arch "%s" '
              'due to missing native library' % arch)
    if len(packaged_archs) == 0:
      print('No packages created, aborting')
      sys.exit(13)
@atexit.register
def PrintDeprecationMessage():
  # Registered with atexit so the warning is printed on every exit path,
  # including the many sys.exit() error paths in this script.
  print('\n\nWARNING!!! The make_apk tool is deprecated and will be '
        'removed soon.\n\n'
        'make_apk is replaced by crosswalk-app-tools, which can be installed '
        'with the\n'
        'command \'npm install crosswalk-app-tools\'. For more information, '
        'see the blog\n'
        'post below:\n\n'
        'https://crosswalk-project.org/blog/crosswalk-app-tools-07.html\n')
def MakeApk(options, app_info, manifest):
  """Top-level driver: verify tools, customize the project and build APKs.

  Optionally exports the customized project to --project-dir, and prints
  a package summary unless --project-only was given.
  """
  CheckSystemRequirements()
  Customize(options, app_info, manifest)
  name = app_info.android_name
  app_dir = GetBuildDir(name)
  packaged_archs = []
  if options.mode == 'shared' or options.mode == 'download':
    MakeSharedApk(options, app_info, app_dir)
  else:  # default
    MakeEmbeddedApk(options, app_info, app_dir, packaged_archs)
  # if project_dir, save build directory
  if options.project_dir:
    print ('\nCreating project directory')
    save_dir = os.path.join(options.project_dir, name)
    if CreateAndCopyDir(app_dir, save_dir, True):
      print (' A project directory was created successfully in:\n %s' %
             os.path.abspath(save_dir))
      print (' To manually generate an APK, run the following in that '
             'directory:')
      print (' ant release -f build.xml')
      print (' For more information, see:\n'
             ' http://developer.android.com/tools/building/'
             'building-cmdline.html')
    else:
      print ('Error: Unable to create a project directory during the build. '
             'Please check the directory passed in --project-dir, '
             'available disk space, and write permission.')
  if not options.project_only:
    PrintPackageInfo(options, name, packaged_archs)
def main(argv):
  """Parse and validate the command line, then drive APK creation.

  Returns the process exit code (0 on success).  Several validation
  failures call parser.error() or sys.exit() directly instead of returning.
  """
  parser = optparse.OptionParser()
  parser.add_option('-v', '--version', action='store_true',
                    dest='version', default=False,
                    help='The version of this python tool.')
  parser.add_option('--verbose', action="store_true",
                    dest='verbose', default=False,
                    help='Print debug messages.')
  # NOTE(review): this help string has a typo ('luanch') and is missing
  # spaces between some concatenated fragments; runtime text left as-is.
  info = ('The packaging mode of the web application. The value \'shared\' '
          'means that the runtime is shared across multiple application '
          'instances and that the runtime needs to be distributed separately. '
          'The value \'embedded\' means that the runtime is embedded into the '
          'application itself and distributed along with it.'
          'The value \'download\' means that the runtime is downloaded from the'
          'xwalk-apk-url at the first luanch of application.'
          'Set the default mode as \'embedded\'. For example: --mode=embedded')
  parser.add_option('--mode', choices=('embedded', 'shared', 'download'),
                    default='embedded', help=info)
  info = ('The target architecture of the embedded runtime. Supported values: '
          '%s. If not specified, APKs for all available architectures will be '
          'generated.' % ', '.join(ALL_ARCHITECTURES))
  parser.add_option('--arch', choices=ALL_ARCHITECTURES, help=info)
  group = optparse.OptionGroup(parser, 'Application Source Options',
      'This packaging tool supports 3 kinds of web application source: '
      '1) XPK package; 2) manifest.json; 3) various command line options, '
      'for example, \'--app-url\' for website, \'--app-root\' and '
      '\'--app-local-path\' for local web application.')
  info = ('The path of the XPK package. For example, --xpk=/path/to/xpk/file')
  group.add_option('--xpk', help=info)
  info = ('The manifest file with the detail description of the application. '
          'For example, --manifest=/path/to/your/manifest/file')
  group.add_option('--manifest', help=info)
  info = ('The url of application. '
          'This flag allows to package website as apk. For example, '
          '--app-url=http://www.intel.com')
  group.add_option('--app-url', help=info)
  info = ('The root path of the web app. '
          'This flag allows to package local web app as apk. For example, '
          '--app-root=/root/path/of/the/web/app')
  group.add_option('--app-root', help=info)
  info = ('The relative path of entry file based on the value from '
          '\'app_root\'. This flag should work with \'--app-root\' together. '
          'For example, --app-local-path=/relative/path/of/entry/file')
  group.add_option('--app-local-path', help=info)
  info = ('The download URL of the Crosswalk runtime library APK. '
          'The built-in updater uses the Android download manager to fetch '
          'the url. '
          'For example, --xwalk-apk-url=http://myhost/XWalkRuntimeLib.apk')
  group.add_option('--xwalk-apk-url', help=info)
  parser.add_option_group(group)
  # Mandatory options group
  group = optparse.OptionGroup(parser, 'Mandatory arguments',
      'They are used for describing the APK information through '
      'command line options.')
  info = ('The apk name. For example, --name="Your Application Name"')
  group.add_option('--name', help=info)
  info = ('The package name. For example, '
          '--package=com.example.YourPackage')
  group.add_option('--package', help=info)
  parser.add_option_group(group)
  # Optional options group (alphabetical)
  group = optparse.OptionGroup(parser, 'Optional arguments',
      'They are used for various settings for applications through '
      'command line options.')
  group.add_option('--app-version',
                   help='The application version, corresponding to the '
                        'android:versionName attribute of the Android App '
                        'Manifest. If the version is in the format "ab.cd.efg", '
                        'like "1", "3.45" or "12.3.976", an android:versionCode '
                        'will be generated automatically if "--app-versionCode" '
                        'or "--app-versionCodeBase" are not specified.')
  group.add_option('--app-versionCode', type='int',
                   help='An integer corresponding to the android:versionCode '
                        'attribute of the Android App Manifest. If specified, the '
                        'value of the "--app-version" option is not used to set '
                        'the value of the android:versionCode attribute.')
  group.add_option('--app-versionCodeBase', type='int',
                   help='An integer with at most 7 digits used to set the '
                        'value of the android:versionCode attribute of the Android '
                        'App Manifest if "--app-versionCode" is not specified. '
                        'If both "--app-versionCodeBase" and "--app-version" are '
                        'passed, the former will be used to set the '
                        'android:versionCode attribute.')
  info = ('The description of the application. For example, '
          '--description=YourApplicationDescription')
  group.add_option('--description', help=info)
  info = ('Enable proguard to shrink and obfuscate java classes of Crosswalk '
          'and Chromium, only works with embedded mode.')
  group.add_option('--enable-proguard', action='store_true', default=False,
                   help=info)
  group.add_option('--enable-remote-debugging', action='store_true',
                   dest='enable_remote_debugging', default=False,
                   help='Enable remote debugging.')
  group.add_option('--use-animatable-view', action='store_true',
                   dest='use_animatable_view', default=False,
                   help='Enable using animatable view (TextureView).')
  info = ('The list of external extension paths splitted by OS separators. '
          'The separators are \':\' , \';\' and \':\' on Linux, Windows and '
          'Mac OS respectively. For example, '
          '--extensions=/path/to/extension1:/path/to/extension2.')
  group.add_option('--extensions', help=info)
  info = ('The native external extension path. Put the architecture-specific '
          'library file in the corresponding architecture folder '
          '(armeabi-v7a/arm64-v8a/x86/x86_64) under '
          'the path. For example, '
          '--native-extensions=/path/to/native_extension, '
          'architecture-specific folders, which contain the library, can be '
          'found under the path.')
  group.add_option('--native-extensions', help=info)
  group.add_option('-f', '--fullscreen', action='store_true',
                   dest='fullscreen', default=False,
                   help='Make application fullscreen.')
  group.add_option('--keep-screen-on', action='store_true', default=False,
                   help='Support keeping screen on')
  info = ('The path of application icon. '
          'Such as: --icon=/path/to/your/customized/icon')
  group.add_option('--icon', help=info)
  info = ('The orientation of the web app\'s display on the device. '
          'For example, --orientation=landscape. The default value is '
          '\'unspecified\'. The permitted values are from Android: '
          'http://developer.android.com/guide/topics/manifest/'
          'activity-element.html#screen')
  group.add_option('--orientation', help=info)
  info = ('The list of permissions to be used by web application. For example, '
          '--permissions=geolocation:webgl')
  group.add_option('--permissions', help=info)
  info = ('Create an Android project directory with Crosswalk at this location.'
          ' (See project-only option below)')
  group.add_option('--project-dir', help=info)
  info = ('Must be used with project-dir option. Create an Android project '
          'directory with Crosswalk but do not build the APK package')
  group.add_option('--project-only', action='store_true', default=False,
                   dest='project_only', help=info)
  info = ('Packaging tool will move the output APKs to the target directory')
  group.add_option('--target-dir', default=os.getcwd(), help=info)
  info = ('Use command lines.'
          'Crosswalk is powered by Chromium and supports Chromium command line.'
          'For example, '
          '--xwalk-command-line=\'--chromium-command-1 --xwalk-command-2\'')
  group.add_option('--xwalk-command-line', default='', help=info)
  parser.add_option_group(group)
  # Keystore options group
  group = optparse.OptionGroup(parser, 'Keystore Options',
      'The keystore is a signature from web developer, it\'s used when '
      'developer wants to distribute the applications.')
  info = ('The path to the developer keystore. For example, '
          '--keystore-path=/path/to/your/developer/keystore')
  group.add_option('--keystore-path', help=info)
  info = ('The alias name of keystore. For example, --keystore-alias=name')
  group.add_option('--keystore-alias', help=info)
  info = ('The passcode of keystore. For example, --keystore-passcode=code')
  group.add_option('--keystore-passcode', help=info)
  info = ('Passcode for alias\'s private key in the keystore, '
          'For example, --keystore-alias-passcode=alias-code')
  group.add_option('--keystore-alias-passcode', help=info)
  info = ('Minify and obfuscate javascript and css.'
          '--compressor: compress javascript and css.'
          '--compressor=js: compress javascript.'
          '--compressor=css: compress css.')
  group.add_option('--compressor', dest='compressor', action='callback',
                   callback=ParseParameterForCompressor, type='string',
                   nargs=0, help=info)
  parser.add_option_group(group)
  parser.add_option('--enable-lzma', action='store_true', dest='enable_lzma',
                    default=False, help='Enable LZMA.')
  # NOTE: parse_args() reads sys.argv itself; the argv parameter is only
  # consulted for the bare-invocation check just below.
  options, _ = parser.parse_args()
  if len(argv) == 1:
    parser.print_help()
    return 0
  if options.version:
    if os.path.isfile('VERSION'):
      print(GetVersion('VERSION'))
      return 0
    else:
      parser.error('VERSION was not found, so Crosswalk\'s version could not '
                   'be determined.')
  xpk_temp_dir = ''
  if options.xpk:
    # Unpack the XPK into a scratch directory; it is removed again in the
    # finally block at the bottom of this function.
    xpk_name = os.path.splitext(os.path.basename(options.xpk))[0]
    xpk_temp_dir = tempfile.mkdtemp(prefix="%s-" % xpk_name + '_xpk')
    CleanDir(xpk_temp_dir)
    ParseXPK(options, xpk_temp_dir)
  if options.manifest:
    options.manifest = os.path.abspath(options.manifest)
    if not os.path.isfile(options.manifest):
      print('Error: The manifest file does not exist.')
      sys.exit(8)
    try:
      manifest = ParseManifest(options)
    except SystemExit as ec:
      return ec.code
  else:
    manifest = None
  # The checks here are really convoluted, but at the moment make_apk
  # misbehaves if any of the following conditions is true.
  if options.app_url:
    # 1) --app-url must be passed without either --app-local-path or
    #    --app-root.
    if options.app_root or options.app_local_path:
      parser.error('You must pass either "--app-url" or "--app-local-path" '
                   'with "--app-root", but not all.')
  else:
    # 2) --app-url is not passed but only one of --app-local-path and
    #    --app-root is set.
    if bool(options.app_root) != bool(options.app_local_path):
      parser.error('You must specify both "--app-local-path" and '
                   '"--app-root".')
    # 3) None of --app-url, --app-local-path and --app-root are passed.
    elif not options.app_root and not options.app_local_path:
      parser.error('You must pass either "--app-url" or "--app-local-path" '
                   'with "--app-root".')
  if options.permissions:
    permission_list = options.permissions.split(':')
  else:
    print('Warning: all supported permissions on Android port are added. '
          'Refer to https://github.com/crosswalk-project/'
          'crosswalk-website/wiki/Crosswalk-manifest')
    permission_list = permission_mapping_table.keys()
  options.permissions = HandlePermissionList(permission_list)
  options.icon_dict = {}
  if not options.name:
    parser.error('An APK name is required. Please use the "--name" option.')
  if not options.package:
    parser.error('A package name is required. Please use the "--package" '
                 'option.')
  VerifyPackageName(options.package)
  if options.mode != 'embedded' and options.enable_lzma:
    parser.error('LZMA is only available in embedded mode.')
  if options.mode == 'download' and not options.xwalk_apk_url:
    print('\nmake_apk.py error: Please use Option --xwalk-apk-url or xwalk_apk_url'
          ' in manifest to specify the runtime Apk URL in download mode')
    sys.exit(8)
  if (options.app_root and options.app_local_path and
      not os.path.isfile(os.path.join(options.app_root,
                                      options.app_local_path))):
    print('Please make sure that the local path file of launching app '
          'does exist.')
    sys.exit(7)
  if options.target_dir:
    # Normalize the output directory (expand ~, make absolute) and create it.
    target_dir = os.path.abspath(os.path.expanduser(options.target_dir))
    options.target_dir = target_dir
    if not os.path.isdir(target_dir):
      os.makedirs(target_dir)
  if options.project_only and not options.project_dir:
    print('\nmake_apk.py error: Option --project-only must be used '
          'with --project-dir')
    sys.exit(8)
  if options.enable_proguard and options.mode != 'embedded':
    print('\nmake_apk.py error: Option --enable-proguard only works with '
          'embedded mode.')
    sys.exit(14)
  try:
    app_info = AppInfo()
    MakeApk(options, app_info, manifest)
  except SystemExit as ec:
    return ec.code
  finally:
    # NOTE(review): if AppInfo() itself raises, app_info is unbound here and
    # this cleanup raises NameError — TODO confirm and guard.
    CleanDir(GetBuildDir(app_info.android_name))
    CleanDir(xpk_temp_dir)
  return 0
if __name__ == '__main__':
  # Exit with main()'s return code; a Ctrl-C prints a bare newline instead
  # of dumping a KeyboardInterrupt traceback.
  try:
    sys.exit(main(sys.argv))
  except KeyboardInterrupt:
    print('')
|
thobbs/cassandra-dtest | refs/heads/master | stress_tool_test.py | 2 | from __future__ import division
from dtest import Tester
from tools import rows_to_list, since
@since('3.0')
class TestStressSparsenessRatio(Tester):
    """
    @jira_ticket CASSANDRA-9522

    Checks that `cassandra-stress` honours its `row-population-ratio`
    parameter when populating rows.
    """

    def uniform_ratio_test(self):
        """
        With the spec 'uniform(5..15)/50', roughly 80% of the written
        values should end up non-null.
        """
        self.distribution_template('uniform(5..15)/50', .8, .1)

    def fixed_ratio_test(self):
        """
        With the spec 'fixed(1)/3', roughly 1/3 of the written values
        should end up non-null.
        """
        self.distribution_template('fixed(1)/3', 1 - 1 / 3, .01)

    def distribution_template(self, ratio_spec, expected_ratio, delta):
        """
        Parameterized body shared by the tests above.

        @param ratio_spec the string passed to `row-population-ratio` in the call to `cassandra-stress`
        @param expected_ratio the expected ratio of null/non-null values in the values written
        @param delta the acceptable delta between the expected and actual ratios
        """
        self.cluster.populate(1).start(wait_for_binary_proto=True)
        node = self.cluster.nodelist()[0]
        stress_args = ['write', 'n=1000', 'no-warmup',
                       '-rate', 'threads=50',
                       '-col', 'n=FIXED(50)',
                       '-insert',
                       'row-population-ratio={ratio_spec}'.format(ratio_spec=ratio_spec)]
        node.stress(stress_args)
        session = self.patient_cql_connection(node)
        rows = rows_to_list(session.execute('SELECT * FROM keyspace1.standard1;'))
        null_count = sum(row.count(None) for row in rows)
        cell_count = sum(len(row) for row in rows)
        observed = float(null_count) / cell_count
        self.assertAlmostEqual(observed, expected_ratio, delta=delta)
|
mahmoudhossam/Google-Python-Exercises | refs/heads/master | basic/string1.py | 1 | #!/usr/bin/python2 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
  """Return 'Number of donuts: <count>', or '... many' when count >= 10."""
  quantity = 'many' if count >= 10 else count
  return 'Number of donuts: %s' % quantity
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
  """Return the first two plus the last two chars of s; '' if len(s) < 2."""
  if len(s) >= 2:
    return s[:2] + s[-2:]
  return ''
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
  """Replace every occurrence of s[0] after position 0 with '*'.

  e.g. 'babble' -> 'ba**le'.  Assumes len(s) >= 1.
  """
  first = s[0]
  return first + s[1:].replace(first, '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
  """Return '<a> <b>' with the first two chars of each string swapped.

  e.g. mix_up('mix', 'pod') -> 'pox mid'.  Assumes len(a) >= 2 and
  len(b) >= 2, as the exercise states.

  The previous implementation used str.replace, which substitutes *every*
  occurrence of a character and applied the substitutions sequentially, so
  inputs with repeated or overlapping characters were corrupted (e.g.
  mix_up('aab', 'xyz') returned 'yyb aaz' instead of 'xyb aaz').  Slicing
  swaps exactly the first two positions.
  """
  return b[:2] + a[2:] + ' ' + a[:2] + b[2:]
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print a one-line pass/fail report comparing got against expected."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = '  X '
  # repr() makes string results unambiguous (quotes, escapes visible).
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
  """Exercise each exercise function above with sample inputs via test()."""
  print 'donuts'
  # Each line calls donuts, compares its result to the expected for that call.
  test(donuts(4), 'Number of donuts: 4')
  test(donuts(9), 'Number of donuts: 9')
  test(donuts(10), 'Number of donuts: many')
  test(donuts(99), 'Number of donuts: many')
  print
  print 'both_ends'
  test(both_ends('spring'), 'spng')
  test(both_ends('Hello'), 'Helo')
  test(both_ends('a'), '')
  test(both_ends('xyz'), 'xyyz')
  print
  print 'fix_start'
  test(fix_start('babble'), 'ba**le')
  test(fix_start('aardvark'), 'a*rdv*rk')
  test(fix_start('google'), 'goo*le')
  test(fix_start('donut'), 'donut')
  print
  print 'mix_up'
  test(mix_up('mix', 'pod'), 'pox mid')
  test(mix_up('dog', 'dinner'), 'dig donner')
  test(mix_up('gnash', 'sport'), 'spash gnort')
  test(mix_up('pezzy', 'firm'), 'fizzy perm')
|
mfilotto/hipache | refs/heads/master | test/functional/test_simple.py | 14 |
import base
class SimpleTestCase(base.TestCase):
    """Functional tests for hipache proxy routing to HTTP backends."""

    def test_simple(self):
        """ Simple test: valid backend """
        # One healthy backend: the proxied request should return 200.
        port = 2080
        self.spawn_httpd(port)
        self.register_frontend('foobar', ['http://localhost:{0}'.format(port)])
        self.assertEqual(self.http_request('foobar'), 200)

    def test_multiple_backends(self):
        """ Simple test with 3 backends """
        port = 2080
        self.spawn_httpd(port)
        self.spawn_httpd(port + 1)
        self.spawn_httpd(port + 2)
        self.register_frontend('foobar', [
            'http://localhost:{0}'.format(port),
            'http://localhost:{0}'.format(port + 1),
            'http://localhost:{0}'.format(port + 2)
        ])
        # Let's make 10 request to make sure we reach everyone
        for i in xrange(10):
            self.assertEqual(self.http_request('foobar'), 200)

    def test_not_found(self):
        """ Test a frontend which is not here """
        # No frontend was registered, so the proxy should answer 400.
        self.assertEqual(self.http_request('foobar'), 400)

    def test_one_failing(self):
        """ One of the backends returns a 502 """
        port = 2080
        self.spawn_httpd(port, code=502)
        self.spawn_httpd(port + 1, code=200)
        # Duplicating the backend in the conf
        self.register_frontend('foobar', [
            'http://localhost:{0}'.format(port),
            'http://localhost:{0}'.format(port + 1)
        ])
        # Generate some traffic to force the failing one to be removed
        codes = []
        for i in xrange(10):
            codes.append(self.http_request('foobar'))
        # The dead backend must have been hit at least once before eviction.
        self.assertIn(502, codes)
        # Then all request should reach the healthy one
        for i in xrange(10):
            self.assertEqual(self.http_request('foobar'), 200)

    # XXX this is failing randomly - the same test now lives in mocha
    # def test_one_crashed(self):
    #     """ One of the backends does not bind """
    #     port = 2080
    #     self.spawn_httpd(port, code=200)
    #     # Duplicating the backend in the conf
    #     self.register_frontend('foobar', [
    #         'http://localhost:{0}'.format(port),
    #         'http://localhost:{0}'.format(port + 1)
    #         ])
    #     # Generate some traffic to force the failing one to be removed
    #     for i in xrange(5):
    #         self.http_request('foobar')
    #     # Then all request should reach the healthy one
    #     for i in xrange(5):
    #         self.assertEqual(self.http_request('foobar'), 200)
|
Juraci/tempest | refs/heads/master | tempest/api/volume/v2/test_volumes_list.py | 8 | # Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves.urllib import parse
from tempest.api.volume import base
from tempest import test
class VolumesV2ListTestJSON(base.BaseVolumeTest):
    """
    volumes v2 specific tests.

    This test creates a number of 1G volumes. To run successfully,
    ensure that the backing file for the volume group that Nova uses
    has space for at least 3 1G volumes!

    If you are running a Devstack environment, ensure that the
    VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
    """

    @classmethod
    def setup_clients(cls):
        super(VolumesV2ListTestJSON, cls).setup_clients()
        cls.client = cls.volumes_client

    @classmethod
    def resource_setup(cls):
        super(VolumesV2ListTestJSON, cls).resource_setup()
        # Create 3 test volumes
        cls.volume_list = []
        cls.volume_id_list = []
        cls.metadata = {'Type': 'work'}
        for i in range(3):
            volume = cls.create_volume(metadata=cls.metadata)
            # Re-fetch to get the fully populated volume representation.
            volume = cls.client.show_volume(volume['id'])
            cls.volume_list.append(volume)
            cls.volume_id_list.append(volume['id'])

    @classmethod
    def resource_cleanup(cls):
        # Delete the created volumes
        for volid in cls.volume_id_list:
            cls.client.delete_volume(volid)
            cls.client.wait_for_resource_deletion(volid)
        super(VolumesV2ListTestJSON, cls).resource_cleanup()

    @test.idempotent_id('2a7064eb-b9c3-429b-b888-33928fc5edd3')
    def test_volume_list_details_with_multiple_params(self):
        # List volumes detail using combined condition
        def _list_details_with_multiple_params(limit=2,
                                               status='available',
                                               sort_dir='asc',
                                               sort_key='id'):
            # Check that limit/status/sort parameters can be combined and
            # that the returned page is correctly sized, filtered and sorted.
            params = {'limit': limit,
                      'status': status,
                      'sort_dir': sort_dir,
                      'sort_key': sort_key
                      }
            fetched_volume = self.client.list_volumes(detail=True,
                                                      params=params)
            self.assertEqual(limit, len(fetched_volume),
                             "The count of volumes is %s, expected:%s " %
                             (len(fetched_volume), limit))
            self.assertEqual(status, fetched_volume[0]['status'])
            self.assertEqual(status, fetched_volume[1]['status'])
            val0 = fetched_volume[0][sort_key]
            val1 = fetched_volume[1][sort_key]
            if sort_dir == 'asc':
                self.assertTrue(val0 < val1,
                                "%s < %s" % (val0, val1))
            elif sort_dir == 'desc':
                self.assertTrue(val0 > val1,
                                "%s > %s" % (val0, val1))

        _list_details_with_multiple_params()
        _list_details_with_multiple_params(sort_dir='desc')

    def _test_pagination(self, resource, ids=None, limit=1, **kwargs):
        """Check list pagination functionality for a resource.

        This method requests the list of resources and follows pagination
        links.

        If an iterable is supplied in ids it will check that all ids are
        retrieved and that only those are listed, that we will get a next
        link for an empty page if the number of items is divisible by used
        limit (this is expected behavior).

        We can specify number of items per request using limit argument.
        """
        # Get list method for the type of resource from the client
        client = getattr(self, resource + '_client')
        method = getattr(client, 'list_' + resource)
        # Include limit in params for list request
        params = kwargs.pop('params', {})
        params['limit'] = limit
        # Store remaining items we are expecting from list
        if ids is not None:
            remaining = list(ids)
        else:
            remaining = None
        # Mark that we are not coming from a next link
        next = None
        while True:
            # Get a list page
            response = method(return_body=True, params=params, **kwargs)
            # If we have to check ids
            if remaining is not None:
                # Confirm we receive expected number of elements
                num_expected = min(len(remaining), limit)
                self.assertEqual(num_expected, len(response[resource]),
                                 'Requested %(#expect)d but got %(#received)d '
                                 % {'#expect': num_expected,
                                    '#received': len(response[resource])})
                # For each received element
                for element in response[resource]:
                    element_id = element['id']
                    # Check it's one of expected ids
                    self.assertIn(element_id,
                                  ids,
                                  'Id %(id)s is not in expected ids %(ids)s' %
                                  {'id': element_id, 'ids': ids})
                    # If not in remaining, we have received it twice
                    self.assertIn(element_id,
                                  remaining,
                                  'Id %s was received twice' % element_id)
                    # We no longer expect it
                    remaining.remove(element_id)
            # If we come from a next link check that absolute url is the same
            # as the one used for this request
            if next:
                self.assertEqual(next, response.response['content-location'])
            # Get next from response
            next = None
            for link in response.get(resource + '_links', ()):
                if link['rel'] == 'next':
                    next = link['href']
                    break
            # Check if we have next and we shouldn't or the other way around
            if remaining is not None:
                if remaining or (num_expected and len(ids) % limit == 0):
                    self.assertIsNotNone(next, 'Missing link to next page')
                else:
                    self.assertIsNone(next, 'Unexpected link to next page')
            # If we can follow to the next page, get params from url to make
            # request in the form of a relative URL
            if next:
                params = parse.urlparse(next).query
            # If cannot follow make sure it's because we have finished
            else:
                self.assertListEqual([], remaining or [],
                                     'No more pages reported, but still '
                                     'missing ids %s' % remaining)
                break

    @test.idempotent_id('e9138a2c-f67b-4796-8efa-635c196d01de')
    def test_volume_list_details_pagination(self):
        self._test_pagination('volumes', ids=self.volume_id_list, detail=True)

    @test.idempotent_id('af55e775-8e4b-4feb-8719-215c43b0238c')
    def test_volume_list_pagination(self):
        self._test_pagination('volumes', ids=self.volume_id_list, detail=False)
|
jgosmann/plume | refs/heads/master | plume/datastructure.py | 1 | import numpy as np
class EnlargeableArray(object):
    """A numpy-backed row store that grows geometrically on append.

    Rows are kept in a preallocated buffer that is doubled whenever it fills
    up, giving amortized O(1) appends.
    """

    def __init__(self, shape, expected_rows=20):
        """Create an empty array for rows of the given shape.

        :param shape: tuple, shape of a single row.
        :param expected_rows: initial capacity hint.  Clamped to >= 1;
            previously expected_rows=0 produced a zero-size buffer that
            doubling (2 * 0) could never grow, so append always failed.
        """
        self._data = np.empty((max(1, expected_rows),) + shape)
        self._num_rows = 0

    # View of only the rows appended so far (not the spare capacity).
    data = property(lambda self: self._data[:len(self)])

    def append(self, arr):
        """Append one row (anything assignable to a row of the buffer)."""
        if len(self) >= len(self._data):
            # Grow by allocating a fresh buffer and copying, instead of
            # ndarray.resize(): resize() raises ValueError whenever a view
            # handed out by the `data` property is still referenced.
            grown = np.empty((2 * len(self._data),) + self._data.shape[1:])
            grown[:self._num_rows] = self._data[:self._num_rows]
            self._data = grown
        self._data[self._num_rows] = arr
        self._num_rows += 1

    def __len__(self):
        return self._num_rows
|
robjohnson189/home-assistant | refs/heads/dev | homeassistant/components/introduction.py | 27 | """
Component that will help guide the user taking its first steps.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/introduction/
"""
import logging
import voluptuous as vol
DOMAIN = 'introduction'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config=None):
    """Setup the introduction component.

    Logs a getting-started message at INFO level and always reports
    successful setup.
    """
    log = logging.getLogger(__name__)
    # The message body is runtime output; left byte-for-byte unchanged.
    log.info("""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Hello, and welcome to Home Assistant!
We'll hope that we can make all your dreams come true.
Here are some resources to get started:
- Configuring Home Assistant:
https://home-assistant.io/getting-started/configuration/
- Available components:
https://home-assistant.io/components/
- Troubleshooting your configuration:
https://home-assistant.io/getting-started/troubleshooting-configuration/
- Getting help:
https://home-assistant.io/help/
This message is generated by the introduction component. You can
disable it in configuration.yaml.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""")
    return True
|
alexthered/kienhoc-platform | refs/heads/master | lms/djangoapps/teams/management/commands/reindex_course_team.py | 34 | """ Management command to update course_teams' search index. """
from django.core.management import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from optparse import make_option
from textwrap import dedent
from teams.models import CourseTeam
class Command(BaseCommand):
    """
    Command to reindex course_teams (single, multiple or all available).

    Examples:

      ./manage.py reindex_course_team team1 team2 - reindexes course teams with team_ids team1 and team2
      ./manage.py reindex_course_team --all - reindexes all available course teams
    """
    help = dedent(__doc__)

    can_import_settings = True

    args = "<course_team_id course_team_id ...>"

    option_list = BaseCommand.option_list + (
        make_option(
            '--all',
            action='store_true',
            dest='all',
            default=False,
            help='Reindex all course teams'
        ),
    )

    def _get_course_team(self, team_id):
        """ Returns course_team object from team_id. """
        try:
            result = CourseTeam.objects.get(team_id=team_id)
        except ObjectDoesNotExist:
            raise CommandError(u"Argument {0} is not a course_team team_id".format(team_id))
        return result

    def handle(self, *args, **options):
        """
        By convention set by django developers, this method actually executes command's actions.
        So, there could be no better docstring than emphasize this once again.
        """
        # This is ugly, but there is a really strange circular dependency that doesn't
        # happen anywhere else that I can't figure out how to avoid it :(
        from teams.search_indexes import CourseTeamIndexer

        # Validate arguments before touching the index.
        if len(args) == 0 and not options.get('all', False):
            raise CommandError(u"reindex_course_team requires one or more arguments: <course_team_id>")
        elif not settings.FEATURES.get('ENABLE_TEAMS', False):
            raise CommandError(u"ENABLE_TEAMS must be enabled to use course team indexing")

        if options.get('all', False):
            course_teams = CourseTeam.objects.all()
        else:
            course_teams = map(self._get_course_team, args)

        for course_team in course_teams:
            print "Indexing {id}".format(id=course_team.team_id)
            CourseTeamIndexer.index(course_team)
|
ericholscher/django-haystack | refs/heads/master | tests/whoosh_tests/tests/forms.py | 10 | # To ensure spelling suggestions work...
from django.conf import settings
from django.http import HttpRequest
from haystack.forms import SearchForm
from haystack.views import SearchView
from whoosh_tests.tests.whoosh_backend import LiveWhooshRoundTripTestCase
# Whoosh appears to flail on providing a useful suggestion, but since it's
# not ``None``, we know the backend is doing something. Whee.
class SpellingSuggestionTestCase(LiveWhooshRoundTripTestCase):
    """Check spelling suggestions surface through SearchForm and SearchView."""

    def setUp(self):
        # Force INCLUDE_SPELLING on for the test; the original value is
        # restored in tearDown.
        self.old_spelling_setting = settings.HAYSTACK_CONNECTIONS['default']['INCLUDE_SPELLING']
        settings.HAYSTACK_CONNECTIONS['default']['INCLUDE_SPELLING'] = True
        super(SpellingSuggestionTestCase, self).setUp()

    def tearDown(self):
        settings.HAYSTACK_CONNECTIONS['default']['INCLUDE_SPELLING'] = self.old_spelling_setting
        super(SpellingSuggestionTestCase, self).tearDown()

    def test_form_suggestion(self):
        # Whoosh yields an empty (but not None) suggestion for 'exampl'.
        form = SearchForm({'q': 'exampl'})
        self.assertEqual(form.get_suggestion(), '')

    def test_view_suggestion(self):
        view = SearchView(template='test_suggestion.html')
        mock = HttpRequest()
        mock.GET['q'] = 'exampl'
        resp = view(mock)
        self.assertEqual(resp.content, 'Suggestion: ')
|
dagnir/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/support.py | 450 | from __future__ import absolute_import, division, unicode_literals
import os
import sys
import codecs
import glob
import xml.sax.handler
base_path = os.path.split(__file__)[0]
test_dir = os.path.join(base_path, 'testdata')
sys.path.insert(0, os.path.abspath(os.path.join(base_path,
os.path.pardir,
os.path.pardir)))
from html5lib import treebuilders
del base_path
# Build a dict of available tree builders, keyed by a human-readable name.
treeTypes = {"DOM": treebuilders.getTreeBuilder("dom")}

# Try whatever etree implementations are available, from a list that are
# "supposed" to work; each import failure simply leaves that entry out.
try:
    import xml.etree.ElementTree as ElementTree
    treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
except ImportError:
    try:
        # Standalone elementtree package (pre-2.5 Pythons).
        import elementtree.ElementTree as ElementTree
        treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
    except ImportError:
        pass
try:
    import xml.etree.cElementTree as cElementTree
    treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
except ImportError:
    try:
        # Standalone C-accelerated variant.
        import cElementTree
        treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
    except ImportError:
        pass
try:
    import lxml.etree as lxml  # flake8: noqa
except ImportError:
    pass
else:
    treeTypes['lxml'] = treebuilders.getTreeBuilder("lxml")
def get_data_files(subdirectory, files='*.dat'):
    """Return the test data files matching *files* under *subdirectory*
    of the module-level ``test_dir``."""
    pattern = os.path.join(test_dir, subdirectory, files)
    return glob.glob(pattern)
class DefaultDict(dict):
    """Dict returning a fixed default value for missing keys.

    Unlike ``collections.defaultdict``, looking up a missing key does
    not insert anything into the mapping.
    """

    def __init__(self, default, *args, **kwargs):
        # Remember the fallback value, then defer to dict for the rest.
        self.default = default
        super(DefaultDict, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        # dict.get never raises; missing keys fall back to the default.
        return dict.get(self, key, self.default)
class TestData(object):
    """Iterator over the tests in one html5lib ``.dat`` test file.

    Each test is yielded as a dict-like mapping from section headings
    (lines starting with ``#``) to the text that follows them.  A new
    occurrence of *newTestHeading* starts the next test.
    """

    def __init__(self, filename, newTestHeading="data", encoding="utf8"):
        # encoding=None switches the reader (and all heading comparisons
        # below) to bytes mode.
        if encoding is None:
            self.f = open(filename, mode="rb")
        else:
            self.f = codecs.open(filename, encoding=encoding)
        self.encoding = encoding
        self.newTestHeading = newTestHeading

    def __del__(self):
        self.f.close()

    def __iter__(self):
        data = DefaultDict(None)
        key = None
        for line in self.f:
            heading = self.isSectionHeading(line)
            if heading:
                if data and heading == self.newTestHeading:
                    # Remove trailing newline
                    data[key] = data[key][:-1]
                    yield self.normaliseOutput(data)
                    data = DefaultDict(None)
                key = heading
                data[key] = "" if self.encoding else b""
            elif key is not None:
                data[key] += line
        # Flush the last test, which has no following heading.
        if data:
            yield self.normaliseOutput(data)

    def isSectionHeading(self, line):
        """If the current heading is a test section heading return the heading,
        otherwise return False"""
        # print(line)
        if line.startswith("#" if self.encoding else b"#"):
            return line[1:].strip()
        else:
            return False

    def normaliseOutput(self, data):
        # Remove trailing newlines
        for key, value in data.items():
            if value.endswith("\n" if self.encoding else b"\n"):
                data[key] = value[:-1]
        return data
def convert(stripChars):
    """Return a converter dropping *stripChars* leading characters from
    tree-dump lines (those starting with ``|``)."""
    def convertData(data):
        """convert the output of str(document) to the format used in the testcases"""
        converted = []
        for line in data.split("\n"):
            converted.append(line[stripChars:] if line.startswith("|") else line)
        return "\n".join(converted)

    return convertData


# Default converter used for expected-output sections ("| " prefix).
convertExpected = convert(2)
def errorMessage(input, expected, actual):
    """Build a unified test-failure message showing the input, the
    expected output and the actual output (all repr()-ed).

    Fixes the "Recieved" typo in the emitted message.
    """
    msg = ("Input:\n%s\nExpected:\n%s\nReceived\n%s\n" %
           (repr(input), repr(expected), repr(actual)))
    if sys.version_info.major == 2:
        # On Python 2 the message may be unicode; force it to ASCII so
        # test runners that expect bytes don't choke on it.
        msg = msg.encode("ascii", "backslashreplace")
    return msg
class TracingSaxHandler(xml.sax.handler.ContentHandler):
    """SAX content handler recording every callback it receives.

    The ordered event trace is kept in ``visited`` so tests can assert
    on the exact sequence of parser callbacks.
    """

    def __init__(self):
        xml.sax.handler.ContentHandler.__init__(self)
        self.visited = []

    def _record(self, event):
        """Append one event record to the trace."""
        self.visited.append(event)

    def startDocument(self):
        self._record('startDocument')

    def endDocument(self):
        self._record('endDocument')

    def startPrefixMapping(self, prefix, uri):
        # Ignored: prefix-mapping callback order is not guaranteed.
        pass

    def endPrefixMapping(self, prefix):
        # Ignored: prefix-mapping callback order is not guaranteed.
        pass

    def startElement(self, name, attrs):
        self._record(('startElement', name, attrs))

    def endElement(self, name):
        self._record(('endElement', name))

    def startElementNS(self, name, qname, attrs):
        self._record(('startElementNS', name, qname, dict(attrs)))

    def endElementNS(self, name, qname):
        self._record(('endElementNS', name, qname))

    def characters(self, content):
        self._record(('characters', content))

    def ignorableWhitespace(self, whitespace):
        self._record(('ignorableWhitespace', whitespace))

    def processingInstruction(self, target, data):
        self._record(('processingInstruction', target, data))

    def skippedEntity(self, name):
        self._record(('skippedEntity', name))
|
Belxjander/Kirito | refs/heads/master | Python-3.5.0-main/Lib/idlelib/idle_test/test_autoexpand.py | 20 | """Unit tests for idlelib.AutoExpand"""
import unittest
from test.support import requires
from tkinter import Text, Tk
#from idlelib.idle_test.mock_tk import Text
from idlelib.AutoExpand import AutoExpand
class Dummy_Editwin:
    """Minimal EditorWindow stand-in: AutoExpand.__init__ only needs .text."""
    def __init__(self, text):
        self.text = text
class AutoExpandTest(unittest.TestCase):
    """Tests for AutoExpand word completion against a real or mock Text."""

    @classmethod
    def setUpClass(cls):
        # If the real tkinter Text was imported, a GUI is required and a
        # root window must exist; otherwise the idle_test mock is used.
        if 'tkinter' in str(Text):
            requires('gui')
            cls.tk = Tk()
            cls.text = Text(cls.tk)
        else:
            cls.text = Text()
        cls.auto_expand = AutoExpand(Dummy_Editwin(cls.text))

    @classmethod
    def tearDownClass(cls):
        if hasattr(cls, 'tk'):
            cls.tk.destroy()
            del cls.tk
        del cls.text, cls.auto_expand

    def tearDown(self):
        # Start every test with an empty text widget.
        self.text.delete('1.0', 'end')

    def test_get_prevword(self):
        # getprevword returns the word fragment immediately before the
        # insertion cursor (empty at start of word/line).
        text = self.text
        previous = self.auto_expand.getprevword
        equal = self.assertEqual
        equal(previous(), '')
        text.insert('insert', 't')
        equal(previous(), 't')
        text.insert('insert', 'his')
        equal(previous(), 'this')
        text.insert('insert', ' ')
        equal(previous(), '')
        text.insert('insert', 'is')
        equal(previous(), 'is')
        text.insert('insert', '\nsample\nstring')
        equal(previous(), 'string')
        text.delete('3.0', 'insert')
        equal(previous(), '')
        text.delete('1.0', 'end')
        equal(previous(), '')

    def test_before_only(self):
        # Candidates are gathered from text before the cursor, nearest
        # first, cycling back to the typed prefix itself.
        previous = self.auto_expand.getprevword
        expand = self.auto_expand.expand_word_event
        equal = self.assertEqual
        self.text.insert('insert', 'ab ac bx ad ab a')
        equal(self.auto_expand.getwords(), ['ab', 'ad', 'ac', 'a'])
        expand('event')
        equal(previous(), 'ab')
        expand('event')
        equal(previous(), 'ad')
        expand('event')
        equal(previous(), 'ac')
        expand('event')
        equal(previous(), 'a')

    def test_after_only(self):
        # Also add punctuation 'noise' that should be ignored.
        text = self.text
        previous = self.auto_expand.getprevword
        expand = self.auto_expand.expand_word_event
        equal = self.assertEqual
        text.insert('insert', 'a, [ab] ac: () bx"" cd ac= ad ya')
        text.mark_set('insert', '1.1')
        equal(self.auto_expand.getwords(), ['ab', 'ac', 'ad', 'a'])
        expand('event')
        equal(previous(), 'ab')
        expand('event')
        equal(previous(), 'ac')
        expand('event')
        equal(previous(), 'ad')
        expand('event')
        equal(previous(), 'a')

    def test_both_before_after(self):
        # Words before the cursor are preferred over words after it.
        text = self.text
        previous = self.auto_expand.getprevword
        expand = self.auto_expand.expand_word_event
        equal = self.assertEqual
        text.insert('insert', 'ab xy yz\n')
        text.insert('insert', 'a ac by ac')
        text.mark_set('insert', '2.1')
        equal(self.auto_expand.getwords(), ['ab', 'ac', 'a'])
        expand('event')
        equal(previous(), 'ab')
        expand('event')
        equal(previous(), 'ac')
        expand('event')
        equal(previous(), 'a')

    def test_other_expand_cases(self):
        text = self.text
        expand = self.auto_expand.expand_word_event
        equal = self.assertEqual
        # no expansion candidate found
        equal(self.auto_expand.getwords(), [])
        equal(expand('event'), 'break')
        text.insert('insert', 'bx cy dz a')
        equal(self.auto_expand.getwords(), [])
        # reset state by successfully expanding once
        # move cursor to another position and expand again
        text.insert('insert', 'ac xy a ac ad a')
        text.mark_set('insert', '1.7')
        expand('event')
        initial_state = self.auto_expand.state
        text.mark_set('insert', '1.end')
        expand('event')
        new_state = self.auto_expand.state
        self.assertNotEqual(initial_state, new_state)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
Noviat/odoo | refs/heads/8.0 | addons/website/models/ir_qweb.py | 44 | # -*- coding: utf-8 -*-
"""
Website-context rendering needs to add some metadata to rendered fields,
as well as render a few fields differently.
Also, adds methods to convert values back to openerp models.
"""
import cStringIO
import datetime
import itertools
import logging
import os
import urllib2
import urlparse
import re
import pytz
import werkzeug.urls
import werkzeug.utils
from dateutil import parser
from lxml import etree, html
from PIL import Image as I
import openerp.modules
import openerp
from openerp.osv import orm, fields
from openerp.tools import ustr, DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools import html_escape as escape
from openerp.addons.web.http import request
from openerp.addons.base.ir import ir_qweb
REMOTE_CONNECTION_TIMEOUT = 2.5
logger = logging.getLogger(__name__)
class QWeb(orm.AbstractModel):
    """ QWeb object for rendering stuff in the website context
    """
    _name = 'website.qweb'
    _inherit = 'ir.qweb'

    # Per-tag attribute carrying a URL that must be localized (multilang
    # prefix) before rendering.
    URL_ATTRS = {
        'form': 'action',
        'a': 'href',
    }

    def add_template(self, qcontext, name, node):
        # preprocessing for multilang static urls
        # Rewrite every <form action>/<a href> through url_for() so static
        # links get the language prefix of the current website.
        if request.website:
            for tag, attr in self.URL_ATTRS.iteritems():
                for e in node.iterdescendants(tag=tag):
                    url = e.get(attr)
                    if url:
                        e.set(attr, qcontext.get('url_for')(url))
        super(QWeb, self).add_template(qcontext, name, node)

    def render_att_att(self, element, attribute_name, attribute_value, qwebcontext):
        # Same URL localization as add_template(), but for dynamically
        # computed t-att-* attributes.
        URL_ATTRS = self.URL_ATTRS.get(element.tag)
        is_website = request.website
        for att, val in super(QWeb, self).render_att_att(element, attribute_name, attribute_value, qwebcontext):
            if is_website and att == URL_ATTRS and isinstance(val, basestring):
                val = qwebcontext.get('url_for')(val)
            yield (att, val)

    def get_converter_for(self, field_type):
        # Prefer a website-specific field converter; fall back to the
        # generic website.qweb.field model.
        return self.pool.get(
            'website.qweb.field.' + field_type,
            self.pool['website.qweb.field'])
class Field(orm.AbstractModel):
    """Base website converter between rendered field values and model
    values; website counterpart of ir.qweb.field."""
    _name = 'website.qweb.field'
    _inherit = 'ir.qweb.field'

    def attributes(self, cr, uid, field_name, record, options,
                   source_element, g_att, t_att, qweb_context, context=None):
        """Extend the editor attributes with translatability and
        placeholder information."""
        if options is None: options = {}
        field = record._model._fields[field_name]

        # data-oe-translate tells the website editor whether this field
        # is translatable in the current database.
        attrs = [('data-oe-translate', 1 if getattr(field, 'translate', False) else 0)]

        # Placeholder resolution order: widget options, then the source
        # element's attribute, then the field definition.
        placeholder = options.get('placeholder') \
            or source_element.get('placeholder') \
            or getattr(field, 'placeholder', None)
        if placeholder:
            attrs.append(('placeholder', placeholder))

        return itertools.chain(
            super(Field, self).attributes(cr, uid, field_name, record, options,
                                          source_element, g_att, t_att,
                                          qweb_context, context=context),
            attrs
        )

    def value_from_string(self, value):
        # Identity by default; subclasses override (e.g. Integer uses int).
        return value

    def from_html(self, cr, uid, model, field, element, context=None):
        """Convert the edited DOM element back to a model value."""
        return self.value_from_string(element.text_content().strip())

    def qweb_object(self):
        return self.pool['website.qweb']
class Integer(orm.AbstractModel):
    """Converter for integer fields: parsing an edited value back is a
    plain int() call."""
    _name = 'website.qweb.field.integer'
    _inherit = ['website.qweb.field']

    value_from_string = int
class Float(orm.AbstractModel):
    _name = 'website.qweb.field.float'
    _inherit = ['website.qweb.field', 'ir.qweb.field.float']

    def from_html(self, cr, uid, model, field, element, context=None):
        """Parse a float rendered with the user language's conventions
        (thousands separator, decimal point)."""
        lang = self.user_lang(cr, uid, context=context)

        value = element.text_content().strip()

        return float(value.replace(lang.thousands_sep, '')
                          .replace(lang.decimal_point, '.'))
def parse_fuzzy(in_format, value):
    """Fuzzily parse *value* with dateutil, inferring day/month/year
    ordering from the strftime-style *in_format*."""
    day_first = in_format.find('%d') < in_format.find('%m')
    # Use whichever year directive the format actually contains.
    year_directive = '%y' if '%y' in in_format else '%Y'
    year_first = in_format.find(year_directive) < in_format.find('%d')

    return parser.parse(value, dayfirst=day_first, yearfirst=year_first)
class Date(orm.AbstractModel):
    """Website editor converter for date fields.

    Rendering exposes the raw server-formatted value in a
    ``data-oe-original`` attribute; saving validates the edited text
    against the server date format.
    """
    _name = 'website.qweb.field.date'
    _inherit = ['website.qweb.field', 'ir.qweb.field.date']

    def attributes(self, cr, uid, field_name, record, options,
                   source_element, g_att, t_att, qweb_context,
                   context=None):
        # Bug fix: the caller's context was previously dropped
        # (``context=None``); propagate it so the parent converter can
        # use lang/tz information.
        attrs = super(Date, self).attributes(
            cr, uid, field_name, record, options, source_element, g_att, t_att,
            qweb_context, context=context)
        return itertools.chain(attrs, [('data-oe-original', record[field_name])])

    def from_html(self, cr, uid, model, field, element, context=None):
        value = element.text_content().strip()
        if not value:
            return False

        # Raises ValueError on malformed input, aborting the save
        # instead of storing garbage.
        datetime.datetime.strptime(value, DEFAULT_SERVER_DATE_FORMAT)
        return value
class DateTime(orm.AbstractModel):
    """Website editor converter for datetime fields.

    Server values are UTC: rendering converts to the user's timezone
    for the ``data-oe-original`` attribute; saving converts the edited
    value back to UTC.
    """
    _name = 'website.qweb.field.datetime'
    _inherit = ['website.qweb.field', 'ir.qweb.field.datetime']

    def attributes(self, cr, uid, field_name, record, options,
                   source_element, g_att, t_att, qweb_context,
                   context=None):
        value = record[field_name]
        if isinstance(value, basestring):
            value = datetime.datetime.strptime(
                value, DEFAULT_SERVER_DATETIME_FORMAT)
        if value:
            # convert from UTC (server timezone) to user timezone
            value = fields.datetime.context_timestamp(
                cr, uid, timestamp=value, context=context)
            value = value.strftime(DEFAULT_SERVER_DATETIME_FORMAT)

        # Bug fix: the caller's context was previously dropped
        # (``context=None``); propagate it to the parent converter.
        attrs = super(DateTime, self).attributes(
            cr, uid, field_name, record, options, source_element, g_att, t_att,
            qweb_context, context=context)
        return itertools.chain(attrs, [
            ('data-oe-original', value)
        ])

    def from_html(self, cr, uid, model, field, element, context=None):
        """Parse the edited text (user timezone) back to a UTC server
        datetime string; returns False for an empty value."""
        if context is None: context = {}
        value = element.text_content().strip()
        if not value:
            return False

        # parse from string to datetime
        dt = datetime.datetime.strptime(value, DEFAULT_SERVER_DATETIME_FORMAT)

        # convert back from user's timezone to UTC; fall back to the
        # user record's tz when the context does not carry one
        tz_name = context.get('tz') \
            or self.pool['res.users'].read(cr, openerp.SUPERUSER_ID, uid, ['tz'], context=context)['tz']
        if tz_name:
            try:
                user_tz = pytz.timezone(tz_name)
                utc = pytz.utc

                dt = user_tz.localize(dt).astimezone(utc)
            except Exception:
                # Best-effort: log and keep the naive value rather than
                # losing the user's edit.
                logger.warn(
                    "Failed to convert the value for a field of the model"
                    " %s back from the user's timezone (%s) to UTC",
                    model, tz_name,
                    exc_info=True)

        # format back to string
        return dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
class Text(orm.AbstractModel):
    _name = 'website.qweb.field.text'
    _inherit = ['website.qweb.field', 'ir.qweb.field.text']

    def from_html(self, cr, uid, model, field, element, context=None):
        # Collapse the edited HTML back to plain text, turning
        # <br>/<p>/block elements into newlines (see html_to_text).
        return html_to_text(element)
class Selection(orm.AbstractModel):
    _name = 'website.qweb.field.selection'
    _inherit = ['website.qweb.field', 'ir.qweb.field.selection']

    def from_html(self, cr, uid, model, field, element, context=None):
        """Map a displayed selection label back to its internal key.

        :raises ValueError: if the edited text matches no label
        """
        # Empty browse record: only used to obtain an Environment for
        # field.get_description().
        record = self.browse(cr, uid, [], context=context)
        value = element.text_content().strip()
        selection = field.get_description(record.env)['selection']
        for k, v in selection:
            # Labels may be byte strings (Python 2); normalize to
            # unicode before comparing.
            if isinstance(v, str):
                v = ustr(v)
            if value == v:
                return k

        raise ValueError(u"No value found for label %s in selection %s" % (
                         value, selection))
class ManyToOne(orm.AbstractModel):
    _name = 'website.qweb.field.many2one'
    _inherit = ['website.qweb.field', 'ir.qweb.field.many2one']

    def from_html(self, cr, uid, model, field, element, context=None):
        """Save an edited m2o display name.

        Editing a m2o on the website does not re-link the relation: it
        renames the *currently linked* record by writing the text to
        its ``_rec_name`` field.
        """
        # FIXME: layering violations all the things
        Model = self.pool[element.get('data-oe-model')]
        M2O = self.pool[field.comodel_name]
        field_name = element.get('data-oe-field')
        id = int(element.get('data-oe-id'))
        # FIXME: weird things are going to happen for char-type _rec_name
        value = html_to_text(element)

        # if anything blows up, just ignore it and bail
        try:
            # get parent record
            [obj] = Model.read(cr, uid, [id], [field_name])
            # get m2o record id
            (m2o_id, _) = obj[field_name]
            # assume _rec_name and write directly to it
            M2O.write(cr, uid, [m2o_id], {
                M2O._rec_name: value
            }, context=context)
        except:
            # Deliberate best-effort: any failure is logged and the edit
            # silently dropped (bare except kept intentionally).
            logger.exception("Could not save %r to m2o field %s of model %s",
                             value, field_name, Model._name)

        # not necessary, but might as well be explicit about it
        return None
class HTML(orm.AbstractModel):
    _name = 'website.qweb.field.html'
    _inherit = ['website.qweb.field', 'ir.qweb.field.html']

    def from_html(self, cr, uid, model, field, element, context=None):
        """Serialize the edited element's children back to an HTML string."""
        content = []
        # Leading text before the first child element would otherwise be lost.
        if element.text: content.append(element.text)
        content.extend(html.tostring(child)
                       for child in element.iterchildren(tag=etree.Element))
        return '\n'.join(content)
class Image(orm.AbstractModel):
    """
    Widget options:

    ``class``
        set as attribute on the generated <img> tag
    """
    _name = 'website.qweb.field.image'
    _inherit = ['website.qweb.field', 'ir.qweb.field.image']

    def to_html(self, cr, uid, field_name, record, options,
                source_element, t_att, g_att, qweb_context, context=None):
        assert source_element.tag != 'img',\
            "Oddly enough, the root tag of an image field can not be img. " \
            "That is because the image goes into the tag, or it gets the " \
            "hose again."
        return super(Image, self).to_html(
            cr, uid, field_name, record, options,
            source_element, t_att, g_att, qweb_context, context=context)

    def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
        """Render the field as an <img> tag pointing at the website
        image controller, optionally constrained to max_width x max_height."""
        if options is None: options = {}
        aclasses = ['img', 'img-responsive'] + options.get('class', '').split()
        classes = ' '.join(itertools.imap(escape, aclasses))

        max_size = None
        max_width, max_height = options.get('max_width', 0), options.get('max_height', 0)
        if max_width or max_height:
            max_size = '%sx%s' % (max_width, max_height)

        src = self.pool['website'].image_url(cr, uid, record, field_name, max_size)

        # alt text: explicit field option wins over a literal 'alt' option
        alt = None
        if options.get('alt-field') and getattr(record, options['alt-field'], None):
            alt = escape(record[options['alt-field']])
        elif options.get('alt'):
            alt = options['alt']

        img = '<img class="%s" src="%s" style="%s"%s/>' % (classes, src, options.get('style', ''), ' alt="%s"' % alt if alt else '')
        return ir_qweb.HTMLSafe(img)

    local_url_re = re.compile(r'^/(?P<module>[^]]+)/static/(?P<rest>.+)$')

    def from_html(self, cr, uid, model, field, element, context=None):
        """Recover the image data from the edited <img> src: either the
        website image controller, a module static file, or a remote URL."""
        url = element.find('img').get('src')

        url_object = urlparse.urlsplit(url)
        if url_object.path.startswith('/website/image'):
            # url might be /website/image/<model>/<id>[_<checksum>]/<field>[/<width>x<height>]
            fragments = url_object.path.split('/')
            query = dict(urlparse.parse_qsl(url_object.query))
            model = query.get('model', fragments[3])
            oid = query.get('id', fragments[4].split('_')[0])
            field = query.get('field', fragments[5])
            item = self.pool[model].browse(cr, uid, int(oid), context=context)
            return item[field]

        if self.local_url_re.match(url_object.path):
            return self.load_local_url(url)

        return self.load_remote_url(url)

    def load_local_url(self, url):
        """Read a module static file and return it base64-encoded, or
        None if the file is missing or not a valid image."""
        match = self.local_url_re.match(urlparse.urlsplit(url).path)

        rest = match.group('rest')
        for sep in os.sep, os.altsep:
            if sep and sep != '/':
                # Bug fix: str.replace returns a new string; the old code
                # discarded the result, so OS-specific separators were
                # never actually normalized.
                rest = rest.replace(sep, '/')

        path = openerp.modules.get_module_resource(
            match.group('module'), 'static', *(rest.split('/')))

        if not path:
            return None

        try:
            with open(path, 'rb') as f:
                # force complete image load to ensure it's valid image data
                image = I.open(f)
                image.load()
                f.seek(0)
                return f.read().encode('base64')
        except Exception:
            logger.exception("Failed to load local image %r", url)
            return None

    def load_remote_url(self, url):
        """Fetch a remote image and return it base64-encoded (re-saved
        through PIL), or None on any failure."""
        try:
            # should probably remove remote URLs entirely:
            # * in fields, downloading them without blowing up the server is a
            #   challenge
            # * in views, may trigger mixed content warnings if HTTPS CMS
            #   linking to HTTP images
            # implement drag & drop image upload to mitigate?
            req = urllib2.urlopen(url, timeout=REMOTE_CONNECTION_TIMEOUT)
            # PIL needs a seekable file-like image, urllib result is not seekable
            image = I.open(cStringIO.StringIO(req.read()))
            # force a complete load of the image data to validate it
            image.load()
        except Exception:
            logger.exception("Failed to load remote image %r", url)
            return None

        # don't use original data in case weird stuff was smuggled in, with
        # luck PIL will remove some of it?
        out = cStringIO.StringIO()
        image.save(out, image.format)
        return out.getvalue().encode('base64')
class Monetary(orm.AbstractModel):
    _name = 'website.qweb.field.monetary'
    _inherit = ['website.qweb.field', 'ir.qweb.field.monetary']

    def from_html(self, cr, uid, model, field, element, context=None):
        """Parse the amount rendered inside the <span>, stripping the
        user language's thousands separator and decimal point."""
        lang = self.user_lang(cr, uid, context=context)

        value = element.find('span').text.strip()

        return float(value.replace(lang.thousands_sep, '')
                          .replace(lang.decimal_point, '.'))
class Duration(orm.AbstractModel):
    """Website editor converter for durations (stored as floats).

    Formatting comes from ir.qweb.field.duration; saving parses the
    plain (non-localized) float value.
    """
    _name = 'website.qweb.field.duration'
    _inherit = [
        'ir.qweb.field.duration',
        'website.qweb.field.float',
    ]

    def attributes(self, cr, uid, field_name, record, options,
                   source_element, g_att, t_att, qweb_context,
                   context=None):
        # Bug fix: the caller's context was previously dropped
        # (``context=None``); propagate it to the parent converter.
        attrs = super(Duration, self).attributes(
            cr, uid, field_name, record, options, source_element, g_att, t_att,
            qweb_context, context=context)
        return itertools.chain(attrs, [('data-oe-original', record[field_name])])

    def from_html(self, cr, uid, model, field, element, context=None):
        value = element.text_content().strip()

        # non-localized value
        return float(value)
class RelativeDatetime(orm.AbstractModel):
    """Datetime rendered as a relative delta ("3 days ago")."""
    _name = 'website.qweb.field.relative'
    _inherit = [
        'ir.qweb.field.relative',
        'website.qweb.field.datetime',
    ]

    # get formatting from ir.qweb.field.relative but edition/save from datetime
class Contact(orm.AbstractModel):
    _name = 'website.qweb.field.contact'
    _inherit = ['ir.qweb.field.contact', 'website.qweb.field.many2one']

    def from_html(self, cr, uid, model, field, element, context=None):
        # Contact widgets are render-only on the website: edits are
        # intentionally discarded.
        return None
class QwebView(orm.AbstractModel):
    """Website registration of the qweb sub-view field converter."""
    _name = 'website.qweb.field.qweb'
    _inherit = ['ir.qweb.field.qweb']
def html_to_text(element):
    """ Converts HTML content with HTML-specified line breaks (br, p, div, ...)
    in roughly equivalent textual content.

    Used to replace and fixup the roundtripping of text and m2o: when using
    libxml 2.8.0 (but not 2.9.1) and parsing HTML with lxml.html.fromstring
    whitespace text nodes (text nodes composed *solely* of whitespace) are
    stripped out with no recourse, and fundamentally relying on newlines
    being in the text (e.g. inserted during user edition) is probably poor form
    anyway.

    -> this utility function collapses whitespace sequences and replaces
       nodes by roughly corresponding linebreaks
       * p are pre-and post-fixed by 2 newlines
       * br are replaced by a single newline
       * block-level elements not already mentioned are pre- and post-fixed by
         a single newline

    ought be somewhat similar (but much less high-tech) to aaronsw's html2text.
    the latter produces full-blown markdown, our text -> html converter only
    replaces newlines by <br> elements at this point so we're reverting that,
    and a few more newline-ish elements in case the user tried to add
    newlines/paragraphs into the text field

    :param element: lxml.html content
    :returns: corresponding pure-text output
    """
    # output is a list of str | int. Integers are padding requests (in minimum
    # number of newlines). When multiple padding requests, fold them into the
    # biggest one.  _wrap/_element_to_text walk the tree; _realize_padding
    # then turns the folded requests into actual newlines.
    output = []
    _wrap(element, output)

    # remove any leading or tailing whitespace, replace sequences of
    # (whitespace)\n(whitespace) by a single newline, where (whitespace) is a
    # non-newline whitespace in this case
    return re.sub(
        r'[ \t\r\f]*\n[ \t\r\f]*',
        '\n',
        ''.join(_realize_padding(output)).strip())
_PADDED_BLOCK = set('p h1 h2 h3 h4 h5 h6'.split())
# https://developer.mozilla.org/en-US/docs/HTML/Block-level_elements minus p
_MISC_BLOCK = set((
'address article aside audio blockquote canvas dd dl div figcaption figure'
' footer form header hgroup hr ol output pre section tfoot ul video'
).split())
def _collapse_whitespace(text):
""" Collapses sequences of whitespace characters in ``text`` to a single
space
"""
return re.sub('\s+', ' ', text)
def _realize_padding(it):
""" Fold and convert padding requests: integers in the output sequence are
requests for at least n newlines of padding. Runs thereof can be collapsed
into the largest requests and converted to newlines.
"""
padding = None
for item in it:
if isinstance(item, int):
padding = max(padding, item)
continue
if padding:
yield '\n' * padding
padding = None
yield item
# leftover padding irrelevant as the output will be stripped
def _wrap(element, output, wrapper=u''):
    """ Recursively extracts text from ``element`` (via _element_to_text), and
    wraps it all in ``wrapper``. Extracted text is added to ``output``

    :type wrapper: basestring | int
    """
    output.append(wrapper)
    # Text appearing before the first child element.
    if element.text:
        output.append(_collapse_whitespace(element.text))
    # Each child contributes its own text plus its tail text.
    for child in element:
        _element_to_text(child, output)
    output.append(wrapper)
def _element_to_text(e, output):
    """Append the textual rendering of element *e* (and its tail text)
    to *output*, choosing padding by how block-ish the tag is."""
    tag = e.tag
    if tag == 'br':
        output.append(u'\n')
    else:
        if tag in _PADDED_BLOCK:
            padding = 2          # paragraph-like: two blank lines
        elif tag in _MISC_BLOCK:
            padding = 1          # other block-level: one newline
        else:
            padding = u''        # inline element: no padding request
        _wrap(e, output, padding)

    tail = e.tail
    if tail:
        output.append(_collapse_whitespace(tail))
|
lenstr/rethinkdb | refs/heads/next | external/v8_3.30.33.16/tools/testrunner/objects/testcase.py | 44 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import output
class TestCase(object):
    """One test to run: suite + path + flags, plus the bookkeeping the
    multi-process runner needs to ship tests and results around."""

    def __init__(self, suite, path, flags=None, dependency=None):
        """
        :param suite: TestSuite object
        :param path: string, e.g. 'div-mod', 'test-api/foo'
        :param flags: list of strings, flags specific to this test case
        :param dependency: |path| for testcase that must be run first
        """
        self.suite = suite
        self.path = path
        # Bug fix: the old default 'flags=[]' was a single list shared by
        # every instance (mutable default argument); create a fresh list
        # per instance instead.
        self.flags = [] if flags is None else flags
        self.dependency = dependency
        self.outcomes = None
        self.output = None
        self.id = None  # int, used to map result back to TestCase instance
        self.duration = None  # assigned during execution
        self.run = 1  # The nth time this test is executed.

    def CopyAddingFlags(self, flags):
        """Return a copy of this test with *flags* appended."""
        copy = TestCase(self.suite, self.path, self.flags + flags, self.dependency)
        copy.outcomes = self.outcomes
        return copy

    def PackTask(self):
        """
        Extracts those parts of this object that are required to run the test
        and returns them as a JSON serializable object.
        """
        assert self.id is not None
        return [self.suitename(), self.path, self.flags,
                self.dependency, list(self.outcomes or []), self.id]

    @staticmethod
    def UnpackTask(task):
        """Creates a new TestCase object based on packed task data."""
        # For the order of the fields, refer to PackTask() above.
        test = TestCase(str(task[0]), task[1], task[2], task[3])
        test.outcomes = set(task[4])
        test.id = task[5]
        test.run = 1
        return test

    def SetSuiteObject(self, suites):
        # After UnpackTask, self.suite is still the suite *name*; swap
        # in the real TestSuite object.
        self.suite = suites[self.suite]

    def PackResult(self):
        """Serializes the output of the TestCase after it has run."""
        self.suite.StripOutputForTransmit(self)
        return [self.id, self.output.Pack(), self.duration]

    def MergeResult(self, result):
        """Applies the contents of a Result to this object."""
        assert result[0] == self.id
        self.output = output.Output.Unpack(result[1])
        self.duration = result[2]

    def suitename(self):
        return self.suite.name

    def GetLabel(self):
        return self.suitename() + "/" + self.suite.CommonTestName(self)
|
rajatguptarg/infro | refs/heads/master | infro/ansible/tasks.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# flake8: noqa
from celery import Celery
import os
import crypt
import json
app = Celery('tasks', backend='redis://localhost', broker='redis://localhost:6379/0')
KEY = 'guest'
def _spawn_ec2_instance(username, password):
    """
    Spawn a EC2 Instance using Ansible and return public IP of Instance
    """
    # NOTE(review): username/password are interpolated straight into
    # shell commands run via os.popen -- shell injection if these come
    # from untrusted input; should use subprocess with an argument list.
    encrypted_password = crypt.crypt(password, KEY)
    command = 'ansible-playbook -e "user=%s password=%s" create-instance.yml' % (username, encrypted_password)
    print "***************"
    print command
    print "***************"
    response = os.popen(command).read()
    # Query AWS for the running instance tagged with this user to
    # recover its public IP.
    aws_cmd = 'aws ec2 describe-instances --filters "Name=tag:Name,Values=suite" "Name=tag:User,Values=%s" "Name=instance-state-name,Values=running"' % (username)
    info = os.popen(aws_cmd).read()
    ec2_info = json.loads(info)
    # Assumes exactly one matching reservation/instance; raises
    # IndexError/KeyError otherwise.
    instance_ip = ec2_info['Reservations'][0]['Instances'][0]['PublicIpAddress']
    return instance_ip
def _delete_ec2_instance(ip):
"""
Delete an existing EC2 instance by its public IP
"""
aws_cmd = 'aws ec2 describe-instances --filters "Name=ip-address,Values=%s"' % (ip)
info = os.popen(aws_cmd).read()
ec2_info = json.loads(info)
try:
instance_id = ec2_info['Reservations'][0]['Instances'][0]['InstanceId']
command = 'ansible-playbook -e "id=%s" delete-instance.yml' % (instance_id)
print "#################"
print command
print "#################"
response = os.popen(command)
return ip
except:
return 'Instance with ip %s not found!' % (ip)
@app.task
def async_create_ec2_instance(username, password):
    """
    Running ansible playbook to trigger ec2 instance in async manner
    """
    # Celery task wrapper around the synchronous spawner.
    return _spawn_ec2_instance(username, password)
def create_ec2_instance(username, password):
    """
    Running ansible playbook to trigger ec2 instance in sync manner
    """
    # Blocking variant; returns the instance's public IP.
    return _spawn_ec2_instance(username, password)
def delete_ec2(ip):
    """
    Running ansible playbook to delete EC2 instance in sync manner
    """
    # Blocking variant; returns the IP or a not-found message.
    return _delete_ec2_instance(ip)
@app.task
def async_delete_ec2(ip):
    """
    Running ansible playbook to delete EC2 instance in async manner
    """
    # Celery task wrapper around the synchronous deleter.
    return _delete_ec2_instance(ip)
|
leoliujie/odoo | refs/heads/8.0 | addons/product/__openerp__.py | 262 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Products & Pricelists',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Sales Management',
'depends': ['base', 'decimal_precision', 'mail', 'report'],
'demo': [
'product_demo.xml',
'product_image_demo.xml',
],
'website': 'https://www.odoo.com',
'description': """
This is the base module for managing products and pricelists in OpenERP.
========================================================================
Products support variants, different pricing methods, suppliers information,
make to stock/order, different unit of measures, packaging and properties.
Pricelists support:
-------------------
* Multiple-level of discount (by product, category, quantities)
* Compute price based on different criteria:
* Other pricelist
* Cost price
* List price
* Supplier price
Pricelists preferences by product and/or partners.
Print product labels with barcode.
""",
'data': [
'security/product_security.xml',
'security/ir.model.access.csv',
'wizard/product_price_view.xml',
'product_data.xml',
'product_report.xml',
'product_view.xml',
'pricelist_view.xml',
'partner_view.xml',
'views/report_pricelist.xml',
],
'test': [
'product_pricelist_demo.yml',
'test/product_pricelist.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
messense/douban2kindle | refs/heads/master | douban2kindle/utils/download.py | 1 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import urllib2
import logging
import multiprocessing
from multiprocessing.pool import ThreadPool
logger = logging.getLogger(__name__)
def _download_image(url, save_dir):
    """Download a single image into *save_dir*.

    Returns the local file path on success, or None when the download
    fails (the error is logged).
    """
    try:
        data = urllib2.urlopen(url).read()
    except urllib2.URLError:
        logger.exception('Error downloading image: %s', url)
        return None
    # Name the file after the last URL path component.
    image_name = url[url.rfind('/')+1:]
    save_path = os.path.join(save_dir, image_name)
    # Bug fix: image bytes must be written in binary mode ('wb');
    # text mode ('w') corrupts binary data on Windows.
    with open(save_path, 'wb') as f:
        f.write(data)
    logger.info('Download image %s succeed, saved to %s', url, save_path)
    return save_path
def download_images(urls, save_dir):
    """Download *urls* concurrently into *save_dir* and return the list
    of local paths (None entries for failed downloads).

    Returns None when *urls* is empty.
    """
    if not urls:
        return
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # One worker per CPU as a rough heuristic for these I/O-bound jobs.
    pool = ThreadPool(multiprocessing.cpu_count())
    pending = [
        pool.apply_async(_download_image, args=(url, save_dir))
        for url in urls
    ]
    pool.close()
    pool.join()

    return [result.get() for result in pending]
|
filannim/Temporalia-TQIC | refs/heads/master | feature_extractor.py | 1 | #!/usr/bin/python
from __future__ import division
import pprint
import re
import sys
import time
import os
from collections import Counter, namedtuple, defaultdict
from nltk.corpus import wordnet as wn
from nltk.corpus.reader import wordnet
from nltk.tag import pos_tag
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
sys.path.append('/home/filannim/Dropbox/Workspace/ManTIME')
from annotate_sentence import annotate as ManTIME
from external.general_timex_normaliser import normalise as TemporallyNormalise
TempoSynset = namedtuple('TempoSynset', ['past','present','future','atemporal'])
def top(l):
    """Return the first element of *l*, or '' when *l* is empty."""
    if not l:
        return ''
    return l[0]
def get_temporal_expressions(sentence, utt):
    """
    This method is a wrapper for ManTIME. It analyses ManTIME's output
    and returns two lists: years and months mentions.

    sentence -- raw query text to annotate
    utt      -- utterance time string, passed through as ManTIME's
                ``utterance`` argument

    Returns a pair (years, months): ``years`` as a list of ints, ``months``
    as the TIMEX3 value strings that start with a 'YYYY-MM' prefix.
    Also prints the wall-clock time spent in ManTIME (side effect).
    """
    start = time.time()
    # Extract all TIMEX3 value="..." attributes from the annotated output.
    r_timex3_vals = re.compile(r'value=\"([^\"]+)\"')
    # Bare year in 1000-2999.
    r_timex3_years = re.compile(r'^[12][0-9]{3}$')
    # Year-month prefix, e.g. '2013-04' (anything may follow).
    r_timex3_months = re.compile(r'^[12][0-9]{3}\-[01][0-9]')
    timex3_vals = r_timex3_vals.findall(ManTIME(sentence, utterance=utt)[0])
    timex3_years = [int(v[:4]) for v in timex3_vals if r_timex3_years.match(v)]
    timex3_months = [v for v in timex3_vals if r_timex3_months.match(v)]
    #if ManTIME doesn't find anything, use a simple DDDD regular expression
    # (any 4-digit number starting with 1 or 2 in the raw sentence).
    if not(timex3_years) and not(timex3_months):
        timex3_years = [int(v) for v in re.findall(r'[12][0-9]{3}', sentence)]
    print "ManTIME (" + str(round(time.time()-start,2)) + " s.)"
    return (timex3_years, timex3_months)
class TempoWordNet(object):
    """In-memory lookup of TempoWordNet temporal scores, keyed by WordNet
    synset offset.

    The tab-separated resource file is parsed once at construction time;
    each row maps an offset to four scores (past/present/future/atemporal).
    """

    def __init__(self):
        self._path = '/home/filannim/Dropbox/Workspace/NTCIR-11_Temporalia/data/tempowordnet_1.0.tab_separated.txt'
        self.offsets = defaultdict(TempoSynset)
        with open(self._path, 'r') as source:
            for raw_line in source:
                record = raw_line.strip()
                if not record:
                    continue  # skip blank lines
                columns = record.split('\t')
                offset_id = int(columns[0])
                # Columns 4-7 hold the four temporal-affinity scores.
                self.offsets[offset_id] = TempoSynset(
                    float(columns[4]), float(columns[5]),
                    float(columns[6]), float(columns[7]))

    def past(self, id):
        """Past-affinity score of synset ``id``."""
        return self.offsets[id].past

    def present(self, id):
        """Present-affinity score of synset ``id``."""
        return self.offsets[id].present

    def future(self, id):
        """Future-affinity score of synset ``id``."""
        return self.offsets[id].future

    def atemporal(self, id):
        """Atemporal-affinity score of synset ``id``."""
        return self.offsets[id].atemporal

    def scores(self, id):
        """Full TempoSynset record for synset ``id``."""
        return self.offsets[id]
class FeatureExtractor(object):
    """Computes temporal-intent features for a (query sentence, utterance
    time) pair, used for temporal query classification.

    NOTE(review): all lexical resources (TempoWordNet, trigger word lists,
    Wikipedia titles) are loaded from hard-coded absolute paths; this only
    runs on the author's machine as-is.
    """

    def __init__(self):
        # Python 2 print with trailing comma: stays on the same line as the
        # timing message printed at the end of initialisation.
        print 'Feature extractor:',
        start = time.time()
        self.tempowordnet = TempoWordNet()
        self.wordnetlemmatizer = WordNetLemmatizer()
        # One entry per line in each resource file; sets give O(1) lookup.
        self.wikipedia_titles = set(open('/opt/DBpedia/sorted_uniq_lowered_super_stopworded_nocategories_labels.txt','r').read().splitlines())
        self.atemporal_triggers = set(open('/home/filannim/Dropbox/Workspace/NTCIR-11_Temporalia/temporalia/data/atemporal.txt', 'r').read().splitlines())
        self.past_triggers = set(open('/home/filannim/Dropbox/Workspace/NTCIR-11_Temporalia/temporalia/data/past.txt', 'r').read().splitlines())
        self.present_triggers = set(open('/home/filannim/Dropbox/Workspace/NTCIR-11_Temporalia/temporalia/data/recent.txt', 'r').read().splitlines())
        self.future_triggers = set(open('/home/filannim/Dropbox/Workspace/NTCIR-11_Temporalia/temporalia/data/future.txt', 'r').read().splitlines())
        print "initialised (" + str(round(time.time()-start,2)) + " s.)"

    def __w_type(self, sentence):
        """
        It returns the kind of W type of the query (what, where, when, how,
        why, who), by checking at the beginning of the sentence.
        Returns '' when the sentence starts with none of these words.
        """
        assert type(sentence) == Sentence
        s = sentence.normalised.lower()
        if s.startswith("what"):
            return "what"
        elif s.startswith("where"):
            return "where"
        elif s.startswith("when"):
            return "when"
        elif s.startswith("how"):
            return "how"
        elif s.startswith("why"):
            return "why"
        elif s.startswith("who"):
            return "who"
        else:
            return ""

    def __timing(self, sentence, utterance):
        """
        It returns:
        1) a boolean value ('0'/'1'), found/not_found at least a TIMEX
        2) numerical difference (in months, as a string) between query
           and utterance, '' when not computable
        3) the temporal delta according to the utterance time:
        '', past, present, future.
        """
        assert type(sentence) == Sentence
        assert type(utterance) == Utterance
        years, months = get_temporal_expressions(sentence.raw_text, utterance.ISO8601)
        if not(years) and not(months):
            # No explicit time expression: a past-tense verb (VBD) is the
            # only remaining clue for a 'past' reading.
            if 'VBD' in sentence.postags:
                return '0', '', 'past'
            return '0', '', ''
        if years and not months: # compute year-distance
            # True division is active (__future__ import), so this mean
            # may be fractional.
            mean_year = sum(years)/len(years)
            if utterance.year > mean_year:
                return '1', str((utterance.year-mean_year)*12), 'past'
            elif utterance.year == mean_year:
                # Same year: decide by which half of the year the query was
                # issued in (June is treated as the midpoint).
                delta_month = utterance.month-6
                if delta_month > 0:
                    return '1', str(delta_month), 'past'
                elif delta_month < 0:
                    return '1', str(delta_month), 'future'
                else:
                    return '1', str(delta_month), 'present'
            else:
                return '1', str((utterance.year-mean_year)*12), 'future'
        elif months and not years: # compute month-distance
            mean_month = 0
            for month in months:
                # 'YYYY-MM...' -> absolute month index year*12 + month.
                year, month = map(int, month[:7].split('-'))
                mean_month += (year*12) + month
            # NOTE(review): this divides the *sum* by 12 instead of by
            # len(months); with more than one month mention the "mean" is
            # not an average. Likely a bug -- confirm intended behaviour.
            mean_month = mean_month / 12
            mean_utterance = ((utterance.year*12) + utterance.month) / 12
            if mean_utterance > mean_month:
                return '1', str(mean_utterance-mean_month), 'past'
            elif mean_utterance == mean_month:
                return '1', '0', 'present'
            else:
                return '1', str(mean_utterance-mean_month), 'future'
        else:
            # Both years and months present: TIMEX found, distance unknown.
            return '1', '', ''

    def __part_of_the_year_in_3(self, utterance):
        """
        It returns the part of the year in which the query has been submitted:
        B = Beginning (from January to April)
        M = Middle (from May to August)
        E = End (from September to December)
        """
        assert type(utterance) == Utterance
        if utterance.month <= 4:
            return 'B'
        elif utterance.month >= 9:
            return 'E'
        else:
            return 'M'

    def __part_of_the_year_in_4(self, utterance):
        """
        It returns the part of the year in which the query has been submitted:
        B = Beginning (from January to March)
        M1 = Middle 1 (from April to June)
        M2 = Middle 2 (from July to September)
        E = End (from October to December)
        """
        assert type(utterance) == Utterance
        if utterance.month in range(1,4):
            return 'B'
        elif utterance.month in range(4,7):
            return 'M1'
        elif utterance.month in range(7,10):
            return 'M2'
        else:
            return 'E'

    def __get_trigger_classes(self, sentence):
        """
        It returns an ordered list of temporal classes according to the
        external vocabularies (one trigger list per temporal class).
        Returns (most frequent class, '-'-joined classes by frequency),
        or ('', '') when no token matches any trigger list.
        """
        assert type(sentence) == Sentence
        c = Counter()
        for token in sentence.tokens:
            token = token.lower()
            # A token may vote for several classes at once.
            if token in self.atemporal_triggers: c['atemporal'] += 1
            if token in self.past_triggers: c['past'] += 1
            if token in self.present_triggers: c['present'] += 1
            if token in self.future_triggers: c['future'] += 1
        if c:
            # Python 2: zip returns a list, so [0] picks the tuple of class
            # names sorted by descending count.
            sorted_classes = zip(*c.most_common(len(c)))[0]
            return top(sorted_classes), '-'.join(sorted_classes)
        else:
            return '', ''

    def __TempoWordNet_classes(self, sentence, tempowordnet, lemmatizer):
        """
        It returns an ordered list of temporal classes according to
        TempoWordNet: (top-scoring class, '-'-joined classes by score).
        """
        assert type(sentence) == Sentence
        def get_WN_pos(p):
            # Map Penn Treebank tags to WordNet POS constants; proper nouns
            # are marked 'skip', unknown tags map to None.
            if p in ['JJ','JJR','JJS']: return wn.ADJ
            elif p in ['RB','RBR','RBS']: return wn.ADV
            elif p in ['NN','NNS']: return wn.NOUN
            elif p in ['VB','VBD','VBG','VBN','VBP','VBZ']: return wn.VERB
            elif p in ['NNP','NNPS']: return 'skip'
            else: return None
        scores = defaultdict(float)
        for token, penn_pos in zip(sentence.tokens, sentence.postags):
            pos = get_WN_pos(penn_pos)
            if pos == wn.NOUN:
                # Common nouns are deliberately excluded from the scoring.
                continue
            if penn_pos == 'VBD':
                # Past-tense verbs vote directly for 'past'.
                scores['past'] += 1.
            elif token == 'will':
                scores['future'] += 1.
            elif pos!='skip':
                if pos:
                    lemma = lemmatizer.lemmatize(token, pos=pos)
                else:
                    lemma = lemmatizer.lemmatize(token)
                # Top-3 synsets, weighted 1/rank (rank starts at 1).
                for rank, offset in enumerate(map(int, [s.offset for s in wn.synsets(lemma, pos=pos)][:3]), start=1):
                    try:
                        # NOTE(review): dead assignment -- ss_offset is
                        # never used afterwards.
                        ss_offset = int() # the list can be empty
                        scores['past'] += tempowordnet.scores(offset).past/rank
                        scores['present'] += tempowordnet.scores(offset).present/rank
                        scores['future'] += tempowordnet.scores(offset).future/rank
                        scores['atemporal'] += tempowordnet.scores(offset).atemporal/rank
                    # NOTE(review): bare except silently swallows every
                    # error here (including typos); narrow it if possible.
                    except:
                        continue
        sorted_scores = sorted(scores, key=scores.get, reverse=True)
        return top(sorted_scores), '-'.join(sorted_scores)

    def __tenses(self, sentence):
        """Return (most frequent verb tag, '-'-joined verb tags by
        frequency) over verb POS tags plus the token 'will'."""
        assert type(sentence) == Sentence
        filtered_postags = Counter([p for t, p in zip(sentence.tokens, sentence.postags) if p.startswith('V') or t=='will'])
        sorted_postags = sorted(filtered_postags, key=filtered_postags.get, reverse=True)
        return top(sorted_postags), '-'.join(sorted_postags)

    def __pos_footprint(self, sentence):
        """Sequence of POS tags joined with '-' in sentence order."""
        assert type(sentence) == Sentence
        return '-'.join(sentence.postags)

    def __pos_ordered_footprint(self, sentence):
        """POS tags de-duplicated and ordered by frequency (descending);
        returns (most frequent tag, '-'-joined tags)."""
        assert type(sentence) == Sentence
        pos_tags = Counter(sentence.postags)
        sorted_postags = sorted(pos_tags, key=pos_tags.get, reverse=True)
        return top(sorted_postags), '-'.join(sorted_postags)

    def __pos_simplified_footprint(self, sentence):
        """Like __pos_footprint but each tag reduced to its first letter
        (coarse POS class, e.g. 'NN' -> 'N')."""
        assert type(sentence) == Sentence
        return '-'.join(map(lambda p: p[0], sentence.postags))

    def __pos_simplified_ordered_footprint(self, sentence):
        """Coarse (first-letter) POS classes ordered by frequency;
        returns (most frequent class, '-'-joined classes)."""
        assert type(sentence) == Sentence
        pos_tags = Counter(map(lambda p: p[0], sentence.postags))
        sorted_postags = sorted(pos_tags, key=pos_tags.get, reverse=True)
        return top(sorted_postags), '-'.join(sorted_postags)

    def __is_wikipedia_title(self, sentence):
        """'T' if the lowercased sentence is an exact Wikipedia/DBpedia
        title, 'F' otherwise."""
        assert type(sentence) == Sentence
        if sentence.lower in self.wikipedia_titles:
            return 'T'
        else:
            return 'F'

    def extract(self, sentence, utterance):
        """Compute the full feature dictionary for a raw query string and
        an utterance-time string.

        sentence  -- raw query text (wrapped into a Sentence)
        utterance -- utterance time string (wrapped into an Utterance)
        Returns a flat dict of feature name -> string value.
        """
        sentence = Sentence(sentence)
        utterance = Utterance(utterance)
        first_tempowordnet_class, tempowordnet_classes = self.__TempoWordNet_classes(sentence, self.tempowordnet, self.wordnetlemmatizer)
        first_trigger_class, trigger_classes = self.__get_trigger_classes(sentence)
        first_ordered_pos, ordered_pos_footprint = self.__pos_ordered_footprint(sentence)
        first_simplified_ordered_pos, simplified_ordered_pos_footprint = self.__pos_simplified_ordered_footprint(sentence)
        first_tense, tenses = self.__tenses(sentence)
        timex3_in, timing_num, timing = self.__timing(sentence, utterance)
        return {'QUERY':sentence.raw_text,
                'QUERY_normalised': sentence.normalised,
                'w_type': self.__w_type(sentence),
                'timex3_in': timex3_in,
                'timing_num': timing_num,
                'timing': timing,
                'tempowordnet_first_class': first_tempowordnet_class,
                'tempowordnet_classes': tempowordnet_classes,
                'part_of_the_year_in_3': self.__part_of_the_year_in_3(utterance),
                'part_of_the_year_in_4': self.__part_of_the_year_in_4(utterance),
                'trigger_first_class': first_trigger_class,
                'trigger_classes': trigger_classes,
                'tenses': tenses,
                'first_tense': first_tense,
                'pos_footprint': self.__pos_footprint(sentence),
                'pos_simplified_footprint': self.__pos_simplified_footprint(sentence),
                'pos_first_ordered_footprint': first_ordered_pos,
                'pos_ordered_footprint': ordered_pos_footprint,
                'pos_first_simplified_ordered_footprint': first_simplified_ordered_pos,
                'pos_simplified_ordered_footprint': simplified_ordered_pos_footprint,
                'is_wikipedia_title': self.__is_wikipedia_title(sentence)}
class Utterance(object):
    """Wraps an utterance-time string with its ISO-8601 normalisation and
    the parsed year/month/day components."""

    def __init__(self, utterance):
        assert type(utterance) == str
        self.raw_text = utterance
        # TemporallyNormalise returns a tuple; index 2 is the ISO-8601 date.
        self.ISO8601 = TemporallyNormalise(utterance, '20140331')[2]
        self.year, self.month, self.day = [int(part) for part in self.ISO8601.split('-')]
class Sentence(object):
    """Holds a raw query sentence together with derived views: lowercase
    form, quote-normalised form, and its tokens with POS tags."""

    def __init__(self, sentence):
        assert type(sentence) == str
        assert len(sentence) > 0
        self.lower = sentence.lower()
        self.raw_text = sentence.strip()
        # Double quotes are rewritten as `` for the downstream tools.
        self.normalised = sentence.replace('"', '``').strip()
        tagged = pos_tag(word_tokenize(sentence))
        self.tokens, self.postags = zip(*tagged)
def main():
    """CLI entry point: ``script <sentence> <utterance>`` -- extracts the
    feature dictionary and pretty-prints it."""
    _prog, sentence, utterance = sys.argv
    extractor = FeatureExtractor()
    printer = pprint.PrettyPrinter(indent=4)
    printer.pprint(extractor.extract(sentence, utterance))


if __name__ == "__main__":
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.