repo_name | ref | path | copies | content |
|---|---|---|---|---|
robocomp/learnbot | refs/heads/version-3 | learnbot_dsl/functions/perceptual/camera/is_there_red_line.py | 1 |
from __future__ import print_function, absolute_import
import sys, os

path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(path)

import visual_auxiliary as va
import numpy as np


def is_there_red_line(lbot):
    frame = lbot.getImage()
    if frame is not None:
        rois = va.detect_red_line(frame)
        if rois[np.argmax(rois)] > 20:
            return True
    return False
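
# --- Illustrative usage sketch (not part of the original module): in the
# learnbot DSL this predicate would typically gate a behaviour. Both the
# loop and setSpeed() below are hypothetical; only getImage() is assumed
# from the code above.
#
#   while not is_there_red_line(lbot):
#       lbot.setSpeed(0.1, 0.0)   # hypothetical motion call
#   lbot.setSpeed(0.0, 0.0)       # stop once a red line is detected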
|
willdecker/suds | refs/heads/master | suds/umx/core.py | 200 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )

"""
Provides base classes for XML->object I{unmarshalling}.
"""

from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.attrlist import AttrList
from suds.sax.text import Text
from suds.sudsobject import Factory, merge

log = getLogger(__name__)

reserved = {'class': 'cls', 'def': 'dfn'}


class Core:
    """
    The abstract XML I{node} unmarshaller. This class provides the
    I{core} unmarshalling functionality.
    """

    def process(self, content):
        """
        Process an object graph representation of the XML I{node}.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: A suds object.
        @rtype: L{Object}
        """
        self.reset()
        return self.append(content)

    def append(self, content):
        """
        Process the specified node and convert the XML document into
        a I{suds} L{object}.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: An I{append-result} tuple as: (L{Object}, I{value})
        @rtype: I{append-result}
        @note: This is not the proper entry point.
        @see: L{process()}
        """
        self.start(content)
        self.append_attributes(content)
        self.append_children(content)
        self.append_text(content)
        self.end(content)
        return self.postprocess(content)

    def postprocess(self, content):
        """
        Perform final processing of the resulting data structure as follows:
          - Mixed values (children and text) will have a result of the
            I{content.node}.
          - Semi-simple values (attributes, no children, and text) will have
            a result of a property object.
          - Simple values (no attributes, no children, with text nodes) will
            have a string result equal to the value of
            content.node.getText().
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: The post-processed result.
        @rtype: I{any}
        """
        node = content.node
        if len(node.children) and node.hasText():
            return node
        attributes = AttrList(node.attributes)
        if attributes.rlen() and \
                not len(node.children) and \
                node.hasText():
            p = Factory.property(node.name, node.getText())
            return merge(content.data, p)
        if len(content.data):
            return content.data
        lang = attributes.lang()
        if content.node.isnil():
            return None
        if not len(node.children) and content.text is None:
            if self.nillable(content):
                return None
            else:
                return Text('', lang=lang)
        if isinstance(content.text, basestring):
            return Text(content.text, lang=lang)
        else:
            return content.text

    def append_attributes(self, content):
        """
        Append attribute nodes into L{Content.data}.
        Attributes in the I{schema} or I{xml} namespaces are skipped.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        """
        attributes = AttrList(content.node.attributes)
        for attr in attributes.real():
            name = attr.name
            value = attr.value
            self.append_attribute(name, value, content)

    def append_attribute(self, name, value, content):
        """
        Append an attribute name/value into L{Content.data}.
        @param name: The attribute name.
        @type name: basestring
        @param value: The attribute's value.
        @type value: basestring
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        """
        key = name
        key = '_%s' % reserved.get(key, key)
        setattr(content.data, key, value)

    def append_children(self, content):
        """
        Append child nodes into L{Content.data}.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        """
        for child in content.node:
            cont = Content(child)
            cval = self.append(cont)
            key = reserved.get(child.name, child.name)
            if key in content.data:
                v = getattr(content.data, key)
                if isinstance(v, list):
                    v.append(cval)
                else:
                    setattr(content.data, key, [v, cval])
                continue
            if self.unbounded(cont):
                if cval is None:
                    setattr(content.data, key, [])
                else:
                    setattr(content.data, key, [cval])
            else:
                setattr(content.data, key, cval)

    def append_text(self, content):
        """
        Append text nodes into L{Content.data}.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        """
        if content.node.hasText():
            content.text = content.node.getText()

    def reset(self):
        pass

    def start(self, content):
        """
        Processing of I{node} has started. Build and return
        the proper object.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: A subclass of Object.
        @rtype: L{Object}
        """
        content.data = Factory.object(content.node.name)

    def end(self, content):
        """
        Processing of I{node} has ended.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        """
        pass

    def bounded(self, content):
        """
        Get whether the content is bounded (not a list).
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: True if bounded, else False.
        @rtype: boolean
        """
        return not self.unbounded(content)

    def unbounded(self, content):
        """
        Get whether the object is unbounded (a list).
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: True if unbounded, else False.
        @rtype: boolean
        """
        return False

    def nillable(self, content):
        """
        Get whether the object is nillable.
        @param content: The current content being unmarshalled.
        @type content: L{Content}
        @return: True if nillable, else False.
        @rtype: boolean
        """
        return False
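
# --- Illustrative sketch (not part of suds): append_attribute() above maps
# XML attribute names onto safe Python attribute names by consulting the
# module-level `reserved` table and prefixing an underscore, e.g.
# (doctest style):
#
#   >>> '_%s' % reserved.get('class', 'class')
#   '_cls'
#   >>> '_%s' % reserved.get('id', 'id')
#   '_id'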
|
psiinon/addons-server | refs/heads/master | src/olympia/reviewers/tests/test_utils.py | 1 |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from unittest import mock
from unittest.mock import Mock, patch
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.test.utils import override_settings
from django.utils import translation
import pytest
import responses
from pyquery import PyQuery as pq
from olympia import amo
from olympia.activity.models import ActivityLog, ActivityLogToken
from olympia.addons.models import (
Addon, AddonApprovalsCounter, AddonReviewerFlags)
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import (
TestCase, file_factory, version_factory, addon_factory)
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import send_mail
from olympia.discovery.models import DiscoveryItem
from olympia.files.models import File
from olympia.reviewers.models import (
AutoApprovalSummary, ReviewerScore, ViewExtensionQueue)
from olympia.reviewers.utils import (
ReviewAddon, ReviewFiles, ReviewHelper,
ViewUnlistedAllListTable, view_table_factory)
from olympia.users.models import UserProfile
from olympia.lib.crypto.tests.test_signing import (
_get_signature_details, _get_recommendation_data)
pytestmark = pytest.mark.django_db
REVIEW_FILES_STATUSES = (amo.STATUS_APPROVED, amo.STATUS_DISABLED)
class TestViewExtensionQueueTable(TestCase):
def setUp(self):
super().setUp()
self.table = view_table_factory(ViewExtensionQueue)([])
def test_addon_name(self):
row = Mock()
page = Mock()
page.start_index = Mock()
page.start_index.return_value = 1
row.addon_name = u'フォクすけといっしょ'
row.addon_slug = 'test'
row.latest_version = u'0.12'
self.table.set_page(page)
a = pq(self.table.render_addon_name(row))
assert a.attr('href') == (
reverse('reviewers.review', args=[str(row.addon_slug)]))
assert a.text() == u"フォクすけといっしょ 0.12"
def test_addon_type_id(self):
row = Mock()
row.addon_type_id = amo.ADDON_THEME
assert str(self.table.render_addon_type_id(row)) == (
u'Complete Theme')
def test_waiting_time_in_days(self):
row = Mock()
row.waiting_time_days = 10
row.waiting_time_hours = 10 * 24
assert self.table.render_waiting_time_min(row) == u'10 days'
def test_waiting_time_one_day(self):
row = Mock()
row.waiting_time_days = 1
row.waiting_time_hours = 24
row.waiting_time_min = 60 * 24
assert self.table.render_waiting_time_min(row) == u'1 day'
def test_waiting_time_in_hours(self):
row = Mock()
row.waiting_time_days = 0
row.waiting_time_hours = 22
row.waiting_time_min = 60 * 22
assert self.table.render_waiting_time_min(row) == u'22 hours'
def test_waiting_time_in_min(self):
row = Mock()
row.waiting_time_days = 0
row.waiting_time_hours = 0
row.waiting_time_min = 11
assert self.table.render_waiting_time_min(row) == u'11 minutes'
def test_waiting_time_in_secs(self):
row = Mock()
row.waiting_time_days = 0
row.waiting_time_hours = 0
row.waiting_time_min = 0
assert self.table.render_waiting_time_min(row) == u'moments ago'
def test_flags(self):
row = Mock()
row.flags = [('admin-review', 'Admin Review')]
doc = pq(self.table.render_flags(row))
assert doc('div.ed-sprite-admin-review').length
class TestUnlistedViewAllListTable(TestCase):
def setUp(self):
super(TestUnlistedViewAllListTable, self).setUp()
self.table = ViewUnlistedAllListTable([])
def test_addon_name(self):
row = Mock()
page = Mock()
page.start_index = Mock()
page.start_index.return_value = 1
row.addon_name = u'フォクすけといっしょ'
row.addon_slug = 'test'
row.latest_version = u'0.12'
self.table.set_page(page)
a = pq(self.table.render_addon_name(row))
assert (a.attr('href') == reverse(
'reviewers.review', args=['unlisted', str(row.addon_slug)]))
assert a.text() == u'フォクすけといっしょ 0.12'
def test_last_review(self):
row = Mock()
row.review_version_num = u'0.34.3b'
row.review_date = u'2016-01-01'
doc = pq(self.table.render_review_date(row))
assert doc.text() == u'0.34.3b on 2016-01-01'
def test_no_review(self):
row = Mock()
row.review_version_num = None
row.review_date = None
doc = pq(self.table.render_review_date(row))
assert doc.text() == u'No Reviews'
def test_authors_few(self):
row = Mock()
row.authors = [(123, 'bob'), (456, 'steve')]
doc = pq(self.table.render_authors(row))
assert doc('span').text() == 'bob steve'
assert doc('span a:eq(0)').attr('href') == UserProfile.create_user_url(
123)
assert doc('span a:eq(1)').attr('href') == UserProfile.create_user_url(
456)
assert doc('span').attr('title') == 'bob steve'
def test_authors_four(self):
row = Mock()
row.authors = [(123, 'bob'), (456, 'steve'), (789, 'cvan'),
(999, 'basta')]
doc = pq(self.table.render_authors(row))
assert doc.text() == 'bob steve cvan ...'
assert doc('span a:eq(0)').attr('href') == UserProfile.create_user_url(
123)
assert doc('span a:eq(1)').attr('href') == UserProfile.create_user_url(
456)
assert doc('span a:eq(2)').attr('href') == UserProfile.create_user_url(
789)
assert doc('span').attr('title') == 'bob steve cvan basta', doc.html()
yesterday = datetime.today() - timedelta(days=1)
class TestReviewHelperBase(TestCase):
__test__ = False
fixtures = ['base/addon_3615', 'base/users']
preamble = 'Mozilla Add-ons: Delicious Bookmarks 2.1.072'
def setUp(self):
super().setUp()
class FakeRequest:
user = UserProfile.objects.get(pk=10482)
self.request = FakeRequest()
self.addon = Addon.objects.get(pk=3615)
self.version = self.addon.versions.all()[0]
self.helper = self.get_helper()
self.file = self.version.files.all()[0]
self.create_paths()
def _check_score(self, reviewed_type, bonus=0):
scores = ReviewerScore.objects.all()
assert len(scores) > 0
assert scores[0].score == amo.REVIEWED_SCORES[reviewed_type] + bonus
assert scores[0].note_key == reviewed_type
def remove_paths(self):
for path in (self.file.file_path, self.file.guarded_file_path):
if storage.exists(path):
storage.delete(path)
def create_paths(self):
for path in (self.file.file_path, self.file.guarded_file_path):
if not storage.exists(path):
with storage.open(path, 'w') as f:
f.write('test data\n')
self.addCleanup(self.remove_paths)
def setup_data(self, status, delete=None,
file_status=amo.STATUS_AWAITING_REVIEW,
channel=amo.RELEASE_CHANNEL_LISTED,
content_review_only=False, type=amo.ADDON_EXTENSION):
if delete is None:
delete = []
mail.outbox = []
ActivityLog.objects.for_addons(self.helper.addon).delete()
self.addon.update(status=status, type=type)
self.file.update(status=file_status)
if channel == amo.RELEASE_CHANNEL_UNLISTED:
self.make_addon_unlisted(self.addon)
self.version.reload()
self.file.reload()
self.helper = self.get_helper(content_review_only=content_review_only)
data = self.get_data().copy()
for key in delete:
del data[key]
self.helper.set_data(data)
def get_data(self):
return {'comments': 'foo', 'addon_files': self.version.files.all(),
'action': 'public', 'operating_systems': 'osx',
'applications': 'Firefox',
'info_request': self.addon.pending_info_request}
def get_helper(self, content_review_only=False):
return ReviewHelper(
request=self.request, addon=self.addon, version=self.version,
content_review_only=content_review_only)
def setup_type(self, status):
self.addon.update(status=status)
return self.get_helper().handler.review_type
def check_log_count(self, id):
return (ActivityLog.objects.for_addons(self.helper.addon)
.filter(action=id).count())
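# --- Illustrative sketch (hypothetical composition of the helpers defined
# above, not an actual test): the subclasses below typically drive a review
# like this --
#
#   self.setup_data(amo.STATUS_NOMINATED)        # stage addon/file status
#   self.helper.handler.process_public()         # run the approval action
#   assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1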
# These tests can call signing when making things public. We want to test that
# it works correctly, so we set ENABLE_ADDON_SIGNING to True and mock the
# actual signing call.
@override_settings(ENABLE_ADDON_SIGNING=True)
@mock.patch('olympia.lib.crypto.signing.call_signing', lambda f: None)
class TestReviewHelper(TestReviewHelperBase):
__test__ = True
def test_no_request(self):
self.request = None
helper = self.get_helper()
assert helper.content_review_only is False
assert helper.actions == {}
helper = self.get_helper(content_review_only=True)
assert helper.content_review_only is True
assert helper.actions == {}
def test_type_nominated(self):
assert self.setup_type(amo.STATUS_NOMINATED) == 'extension_nominated'
def test_type_pending(self):
assert self.setup_type(amo.STATUS_NULL) == 'extension_pending'
assert self.setup_type(amo.STATUS_APPROVED) == 'extension_pending'
assert self.setup_type(amo.STATUS_DISABLED) == 'extension_pending'
def test_no_version(self):
helper = ReviewHelper(
request=self.request, addon=self.addon, version=None)
assert helper.handler.review_type == 'extension_pending'
def test_review_files(self):
version_factory(addon=self.addon,
created=self.version.created - timedelta(days=1),
file_kw={'status': amo.STATUS_APPROVED})
for status in REVIEW_FILES_STATUSES:
self.setup_data(status=status)
assert self.helper.handler.__class__ == ReviewFiles
def test_review_addon(self):
self.setup_data(status=amo.STATUS_NOMINATED)
assert self.helper.handler.__class__ == ReviewAddon
def test_process_action_none(self):
self.helper.set_data({'action': 'foo'})
with self.assertRaises(Exception):
self.helper.process()
def test_process_action_good(self):
self.helper.set_data({'action': 'reply', 'comments': 'foo'})
self.helper.process()
assert len(mail.outbox) == 1
def test_action_details(self):
for status in Addon.STATUS_CHOICES:
self.addon.update(status=status)
helper = self.get_helper()
actions = helper.actions
for k, v in actions.items():
assert str(v['details']), "Missing details for: %s" % k
def get_review_actions(
self, addon_status, file_status, content_review_only=False):
self.file.update(status=file_status)
self.addon.update(status=addon_status)
# Need to clear self.version.all_files cache since we updated the file.
if self.version:
del self.version.all_files
return self.get_helper(content_review_only=content_review_only).actions
def test_actions_full_nominated(self):
expected = ['public', 'reject', 'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_NOMINATED,
file_status=amo.STATUS_AWAITING_REVIEW).keys()) == expected
def test_actions_full_update(self):
expected = ['public', 'reject', 'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=amo.STATUS_AWAITING_REVIEW).keys()) == expected
def test_actions_full_nonpending(self):
expected = ['reply', 'super', 'comment']
f_statuses = [amo.STATUS_APPROVED, amo.STATUS_DISABLED]
for file_status in f_statuses:
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=file_status).keys()) == expected
def test_actions_public_post_reviewer(self):
self.grant_permission(self.request.user, 'Addons:PostReview')
expected = ['reject_multiple_versions', 'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=amo.STATUS_APPROVED).keys()) == expected
# Now make current version auto-approved...
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
expected = ['confirm_auto_approved', 'reject_multiple_versions',
'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=amo.STATUS_APPROVED).keys()) == expected
def test_actions_content_review(self):
self.grant_permission(self.request.user, 'Addons:ContentReview')
expected = ['approve_content', 'reject_multiple_versions',
'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=amo.STATUS_APPROVED,
content_review_only=True).keys()) == expected
def test_actions_content_review_non_approved_addon(self):
# Content reviewers can also see add-ons before they are approved for
# the first time.
self.grant_permission(self.request.user, 'Addons:ContentReview')
expected = ['approve_content', 'reject_multiple_versions',
'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_NOMINATED,
file_status=amo.STATUS_AWAITING_REVIEW,
content_review_only=True).keys()) == expected
def test_actions_public_static_theme(self):
# Having Addons:PostReview and dealing with a public add-on would
# normally be enough to give you access to reject multiple versions
# action, but it should not be available for static themes.
self.grant_permission(self.request.user, 'Addons:PostReview')
self.addon.update(type=amo.ADDON_STATICTHEME)
expected = ['public', 'reject', 'reply', 'super', 'comment']
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=amo.STATUS_AWAITING_REVIEW).keys()) == expected
def test_actions_no_version(self):
"""Deleted addons and addons with no versions in that channel have no
version set."""
expected = ['comment']
self.version = None
assert list(self.get_review_actions(
addon_status=amo.STATUS_APPROVED,
file_status=amo.STATUS_APPROVED).keys()) == expected
def test_set_files(self):
self.file.update(datestatuschanged=yesterday)
self.helper.set_data({'addon_files': self.version.files.all()})
self.helper.handler.set_files(amo.STATUS_APPROVED,
self.helper.handler.data['addon_files'])
self.file = self.version.files.all()[0]
assert self.file.status == amo.STATUS_APPROVED
assert self.file.datestatuschanged.date() > yesterday.date()
def test_logs(self):
self.helper.set_data({'comments': 'something'})
self.helper.handler.log_action(amo.LOG.APPROVE_VERSION)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
def test_notify_email(self):
self.helper.set_data(self.get_data())
base_fragment = 'To respond, please reply to this email or visit'
user = self.addon.listed_authors[0]
ActivityLogToken.objects.create(version=self.version, user=user)
uuid = self.version.token.get(user=user).uuid.hex
reply_email = (
'reviewreply+%s@%s' % (uuid, settings.INBOUND_EMAIL_DOMAIN))
templates = (
'extension_nominated_to_approved',
'extension_nominated_to_rejected',
'extension_pending_to_rejected',
'theme_nominated_to_approved',
'theme_nominated_to_rejected',
'theme_pending_to_rejected',)
for template in templates:
mail.outbox = []
self.helper.handler.notify_email(template, 'Sample subject %s, %s')
assert len(mail.outbox) == 1
assert base_fragment in mail.outbox[0].body
assert mail.outbox[0].reply_to == [reply_email]
mail.outbox = []
# This one does not inherit from base.txt because it's for unlisted
# signing notification, which is not really something that necessitates
# reviewer interaction, so it's simpler.
template = 'unlisted_to_reviewed_auto'
self.helper.handler.notify_email(template, 'Sample subject %s, %s')
assert len(mail.outbox) == 1
assert base_fragment not in mail.outbox[0].body
assert mail.outbox[0].reply_to == [reply_email]
def test_email_links(self):
expected = {
'extension_nominated_to_approved': 'addon_url',
'extension_nominated_to_rejected': 'dev_versions_url',
'extension_pending_to_approved': 'addon_url',
'extension_pending_to_rejected': 'dev_versions_url',
'theme_nominated_to_approved': 'addon_url',
'theme_nominated_to_rejected': 'dev_versions_url',
'theme_pending_to_approved': 'addon_url',
'theme_pending_to_rejected': 'dev_versions_url',
'unlisted_to_reviewed_auto': 'dev_versions_url',
}
self.helper.set_data(self.get_data())
context_data = self.helper.handler.get_context_data()
for template, context_key in expected.items():
mail.outbox = []
self.helper.handler.notify_email(template, 'Sample subject %s, %s')
assert len(mail.outbox) == 1
assert context_key in context_data
assert context_data.get(context_key) in mail.outbox[0].body
def test_send_reviewer_reply(self):
assert not self.addon.pending_info_request
self.setup_data(amo.STATUS_APPROVED, ['addon_files'])
self.helper.handler.reviewer_reply()
assert not self.addon.pending_info_request
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == self.preamble
assert self.check_log_count(amo.LOG.REVIEWER_REPLY_VERSION.id) == 1
def test_request_more_information(self):
self.setup_data(amo.STATUS_APPROVED, ['addon_files'])
self.helper.handler.data['info_request'] = True
self.helper.handler.reviewer_reply()
self.assertCloseToNow(
self.addon.pending_info_request,
now=datetime.now() + timedelta(days=7))
assert len(mail.outbox) == 1
assert (
mail.outbox[0].subject ==
'Mozilla Add-ons: Action Required for Delicious Bookmarks 2.1.072')
assert self.check_log_count(amo.LOG.REQUEST_INFORMATION.id) == 1
def test_request_more_information_custom_deadline(self):
self.setup_data(amo.STATUS_APPROVED, ['addon_files'])
self.helper.handler.data['info_request'] = True
self.helper.handler.data['info_request_deadline'] = 42
self.helper.handler.reviewer_reply()
self.assertCloseToNow(
self.addon.pending_info_request,
now=datetime.now() + timedelta(days=42))
assert len(mail.outbox) == 1
assert (
mail.outbox[0].subject ==
'Mozilla Add-ons: Action Required for Delicious Bookmarks 2.1.072')
assert self.check_log_count(amo.LOG.REQUEST_INFORMATION.id) == 1
def test_request_more_information_reset_notified_flag(self):
self.setup_data(amo.STATUS_APPROVED, ['addon_files'])
flags = AddonReviewerFlags.objects.create(
addon=self.addon,
pending_info_request=datetime.now() - timedelta(days=1),
notified_about_expiring_info_request=True)
self.helper.handler.data['info_request'] = True
self.helper.handler.reviewer_reply()
flags.reload()
self.assertCloseToNow(
flags.pending_info_request,
now=datetime.now() + timedelta(days=7))
assert not flags.notified_about_expiring_info_request
assert len(mail.outbox) == 1
assert (
mail.outbox[0].subject ==
'Mozilla Add-ons: Action Required for Delicious Bookmarks 2.1.072')
assert self.check_log_count(amo.LOG.REQUEST_INFORMATION.id) == 1
def test_request_more_information_deleted_addon(self):
self.addon.delete()
self.test_request_more_information()
def test_email_no_locale(self):
self.addon.name = {
'es': '¿Dónde está la biblioteca?'
}
self.setup_data(amo.STATUS_NOMINATED, ['addon_files'])
with translation.override('es'):
assert translation.get_language() == 'es'
self.helper.handler.process_public()
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
u'Mozilla Add-ons: Delicious Bookmarks 2.1.072 Approved')
assert '/en-US/firefox/addon/a3615' not in mail.outbox[0].body
assert '/es/firefox/addon/a3615' not in mail.outbox[0].body
assert '/addon/a3615' in mail.outbox[0].body
assert 'Your add-on, Delicious Bookmarks ' in mail.outbox[0].body
def test_nomination_to_public_no_files(self):
self.setup_data(amo.STATUS_NOMINATED, ['addon_files'])
self.helper.handler.process_public()
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_APPROVED)
def test_nomination_to_public_and_current_version(self):
self.setup_data(amo.STATUS_NOMINATED, ['addon_files'])
self.addon = Addon.objects.get(pk=3615)
self.addon.update(_current_version=None)
assert not self.addon.current_version
self.helper.handler.process_public()
self.addon = Addon.objects.get(pk=3615)
assert self.addon.current_version
def test_nomination_to_public_new_addon(self):
""" Make sure new add-ons can be made public (bug 637959) """
status = amo.STATUS_NOMINATED
self.setup_data(status)
# Make sure we have no public files
for version in self.addon.versions.all():
version.files.update(status=amo.STATUS_AWAITING_REVIEW)
self.helper.handler.process_public()
# Re-fetch the add-on
addon = Addon.objects.get(pk=3615)
assert addon.status == amo.STATUS_APPROVED
assert addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_APPROVED)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == '%s Approved' % self.preamble
# AddonApprovalsCounter counter is now at 1 for this addon since there
# was a human review.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 1
self.assertCloseToNow(approval_counter.last_human_review)
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_FULL)
@patch('olympia.reviewers.utils.sign_file')
def test_nomination_to_public(self, sign_mock):
sign_mock.reset()
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_public()
assert self.addon.status == amo.STATUS_APPROVED
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_APPROVED)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s Approved' % self.preamble)
assert 'has been approved' in mail.outbox[0].body
# AddonApprovalsCounter counter is now at 1 for this addon.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 1
sign_mock.assert_called_with(self.file)
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_FULL)
@patch('olympia.reviewers.utils.sign_file')
def test_old_nomination_to_public_bonus_score(self, sign_mock):
sign_mock.reset()
self.setup_data(amo.STATUS_NOMINATED)
self.version.update(nomination=self.days_ago(9))
self.helper.handler.process_public()
assert self.addon.status == amo.STATUS_APPROVED
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_APPROVED)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s Approved' % self.preamble)
assert 'has been approved' in mail.outbox[0].body
# AddonApprovalsCounter counter is now at 1 for this addon.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 1
sign_mock.assert_called_with(self.file)
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
# Score has bonus points added for reviewing an old add-on: nominated 9
# days ago and (assuming the standard 7-day limit at 2 bonus points per
# overdue day) 2 days over the limit = 4 points.
self._check_score(amo.REVIEWED_ADDON_FULL, bonus=4)
@patch('olympia.reviewers.utils.sign_file')
def test_nomination_to_public_no_request(self, sign_mock):
self.request = None
sign_mock.reset()
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_public()
assert self.addon.status == amo.STATUS_APPROVED
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_APPROVED)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s Approved' % self.preamble)
assert 'has been approved' in mail.outbox[0].body
# AddonApprovalsCounter counter is now at 0 for this addon since there
# was an automatic approval.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 0
# Since approval counter did not exist for this add-on before, the last
# human review field should be empty.
assert approval_counter.last_human_review is None
sign_mock.assert_called_with(self.file)
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
# No request, no user, therefore no score.
assert ReviewerScore.objects.count() == 0
@patch('olympia.reviewers.utils.sign_file')
def test_public_addon_with_version_awaiting_review_to_public(
self, sign_mock):
sign_mock.reset()
self.addon.current_version.update(created=self.days_ago(1))
self.version = version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
version='3.0.42',
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.preamble = 'Mozilla Add-ons: Delicious Bookmarks 3.0.42'
self.file = self.version.files.all()[0]
self.setup_data(amo.STATUS_APPROVED)
self.create_paths()
AddonApprovalsCounter.objects.create(
addon=self.addon, counter=1, last_human_review=self.days_ago(42))
# Safeguards.
assert isinstance(self.helper.handler, ReviewFiles)
assert self.addon.status == amo.STATUS_APPROVED
assert self.file.status == amo.STATUS_AWAITING_REVIEW
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_APPROVED)
self.helper.handler.process_public()
self.addon.reload()
assert self.addon.status == amo.STATUS_APPROVED
assert self.file.reload().status == amo.STATUS_APPROVED
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_APPROVED)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s Updated' % self.preamble)
assert 'has been updated' in mail.outbox[0].body
# AddonApprovalsCounter counter is now at 2 for this addon since there
# was another human review. The last human review date should have been
# updated.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 2
self.assertCloseToNow(approval_counter.last_human_review)
sign_mock.assert_called_with(self.file)
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_UPDATE)
@patch('olympia.reviewers.utils.sign_file')
def test_public_addon_with_version_awaiting_review_to_sandbox(
self, sign_mock):
sign_mock.reset()
self.addon.current_version.update(created=self.days_ago(1))
self.version = version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
version='3.0.42',
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.preamble = 'Mozilla Add-ons: Delicious Bookmarks 3.0.42'
self.file = self.version.files.all()[0]
self.setup_data(amo.STATUS_APPROVED)
self.create_paths()
AddonApprovalsCounter.objects.create(addon=self.addon, counter=1)
# Safeguards.
assert isinstance(self.helper.handler, ReviewFiles)
assert self.addon.status == amo.STATUS_APPROVED
assert self.file.status == amo.STATUS_AWAITING_REVIEW
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_APPROVED)
self.helper.handler.process_sandbox()
self.addon.reload()
assert self.addon.status == amo.STATUS_APPROVED
assert self.file.reload().status == amo.STATUS_DISABLED
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_APPROVED)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
"%s didn't pass review" % self.preamble)
assert 'reviewed and did not meet the criteria' in mail.outbox[0].body
# AddonApprovalsCounter counter is still at 1 for this addon.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 1
assert not sign_mock.called
assert storage.exists(self.file.guarded_file_path)
assert not storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 1
self._check_score(amo.REVIEWED_ADDON_UPDATE)
def test_public_addon_confirm_auto_approval(self):
self.grant_permission(self.request.user, 'Addons:PostReview')
self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
summary = AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED, weight=151)
assert summary.confirmed is None
self.create_paths()
# Safeguards.
assert self.addon.status == amo.STATUS_APPROVED
assert self.file.status == amo.STATUS_APPROVED
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_APPROVED)
self.helper.handler.confirm_auto_approved()
summary.reload()
assert summary.confirmed is True
approvals_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
self.assertCloseToNow(approvals_counter.last_human_review)
assert self.check_log_count(amo.LOG.APPROVE_CONTENT.id) == 0
assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 1
activity = (ActivityLog.objects.for_addons(self.addon)
.filter(action=amo.LOG.CONFIRM_AUTO_APPROVED.id)
.get())
assert activity.arguments == [self.addon, self.version]
assert activity.details['comments'] == ''
# Check points awarded.
self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)
def test_public_with_unreviewed_version_addon_confirm_auto_approval(self):
self.grant_permission(self.request.user, 'Addons:PostReview')
self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
self.current_version = self.version
summary = AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED, weight=152)
self.version = version_factory(
addon=self.addon, version='3.0',
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.file = self.version.files.all()[0]
self.helper = self.get_helper() # To make it pick up the new version.
self.helper.set_data(self.get_data())
# Confirm approval action should be available even if the latest
# version is not public; what we care about is the current_version.
assert 'confirm_auto_approved' in self.helper.actions
self.helper.handler.confirm_auto_approved()
summary.reload()
assert summary.confirmed is True
approvals_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
self.assertCloseToNow(approvals_counter.last_human_review)
assert self.check_log_count(amo.LOG.APPROVE_CONTENT.id) == 0
assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 1
activity = (ActivityLog.objects.for_addons(self.addon)
.filter(action=amo.LOG.CONFIRM_AUTO_APPROVED.id)
.get())
assert activity.arguments == [self.addon, self.current_version]
assert activity.details['comments'] == ''
# Check points awarded.
self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)
def test_public_with_disabled_version_addon_confirm_auto_approval(self):
self.grant_permission(self.request.user, 'Addons:PostReview')
self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
self.current_version = self.version
summary = AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED, weight=153)
self.version = version_factory(
addon=self.addon, version='3.0',
file_kw={'status': amo.STATUS_DISABLED})
self.file = self.version.files.all()[0]
self.helper = self.get_helper() # To make it pick up the new version.
self.helper.set_data(self.get_data())
# Confirm approval action should be available even if the latest
# version is not public; what we care about is the current_version.
assert 'confirm_auto_approved' in self.helper.actions
self.helper.handler.confirm_auto_approved()
summary.reload()
assert summary.confirmed is True
approvals_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
self.assertCloseToNow(approvals_counter.last_human_review)
assert self.check_log_count(amo.LOG.APPROVE_CONTENT.id) == 0
assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 1
activity = (ActivityLog.objects.for_addons(self.addon)
.filter(action=amo.LOG.CONFIRM_AUTO_APPROVED.id)
.get())
assert activity.arguments == [self.addon, self.current_version]
assert activity.details['comments'] == ''
# Check points awarded.
self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)
def test_unlisted_version_addon_confirm_auto_approval(self):
self.grant_permission(self.request.user, 'Addons:ReviewUnlisted')
self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED)
self.version = version_factory(
addon=self.addon, version='3.0',
channel=amo.RELEASE_CHANNEL_UNLISTED)
self.file = self.version.files.all()[0]
self.helper = self.get_helper() # To make it pick up the new version.
self.helper.set_data(self.get_data())
# Confirm approval action should be available since the version
# we are looking at is unlisted and reviewer has permission.
assert 'confirm_auto_approved' in self.helper.actions
self.helper.handler.confirm_auto_approved()
assert (
AddonApprovalsCounter.objects.filter(addon=self.addon).count() ==
0) # Not incremented since it was unlisted.
assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 1
activity = (ActivityLog.objects.for_addons(self.addon)
.filter(action=amo.LOG.CONFIRM_AUTO_APPROVED.id)
.get())
assert activity.arguments == [self.addon, self.version]
@patch('olympia.reviewers.utils.sign_file')
def test_null_to_public_unlisted(self, sign_mock):
sign_mock.reset()
self.setup_data(amo.STATUS_NULL,
channel=amo.RELEASE_CHANNEL_UNLISTED)
self.helper.handler.process_public()
assert self.addon.status == amo.STATUS_NULL
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_APPROVED)
# AddonApprovalsCounter was not touched since the version we made
# public is unlisted.
assert not AddonApprovalsCounter.objects.filter(
addon=self.addon).exists()
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s signed and ready to download' % self.preamble)
assert ('%s is now signed and ready for you to download' %
self.version.version in mail.outbox[0].body)
assert 'You received this email because' not in mail.outbox[0].body
sign_mock.assert_called_with(self.file)
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
@patch('olympia.reviewers.utils.sign_file')
def test_nomination_to_public_failed_signing(self, sign_mock):
sign_mock.side_effect = Exception
sign_mock.reset()
self.setup_data(amo.STATUS_NOMINATED)
with self.assertRaises(Exception):
self.helper.handler.process_public()
# AddonApprovalsCounter was not touched since we failed signing.
assert not AddonApprovalsCounter.objects.filter(
addon=self.addon).exists()
# Status unchanged.
assert self.addon.status == amo.STATUS_NOMINATED
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_AWAITING_REVIEW)
assert len(mail.outbox) == 0
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 0
@patch('olympia.reviewers.utils.sign_file')
def test_nomination_to_sandbox(self, sign_mock):
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_sandbox()
assert self.addon.status == amo.STATUS_NULL
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_DISABLED)
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'%s didn\'t pass review' % self.preamble)
assert 'did not meet the criteria' in mail.outbox[0].body
# AddonApprovalsCounter was not touched since we didn't approve.
assert not AddonApprovalsCounter.objects.filter(
addon=self.addon).exists()
assert not sign_mock.called
assert storage.exists(self.file.guarded_file_path)
assert not storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 1
def test_email_unicode_monster(self):
self.addon.name = u'TaobaoShopping淘宝网导航按钮'
self.addon.save()
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_sandbox()
assert u'TaobaoShopping淘宝网导航按钮' in mail.outbox[0].subject
def test_nomination_to_super_review(self):
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_super_review()
assert self.addon.needs_admin_code_review
assert self.check_log_count(amo.LOG.REQUEST_ADMIN_REVIEW_CODE.id) == 1
def test_auto_approved_admin_code_review(self):
self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
self.helper.handler.process_super_review()
assert self.addon.needs_admin_code_review
assert self.check_log_count(amo.LOG.REQUEST_ADMIN_REVIEW_CODE.id) == 1
def test_auto_approved_admin_content_review(self):
self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED,
content_review_only=True)
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
self.helper.handler.process_super_review()
assert self.addon.needs_admin_content_review
assert self.check_log_count(
amo.LOG.REQUEST_ADMIN_REVIEW_CONTENT.id) == 1
def test_auto_approved_admin_theme_review(self):
self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED,
type=amo.ADDON_STATICTHEME)
AutoApprovalSummary.objects.create(
version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
self.helper.handler.process_super_review()
assert self.addon.needs_admin_theme_review
assert self.check_log_count(amo.LOG.REQUEST_ADMIN_REVIEW_THEME.id) == 1
def test_nomination_to_super_review_and_escalate(self):
self.setup_data(amo.STATUS_NOMINATED)
self.file.update(status=amo.STATUS_AWAITING_REVIEW)
self.helper.handler.process_super_review()
assert self.addon.needs_admin_code_review
assert self.check_log_count(amo.LOG.REQUEST_ADMIN_REVIEW_CODE.id) == 1
def test_operating_system_present(self):
self.setup_data(amo.STATUS_APPROVED)
self.helper.handler.process_sandbox()
assert 'Tested on osx with Firefox' in mail.outbox[0].body
def test_operating_system_not_present(self):
self.setup_data(amo.STATUS_APPROVED)
data = self.get_data().copy()
data['operating_systems'] = ''
self.helper.set_data(data)
self.helper.handler.process_sandbox()
assert 'Tested with Firefox' in mail.outbox[0].body
def test_application_not_present(self):
self.setup_data(amo.STATUS_APPROVED)
data = self.get_data().copy()
data['applications'] = ''
self.helper.set_data(data)
self.helper.handler.process_sandbox()
assert 'Tested on osx' in mail.outbox[0].body
def test_both_not_present(self):
self.setup_data(amo.STATUS_APPROVED)
data = self.get_data().copy()
data['applications'] = ''
data['operating_systems'] = ''
self.helper.set_data(data)
self.helper.handler.process_sandbox()
assert 'Tested' not in mail.outbox[0].body
def test_pending_to_super_review(self):
for status in (amo.STATUS_DISABLED, amo.STATUS_NULL):
self.setup_data(status)
self.helper.handler.process_super_review()
assert self.addon.needs_admin_code_review
def test_nominated_review_time_set_version_process_public(self):
self.version.update(reviewed=None)
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_public()
assert self.version.reload().reviewed
def test_nominated_review_time_set_version_process_sandbox(self):
self.version.update(reviewed=None)
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_sandbox()
assert self.version.reload().reviewed
def test_nominated_review_time_set_file_process_public(self):
self.file.update(reviewed=None)
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_public()
assert File.objects.get(pk=self.file.pk).reviewed
def test_nominated_review_time_set_file_process_sandbox(self):
self.file.update(reviewed=None)
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_sandbox()
assert File.objects.get(pk=self.file.pk).reviewed
def test_review_unlisted_while_a_listed_version_is_awaiting_review(self):
self.make_addon_unlisted(self.addon)
self.version.reload()
version_factory(
addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
self.addon.update(status=amo.STATUS_NOMINATED)
assert self.get_helper()
def test_reject_multiple_versions(self):
old_version = self.version
self.version = version_factory(addon=self.addon, version='3.0')
AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED, weight=101)
# An extra file should not change anything.
file_factory(version=self.version, platform=amo.PLATFORM_LINUX.id)
self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
# Safeguards.
assert isinstance(self.helper.handler, ReviewFiles)
assert self.addon.status == amo.STATUS_APPROVED
assert self.file.status == amo.STATUS_APPROVED
assert self.addon.current_version.is_public()
data = self.get_data().copy()
data['versions'] = self.addon.versions.all()
self.helper.set_data(data)
self.helper.handler.reject_multiple_versions()
self.addon.reload()
self.file.reload()
assert self.addon.status == amo.STATUS_NULL
assert self.addon.current_version is None
assert list(self.addon.versions.all()) == [self.version, old_version]
assert self.file.status == amo.STATUS_DISABLED
assert len(mail.outbox) == 1
assert mail.outbox[0].to == [self.addon.authors.all()[0].email]
assert mail.outbox[0].subject == (
u'Mozilla Add-ons: Delicious Bookmarks has been disabled on '
u'addons.mozilla.org')
assert ('your add-on Delicious Bookmarks has been disabled'
in mail.outbox[0].body)
log_token = ActivityLogToken.objects.get()
assert log_token.uuid.hex in mail.outbox[0].reply_to[0]
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 2
assert self.check_log_count(amo.LOG.REJECT_CONTENT.id) == 0
logs = (ActivityLog.objects.for_addons(self.addon)
.filter(action=amo.LOG.REJECT_VERSION.id))
assert logs[0].created == logs[1].created
# Check points awarded.
self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)
def test_reject_multiple_versions_except_latest(self):
old_version = self.version
extra_version = version_factory(addon=self.addon, version='3.1')
# Add yet another version we don't want to reject.
self.version = version_factory(addon=self.addon, version='42.0')
AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED, weight=91)
self.setup_data(amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED)
# Safeguards.
assert isinstance(self.helper.handler, ReviewFiles)
assert self.addon.status == amo.STATUS_APPROVED
assert self.file.status == amo.STATUS_APPROVED
assert self.addon.current_version.is_public()
data = self.get_data().copy()
data['versions'] = self.addon.versions.all().exclude(
pk=self.version.pk)
self.helper.set_data(data)
self.helper.handler.reject_multiple_versions()
self.addon.reload()
self.file.reload()
# latest_version is still public so the add-on is still public.
assert self.addon.status == amo.STATUS_APPROVED
assert self.addon.current_version == self.version
assert list(self.addon.versions.all().order_by('-pk')) == [
self.version, extra_version, old_version]
assert self.file.status == amo.STATUS_DISABLED
assert len(mail.outbox) == 1
assert mail.outbox[0].to == [self.addon.authors.all()[0].email]
assert mail.outbox[0].subject == (
u'Mozilla Add-ons: Versions disabled for Delicious Bookmarks')
assert ('Version(s) affected and disabled:\n3.1, 2.1.072'
in mail.outbox[0].body)
log_token = ActivityLogToken.objects.filter(
version=self.version).get()
assert log_token.uuid.hex in mail.outbox[0].reply_to[0]
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 2
assert self.check_log_count(amo.LOG.REJECT_CONTENT.id) == 0
# Check points awarded.
self._check_score(amo.REVIEWED_EXTENSION_MEDIUM_RISK)
def test_reject_multiple_versions_content_review(self):
self.grant_permission(self.request.user, 'Addons:ContentReview')
old_version = self.version
self.version = version_factory(addon=self.addon, version='3.0')
self.setup_data(
amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED,
content_review_only=True)
# Safeguards.
assert isinstance(self.helper.handler, ReviewFiles)
assert self.addon.status == amo.STATUS_APPROVED
assert self.file.status == amo.STATUS_APPROVED
assert self.addon.current_version.is_public()
data = self.get_data().copy()
data['versions'] = self.addon.versions.all()
self.helper.set_data(data)
self.helper.handler.reject_multiple_versions()
self.addon.reload()
self.file.reload()
assert self.addon.status == amo.STATUS_NULL
assert self.addon.current_version is None
assert list(self.addon.versions.all()) == [self.version, old_version]
assert self.file.status == amo.STATUS_DISABLED
assert len(mail.outbox) == 1
assert mail.outbox[0].to == [self.addon.authors.all()[0].email]
assert mail.outbox[0].subject == (
u'Mozilla Add-ons: Delicious Bookmarks has been disabled on '
u'addons.mozilla.org')
assert ('your add-on Delicious Bookmarks has been disabled'
in mail.outbox[0].body)
log_token = ActivityLogToken.objects.get()
assert log_token.uuid.hex in mail.outbox[0].reply_to[0]
assert self.check_log_count(amo.LOG.REJECT_VERSION.id) == 0
assert self.check_log_count(amo.LOG.REJECT_CONTENT.id) == 2
def test_approve_content_content_review(self):
self.grant_permission(self.request.user, 'Addons:ContentReview')
self.setup_data(
amo.STATUS_APPROVED, file_status=amo.STATUS_APPROVED,
content_review_only=True)
summary = AutoApprovalSummary.objects.create(
version=self.version, verdict=amo.AUTO_APPROVED)
self.create_paths()
# Safeguards.
assert self.addon.status == amo.STATUS_APPROVED
assert self.file.status == amo.STATUS_APPROVED
assert self.addon.current_version.files.all()[0].status == (
amo.STATUS_APPROVED)
self.helper.handler.approve_content()
summary.reload()
assert summary.confirmed is None # unchanged.
approvals_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approvals_counter.counter == 0
assert approvals_counter.last_human_review is None
self.assertCloseToNow(approvals_counter.last_content_review)
assert self.check_log_count(amo.LOG.CONFIRM_AUTO_APPROVED.id) == 0
assert self.check_log_count(amo.LOG.APPROVE_CONTENT.id) == 1
activity = (ActivityLog.objects.for_addons(self.addon)
.filter(action=amo.LOG.APPROVE_CONTENT.id)
.get())
assert activity.arguments == [self.addon, self.version]
assert activity.details['comments'] == ''
# Check points awarded.
self._check_score(amo.REVIEWED_CONTENT_REVIEW)
def test_dev_versions_url_in_context(self):
self.helper.set_data(self.get_data())
context_data = self.helper.handler.get_context_data()
assert context_data['dev_versions_url'] == absolutify(
self.addon.get_dev_url('versions'))
self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
context_data = self.helper.handler.get_context_data()
assert context_data['dev_versions_url'] == absolutify(
reverse('devhub.addons.versions', args=[self.addon.id]))
def test_nominated_to_approved_recommended(self):
DiscoveryItem.objects.create(
addon=self.addon, recommendable=True)
assert not self.addon.is_recommended
self.test_nomination_to_public()
del self.addon.is_recommended
assert self.addon.current_version.recommendation_approved is True
assert self.addon.is_recommended
def test_approved_update_recommended(self):
DiscoveryItem.objects.create(
addon=self.addon, recommendable=True)
assert not self.addon.is_recommended
self.test_public_addon_with_version_awaiting_review_to_public()
del self.addon.is_recommended
assert self.addon.current_version.recommendation_approved is True
assert self.addon.is_recommended is True
def test_autoapprove_fails_for_recommended(self):
DiscoveryItem.objects.create(
addon=self.addon, recommendable=True)
assert not self.addon.is_recommended
self.request.user = UserProfile.objects.get(id=settings.TASK_USER_ID)
with self.assertRaises(AssertionError):
self.test_nomination_to_public()
assert self.addon.current_version.recommendation_approved is False
assert not self.addon.is_recommended
@override_settings(ENABLE_ADDON_SIGNING=True)
class TestReviewHelperSigning(TestReviewHelperBase):
"""Tests that call signing but don't mock the actual call.
Instead tests will have to check the end-result to see if the signing
calls succeeded.
"""
__test__ = True
def setUp(self):
super().setUp()
responses.add_passthru(settings.AUTOGRAPH_CONFIG['server_url'])
self.addon = addon_factory(
guid='test@local', file_kw={'filename': 'webextension.xpi'},
users=[self.request.user])
self.version = self.addon.versions.all()[0]
self.helper = self.get_helper()
self.file = self.version.files.all()[0]
def test_nomination_to_public(self):
self.setup_data(amo.STATUS_NOMINATED)
self.helper.handler.process_public()
assert self.addon.status == amo.STATUS_APPROVED
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_APPROVED)
assert len(mail.outbox) == 1
# AddonApprovalsCounter counter is now at 1 for this addon.
approval_counter = AddonApprovalsCounter.objects.get(addon=self.addon)
assert approval_counter.counter == 1
assert storage.exists(self.file.file_path)
assert self.check_log_count(amo.LOG.APPROVE_VERSION.id) == 1
signature_info, manifest = _get_signature_details(self.file.file_path)
subject_info = signature_info.signer_certificate['subject']
assert subject_info['common_name'] == 'test@local'
assert manifest.count('Name: ') == 4
assert 'Name: index.js' in manifest
assert 'Name: manifest.json' in manifest
assert 'Name: META-INF/cose.manifest' in manifest
assert 'Name: META-INF/cose.sig' in manifest
def test_nominated_to_public_recommended(self):
self.setup_data(amo.STATUS_NOMINATED)
DiscoveryItem.objects.create(
addon=self.addon, recommendable=True)
assert not self.addon.is_recommended
self.helper.handler.process_public()
assert self.addon.status == amo.STATUS_APPROVED
assert self.addon.versions.all()[0].files.all()[0].status == (
amo.STATUS_APPROVED)
del self.addon.is_recommended
assert self.addon.current_version.recommendation_approved is True
assert self.addon.is_recommended
signature_info, manifest = _get_signature_details(self.file.file_path)
subject_info = signature_info.signer_certificate['subject']
assert subject_info['common_name'] == 'test@local'
assert manifest.count('Name: ') == 5
assert 'Name: index.js' in manifest
assert 'Name: manifest.json' in manifest
assert 'Name: META-INF/cose.manifest' in manifest
assert 'Name: META-INF/cose.sig' in manifest
assert 'Name: mozilla-recommendation.json' in manifest
recommendation_data = _get_recommendation_data(self.file.file_path)
assert recommendation_data['addon_id'] == 'test@local'
assert recommendation_data['states'] == ['recommended']
def test_send_email_autoescape():
s = 'woo&&<>\'""'
# Make sure HTML is not auto-escaped.
send_mail(u'Random subject with %s', s,
recipient_list=['nobody@mozilla.org'],
from_email='nobody@mozilla.org',
use_deny_list=False)
assert len(mail.outbox) == 1
assert mail.outbox[0].body == s
|
SnabbCo/neutron | refs/heads/master | neutron/plugins/ml2/drivers/mech_arista/config.py | 8 |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
""" Arista ML2 Mechanism driver specific configuration knobs.
Following are user configurable options for Arista ML2 Mechanism
driver. The eapi_username, eapi_password, and eapi_host are
required options. Region Name must be the same that is used by
Keystone service. This option is available to support multiple
OpenStack/Neutron controllers.
"""
ARISTA_DRIVER_OPTS = [
cfg.StrOpt('eapi_username',
default='',
help=_('Username for Arista EOS. This is required field. '
'If not set, all communications to Arista EOS'
'will fail.')),
cfg.StrOpt('eapi_password',
default='',
secret=True, # do not expose value in the logs
help=_('Password for Arista EOS. This is required field. '
'If not set, all communications to Arista EOS '
'will fail.')),
cfg.StrOpt('eapi_host',
default='',
help=_('Arista EOS IP address. This is required field. '
'If not set, all communications to Arista EOS'
'will fail.')),
cfg.BoolOpt('use_fqdn',
default=True,
help=_('Defines if hostnames are sent to Arista EOS as FQDNs '
'("node1.domain.com") or as short names ("node1"). '
'This is optional. If not set, a value of "True" '
'is assumed.')),
cfg.IntOpt('sync_interval',
default=180,
help=_('Sync interval in seconds between Neutron plugin and '
'EOS. This interval defines how often the '
'synchronization is performed. This is an optional '
'field. If not set, a value of 180 seconds is '
'assumed.')),
cfg.StrOpt('region_name',
default='RegionOne',
               help=_('Defines the region name assigned to this OpenStack '
                      'controller. This is useful when multiple '
                      'OpenStack/Neutron controllers are managing the same '
                      'Arista HW clusters. Note that this name must match '
                      'the region name registered with (or known to) the '
                      'Keystone service; authentication with Keystone is '
                      'performed by EOS. This is optional. If not set, a '
                      'value of "RegionOne" is assumed.'))
]
cfg.CONF.register_opts(ARISTA_DRIVER_OPTS, "ml2_arista")
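# Illustrative usage sketch (not part of the driver; the .ini path below is an
# assumption): once this module is imported, the options registered above are
# readable from the "ml2_arista" group of the global config object:
#
#   from oslo.config import cfg
#   cfg.CONF(['--config-file',
#             '/etc/neutron/plugins/ml2/ml2_conf_arista.ini'])
#   host = cfg.CONF.ml2_arista.eapi_host
#   interval = cfg.CONF.ml2_arista.sync_interval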
|
pratiknarang/SMADES
|
refs/heads/master
|
PreProcess/FilterPackets.py
|
2
|
## Module to obtain packet data from a pcap/dump file
## and save it in csv format using tshark.
## Filenames of input pcap files are taken from InputFiles.txt
## Tshark options are present in TsharkOptions.txt
## TsharkOptions.txt should not contain the -r option.
## usage: python FilterPackets.py
#import global constants
from P2P_CONSTANTS import *
from FilterPacketsHelper import *
import multiprocessing as MP
import subprocess
# execute a shell command as a child process, bounded by the semaphore
def executeCommand(command, outfilename):
    sem.acquire()
    subprocess.call(command, shell=True)
    # re-read tshark's raw output, clean it up, and rewrite the file in place
    infile = open(outfilename, 'r')
    data = [eachline.strip() for eachline in infile]
    infile.close()
    data = preprocess(data)
    outfile = open(outfilename, 'w')
    for eachcomponent in data:
        outfile.write(eachcomponent)
    outfile.close()
    print('done processing : ' + outfilename)
    sem.release()
#obtain input parameters and pcapfilenames
inputfiles = getPCapFileNames()
tsharkOptions = getTsharkOptions()
#create a semaphore so as not to exceed threadlimit
sem = MP.Semaphore(THREADLIMIT)
#get tshark commands to be executed
for filename in inputfiles:
    print(filename)
(command,outfilename) = contructTsharkCommand(filename,tsharkOptions)
task = MP.Process(target = executeCommand, args = (command, outfilename,))
task.start()
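# Illustrative sketch (not in the original script): to block until every
# worker finishes before the interpreter exits, collect the Process handles
# and join them:
#
#   tasks = []
#   for filename in inputfiles:
#       (command, outfilename) = contructTsharkCommand(filename, tsharkOptions)
#       task = MP.Process(target=executeCommand, args=(command, outfilename))
#       tasks.append(task)
#       task.start()
#   for task in tasks:
#       task.join()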
|
fyffyt/scikit-learn
|
refs/heads/master
|
sklearn/neighbors/tests/test_nearest_centroid.py
|
305
|
"""
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
|
nils-wisiol/pypuf
|
refs/heads/master
|
crp_learn.py
|
1
|
"""
This module is used for learning a PUF from known challenge-response pairs.
"""
import argparse
from pypuf import tools
from pypuf.learner.regression.logistic_regression import LogisticRegression
from pypuf.simulation.arbiter_based.ltfarray import LTFArray
def uint(val):
"""
Assures that the passed integer is positive.
"""
ival = int(val)
if ival <= 0:
raise argparse.ArgumentTypeError('{} is not a positive integer'.format(val))
return ival
def main():
"""
Learns and evaluates a PUF.
"""
parser = argparse.ArgumentParser()
parser.add_argument('n', type=uint,
help='challenge bits')
parser.add_argument('k', type=uint,
help='number of arbiter chains')
parser.add_argument('num_tr', type=uint,
help='number of CRPs to use for training')
parser.add_argument('num_te', type=uint,
help='number of CRPs to use for testing')
parser.add_argument('file', type=str,
help='file to read CRPs from')
parser.add_argument('-1', '--11-notation', dest='in_11_notation',
action='store_true', default=False,
help='file is in -1,1 notation (default is 0,1)')
args = parser.parse_args()
# read pairs from file
training_set = tools.parse_file(args.file, args.n, 1, args.num_tr,
args.in_11_notation)
testing_set = tools.parse_file(args.file, args.n, args.num_tr + 1,
args.num_te, args.in_11_notation)
# create the learner
lr_learner = LogisticRegression(
t_set=training_set,
n=args.n,
k=args.k,
transformation=LTFArray.transform_atf,
combiner=LTFArray.combiner_xor,
)
# learn and test the model
model = lr_learner.learn()
accuracy = 1 - tools.approx_dist_nonrandom(model, testing_set)
# output the result
    print('Learned a {}-bit {}-XOR Arbiter PUF from {} CRPs with accuracy {}'
          .format(args.n, args.k, args.num_tr, accuracy))
if __name__ == '__main__':
main()
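# Example invocation (file name and CRP counts are hypothetical): learn a
# 64-bit, 2-XOR Arbiter PUF from 12000 training CRPs and evaluate on 1000
# further CRPs read from crps.txt in 0,1 notation:
#
#   python crp_learn.py 64 2 12000 1000 crps.txt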
|
jolyonb/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/oauth_dispatch/management/commands/create_dot_application.py
|
1
|
"""
Management command for creating a Django OAuth Toolkit Application model.
Also creates an oauth_dispatch application access if scopes are provided.
"""
from __future__ import absolute_import, unicode_literals
import logging
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from oauth2_provider.models import get_application_model
from openedx.core.djangoapps.oauth_dispatch.models import ApplicationAccess
logger = logging.getLogger(__name__)
Application = get_application_model()
class Command(BaseCommand):
"""
Creates a Django OAuth Toolkit (DOT) Application Instance.
"""
help = "Creates a Django OAuth Toolkit (DOT) Application Instance."
def add_arguments(self, parser):
grant_type_choices = [grant_type[0] for grant_type in Application.GRANT_TYPES]
parser.add_argument('name',
action='store',
help='The name of this DOT Application')
parser.add_argument('username',
action='store',
help='The name of the LMS user associated with this DOT Application')
parser.add_argument('--grant-type',
action='store',
dest='grant_type',
default=Application.GRANT_CLIENT_CREDENTIALS,
choices=grant_type_choices,
help='The type of authorization this application can grant')
parser.add_argument('--redirect-uris',
action='store',
dest='redirect_uris',
default='',
help='The redirect URI(s) for this application. Multiple URIs should be space separated.')
parser.add_argument('--public',
action='store_true',
dest='public',
default=False,
help='Make the application public? Confidential by default.')
parser.add_argument('--skip-authorization',
action='store_true',
dest='skip_authorization',
help='Skip the in-browser user authorization? False by default.')
parser.add_argument('--client-id',
action='store',
dest='client_id',
default='',
help='The client_id for this application. If omitted, one will be generated.')
parser.add_argument('--client-secret',
action='store',
dest='client_secret',
default='',
help='The client_secret for this application. If omitted, one will be generated.')
parser.add_argument('--scopes',
action='store',
dest='scopes',
default='',
help='Comma-separated list of scopes that this application will be allowed to request.')
def _create_application_access(self, application, scopes):
"""
If scopes are supplied, creates an oauth_dispatch ApplicationAccess for the provided
scopes and DOT application.
"""
if not scopes:
return
if ApplicationAccess.objects.filter(application_id=application.id).exists():
logger.info('Application access for application {} already exists.'.format(
application.name,
))
return
        # objects.create() already persists the row; no extra save() is needed
        application_access = ApplicationAccess.objects.create(
            application_id=application.id,
            scopes=scopes,
        )
logger.info('Created application access for {} with scopes: {}'.format(
application.name,
application_access.scopes,
))
def handle(self, *args, **options):
app_name = options['name']
username = options['username']
grant_type = options['grant_type']
redirect_uris = options['redirect_uris']
skip_authorization = options['skip_authorization']
client_type = Application.CLIENT_PUBLIC if options['public'] else Application.CLIENT_CONFIDENTIAL
client_id = options['client_id']
client_secret = options['client_secret']
scopes = options['scopes']
user = User.objects.get(username=username)
if Application.objects.filter(user=user, name=app_name).exists():
logger.info('Application with name {} and user {} already exists.'.format(
app_name,
username
))
application = Application.objects.get(user=user, name=app_name)
self._create_application_access(application, scopes)
return
create_kwargs = dict(
name=app_name,
user=user,
redirect_uris=redirect_uris,
client_type=client_type,
authorization_grant_type=grant_type,
skip_authorization=skip_authorization
)
if client_id:
create_kwargs['client_id'] = client_id
if client_secret:
create_kwargs['client_secret'] = client_secret
        # objects.create() already persists the row; no extra save() is needed
        application = Application.objects.create(**create_kwargs)
logger.info('Created {} application with id: {}, client_id: {}, and client_secret: {}'.format(
app_name,
application.id,
application.client_id,
application.client_secret
))
self._create_application_access(application, scopes)
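# Example invocation (application name, username, and scopes are
# hypothetical; the grant type value comes from DOT's GRANT_TYPES):
#
#   ./manage.py create_dot_application my-app some_lms_user \
#       --grant-type client-credentials \
#       --scopes "grades:read,profile"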
|
ddico/odoo
|
refs/heads/master
|
addons/test_base_automation/models/test_base_automation.py
|
6
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from dateutil import relativedelta
from odoo import fields, models, api
class LeadTest(models.Model):
_name = "base.automation.lead.test"
_description = "Automated Rule Test"
name = fields.Char(string='Subject', required=True, index=True)
user_id = fields.Many2one('res.users', string='Responsible')
state = fields.Selection([('draft', 'New'), ('cancel', 'Cancelled'), ('open', 'In Progress'),
('pending', 'Pending'), ('done', 'Closed')],
string="Status", readonly=True, default='draft')
active = fields.Boolean(default=True)
partner_id = fields.Many2one('res.partner', string='Partner')
date_action_last = fields.Datetime(string='Last Action', readonly=True)
employee = fields.Boolean(compute='_compute_employee_deadline', store=True)
line_ids = fields.One2many('base.automation.line.test', 'lead_id')
priority = fields.Boolean()
    deadline = fields.Datetime(compute='_compute_employee_deadline', store=True)
is_assigned_to_admin = fields.Boolean(string='Assigned to admin user')
@api.depends('partner_id.employee', 'priority')
def _compute_employee_deadline(self):
# this method computes two fields on purpose; don't split it
for record in self:
record.employee = record.partner_id.employee
if not record.priority:
record.deadline = False
else:
record.deadline = record.create_date + relativedelta.relativedelta(days=3)
def write(self, vals):
result = super().write(vals)
# force recomputation of field 'deadline' via 'employee': the action
# based on 'deadline' must be triggered
self.mapped('employee')
return result
class LineTest(models.Model):
_name = "base.automation.line.test"
_description = "Automated Rule Line Test"
name = fields.Char()
lead_id = fields.Many2one('base.automation.lead.test', ondelete='cascade')
user_id = fields.Many2one('res.users')
class ModelWithAccess(models.Model):
_name = "base.automation.link.test"
_description = "Automated Rule Link Test"
name = fields.Char()
linked_id = fields.Many2one('base.automation.linked.test', ondelete='cascade')
class ModelWithoutAccess(models.Model):
_name = "base.automation.linked.test"
_description = "Automated Rule Linked Test"
name = fields.Char()
another_field = fields.Char()
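# Illustrative sketch (not part of the module): with the @api.depends above,
# flipping 'priority' on a lead recomputes the stored 'deadline' field, e.g.
# from a test method:
#
#   lead = self.env['base.automation.lead.test'].create({'name': 'demo'})
#   lead.priority = True   # deadline becomes create_date + 3 days
#   lead.priority = False  # deadline is reset to False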
|
theguardian/headphones
|
refs/heads/master
|
lib/argparse.py
|
33
|
# Author: Steven J. Bethard <steven.bethard@gmail.com>.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '1.1'
__all__ = [
'ArgumentParser',
'ArgumentError',
'ArgumentTypeError',
'FileType',
'HelpFormatter',
'ArgumentDefaultsHelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'MetavarTypeHelpFormatter',
'Namespace',
'Action',
'ONE_OR_MORE',
'OPTIONAL',
'PARSER',
'REMAINDER',
'SUPPRESS',
'ZERO_OR_MORE',
]
import collections as _collections
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _, ngettext
def _callable(obj):
return hasattr(obj, '__call__') or hasattr(obj, '__bases__')
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = 'A...'
REMAINDER = '...'
_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
            item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max([len(s) for s in invocations])
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help()
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join([part
for part in part_strings
if part and part is not SUPPRESS])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
usage = ' '.join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
assert ' '.join(opt_parts) == opt_usage
assert ' '.join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
lines = []
line = []
if prefix is not None:
line_len = len(prefix) - 1
else:
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
lines.append(indent + ' '.join(line))
if prefix is not None:
lines[0] = lines[0][len(indent):]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
indent = ' ' * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
elif pos_parts:
lines = get_lines([prog] + pos_parts, indent, prefix)
else:
lines = [prog]
# if prog is long, put it on its own line
else:
indent = ' ' * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
lines = []
lines.extend(get_lines(opt_parts, indent))
lines.extend(get_lines(pos_parts, indent))
lines = [prog] + lines
# join lines into usage
usage = '\n'.join(lines)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = set()
inserts = {}
for group in groups:
try:
start = actions.index(group._group_actions[0])
except ValueError:
continue
else:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
if start in inserts:
inserts[start] += ' ['
else:
inserts[start] = '['
inserts[end] = ']'
else:
if start in inserts:
inserts[start] += ' ('
else:
inserts[start] = '('
inserts[end] = ')'
for i in range(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
default = self._get_default_metavar_for_positional(action)
part = self._format_args(action, default)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = self._get_default_metavar_for_optional(action)
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
if '%(prog)' in text:
text = text % dict(prog=self._prog)
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
        # no help; start on same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
default = self._get_default_metavar_for_positional(action)
metavar, = self._metavar_formatter(action, default)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = self._get_default_metavar_for_optional(action)
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '%s' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs == REMAINDER:
result = '...'
elif action.nargs == PARSER:
result = '%s ...' % get_metavar(1)
else:
formats = ['%s' for _ in range(action.nargs)]
result = ' '.join(formats) % get_metavar(action.nargs)
return result
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is SUPPRESS:
del params[name]
for name in list(params):
if hasattr(params[name], '__name__'):
params[name] = params[name].__name__
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
for subaction in get_subactions():
yield subaction
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(text, width, initial_indent=indent,
subsequent_indent=indent)
def _get_help_string(self, action):
return action.help
def _get_default_metavar_for_optional(self, action):
return action.dest.upper()
def _get_default_metavar_for_positional(self, action):
return action.dest
class RawDescriptionHelpFormatter(HelpFormatter):
"""Help message formatter which retains any formatting in descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
"""Help message formatter which retains formatting of all help text.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _split_lines(self, text, width):
return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
"""Help message formatter which adds default values to argument help.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not SUPPRESS:
defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: %(default)s)'
return help
class MetavarTypeHelpFormatter(HelpFormatter):
"""Help message formatter which uses the argument 'type' as the default
metavar value (instead of the argument 'dest')
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _get_default_metavar_for_optional(self, action):
return action.type.__name__
def _get_default_metavar_for_positional(self, action):
return action.type.__name__
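# Illustrative sketch: any of the formatter classes above is selected by
# passing it as the formatter_class= argument of the parser constructor:
#
#   parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)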
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
"""An error from creating or using an argument (optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(message=self.message,
argument_name=self.argument_name)
class ArgumentTypeError(Exception):
"""An error from trying to convert a command line string to a type."""
pass
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Information about how to convert command line strings to Python objects.
Action objects are used by an ArgumentParser to represent the information
needed to parse a single argument from one or more strings from the
command line. The keyword arguments to the Action constructor are also
all attributes of Action instances.
Keyword Arguments:
- option_strings -- A list of command-line option strings which
should be associated with this action.
- dest -- The name of the attribute to hold the created object(s)
- nargs -- The number of command-line arguments that should be
consumed. By default, one argument will be consumed and a single
value will be produced. Other values include:
- N (an integer) consumes N arguments (and produces a list)
- '?' consumes zero or one arguments
- '*' consumes zero or more arguments (and produces a list)
- '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
- const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
- default -- The value to be produced if the option is not specified.
- type -- The type which the command-line arguments should be converted
to, should be one of 'string', 'int', 'float', 'complex' or a
callable object that accepts a single string argument. If None,
'string' is assumed.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
- required -- True if the action must always be specified at the
command line. This is only meaningful for optional command-line
arguments.
- help -- The help string describing the argument.
- metavar -- The name to be used for the option's argument with the
help string. If None, the 'dest' value will be used as the name.
"""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar',
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
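# Illustrative sketch (not part of this module): custom actions subclass
# Action and implement __call__, e.g. an action that upper-cases its value:
#
#   class UpperAction(Action):
#       def __call__(self, parser, namespace, values, option_string=None):
#           setattr(namespace, self.dest, values.upper())
#
#   parser.add_argument('--name', action=UpperAction)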
class _StoreAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for store actions must be > 0; if you '
'have nothing to store, actions such as store '
'true or store const may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=False,
required=False,
help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
class _StoreFalseAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=True,
required=False,
help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help)
class _AppendAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(values)
setattr(namespace, self.dest, items)
class _AppendConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(self.const)
setattr(namespace, self.dest, items)
class _CountAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
required=False,
help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
setattr(namespace, self.dest, new_count)
class _HelpAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(self,
option_strings,
version=None,
dest=SUPPRESS,
default=SUPPRESS,
help="show program's version number and exit"):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
self.version = version
def __call__(self, parser, namespace, values, option_string=None):
version = self.version
if version is None:
version = parser.version
formatter = parser._get_formatter()
formatter.add_text(version)
parser.exit(message=formatter.format_help())
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, aliases, help):
metavar = dest = name
if aliases:
metavar += ' (%s)' % ', '.join(aliases)
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=dest, help=help,
metavar=metavar)
def __init__(self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
help=None,
metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = _collections.OrderedDict()
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
metavar=metavar)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
aliases = kwargs.pop('aliases', ())
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, aliases, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
# make parser available under aliases also
for alias in aliases:
self._name_parser_map[alias] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
args = {'parser_name': parser_name,
'choices': ', '.join(self._name_parser_map)}
msg = _('unknown parser %(parser_name)r (choices: %(choices)s)') % args
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
# store any unrecognized options on the object, so that the top
# level parser can decide what to do with them
namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
if arg_strings:
vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
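# Illustrative sub-command wiring (sketch of how this action is reached via
# add_subparsers()):
#
#   subparsers = parser.add_subparsers(dest='command')
#   run_parser = subparsers.add_parser('run', help='run the task')
#   run_parser.add_argument('--fast', action='store_true')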
# ==============
# Type classes
# ==============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
"""
def __init__(self, mode='r', bufsize=-1):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r') % self._mode
raise ValueError(msg)
# all other arguments are used as file names
try:
return open(string, self._mode, self._bufsize)
except IOError as e:
message = _("can't open '%s': %s")
raise ArgumentTypeError(message % (string, e))
def __repr__(self):
args = self._mode, self._bufsize
args_str = ', '.join(repr(arg) for arg in args if arg != -1)
return '%s(%s)' % (type(self).__name__, args_str)
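# Illustrative sketch: FileType instances are passed as the type= argument,
# and the special name '-' maps to stdin or stdout depending on the mode:
#
#   parser.add_argument('infile', type=FileType('r'))
#   parser.add_argument('outfile', type=FileType('w'))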
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
"""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
def __contains__(self, key):
return key in self.__dict__
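# e.g. (illustrative): Namespace(x=1) yields an object with ns.x == 1;
# equality compares attribute dicts, and "'x' in ns" tests membership.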
class _ActionsContainer(object):
def __init__(self,
description,
prefix_chars,
argument_default,
conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default accessor methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
def get_default(self, dest):
for action in self._actions:
if action.dest == dest and action.default is not None:
return action.default
return self._defaults.get(dest, None)
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
if args and 'dest' in kwargs:
raise ValueError('dest supplied twice for positional argument')
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
if not _callable(action_class):
raise ValueError('unknown action "%s"' % (action_class,))
action = action_class(**kwargs)
# raise an error if the action type is not callable
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
raise ValueError('%r is not callable' % (type_func,))
# raise an error if the metavar does not match the type
if hasattr(self, "_get_formatter"):
try:
self._get_formatter()._format_args(action, None)
except TypeError:
raise ValueError("length of metavar tuple does not match nargs")
return self._add_action(action)
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add container's mutually exclusive groups
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
mutex_group = self.add_mutually_exclusive_group(
required=group.required)
# map the actions to their new mutex group
for action in group._group_actions:
group_map[action] = mutex_group
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on strings that don't start with an appropriate prefix
            if option_string[0] not in self.prefix_chars:
args = {'option': option_string,
'prefix_chars': self.prefix_chars}
msg = _('invalid option string %(option)r: '
'must start with a character %(prefix_chars)r')
raise ValueError(msg % args)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if option_string[0] in self.prefix_chars:
if len(option_string) > 1:
if option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
if not dest:
msg = _('dest= is required for options like %r')
raise ValueError(msg % option_string)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_resolution value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = ngettext('conflicting option string: %s',
'conflicting option strings: %s',
len(conflicting_actions))
conflict_string = ', '.join([option_string
for option_string, action
in conflicting_actions])
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = \
container._has_negative_number_optionals
self._mutually_exclusive_groups = container._mutually_exclusive_groups
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python objects.
Keyword Arguments:
- prog -- The name of the program (default: sys.argv[0])
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
- add_help -- Add a -h/--help option
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True):
if version is not None:
import warnings
warnings.warn(
"""The "version" argument to ArgumentParser is deprecated. """
"""Please use """
""""add_argument(..., action='version', version="N", ...)" """
"""instead""", DeprecationWarning)
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.version = version
self.formatter_class = formatter_class
self.fromfile_prefix_chars = fromfile_prefix_chars
self.add_help = add_help
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('optional arguments'))
self._subparsers = None
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help and version arguments if necessary
# (using explicit default to override global argument_default)
default_prefix = '-' if '-' in prefix_chars else prefix_chars[0]
if self.add_help:
self.add_argument(
default_prefix+'h', default_prefix*2+'help',
action='help', default=SUPPRESS,
help=_('show this help message and exit'))
if self.version:
self.add_argument(
default_prefix+'v', default_prefix*2+'version',
action='version', default=SUPPRESS,
version=self.version,
help=_("show program's version number and exit"))
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
title = _(kwargs.pop('title', 'subcommands'))
description = _(kwargs.pop('description', None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
def parse_known_args(self, args=None, namespace=None):
# args default to the system args
if args is None:
args = _sys.argv[1:]
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
default = action.default
if isinstance(action.default, str):
default = self._get_value(action, default)
setattr(namespace, action.dest, default)
# add any parser defaults that aren't present
for dest in self._defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, self._defaults[dest])
# parse the arguments and exit if there are any errors
try:
namespace, args = self._parse_known_args(args, namespace)
if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
return namespace, args
except ArgumentError:
err = _sys.exc_info()[1]
self.error(str(err))
def _parse_known_args(self, arg_strings, namespace):
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
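# e.g. ['-x', '1', '--', 'rest'] produces the pattern 'OA-A'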
# converts arg strings to the appropriate values and then takes the action
seen_actions = set()
seen_non_default_actions = set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([
index
for index in option_string_indices
if index >= start_index])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# make sure all required actions were present
required_actions = [_get_action_name(action) for action in self._actions
if action.required and action not in seen_actions]
if required_actions:
self.error(_('the following arguments are required: %s') %
', '.join(required_actions))
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = []
for arg_line in args_file.read().splitlines():
for arg in self.convert_arg_line_to_args(arg_line):
arg_strings.append(arg)
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
def convert_arg_line_to_args(self, arg_line):
return [arg_line]
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None: _('expected one argument'),
OPTIONAL: _('expected at most one argument'),
ONE_OR_MORE: _('expected at least one argument'),
}
default = ngettext('expected %s argument',
'expected %s arguments',
action.nargs) % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([self._get_nargs_pattern(action)
for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
# if it's an empty string, it was meant to be a positional
if not arg_string:
return None
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# if it's just a single character, it was meant to be positional
if len(arg_string) == 1:
return None
# if the option string before the "=" is present, return the action
if '=' in arg_string:
option_string, explicit_arg = arg_string.split('=', 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
return action, option_string, explicit_arg
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join([option_string
for action, option_string, explicit_arg in option_tuples])
args = {'option': arg_string, 'matches': options}
msg = _('ambiguous option: %(option)s could match %(matches)s')
self.error(msg % args)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# if it contains a space, it was meant to be a positional
if ' ' in arg_string:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow any number of options or arguments
elif nargs == REMAINDER:
nargs_pattern = '([-AO]*)'
# allow one argument followed by any number of options or arguments
elif nargs == PARSER:
nargs_pattern = '(-*A[-AO]*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
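# e.g. nargs=2 for a positional yields '(-*A-*A-*)'; matched against an
# arg pattern like 'AAO' it captures exactly two 'A's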
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
# for everything but PARSER args, strip out '--'
if action.nargs not in [PARSER, REMAINDER]:
arg_strings = [s for s in arg_strings if s != '--']
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, str):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# REMAINDER arguments convert all values, checking none
elif action.nargs == REMAINDER:
value = [self._get_value(action, v) for v in arg_strings]
# PARSER arguments convert all values, but check only the first
elif action.nargs == PARSER:
value = [self._get_value(action, v) for v in arg_strings]
self._check_value(action, value[0])
# all other types of nargs produce a list
else:
value = [self._get_value(action, v) for v in arg_strings]
for v in value:
self._check_value(action, v)
# return the converted value
return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# ArgumentTypeErrors indicate errors
except ArgumentTypeError:
name = getattr(action.type, '__name__', repr(action.type))
msg = str(_sys.exc_info()[1])
raise ArgumentError(action, msg)
# TypeErrors or ValueErrors also indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
args = {'type': name, 'value': arg_string}
msg = _('invalid %(type)s value: %(value)r')
raise ArgumentError(action, msg % args)
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
args = {'value': value,
'choices': ', '.join(map(repr, action.choices))}
msg = _('invalid choice: %(value)r (choose from %(choices)s)')
raise ArgumentError(action, msg % args)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
import warnings
warnings.warn(
'The format_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
if file is None:
file = _sys.stdout
self._print_message(self.format_help(), file)
def print_version(self, file=None):
import warnings
warnings.warn(
'The print_version method is deprecated -- the "version" '
'argument to ArgumentParser is no longer supported.',
DeprecationWarning)
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
self._print_message(message, _sys.stderr)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
args = {'prog': self.prog, 'message': message}
self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
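# ---------------------------------------------------------------------
# Illustrative usage sketch (our addition, not part of the module):
# exercises the parser, mutually-exclusive-group and subparser machinery
# defined above. The 'deploy' command and its arguments are hypothetical.
if __name__ == '__main__':
    _parser = ArgumentParser(prog='demo', description='usage sketch')
    _group = _parser.add_mutually_exclusive_group()
    _group.add_argument('--verbose', action='store_true')
    _group.add_argument('--quiet', action='store_true')
    _sub = _parser.add_subparsers(dest='command')
    _deploy = _sub.add_parser('deploy')
    _deploy.add_argument('target')
    # parse_known_args returns (namespace, leftover argv); unrecognized
    # strings end up in the second element instead of raising an error
    _ns, _extras = _parser.parse_known_args(
        ['--verbose', 'deploy', 'production', '--unknown'])
    print('%s %s %r %r' % (_ns.command, _ns.target, _ns.verbose, _extras))
    # -> deploy production True ['--unknown']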
|
gwq5210/litlib
|
refs/heads/master
|
thirdparty/sources/protobuf/python/google/protobuf/pyext/__init__.py
|
401
|
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
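# Both branches declare this directory as a namespace package, so several
# installed distributions can contribute modules under the same package;
# pkgutil.extend_path is the stdlib fallback used when setuptools'
# pkg_resources is unavailable.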
|
pressel/mpi4py
|
refs/heads/master
|
demo/mpi-ref-v1/ex-3.09.py
|
12
|
from mpi4py import MPI
try:
import numpy
except ImportError:
raise SystemExit
# transpose a matrix a into b
a = numpy.empty((100, 100), dtype=float, order='fortran')
b = numpy.empty((100, 100), dtype=float, order='fortran')
a.flat = numpy.arange(a.size, dtype=float)
lb, sizeofdouble = MPI.DOUBLE.Get_extent()
# create datatype for one row
# (vector with 100 double entries and stride 100)
row = MPI.DOUBLE.Create_vector(100, 1, 100)
# create datatype for matrix in row-major order
# (one hundred copies of the row datatype, strided one word
# apart; the successive row datatypes are interleaved)
xpose = row.Create_hvector(100, 1, sizeofdouble)
xpose.Commit()
# send matrix in row-major order and receive in column major order
abuf = (a, xpose)
bbuf = (b, MPI.DOUBLE)
myrank = MPI.COMM_WORLD.Get_rank()
status = MPI.Status()
MPI.COMM_WORLD.Sendrecv(abuf, myrank, 0, bbuf, myrank, 0, status)
assert numpy.allclose(a, b.transpose())
assert status.Get_count(xpose) == 1
assert status.Get_count(MPI.DOUBLE) == b.size
row.Free()
xpose.Free()
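# Note (our addition): each rank sends to and receives from itself in the
# Sendrecv call above, so the demo works with any number of processes,
# e.g.: mpiexec -n 2 python ex-3.09.py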
|
pjdufour/geonode
|
refs/heads/master
|
geonode/messaging/management/commands/purgemessaging.py
|
5
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.core.management.base import BaseCommand
from geonode.messaging.queues import queue_email_events, queue_geoserver_events, \
queue_notifications_events, queue_all_events, \
queue_geoserver_catalog, queue_geoserver_data, \
queue_geoserver, queue_layer_viewers
class Command(BaseCommand):
help = 'Purge the MQ queues used for non-blocking tasks'
def handle(self, **options):
queue_geoserver_events.purge()
queue_notifications_events.purge()
queue_email_events.purge()
queue_all_events.purge()
queue_geoserver_catalog.purge()
queue_geoserver_data.purge()
queue_geoserver.purge()
queue_layer_viewers.purge()
|
tecwebjoao/TecWeb-TF-2T-B-SI
|
refs/heads/master
|
venv/Lib/encodings/euc_jisx0213.py
|
816
|
#
# euc_jisx0213.py: Python Unicode Codec for EUC_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('euc_jisx0213')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_jisx0213',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
gtko/CouchPotatoServer
|
refs/heads/develop
|
libs/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py
|
2057
|
try:
# Python 3.2+
from ssl import CertificateError, match_hostname
except ImportError:
try:
# Backport of the function from a pypi module
from backports.ssl_match_hostname import CertificateError, match_hostname
except ImportError:
# Our vendored copy
from ._implementation import CertificateError, match_hostname
# Not needed, but documenting what we provide.
__all__ = ('CertificateError', 'match_hostname')
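# Illustrative sketch (our addition): whichever implementation is imported,
# the API is the same -- match_hostname() checks a peer certificate dict
# against an expected hostname and raises CertificateError on mismatch:
#
# cert = {'subject': ((('commonName', 'example.com'),),)}
# match_hostname(cert, 'example.com') # returns None, i.e. the name matches
# match_hostname(cert, 'evil.com') # raises CertificateError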
|
smi96/django-blog_website
|
refs/heads/master
|
lib/python2.7/site-packages/django/db/migrations/__init__.py
|
826
|
from .migration import Migration, swappable_dependency # NOQA
from .operations import * # NOQA
|
renyi533/tensorflow
|
refs/heads/master
|
tensorflow/python/autograph/converters/logical_expressions_test.py
|
20
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for logical_expressions module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import logical_expressions
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class LogicalExpressionTest(converter_testing.TestCase):
@test_util.run_deprecated_v1
def test_equals(self):
def test_fn(a, b):
return a == b
with self.converted(test_fn, logical_expressions, {}) as result:
with self.cached_session() as sess:
self.assertTrue(sess.run(result.test_fn(constant_op.constant(1), 1)))
self.assertFalse(sess.run(result.test_fn(constant_op.constant(1), 2)))
@test_util.run_deprecated_v1
def test_bool_ops(self):
def test_fn(a, b, c):
return (a or b) and (a or b or c) and not c
with self.converted(test_fn, logical_expressions, {}) as result:
with self.cached_session() as sess:
self.assertTrue(
sess.run(result.test_fn(constant_op.constant(True), False, False)))
self.assertFalse(
sess.run(result.test_fn(constant_op.constant(True), False, True)))
@test_util.run_deprecated_v1
def test_comparison(self):
def test_fn(a, b, c, d):
return a < b == c > d
with self.converted(test_fn, logical_expressions, {}) as result:
with self.cached_session() as sess:
# Note: having just the first constant a tensor tests that the
# operations execute in the correct order. If anything other than
# a < b executed first, the result would be a Python scalar and not a
# Tensor. This is valid as long as the dispatch is automatic based on
# type.
self.assertTrue(
sess.run(result.test_fn(constant_op.constant(1), 2, 2, 1)))
self.assertFalse(
sess.run(result.test_fn(constant_op.constant(1), 2, 2, 3)))
def test_default_ops(self):
def test_fn(a, b):
return a in b
with self.converted(test_fn, logical_expressions, {}) as result:
self.assertTrue(result.test_fn('a', ('a',)))
def test_unary_ops(self):
def test_fn(a):
return ~a, -a, +a
with self.converted(test_fn, logical_expressions, {}) as result:
self.assertEqual(result.test_fn(1), (-2, -1, 1))
if __name__ == '__main__':
test.main()
|
Garrett-R/scikit-learn
|
refs/heads/master
|
examples/tree/plot_tree_regression.py
|
40
|
"""
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result,
it learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly
fine details of the training data and fits the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_1.fit(X, y)
clf_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
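# A quick way to see the overfitting described in the docstring (our
# addition): the deeper tree fits its own training data much more closely.
print("R^2 on training data: depth 2 = %.3f, depth 5 = %.3f"
      % (clf_1.score(X, y), clf_2.score(X, y)))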
|
gofflab/neuron-seq-site
|
refs/heads/master
|
pyramidal/urls.py
|
1
|
from django.conf.urls import patterns, url
from pyramidal import views
urlpatterns = patterns('',
#Index
url(r'^$',views.index,name='index'),
#Geneset Views
url(r'^geneset/(?P<gene_list>[a-zA-Z0-9_\-\.\+]+)/?$',views.geneset,name='gene_set'),
#Isoform Views
url(r'^genes?/(?P<gene_id>[\w.-]+)/isoforms?/?$',views.geneIsoforms,name='isoform_index'),
url(r'^genes?/(?P<gene_id>[\w.-]+)/isoforms?/(?P<isoform_id>[\w.]+)/?$',views.isoformDetail,name='isoform_show'),
#Isoform Data
url(r'^genes?/(?P<gene_id>[\w.-]+)/isoforms?/(?P<isoform_id>[\w.]+)/hivedata/?$',views.isoformHiveData,name='isoform_hive_data'),
url(r'^genes?/(?P<gene_id>[\w.-]+)/isoforms?/(?P<isoform_id>[\w.]+)/expression/?$',views.isoformExpression,name='isoform_expression'),
#Gene detail view
url(r'^genes?/(?P<gene_id>[\w.-]+)/?$',views.geneShow,name='gene_show'),
#Gene Data
url(r'^genes?/(?P<gene_id>[\w.-]+)/hivedata/?$',views.geneHiveData,name='gene_hive_data'),
url(r'^genes?/(?P<gene_id>[\w.-]+)/expression/?$',views.geneExpression,name='gene_expression'),
#All Genes
url(r'^genes/?$',views.geneIndex,name='gene_index'),
#Cluster Views
url(r'^clusters/?$',views.clusterIndex,name='cluster_index'),
url(r'^clusters/(?P<cluster_id>\d+)/?$',views.clusterShow,name='cluster_show'),
#Search
url(r'^search/?$', views.search, name = 'search'),
#Dev
url(r'^dev/$',views.dev),
#Markers
url(r'^markers/?$',views.markers,name = 'markers'),
#Supplement
url(r'^supp/?$',views.supplement,name = 'supplement'),
#TFBS
url(r'^tfbs/?$',views.tfbs,name = 'tfbs'),
#help
url(r'^help/?$',views.help,name = 'help'),
#Devel
url(r'^devel/?$',views.devel,name='devel'),
#About
url(r'^about/?$',views.about,name='about'),
)
|
vaniakov/twisted-intro
|
refs/heads/master
|
twisted-deferred/defer-10.py
|
11
|
from twisted.internet.defer import Deferred
print """
This example illustrates how callbacks in a deferred
chain can return deferreds themselves.
"""
# three simple callbacks
def callback_1(res):
print 'callback_1 got', res
return 1
def callback_2(res):
print 'callback_2 got', res
return 2
def callback_3(res):
print 'callback_3 got', res
return 3
# We add them all to a deferred and fire it:
d = Deferred()
d.addCallback(callback_1)
d.addCallback(callback_2)
d.addCallback(callback_3)
print """
Here we are firing a deferred with three callbacks that just print
their argument and return simple values:
"""
d.callback(0)
# And you get output like this:
# callback_1 got 0
# callback_2 got 1
# callback_3 got 2
# Now we make a callback that returns a deferred:
deferred_2 = None # NOTE: because we aren't using a reactor, we have
# to fire this deferred from the 'outside'.
# We store it in a global variable for this
# purpose. In a normal Twisted program you
# would never store a deferred in a global or
# fire it from the outside. By 'outside' we
# mean the deferred is not being fired by an
# action set in motion by the callback that
# created and returned the deferred, as is
# normally the case.
def callback_2_async(res):
print 'callback_2 got', res
global deferred_2 # never do this in a real program
deferred_2 = Deferred()
return deferred_2
# We do the same thing, but use the async callback:
d = Deferred()
d.addCallback(callback_1)
d.addCallback(callback_2_async)
d.addCallback(callback_3)
print """
Here we are firing a deferred as above but the middle callback is
returning a deferred:
"""
d.callback(0)
# And you get output like this:
# callback_1 got 0
# callback_2 got 1
print """
Notice the output from the third callback is missing. That's because
the second callback returned a deferred and now the 'outer' deferred
is paused. It's not waiting in a thread or anything like that, it just
stopped invoking the callbacks in the chain. Instead, it registered
some callbacks on the 'inner' deferred which will start the outer
deferred back up when the inner deferred is fired.
We can see this in action by firing the inner deferred:
"""
deferred_2.callback(2)
# And you get output like this:
# callback_3 got 2
print """
Note the argument to the inner deferred's callback() method became
the result passed to the next callback in the outer deferred.
"""
|
sanger-pathogens/circlator
|
refs/heads/master
|
circlator/tasks/fixstart.py
|
2
|
import argparse
import circlator
def run():
parser = argparse.ArgumentParser(
description = 'Change start point of each sequence in assembly',
usage = 'circlator fixstart [options] <assembly.fasta> <outprefix>')
parser.add_argument('--genes_fa', help='FASTA file of genes to search for to use as start point. If this option is not used, a built-in set of dnaA genes is used', metavar='FILENAME')
parser.add_argument('--ignore', help='Absolute path to file of IDs of contigs to not change', metavar='FILENAME')
parser.add_argument('--mincluster', type=int, help='The -c|mincluster option of promer. If this option is used, it overrides promer\'s default value', metavar='INT')
parser.add_argument('--min_id', type=float, help='Minimum percent identity of promer match between contigs and gene(s) to use as start point [%(default)s]', default=70, metavar='FLOAT')
parser.add_argument('--verbose', action='store_true', help='Be verbose')
parser.add_argument('assembly_fa', help='Name of input FASTA file', metavar='assembly.fasta')
parser.add_argument('outprefix', help='Prefix of output files')
options = parser.parse_args()
fixer = circlator.start_fixer.StartFixer(
options.assembly_fa,
options.outprefix,
min_percent_identity=options.min_id,
promer_mincluster=options.mincluster,
genes_fa=options.genes_fa,
ignore=options.ignore,
verbose=options.verbose,
)
fixer.run()
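# Example invocation matching the usage string above (our addition; the
# file names are hypothetical):
#
# circlator fixstart --min_id 80 assembly.fasta fixed_assembly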
|
OpenAgInitiative/gro-api
|
refs/heads/master
|
gro_api/plants/migrations/0002_auto_20150819_0025.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('plants', '0001_squashed_0012_auto_20150812_0250'),
]
operations = [
migrations.AlterModelOptions(
name='harvestevent',
options={},
),
migrations.AlterModelOptions(
name='plant',
options={},
),
migrations.AlterModelOptions(
name='plantcomment',
options={},
),
migrations.AlterModelOptions(
name='plantmodel',
options={},
),
migrations.AlterModelOptions(
name='planttype',
options={},
),
migrations.AlterModelOptions(
name='sowevent',
options={},
),
migrations.AlterModelOptions(
name='transferevent',
options={},
),
]
|
rahul-c1/scrapy
|
refs/heads/master
|
scrapy/contrib/spidermiddleware/referer.py
|
177
|
"""
RefererMiddleware: populates Request referer field, based on the Response which
originated it.
"""
from scrapy.http import Request
from scrapy.exceptions import NotConfigured
class RefererMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
if not crawler.settings.getbool('REFERER_ENABLED'):
raise NotConfigured
return cls()
def process_spider_output(self, response, result, spider):
def _set_referer(r):
if isinstance(r, Request):
r.headers.setdefault('Referer', response.url)
return r
return (_set_referer(r) for r in result or ())
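# The middleware is gated on a single boolean setting, read via
# crawler.settings above (a minimal sketch, not part of this module):
#
# # settings.py
# REFERER_ENABLED = True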
|
wpf710/app-proxy
|
refs/heads/master
|
jumpgate/config.py
|
4
|
from jumpgate.common import config
config.configure()
CONF = config.CONF
|
vinthony/jiandan-raccoon
|
refs/heads/master
|
scrapy_jiandan/items.py
|
1
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ScrapyJiandanItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
title = scrapy.Field()
link = scrapy.Field()
desp = scrapy.Field()
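# Illustrative sketch (our addition): Item instances behave like dicts.
#
# item = ScrapyJiandanItem(title=u'a title', link=u'http://example.com')
# item['desp'] = u'a description'
# item['title'] # -> u'a title'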
|
Kapim/ar-table-itable
|
refs/heads/master
|
art_projected_gui/src/art_projected_gui/items/program_item.py
|
6
|
#!/usr/bin/env python
from PyQt4 import QtGui, QtCore
from item import Item
from art_msgs.msg import LearningRequestGoal
from geometry_msgs.msg import Point32, Pose
from button_item import ButtonItem
from art_projected_gui.helpers import conversions
from list_item import ListItem
from art_projected_gui.helpers.items import group_enable, group_visible
from geometry_msgs.msg import PoseStamped
from desc_item import DescItem
import rospkg
translate = QtCore.QCoreApplication.translate
rospack = rospkg.RosPack()
icons_path = rospack.get_path('art_projected_gui') + '/icons/'
class ProgramItem(Item):
def __init__(
self,
scene,
x,
y,
program_helper,
instruction,
ih,
done_cb=None,
item_switched_cb=None,
learning_request_cb=None,
pause_cb=None,
cancel_cb=None,
stopped=False,
visualize=False,
v_visualize_cb=None,
v_back_cb=None,
vis_pause_cb=None,
vis_stop_cb=None,
vis_replay_cb=None,
vis_back_to_blocks_cb=None):
self.w = 100
self.h = 100
self.instruction = instruction
self.ih = ih
self.done_cb = done_cb
self.item_switched_cb = item_switched_cb
self.learning_request_cb = learning_request_cb
self.pause_cb = pause_cb
self.cancel_cb = cancel_cb
self.readonly = False
self.stopped = stopped
# variables for HoloLens visualization
self.visualize = visualize
self.visualization_paused = False
# callbacks for visualization buttons
self.v_visualize_cb = v_visualize_cb
self.v_back_cb = v_back_cb
self.vis_pause_cb = vis_pause_cb
self.vis_stop_cb = vis_stop_cb
self.vis_replay_cb = vis_replay_cb
self.vis_back_to_blocks_cb = vis_back_to_blocks_cb
super(ProgramItem, self).__init__(scene, x, y)
self.title = DescItem(self.scene(), 0, 0, self)
# TODO it should take coords given to __init__
self.title.setPos(QtCore.QPointF(self.m2pix(0.01), self.m2pix(0.01)))
self.w = self.m2pix(0.2)
self.h = self.m2pix(0.25)
self.sp = self.m2pix(0.005)
self.ph = program_helper
self.block_id = None
self.item_id = None
self.block_learned = False
self.program_learned = False
# block "view"
self.block_finished_btn = ButtonItem(self.scene(), 0, 0, "BTN", self, self.block_finished_btn_cb,
image_path=icons_path + "back.svg")
self.block_edit_btn = ButtonItem(self.scene(), 0, 0, "BTN", self, self.block_edit_btn_cb,
image_path=icons_path + "edit.svg")
self.block_on_success_btn = ButtonItem(self.scene(), 0, 0, "BTN", self, self.block_on_success_btn_cb,
image_path=icons_path + "success.svg")
self.block_on_failure_btn = ButtonItem(self.scene(), 0, 0, "BTN", self, self.block_on_failure_btn_cb,
image_path=icons_path + "failure.svg")
# block "view" when in visualization
self.program_visualize_btn = ButtonItem(self.scene(), 0, 0, translate(
"ProgramItem", "Visualize Program"), self, self.program_visualize_btn_cb)
self.block_visualize_btn = ButtonItem(self.scene(), 0, 0, translate(
"ProgramItem", "Visualize Block"), self, self.block_visualize_btn_cb)
self.block_back_btn = ButtonItem(self.scene(), 0, 0, translate(
"ProgramItem", "Back"), self, self.block_back_btn_cb)
bdata = []
self.blocks_map = {} # map from indexes (key) to block_id (value)
self.blocks_map_rev = {}
for i in range(len(self.ph.get_program().blocks)):
bmsg = self.ph.get_program().blocks[i]
bdata.append(translate("ProgramItem", "Block %1\n%2\nSuccess: %3, failure: %4").arg(bmsg.id).arg(bmsg.name).
arg(bmsg.on_success).arg(bmsg.on_failure))
idx = len(bdata) - 1
self.blocks_map[idx] = bmsg.id
self.blocks_map_rev[bmsg.id] = idx
self.blocks_list = ListItem(self.scene(), 0, 0, 0.2 - 2 * 0.005, bdata, self.block_selected_cb, parent=self)
for k, v in self.blocks_map.iteritems():
self._update_block(v)
y = self.title.mapToParent(self.title.boundingRect().bottomLeft()).y()
self.blocks_list.setPos(self.sp, y)
y += self.blocks_list._height() + self.sp
if visualize:
self._place_childs_horizontally(y, self.sp, [
self.program_visualize_btn, self.block_visualize_btn, self.block_back_btn])
y += self.block_visualize_btn._height() + self.sp
self.block_back_btn.set_enabled(True)
self.block_visualize_btn.set_enabled(False)
self.program_visualize_btn.set_enabled(True)
# hide edit block buttons
group_visible((self.block_finished_btn, self.block_edit_btn, self.block_on_failure_btn,
self.block_on_success_btn), False)
else:
self._place_childs_horizontally(y, self.sp, [
self.block_edit_btn, self.block_on_success_btn, self.block_on_failure_btn, self.block_finished_btn])
y += self.block_finished_btn._height() + self.sp
group_enable((self.block_edit_btn, self.block_on_failure_btn, self.block_on_success_btn), False)
# hide visualization block buttons
group_visible((self.block_visualize_btn, self.program_visualize_btn, self.block_back_btn), False)
self.h = y
# items "view"
self.item_edit_btn = ButtonItem(self.scene(), 0, 0, translate(
"ProgramItem", "Ed"), self, self.item_edit_btn_cb, image_path=icons_path + "edit.svg")
self.item_run_btn = ButtonItem(self.scene(), 0, 0, "BTN", self, self.item_run_btn_cb,
image_path=icons_path + "run.svg")
self.item_on_success_btn = ButtonItem(self.scene(), 0, 0, "BTN", self, self.item_on_success_btn_cb,
image_path=icons_path + "success.svg")
self.item_on_failure_btn = ButtonItem(self.scene(), 0, 0, "BTN", self, self.item_on_failure_btn_cb,
image_path=icons_path + "failure.svg")
self.item_finished_btn = ButtonItem(self.scene(), 0, 0, "BTN", self, self.item_finished_btn_cb,
image_path=icons_path + "back.svg")
self.items_list = None
group_visible((self.item_finished_btn, self.item_run_btn,
self.item_on_success_btn, self.item_on_failure_btn, self.item_edit_btn), False)
# readonly (program running) "view"
self.pr_pause_btn = ButtonItem(self.scene(), 0, 0, "BTN", self, self.pr_pause_btn_cb,
image_path=icons_path + "pause.svg")
if self.stopped:
self.pr_pause_btn.set_image(icons_path + "run.svg")
self.pr_cancel_btn = ButtonItem(self.scene(), 0, 0, "BTN", self, self.pr_cancel_btn_cb,
image_path=icons_path + "stop.svg")
group_visible((self.pr_pause_btn, self.pr_cancel_btn), False)
# buttons for HoloLens visualization
self.vis_pause_btn = ButtonItem(self.scene(), 0, 0, translate(
"ProgramItem", "Resume"), self, self.vis_pause_btn_cb)
# quick hack .. init button with 'Resume' caption and switch back to
# 'Pause' to keep the button large enough for text switching
if not self.visualization_paused:
self.vis_pause_btn.set_caption(translate("ProgramItem", "Pause"))
self.vis_stop_btn = ButtonItem(self.scene(), 0, 0, translate(
"ProgramItem", "Stop"), self, self.vis_stop_btn_cb)
self.vis_replay_btn = ButtonItem(self.scene(), 0, 0, translate(
"ProgramItem", "Replay"), self, self.vis_replay_btn_cb)
self.vis_back_btn = ButtonItem(self.scene(), 0, 0, translate(
"ProgramItem", "Back to blocks"), self, self.vis_back_btn_cb)
group_visible((self.vis_pause_btn, self.vis_stop_btn, self.vis_replay_btn, self.vis_back_btn), False)
self.fixed = False
self.editing_item = False
self.edit_request = False
self.run_request = False
self.setFlag(QtGui.QGraphicsItem.ItemIsMovable, True)
self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)
self.setZValue(100)
self._update_learned()
self.update()
if self.item_switched_cb:
self.item_switched_cb(None, None, blocks=True)
def _update_title(self):
color = QtCore.Qt.white
if self.items_list is not None:
if not self.block_learned and not self.readonly:
color = QtCore.Qt.red
self.title.set_content(translate(
"ProgramItem",
"Program %1, block %2").arg(
self.ph.get_program_id()).arg(
self.block_id), scale=1.2, color=color)
else:
if not self.program_learned and not self.readonly:
color = QtCore.Qt.red
self.title.set_content(translate("ProgramItem", "Program %1").arg(self.ph.get_program_id()),
scale=1.2, color=color)
def pr_pause_btn_cb(self, btn):
if self.pause_cb is not None:
ret = self.pause_cb()
if ret:
# set disabled and wait for state update
self.set_enabled(False)
def pr_cancel_btn_cb(self, btn):
if self.cancel_cb is not None:
ret = self.cancel_cb()
if ret:
# set disabled and wait for state update
self.set_enabled(False)
def vis_pause_btn_cb(self, btn):
# callback which notifies HoloLens that pause/resume button was hit
if self.vis_pause_cb is not None:
self.vis_pause_cb(self.visualization_paused)
# if visualization is paused .. then resume it - e.g. hit RESUME button
if self.visualization_paused:
self.visualization_paused = False
self.vis_pause_btn.set_caption(translate("ProgramItem", "Pause"))
# or visualization is running .. then pause it - e.g. hit PAUSE button
else:
self.visualization_paused = True
self.vis_pause_btn.set_caption(translate("ProgramItem", "Resume"))
def vis_stop_btn_cb(self, btn):
# callback which notifies HoloLens that stop button was hit
if self.vis_stop_cb is not None:
self.vis_stop_cb()
# make sure that visualization is not paused and handle its button caption properly
if self.visualization_paused:
self.visualization_paused = False
self.vis_pause_btn.set_caption(translate("ProgramItem", "Pause"))
group_enable((self.vis_stop_btn, self.vis_pause_btn), False)
group_enable((self.vis_replay_btn, self.vis_back_btn), True)
def vis_replay_btn_cb(self, btn):
# callback which notifies HoloLens that replay button was hit
if self.vis_replay_cb is not None:
self.vis_replay_cb()
group_enable((self.vis_stop_btn, self.vis_pause_btn), True)
group_enable((self.vis_replay_btn, self.vis_back_btn), False)
def vis_back_btn_cb(self, btn):
# callback which notifies HoloLens that visualization ended
if self.vis_back_to_blocks_cb is not None:
self.vis_back_to_blocks_cb()
# go back to blocks view from visualization
group_visible((self.block_visualize_btn, self.program_visualize_btn,
self.block_back_btn, self.blocks_list), True)
self.block_back_btn.set_enabled(True)
self.program_visualize_btn.set_enabled(True)
self.show_visualization_buttons(False)
self.block_selected_cb() # TODO extract method to set buttons to proper state
self.blocks_list.setEnabled(True)
self.scene().removeItem(self.items_list)
self.items_list = None
self.item_id = None
if self.item_switched_cb is not None:
self.item_switched_cb(*self.cid)
self.update()
def _update_learned(self):
if self.block_id is not None:
self.block_learned = self.ph.block_learned(self.block_id)
self.program_learned = self.ph.program_learned()
self._update_title()
def set_readonly(self, readonly):
self.readonly = readonly
if self.readonly:
if self.items_list is not None:
self.items_list.setVisible(True)
self.items_list.setEnabled(False)
self.blocks_list.set_enabled(False, True)
group_visible((self.block_finished_btn,
self.block_edit_btn, self.block_on_failure_btn, self.block_on_success_btn), False)
group_enable((self.pr_pause_btn, self.pr_cancel_btn), True)
else:
# TODO
pass
self.update()
def set_program_btns_enabled(self, state):
group_enable((self.pr_pause_btn, self.pr_cancel_btn), state)
def set_active(self, block_id, item_id):
# print "set_active", block_id, item_id
old_block_id = self.block_id
self.block_id = block_id
self.item_id = item_id
if old_block_id != self.block_id and item_id is not None:
# remove old list first
self.scene().removeItem(self.items_list)
self.items_list = None
# init new one
self._init_items_list()
if self.item_id is not None:
self.items_list.set_current_idx(
self.items_map_rev[self.item_id], select=True)
self._handle_item_btns()
group_visible((self.block_finished_btn,
self.block_edit_btn,
self.block_on_failure_btn,
self.block_on_success_btn,
self.block_visualize_btn,
self.program_visualize_btn,
self.block_back_btn,
self.blocks_list),
False)
else:
self.blocks_list.set_current_idx(
self.blocks_map_rev[self.block_id], select=True)
self._update_title()
def get_text_for_item(self, block_id, item_id):
item = self.ph.get_item_msg(block_id, item_id)
text = str(item.id)
text += " | "
# TODO deal with long strings
if item.name:
text += item.name
else:
text += self.ih[item.type].gui.learn.NAME
if len(item.ref_id) > 0:
if self.ph.item_has_nothing_to_set(block_id, item_id):
text += translate("ProgramItem", " (copy of %1)").arg(item.ref_id[0])
# else:
# text += translate("ProgramItem", " (refers to %1)").arg(', '.join(str(x) for x in item.ref_id))
if item.type in self.ih.properties.using_object:
(obj, ref_id) = self.ph.get_object(block_id, item_id)
text += "\n"
if self.ph.is_object_set(block_id, item_id):
obj_txt = obj[0]
else:
obj_txt = "??"
text += translate("ProgramItem", " Object type: %1").arg(obj_txt)
if ref_id != item_id:
text += translate("ProgramItem", " (same as in %1)").arg(ref_id)
# instruction-specific additional text
# TODO it should use different class when running?
text += self.ih[item.type].gui.learn.get_text(self.ph, block_id, item_id)
text += "\n"
text += translate("ProgramItem", " Success: %1, failure: %2").arg(item.on_success).arg(item.on_failure)
return text
def show_visualization_buttons(self, buttons_visible):
"""Shows or hides buttons for visualization mode for HoloLens"""
group_visible((self.vis_pause_btn, self.vis_stop_btn, self.vis_replay_btn, self.vis_back_btn), buttons_visible)
def _init_items_list(self):
idata = []
self.items_map = {} # map from indexes (key) to item_id (value)
self.items_map_rev = {}
bmsg = self.ph.get_block_msg(self.block_id)
for i in range(len(bmsg.items)):
item_id = bmsg.items[i].id
idata.append(self.get_text_for_item(self.block_id, item_id))
self.items_map[i] = item_id
self.items_map_rev[item_id] = i
self.items_list = ListItem(self.scene(
), 0, 0, 0.2 - 2 * 0.005, idata, self.item_selected_cb, parent=self)
for k, v in self.items_map.iteritems():
if self.ph.get_item_msg(self.block_id, v).type in self.ih.properties.runnable_during_learning:
self._update_item(self.block_id, v)
else:
self.items_list.items[k].set_enabled(False)
y = self.title.mapToParent(self.title.boundingRect().bottomLeft()).y() + self.sp
self.items_list.setPos(self.sp, y)
y += self.items_list._height() + self.sp
# in running state
if self.readonly:
self.items_list.setEnabled(False)
self._place_childs_horizontally(
y, self.sp, [self.pr_pause_btn, self.pr_cancel_btn])
y += self.pr_pause_btn._height() + 3 * self.sp
pr = (self.pr_pause_btn, self.pr_cancel_btn)
group_enable(pr, True)
group_visible((self.item_finished_btn, self.item_run_btn,
self.item_on_success_btn, self.item_on_failure_btn, self.item_edit_btn), False)
self.show_visualization_buttons(False)
# going to HoloLens visualization
elif self.visualize:
self.items_list.setEnabled(False)
self._place_childs_horizontally(
y, self.sp, [self.vis_pause_btn, self.vis_stop_btn, self.vis_replay_btn])
y += self.vis_back_btn._height() + self.sp
self._place_childs_horizontally(
y, self.sp, [self.vis_back_btn])
y += self.vis_back_btn._height() + 3 * self.sp
self.show_visualization_buttons(True)
group_enable((self.vis_pause_btn, self.vis_stop_btn), True)
self.vis_back_btn.set_enabled(False)
group_visible((self.pr_pause_btn, self.pr_cancel_btn), False)
group_visible((self.item_run_btn,
self.item_on_success_btn, self.item_on_failure_btn, self.item_edit_btn), False)
# in learning state
else:
btns = (self.item_edit_btn, self.item_run_btn, self.item_on_success_btn, self.item_on_failure_btn,
self.item_finished_btn)
self._place_childs_horizontally(y, self.sp, btns)
y += max(btn._height() for btn in btns)
y += self.sp
group_visible((self.item_finished_btn, self.item_run_btn,
self.item_on_success_btn, self.item_on_failure_btn, self.item_edit_btn), True)
self.item_finished_btn.setEnabled(True)
            group_enable((self.item_run_btn, self.item_on_success_btn,
                          self.item_on_failure_btn), False)
group_visible((self.pr_pause_btn, self.pr_cancel_btn), False)
self.show_visualization_buttons(False)
self.h = y
self._update_title()
self.update()
if self.item_switched_cb:
self.item_switched_cb(self.block_id, self.item_id, blocks=False)
def block_edit_btn_cb(self, btn):
        group_visible((self.block_finished_btn, self.block_edit_btn,
                       self.block_on_failure_btn, self.block_on_success_btn,
                       self.blocks_list), False)
self._init_items_list()
def block_visualize_btn_cb(self, btn):
group_visible((self.block_visualize_btn, self.program_visualize_btn,
self.block_back_btn, self.blocks_list), False)
# callback which notifies HoloLens that visualization started
if self.v_visualize_cb is not None:
self.v_visualize_cb(visualize_whole_program=False)
self._init_items_list()
def program_visualize_btn_cb(self, btn):
group_visible((self.block_visualize_btn, self.program_visualize_btn,
self.block_back_btn, self.blocks_list), False)
# callback which notifies HoloLens that visualization started
if self.v_visualize_cb is not None:
self.v_visualize_cb(visualize_whole_program=True)
self.block_id = self.ph.get_first_block_id()
self._init_items_list()
# go back from block view visualization into main menu
def block_back_btn_cb(self, btn):
group_visible((self.block_visualize_btn, self.program_visualize_btn, self.block_back_btn), False)
# callback which notifies HoloLens that visualization ended
if self.v_back_cb is not None:
self.v_back_cb()
def block_selected_cb(self):
if self.blocks_list.selected_item_idx is not None:
self.block_id = self.blocks_map[self.blocks_list.selected_item_idx]
self.block_on_failure_btn.set_enabled(self.ph.get_block_on_failure(self.block_id) != 0)
self.block_on_success_btn.set_enabled(self.ph.get_block_on_success(self.block_id) != 0)
self.block_edit_btn.set_enabled(True)
self.block_visualize_btn.set_enabled(True)
if self.item_switched_cb:
self.item_switched_cb(self.block_id, None, blocks=True)
self._update_learned()
else:
self.block_id = None
self.item_id = None
self.block_edit_btn.set_enabled(False)
self.block_on_failure_btn.set_enabled(False)
self.block_on_success_btn.set_enabled(False)
self.block_visualize_btn.set_enabled(False)
if self.item_switched_cb is not None:
self.item_switched_cb(self.block_id, None, blocks=True)
@property
def cid(self):
"""Shortcut for accessing program item"""
return self.block_id, self.item_id
def _handle_item_btns(self):
# print ("_handle_item_btns, self.editing_item: " + str(self.editing_item))
if not self.editing_item:
of = self.ph.get_id_on_failure(*self.cid)
os = self.ph.get_id_on_success(*self.cid)
self.item_on_failure_btn.set_enabled(of[0] != 0 and not (of[0] == self.block_id and of[1] == self.item_id))
self.item_on_success_btn.set_enabled(os[0] != 0 and not (os[0] == self.block_id and os[1] == self.item_id))
self.item_run_btn.set_enabled(self._item_runnable())
self.item_edit_btn.set_enabled(self._item_editable())
else:
self.item_edit_btn.set_enabled(True)
self.item_edit_btn.set_image(icons_path + "save.svg")
group_enable((self.item_finished_btn, self.items_list), False)
self.item_run_btn.set_enabled(False)
group_visible((self.pr_cancel_btn, self.pr_pause_btn), False)
def _item_runnable(self):
if self.ph.item_requires_learning(*self.cid):
return self.ph.item_learned(*self.cid)
return self.ph.get_item_msg(*self.cid).type in self.ih.properties.runnable_during_learning
def _item_editable(self):
if not self.ph.item_requires_learning(*self.cid):
return False
if self.ph.item_takes_params_from_ref(*self.cid) and not self.ph.ref_params_learned(*self.cid):
return False
        if self.ph.get_item_type(*self.cid) in (self.ih.properties.place | self.ih.properties.ref_to_pick) \
                and not self.ph.ref_pick_learned(*self.cid)[0]:
            return False
return True
def item_selected_cb(self):
# print ("self.items_list.selected_item_idx", self.items_list.selected_item_idx)
if self.items_list.selected_item_idx is not None:
self.item_id = self.items_map[self.items_list.selected_item_idx]
self._handle_item_btns()
self._update_learned()
else:
self.item_id = None
group_enable(
(self.item_run_btn, self.item_on_success_btn, self.item_on_failure_btn, self.item_edit_btn), False)
if self.item_switched_cb is not None:
self.item_switched_cb(*self.cid)
def block_on_failure_btn_cb(self, btn):
self.set_active(self.ph.get_block_on_failure(self.block_id), None)
def block_on_success_btn_cb(self, btn):
self.set_active(self.ph.get_block_on_success(self.block_id), None)
def block_finished_btn_cb(self, btn):
if self.done_cb is not None:
self.done_cb()
def item_finished_btn_cb(self, btn):
# go back to blocks view
group_visible((self.block_finished_btn, self.block_edit_btn,
self.block_on_failure_btn, self.block_on_success_btn, self.blocks_list), True)
group_visible((self.item_finished_btn, self.item_run_btn,
self.item_on_success_btn, self.item_on_failure_btn, self.item_edit_btn), False)
self.block_selected_cb() # TODO extract method to set buttons to proper state
self.blocks_list.setEnabled(True)
self.block_finished_btn.setEnabled(True)
self.scene().removeItem(self.items_list)
self.items_list = None
self.item_id = None
if self.item_switched_cb is not None:
self.item_switched_cb(*self.cid)
self._update_title()
self.update()
def item_on_failure_btn_cb(self, btn):
of = self.ph.get_id_on_failure(*self.cid)
self.set_active(*of)
if self.item_switched_cb is not None:
self.item_switched_cb(*of)
def item_on_success_btn_cb(self, btn):
of = self.ph.get_id_on_success(*self.cid)
self.set_active(*of)
if self.item_switched_cb is not None:
self.item_switched_cb(*of)
def item_run_btn_cb(self, btn):
self.run_request = True
self.set_enabled(False)
self.learning_request_cb(LearningRequestGoal.EXECUTE_ITEM)
def item_edit_btn_cb(self, btn):
self.edit_request = True
# call action / disable all, wait for result (callback), enable editing
if not self.editing_item:
self.learning_request_cb(LearningRequestGoal.GET_READY)
else:
self.learning_request_cb(LearningRequestGoal.DONE)
self.set_enabled(False)
def learning_request_result(self, success):
self.set_enabled(True)
# TODO no success, no editing
if self.edit_request:
self.edit_request = False
if not self.editing_item:
self.editing_item = True
self.item_edit_btn.set_image(icons_path + "save.svg")
group_enable((self.item_finished_btn, self.items_list,
self.item_on_failure_btn, self.item_on_success_btn), False)
else:
self.editing_item = False
self.item_edit_btn.set_image(icons_path + "edit.svg")
group_enable((self.item_finished_btn, self.items_list), True)
self._update_learned()
self._handle_item_btns()
elif self.run_request:
self.run_request = False
if self.item_switched_cb is not None:
self.item_switched_cb(
self.block_id, self.item_id, not self.editing_item)
def boundingRect(self):
return QtCore.QRectF(0, 0, self.w, self.h)
def set_place_pose(self, place):
msg = self.get_current_item()
assert len(msg.pose) > 0
msg.pose[0].pose.position.x = place.position[0]
msg.pose[0].pose.position.y = place.position[1]
msg.pose[0].pose.position.z = place.position[2]
msg.pose[0].pose.orientation = conversions.a2q(place.quaternion)
self._update_item()
    def set_place_poses(self, poses):
        """Saves the place poses of all objects (in the grid) into the ProgramItem message."""
msg = self.get_current_item()
poses_count = len(poses)
msg_poses_count = len(msg.pose)
        # TODO shouldn't the number of objects in the grid rather be fixed (given by the program structure)?
if poses_count > msg_poses_count:
for i in range(poses_count - msg_poses_count):
ps = PoseStamped()
msg.pose.append(ps)
elif poses_count < msg_poses_count:
for i in range(msg_poses_count - poses_count):
msg.pose.pop()
for i, pose in enumerate(poses):
pos = pose.get_pos()
msg.pose[i].pose.position.x = pos[0]
msg.pose[i].pose.position.y = pos[1]
msg.pose[i].pose.orientation = conversions.yaw2quaternion(pose.rotation())
self._update_item()
def set_pose(self, ps):
msg = self.get_current_item()
assert len(msg.pose) > 0
msg.pose[0] = ps
self._update_item()
def update_pose(self, ps, idx):
msg = self.get_current_item()
msg.pose[idx] = ps
self._update_item()
def clear_poses(self):
for ps in self.get_current_item().pose:
ps.pose = Pose()
self._update_item()
def get_poses_count(self):
msg = self.get_current_item()
return len(msg.pose)
def set_object(self, obj):
msg = self.get_current_item()
assert len(msg.object) > 0
msg.object[0] = obj
self._update_item()
def set_polygon(self, pts):
msg = self.get_current_item()
assert len(msg.polygon) > 0
del msg.polygon[0].polygon.points[:]
for pt in pts:
msg.polygon[0].polygon.points.append(Point32(pt[0], pt[1], 0))
self._update_item()
    def set_place_grid(self, pts):
        """Saves the 4 points forming a grid into the ProgramItem message."""
msg = self.get_current_item()
assert len(msg.polygon) > 0
del msg.polygon[0].polygon.points[:]
for pt in pts:
msg.polygon[0].polygon.points.append(Point32(pt[0], pt[1], 0))
self._update_item()
def _update_block(self, block_id):
idx = self.blocks_map_rev[block_id]
if self.ph.block_learned(block_id):
self.blocks_list.items[idx].set_background_color()
else:
self.blocks_list.items[idx].set_background_color(QtCore.Qt.red)
def _update_item(self, block_id=None, item_id=None):
if block_id is None:
block_id = self.block_id
# need to update all items in block as there might be various dependencies (ref_id)
for idx, item_id in self.items_map.iteritems():
if self.ph.item_learned(block_id, item_id) or \
(self.ph.get_item_msg(block_id, item_id).type in self.ih.properties.runnable_during_learning and
not self.ih.requires_learning(self.ph.get_item_msg(block_id, item_id).type)):
self.items_list.items[idx].set_background_color()
else:
self.items_list.items[idx].set_background_color(QtCore.Qt.red)
self.items_list.items[idx].set_caption(
self.get_text_for_item(block_id, item_id))
self._update_block(block_id)
def get_current_item(self):
if self.block_id is not None and self.item_id is not None:
return self.ph.get_item_msg(*self.cid)
return None
def paint(self, painter, option, widget):
if not self.scene():
return
painter.setClipRect(option.exposedRect)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
pen = QtGui.QPen()
pen.setStyle(QtCore.Qt.NoPen)
painter.setPen(pen)
painter.setBrush(QtCore.Qt.gray)
painter.setOpacity(0.5)
painter.drawRoundedRect(QtCore.QRect(0, 0, self.w, self.h), 5.0, 5.0)
|
mdaniel/intellij-community
|
refs/heads/master
|
python/testData/debug/test_python_subprocess_with_c_parameter.py
|
12
|
from __future__ import print_function
import subprocess
import sys
ret = subprocess.call([sys.executable, '-c', "from test_python_subprocess_helper import foo"], stderr=subprocess.PIPE)
print('The subprocess return code is %d' % ret)
|
maikroeder/grape.pipeline.runner
|
refs/heads/master
|
grape/pipeline/runner/__init__.py
|
1
|
"""
Runs Grape pipelines.
"""
import sys
import argparse
import logging
from grape.pipeline.runner.runner import Runner
def main():
    print("start")
desc = 'Runs configured Grape pipelines'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--version', action='store_true',
default=False, help='Displays version and exits.')
args = parser.parse_args()
    if args.version:
        # NOTE: __version__ is assumed to be defined elsewhere in the package;
        # it is not imported in this module.
        print(__version__)
sys.exit(0)
runner = Runner()
runner.start()
runner.stop()
sys.exit(0)
if __name__ == '__main__':
main()
|
gorodok11/ajenti
|
refs/heads/master
|
ajenti/plugins/core/download.py
|
17
|
import os.path
from ajenti.com import *
from ajenti.api import URLHandler, url
from ajenti.utils import wsgi_serve_file
from ajenti.plugmgr import PluginLoader
class Downloader(URLHandler, Plugin):
@url('^/dl/.+/.+')
def process_dl(self, req, start_response):
params = req['PATH_INFO'].split('/', 3)
self.log.debug('Dispatching download: %s'%req['PATH_INFO'])
path = PluginLoader.get_plugin_path(self.app, params[2])
file = os.path.join(path, params[2], 'files', params[3])
return wsgi_serve_file(req, start_response, file)
@url('^/htdocs/.+')
def process_htdocs(self, req, start_response):
params = req['PATH_INFO'].split('/', 2)
self.log.debug('Dispatching htdocs: %s'%req['PATH_INFO'])
path = self.app.config.get('ajenti', 'htdocs')
file = os.path.join(path, params[2])
file = os.path.normpath(os.path.realpath(file))
if not file.startswith(path):
start_response('404 Not Found', [])
return ''
return wsgi_serve_file(req, start_response, file)
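# A quick illustration of the traversal guard in process_htdocs (hypothetical
# paths): os.path.normpath('/srv/htdocs/../etc/passwd') resolves to
# '/etc/passwd', which does not start with the htdocs root, so the handler
# answers 404 instead of serving the file.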
|
UManPychron/pychron
|
refs/heads/develop
|
docs/user_guide/operation/scripts/examples/helix/measurement/felix_analysis120_60.py
|
2
|
#!Measurement
'''
baseline:
after: true
before: false
counts: 60
detector: H2
mass: 40.062
settling_time: 15.0
default_fits: nominal
equilibration:
eqtime: 1.0
inlet: H
inlet_delay: 3
outlet: V
use_extraction_eqtime: true
multicollect:
counts: 120
detector: H2
isotope: Ar40
peakcenter:
after: true
before: false
detector: H2
detectors:
- H1
- L2(CDD)
isotope: Ar40
integration_time: 1.048576
peakhop:
hops_name: ''
use_peak_hop: false
'''
ACTIVE_DETECTORS=('H2','H1','AX','L1','L2(CDD)')
def main():
info('unknown measurement script')
activate_detectors(*ACTIVE_DETECTORS)
if mx.peakcenter.before:
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
if mx.baseline.before:
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
#sniff the gas during equilibration
if mx.equilibration.use_extraction_eqtime:
eqt = eqtime
else:
eqt = mx.equilibration.eqtime
'''
Equilibrate is non-blocking so use a sniff or sleep as a placeholder
e.g sniff(<equilibration_time>) or sleep(<equilibration_time>)
'''
equilibrate(eqtime=eqt, inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet,
delay=mx.equilibration.inlet_delay)
set_time_zero()
sniff(eqt)
set_fits()
set_baseline_fits()
#multicollect on active detectors
multicollect(ncounts=mx.multicollect.counts, integration_time=1.048576)
if mx.baseline.after:
baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector,
settling_time=mx.baseline.settling_time)
if mx.peakcenter.after:
activate_detectors(*mx.peakcenter.detectors, **{'peak_center':True})
peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope,
integration_time=mx.peakcenter.integration_time)
if use_cdd_warming:
gosub('warm_cdd', argv=(mx.equilibration.outlet,))
info('finished measure script')
|
tupolev/plugin.video.mitele
|
refs/heads/master
|
lib/youtube_dl/extractor/voicerepublic.py
|
20
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
sanitized_Request,
)
class VoiceRepublicIE(InfoExtractor):
_VALID_URL = r'https?://voicerepublic\.com/(?:talks|embed)/(?P<id>[0-9a-z-]+)'
_TESTS = [{
'url': 'http://voicerepublic.com/talks/watching-the-watchers-building-a-sousveillance-state',
'md5': 'b9174d651323f17783000876347116e3',
'info_dict': {
'id': '2296',
'display_id': 'watching-the-watchers-building-a-sousveillance-state',
'ext': 'm4a',
'title': 'Watching the Watchers: Building a Sousveillance State',
'description': 'Secret surveillance programs have metadata too. The people and companies that operate secret surveillance programs can be surveilled.',
            'thumbnail': r're:^https?://.*\.(?:png|jpg)$',
'duration': 1800,
'view_count': int,
}
}, {
'url': 'http://voicerepublic.com/embed/watching-the-watchers-building-a-sousveillance-state',
'only_matching': True,
}]
def _real_extract(self, url):
display_id = self._match_id(url)
req = sanitized_Request(
compat_urlparse.urljoin(url, '/talks/%s' % display_id))
# Older versions of Firefox get redirected to an "upgrade browser" page
req.add_header('User-Agent', 'youtube-dl')
webpage = self._download_webpage(req, display_id)
if '>Queued for processing, please stand by...<' in webpage:
raise ExtractorError(
'Audio is still queued for processing', expected=True)
config = self._search_regex(
r'(?s)return ({.+?});\s*\n', webpage,
'data', default=None)
data = self._parse_json(config, display_id, fatal=False) if config else None
if data:
title = data['title']
description = data.get('teaser')
talk_id = compat_str(data.get('talk_id') or display_id)
talk = data['talk']
duration = int_or_none(talk.get('duration'))
formats = [{
'url': compat_urlparse.urljoin(url, talk_url),
'format_id': format_id,
'ext': determine_ext(talk_url) or format_id,
'vcodec': 'none',
} for format_id, talk_url in talk['links'].items()]
else:
title = self._og_search_title(webpage)
description = self._html_search_regex(
r"(?s)<div class='talk-teaser'[^>]*>(.+?)</div>",
webpage, 'description', fatal=False)
talk_id = self._search_regex(
[r"id='jc-(\d+)'", r"data-shareable-id='(\d+)'"],
webpage, 'talk id', default=None) or display_id
duration = None
player = self._search_regex(
r"class='vr-player jp-jplayer'([^>]+)>", webpage, 'player')
formats = [{
'url': compat_urlparse.urljoin(url, talk_url),
'format_id': format_id,
'ext': determine_ext(talk_url) or format_id,
'vcodec': 'none',
} for format_id, talk_url in re.findall(r"data-([^=]+)='([^']+)'", player)]
self._sort_formats(formats)
thumbnail = self._og_search_thumbnail(webpage)
view_count = int_or_none(self._search_regex(
r"class='play-count[^']*'>\s*(\d+) plays",
webpage, 'play count', fatal=False))
return {
'id': talk_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
|
paolodedios/tensorflow
|
refs/heads/master
|
tensorflow/python/compiler/tensorrt/model_tests/run_models.py
|
4
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs sample models with TensorRT and analyzes latency and numerics information."""
import functools
import os
import tempfile
from typing import Callable, Iterable, Sequence
from absl import app
from absl import flags
from absl import logging
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.python.compiler.tensorrt.model_tests import model_handler
from tensorflow.python.compiler.tensorrt.model_tests import result_analyzer
from tensorflow.python.eager import context
from tensorflow.python.framework import config as framework_config
from tensorflow.python.framework import ops as framework_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test as platform_test
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
FLAGS = flags.FLAGS
flags.DEFINE_string(
"saved_model_dir",
platform_test.test_src_dir_path(
"python/compiler/tensorrt/model_tests/sample_model"),
"The directory to the testing SavedModel.")
flags.DEFINE_string("saved_model_signature_key",
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY,
"The signature key of the testing SavedModel being used.")
flags.DEFINE_multi_string("saved_model_tags", (tag_constants.SERVING,),
"The tags of the testing SavedModel being used.")
flags.DEFINE_integer("batch_size", 128,
"The batch size used to run the testing model with.")
flags.DEFINE_boolean("use_tf2", True,
"Whether to test with TF2 behavior or not (TF1).")
flags.DEFINE_boolean("use_int8", True,
"Whether to convert with INT8 precision.")
flags.DEFINE_enum("latency_baseline", "GPU", ["CPU", "GPU"],
"The baseline version for latency improvement analysis.")
flags.DEFINE_enum("numerics_baseline", "CPU", ["CPU", "GPU"],
"The baseline version for numerical difference analysis.")
flags.DEFINE_float(
"speedup_tolerance", 0.95,
"Log errors whenever mean TensorRT speedup is lower than the tolerance.")
flags.DEFINE_float(
"diff_tolerance", 0.05,
"Log errors whenever mean TensorRT relative difference is larger than "
"the tolerance.")
flags.DEFINE_integer(
"gpu_memory_limit_mb", None,
"Limitation on the device memory being used during TensorRT compilation "
"and inference.")
flags.DEFINE_string("output_dir", None, "Output directory of analysis results.")
flags.DEFINE_enum("output_format", "CSV", ["CSV", "JSON"],
"Output format of analysis results.")
DEFAULT_TRT_CONVERT_PARAMS = trt.DEFAULT_TRT_CONVERSION_PARAMS
# pylint: disable=bad-whitespace
def set_up_gpu_memory_limit(memory_limit_mb: int) -> None:
gpus = framework_config.list_physical_devices("GPU")
virtual_device_config = context.LogicalDeviceConfiguration(
memory_limit=memory_limit_mb)
for gpu in gpus:
framework_config.set_logical_device_configuration(gpu,
[virtual_device_config])
class SampleRunner(object):
"""The driver to run all sample models in all specified configurations."""
def __init__(self, saved_model_dir: str, saved_model_tags: Sequence[str],
saved_model_signature_key: str, batch_size: int, output_dir: str,
output_format: str, use_tf2: bool, use_int8: bool,
analyzer: result_analyzer.ResultAnalyzer):
self._output_dir = output_dir or tempfile.mkdtemp(
prefix="tf2trt_model_tests")
logging.info("Use output directory as: %s", self._output_dir)
self._output_format = output_format
# The model_configs contains (saved_model_dir, saved_model_signature_key,
# batch_size) for each model
self._configs = (model_handler.ModelConfig(
saved_model_dir=saved_model_dir,
saved_model_tags=tuple(saved_model_tags),
saved_model_signature_key=saved_model_signature_key,
default_batch_size=batch_size),)
self._model_handler_manager_cls = (
model_handler.ModelHandlerManagerV2
if use_tf2 else model_handler.ModelHandlerManagerV1)
if use_int8:
self._precision_modes = [
trt.TrtPrecisionMode.FP32, trt.TrtPrecisionMode.FP16,
trt.TrtPrecisionMode.INT8]
else:
self._precision_modes = [
trt.TrtPrecisionMode.FP32, trt.TrtPrecisionMode.FP16]
self._analyzer = analyzer
def _write_analysis_result(self, df: result_analyzer.DataFrame,
path: str) -> None:
if self._output_format == "CSV":
df.to_csv(os.path.join(path, "result.csv"))
elif self._output_format == "JSON":
df.to_json(os.path.join(path, "result.json"))
else:
raise NotImplementedError("Unsupported output format: {}".format(
self._output_format))
def _run_impl(
self, test_name: str,
default_trt_converter_params: trt.TrtConversionParams,
trt_converter_params_updater: Callable[[trt.TrtConversionParams],
Iterable[trt.TrtConversionParams]]
) -> None:
"""Runs all sample models based on a key varying parameter."""
for model_config in self._configs:
# Loads, compiles, calibrates and runs models.
manager = self._model_handler_manager_cls(
name=test_name,
model_config=model_config,
default_trt_convert_params=default_trt_converter_params,
trt_convert_params_updater=trt_converter_params_updater)
inputs = manager.generate_random_inputs()
# As all the data are randomly generated, directly use inference data as
# calibration data to produce reliable dynamic ranges.
manager.convert(inputs)
test_results = manager.run(inputs)
# Analyzes the latency and numerical results.
analysis_result_df, _ = self._analyzer.analysis(test_results)
# Outputs the analysis results
model_name = os.path.split(manager.model_config.saved_model_dir)[-1]
model_dir = os.path.join(self._output_dir, model_name)
gfile.MkDir(model_dir)
test_dir = os.path.join(model_dir, test_name)
gfile.MkDir(test_dir)
with gfile.Open(
os.path.join(test_dir, "default_tensorrt_params.txt"), "w") as f:
f.write(repr(default_trt_converter_params))
self._write_analysis_result(analysis_result_df, test_dir)
def run_trt_precision_tests(self) -> None:
"""Runs tests for all TensorRT precisions."""
def trt_converter_params_updater(params: trt.TrtConversionParams):
for precision_mode in self._precision_modes:
yield params._replace(
precision_mode=precision_mode,
use_calibration=(precision_mode == trt.TrtPrecisionMode.INT8))
self._run_impl(
test_name="precision_mode_test",
        default_trt_converter_params=DEFAULT_TRT_CONVERT_PARAMS,
trt_converter_params_updater=trt_converter_params_updater)
def run_all_tests(self) -> None:
"""Runs all tests available."""
self.run_trt_precision_tests()
logging.info("Check analysis result at: %s", self._output_dir)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
os.environ["TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION"] = "False"
if FLAGS.use_tf2:
logging.info("Running in TF2 mode. Eager execution is enabled.")
framework_ops.enable_eager_execution()
else:
logging.info("Running in TF1 mode. Eager execution is disabled.")
framework_ops.disable_eager_execution()
if FLAGS.use_int8:
logging.info("Will try converting with INT8 precision.")
else:
logging.info("Will not try converting with INT8 precision.")
if FLAGS.gpu_memory_limit_mb:
set_up_gpu_memory_limit(FLAGS.gpu_memory_limit_mb)
analyzer = result_analyzer.ResultAnalyzer(
use_cpu_latency_baseline=FLAGS.latency_baseline == "CPU",
use_cpu_numerics_baseline=FLAGS.numerics_baseline == "CPU",
checkers=[
functools.partial(
result_analyzer.check_column,
name="speedup",
fn=lambda x: x > FLAGS.speedup_tolerance),
functools.partial(
result_analyzer.check_column,
name="rel_diff_mean",
fn=lambda x: all(v < FLAGS.diff_tolerance for v in x.values()))
])
runner = SampleRunner(
saved_model_dir=FLAGS.saved_model_dir,
saved_model_tags=FLAGS.saved_model_tags,
saved_model_signature_key=FLAGS.saved_model_signature_key,
batch_size=FLAGS.batch_size,
output_dir=FLAGS.output_dir,
output_format=FLAGS.output_format,
use_tf2=FLAGS.use_tf2,
use_int8=FLAGS.use_int8,
analyzer=analyzer)
runner.run_all_tests()
if __name__ == "__main__":
app.run(main)
|
klahnakoski/MySQL-to-S3
|
refs/heads/master
|
vendor/mo_dots/nones.py
|
1
|
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mo_dots import _setdefault, wrap, split_field
from mo_future import text_type, binary_type
_get = object.__getattribute__
_set = object.__setattr__
_zero_list = []
class NullType(object):
"""
Structural Null provides closure under the dot (.) operator
Null[x] == Null
Null.x == Null
Null INSTANCES WILL TRACK THEIR OWN DEREFERENCE PATH SO
ASSIGNMENT CAN BE DONE
"""
def __init__(self, obj=None, key=None):
"""
obj - VALUE BEING DEREFERENCED
key - THE dict ITEM REFERENCE (DOT(.) IS NOT ESCAPED)
"""
d = _get(self, "__dict__")
d["_obj"] = obj
d["__key__"] = key
def __bool__(self):
return False
def __nonzero__(self):
return False
def __add__(self, other):
if isinstance(other, list):
return other
return Null
def __radd__(self, other):
return Null
def __call__(self, *args, **kwargs):
return Null
def __iadd__(self, other):
try:
d = _get(self, "__dict__")
o = d["_obj"]
if o is None:
return self
key = d["__key__"]
_assign_to_null(o, [key], other)
except Exception as e:
raise e
return other
def __sub__(self, other):
return Null
def __rsub__(self, other):
return Null
def __neg__(self):
return Null
def __mul__(self, other):
return Null
def __rmul__(self, other):
return Null
def __div__(self, other):
return Null
def __rdiv__(self, other):
return Null
def __truediv__(self, other):
return Null
def __rtruediv__(self, other):
return Null
def __gt__(self, other):
return Null
def __ge__(self, other):
return Null
def __le__(self, other):
return Null
def __lt__(self, other):
return Null
def __eq__(self, other):
return other == None or isinstance(other, NullType)
def __ne__(self, other):
return other is not None and not isinstance(other, NullType)
def __or__(self, other):
if other is True:
return True
return Null
def __ror__(self, other):
return other
def __and__(self, other):
if other is False:
return False
return Null
def __xor__(self, other):
return Null
def __len__(self):
return 0
def __iter__(self):
return _zero_list.__iter__()
def __copy__(self):
return Null
def __deepcopy__(self, memo):
return Null
def last(self):
"""
IN CASE self IS INTERPRETED AS A list
"""
return Null
def right(self, num=None):
return Null
def __getitem__(self, key):
if isinstance(key, slice):
return Null
elif isinstance(key, binary_type):
key = key.decode("utf8")
elif isinstance(key, int):
return NullType(self, key)
path = _split_field(key)
output = self
for p in path:
output = NullType(output, p)
return output
def __getattr__(self, key):
key = text_type(key)
d = _get(self, "__dict__")
o = wrap(d["_obj"])
k = d["__key__"]
if o is None:
return Null
elif isinstance(o, NullType):
return NullType(self, key)
v = o.get(k)
if v == None:
return NullType(self, key)
return wrap(v.get(key))
def __setattr__(self, key, value):
key = text_type(key)
d = _get(self, "__dict__")
o = wrap(d["_obj"])
k = d["__key__"]
seq = [k] + [key]
_assign_to_null(o, seq, value)
def __setitem__(self, key, value):
d = _get(self, "__dict__")
o = d["_obj"]
if o is None:
return
k = d["__key__"]
if o is None:
return
elif isinstance(key, int):
seq = [k] + [key]
_assign_to_null(o, seq, value)
else:
seq = [k] + _split_field(key)
_assign_to_null(o, seq, value)
def keys(self):
return set()
def items(self):
return []
def pop(self, key, default=None):
return Null
def __str__(self):
return "None"
def __repr__(self):
return "Null"
def __hash__(self):
return hash(None)
Null = NullType() # INSTEAD OF None!!!
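# A minimal usage sketch (illustrative, not from the original module):
# dereferencing a missing path yields Null, while assignment through the
# tracked path materializes the intermediate dicts.
#
# d = wrap({})
# _ = d.a.b      # a NullType remembering the path "a.b"
# d.a.b = 42     # after this, d == {"a": {"b": 42}}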
def _assign_to_null(obj, path, value, force=True):
"""
value IS ASSIGNED TO obj[self.path][key]
path IS AN ARRAY OF PROPERTY NAMES
force=False IF YOU PREFER TO use setDefault()
"""
try:
if obj is Null:
return
if isinstance(obj, NullType):
d = _get(obj, "__dict__")
o = d["_obj"]
p = d["__key__"]
s = [p]+path
return _assign_to_null(o, s, value)
path0 = path[0]
if len(path) == 1:
if force:
obj[path0] = value
else:
_setdefault(obj, path0, value)
return
old_value = obj.get(path0)
if old_value == None:
if value == None:
return
else:
obj[path0] = old_value = {}
_assign_to_null(old_value, path[1:], value)
except Exception as e:
raise e
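# Illustration (hypothetical values): given obj = {}, calling
# _assign_to_null(obj, ["a", "b"], 1) creates the intermediate dict, leaving
# obj == {"a": {"b": 1}}; with force=False the leaf is written via
# _setdefault() instead, so an existing value would be preserved.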
def _split_field(field):
"""
SIMPLE SPLIT, NO CHECKS
"""
if field == ".":
return []
else:
        return [k.replace("\a", ".") for k in field.replace("\\.", "\a").split(".")]
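# Example of the escape handling above (illustrative): a literal dot inside a
# key is written as "\\." and survives the split:
# _split_field("a\\.b.c") == ["a.b", "c"]
# _split_field(".") == []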
|
Phuehvk/gyp
|
refs/heads/master
|
test/rules/src/copy-file.py
|
600
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
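# Usage: copy-file.py <infile> <outfile>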
import sys
contents = open(sys.argv[1], 'rb').read()
open(sys.argv[2], 'wb').write(contents)
sys.exit(0)
|
ipylypiv/grpc
|
refs/heads/master
|
src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py
|
23
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test of RPCs made against gRPC Python's application-layer API."""
import unittest
import grpc
from tests.unit.framework.common import test_constants
_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2
_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:]
_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3
_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3]
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
def _unary_unary_multi_callable(channel):
return channel.unary_unary(_UNARY_UNARY)
def _unary_stream_multi_callable(channel):
return channel.unary_stream(
_UNARY_STREAM,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_unary_multi_callable(channel):
return channel.stream_unary(
_STREAM_UNARY,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_stream_multi_callable(channel):
return channel.stream_stream(_STREAM_STREAM)
class InvalidMetadataTest(unittest.TestCase):
def setUp(self):
self._channel = grpc.insecure_channel('localhost:8080')
self._unary_unary = _unary_unary_multi_callable(self._channel)
self._unary_stream = _unary_stream_multi_callable(self._channel)
self._stream_unary = _stream_unary_multi_callable(self._channel)
self._stream_stream = _stream_stream_multi_callable(self._channel)
def testUnaryRequestBlockingUnaryResponse(self):
request = b'\x07\x08'
metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponse'),)
expected_error_details = "metadata was invalid: %s" % metadata
with self.assertRaises(ValueError) as exception_context:
self._unary_unary(request, metadata=metadata)
self.assertIn(expected_error_details, str(exception_context.exception))
def testUnaryRequestBlockingUnaryResponseWithCall(self):
request = b'\x07\x08'
metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponseWithCall'),)
expected_error_details = "metadata was invalid: %s" % metadata
with self.assertRaises(ValueError) as exception_context:
self._unary_unary.with_call(request, metadata=metadata)
self.assertIn(expected_error_details, str(exception_context.exception))
def testUnaryRequestFutureUnaryResponse(self):
request = b'\x07\x08'
metadata = (('InVaLiD', 'UnaryRequestFutureUnaryResponse'),)
expected_error_details = "metadata was invalid: %s" % metadata
response_future = self._unary_unary.future(request, metadata=metadata)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertEqual(exception_context.exception.details(),
expected_error_details)
self.assertEqual(exception_context.exception.code(),
grpc.StatusCode.INTERNAL)
self.assertEqual(response_future.details(), expected_error_details)
self.assertEqual(response_future.code(), grpc.StatusCode.INTERNAL)
def testUnaryRequestStreamResponse(self):
request = b'\x37\x58'
metadata = (('InVaLiD', 'UnaryRequestStreamResponse'),)
expected_error_details = "metadata was invalid: %s" % metadata
response_iterator = self._unary_stream(request, metadata=metadata)
with self.assertRaises(grpc.RpcError) as exception_context:
next(response_iterator)
self.assertEqual(exception_context.exception.details(),
expected_error_details)
self.assertEqual(exception_context.exception.code(),
grpc.StatusCode.INTERNAL)
self.assertEqual(response_iterator.details(), expected_error_details)
self.assertEqual(response_iterator.code(), grpc.StatusCode.INTERNAL)
def testStreamRequestBlockingUnaryResponse(self):
request_iterator = (b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponse'),)
expected_error_details = "metadata was invalid: %s" % metadata
with self.assertRaises(ValueError) as exception_context:
self._stream_unary(request_iterator, metadata=metadata)
self.assertIn(expected_error_details, str(exception_context.exception))
def testStreamRequestBlockingUnaryResponseWithCall(self):
request_iterator = (b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponseWithCall'),)
expected_error_details = "metadata was invalid: %s" % metadata
multi_callable = _stream_unary_multi_callable(self._channel)
with self.assertRaises(ValueError) as exception_context:
multi_callable.with_call(request_iterator, metadata=metadata)
self.assertIn(expected_error_details, str(exception_context.exception))
def testStreamRequestFutureUnaryResponse(self):
request_iterator = (b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
metadata = (('InVaLiD', 'StreamRequestFutureUnaryResponse'),)
expected_error_details = "metadata was invalid: %s" % metadata
response_future = self._stream_unary.future(
request_iterator, metadata=metadata)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertEqual(exception_context.exception.details(),
expected_error_details)
self.assertEqual(exception_context.exception.code(),
grpc.StatusCode.INTERNAL)
self.assertEqual(response_future.details(), expected_error_details)
self.assertEqual(response_future.code(), grpc.StatusCode.INTERNAL)
def testStreamRequestStreamResponse(self):
request_iterator = (b'\x07\x08'
for _ in range(test_constants.STREAM_LENGTH))
metadata = (('InVaLiD', 'StreamRequestStreamResponse'),)
expected_error_details = "metadata was invalid: %s" % metadata
response_iterator = self._stream_stream(
request_iterator, metadata=metadata)
with self.assertRaises(grpc.RpcError) as exception_context:
next(response_iterator)
self.assertEqual(exception_context.exception.details(),
expected_error_details)
self.assertEqual(exception_context.exception.code(),
grpc.StatusCode.INTERNAL)
self.assertEqual(response_iterator.details(), expected_error_details)
self.assertEqual(response_iterator.code(), grpc.StatusCode.INTERNAL)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
dhtech/graphite-web
|
refs/heads/master
|
webapp/graphite/node.py
|
54
|
class Node(object):
__slots__ = ('name', 'path', 'local', 'is_leaf')
def __init__(self, path):
self.path = path
self.name = path.split('.')[-1]
self.local = True
self.is_leaf = False
def __repr__(self):
return '<%s[%x]: %s>' % (self.__class__.__name__, id(self), self.path)
class BranchNode(Node):
pass
class LeafNode(Node):
__slots__ = ('reader', 'intervals')
def __init__(self, path, reader):
Node.__init__(self, path)
self.reader = reader
self.intervals = reader.get_intervals()
self.is_leaf = True
def fetch(self, startTime, endTime):
return self.reader.fetch(startTime, endTime)
def __repr__(self):
return '<LeafNode[%x]: %s (%s)>' % (id(self), self.path, self.reader)
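# A minimal usage sketch (hypothetical reader; readers only need to provide
# get_intervals() and fetch(), as used by LeafNode above):
#
# class ConstantReader(object):
#     def get_intervals(self):
#         return []
#     def fetch(self, startTime, endTime):
#         return ((startTime, endTime, 60), [1.0] * ((endTime - startTime) // 60))
#
# node = LeafNode('servers.web1.load', ConstantReader())
# node.fetch(0, 120)  # delegates to the reader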
|
freelancer/freelancer-sdk-python
|
refs/heads/master
|
tests/test_user_helpers.py
|
1
|
import unittest
from freelancersdk.resources.users.helpers import (
create_get_users_object,
create_get_users_details_object,
)
class TestUsersHelpers(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_create_get_users_object(self):
query = create_get_users_object(
user_ids=[
201,
202,
203,
],
)
self.assertIn(('users[]', [201, 202, 203]), query.items())
|
jimkmc/micropython
|
refs/heads/master
|
tests/basics/set_update.py
|
118
|
s = {1}
s.update()
print(s)
s.update([2])
print(sorted(s))
s.update([1,3], [2,2,4])
print(sorted(s))
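# Expected output of the prints above (assuming CPython-compatible set semantics):
# {1}
# [1, 2]
# [1, 2, 3, 4]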
|
uskudnik/ggrc-core
|
refs/heads/develop
|
src/ggrc_basic_permissions/roles/ProgramBasicReader.py
|
2
|
scope = "Program Implied"
description = """
Allow any user assigned a role in a program the ability to read Role
resources.
"""
permissions = {
"read": [
"Category",
"ControlCategory",
"ControlAssertion",
"Option",
"Role",
"Person",
"Context",
{
"type": "BackgroundTask",
"terms": {
"property_name": "modified_by",
"value": "$current_user"
},
"condition": "is"
},
],
"create": [],
"update": [],
"delete": []
}
|
keithhendry/treadmill
|
refs/heads/master
|
treadmill/zkadmin.py
|
3
|
"""Zookeeper admin interface."""
import socket
import logging
_LOGGER = logging.getLogger(__name__)
def netcat(hostname, port, command):
"""Send 4letter netcat to Zookeeper control port."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, port))
sock.sendall(command)
sock.shutdown(socket.SHUT_WR)
data = []
while True:
chunk = sock.recv(1024)
if not chunk:
break
data.append(chunk)
sock.close()
return ''.join(data)
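# Example (hypothetical endpoint; 'ruok' is one of Zookeeper's standard
# four-letter commands, and 2181 its conventional client port):
# netcat('localhost', 2181, 'ruok')  # returns 'imok' from a healthy server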
|
KeyWeeUsr/plyer
|
refs/heads/master
|
plyer/facades/gyroscope.py
|
1
|
'''
Gyroscope
============
The gyroscope measures the rate of rotation around a device's x, y,
and z axes.
The :class:`Gyroscope` provides access to public methods to
use gyroscope of your device.
Simple Examples
---------------
To enable gyroscope::
>>> from plyer import gyroscope
>>> gyroscope.enable()
To disable gyroscope::
>>> gyroscope.disable()
To get the rate of rotation along the three axes::
>>> gyroscope.rotation
(-0.0034587313421070576, -0.0073830625042319298, 0.0046892408281564713)
To get the uncalibrated rate of rotation along the three axes along with the
drift compensation::
>>> gyroscope.rotation_uncalib
()
where the first three values show the rate of rotation w/o drift
compensation and the last three show the estimated drift along the three
axes.
Supported Platforms
-------------------
Android, iOS
'''
class Gyroscope(object):
'''
Gyroscope facade.
.. versionadded:: 1.3.1
'''
@property
def rotation(self):
'''
        Property that returns the rate of rotation around the device's local
        X, Y and Z axes.
Along x-axis: angular speed around the X axis
Along y-axis: angular speed around the Y axis
Along z-axis: angular speed around the Z axis
Returns (None, None, None) if no data is currently available.
'''
return self.get_orientation()
@property
def rotation_uncalib(self):
'''
        Property that returns the current rate of rotation around the X, Y and
        Z axes. An estimation of the drift on each axis is reported as well.
Along x-axis: angular speed (w/o drift compensation) around the X axis
Along y-axis: angular speed (w/o drift compensation) around the Y axis
Along z-axis: angular speed (w/o drift compensation) around the Z axis
Along x-axis: estimated drift around X axis
Along y-axis: estimated drift around Y axis
Along z-axis: estimated drift around Z axis
Returns (None, None, None, None, None, None) if no data is currently
available.
'''
return self.get_rotation_uncalib()
@property
def orientation(self):
'''
WARNING:: This property is deprecated after API Level 8.
Use `gyroscope.rotation` instead.
Property that returns values of the current Gyroscope sensors, as
a (x, y, z) tuple. Returns (None, None, None) if no data is currently
available.
'''
return self.get_orientation()
def enable(self):
'''
Activate the Gyroscope sensor.
'''
self._enable()
def disable(self):
'''
Disable the Gyroscope sensor.
'''
self._disable()
def get_orientation(self):
return self._get_orientation()
def get_rotation_uncalib(self):
return self._get_rotation_uncalib()
# private
def _enable(self):
raise NotImplementedError()
def _disable(self):
raise NotImplementedError()
def _get_orientation(self):
raise NotImplementedError()
def _get_rotation_uncalib(self):
raise NotImplementedError()
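# A minimal backend sketch (hypothetical, for illustration only): platform
# implementations subclass the facade and fill in the private hooks.
#
# class DummyGyroscope(Gyroscope):
#     def _enable(self):
#         pass
#     def _disable(self):
#         pass
#     def _get_orientation(self):
#         return (0.0, 0.0, 0.0)
#     def _get_rotation_uncalib(self):
#         return (0.0,) * 6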
|
justyns/home-assistant
|
refs/heads/dev
|
tests/components/sensor/test_rfxtrx.py
|
2
|
"""The tests for the Rfxtrx sensor platform."""
import unittest
from homeassistant.components.sensor import rfxtrx
from homeassistant.components import rfxtrx as rfxtrx_core
from homeassistant.const import TEMP_CELCIUS
from tests.common import get_test_home_assistant
class TestSensorRfxtrx(unittest.TestCase):
"""Test the Rfxtrx sensor platform."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant(0)
def tearDown(self):
"""Stop everything that was started."""
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx_core.RFX_DEVICES = {}
self.hass.stop()
def test_default_config(self):
"""Test with 0 sensor."""
config = {'devices': {}}
devices = []
def add_dev_callback(devs):
"""Add a callback to add devices."""
for dev in devs:
devices.append(dev)
rfxtrx.setup_platform(self.hass, config, add_dev_callback)
self.assertEqual(0, len(devices))
def test_one_sensor(self):
"""Test with 1 sensor."""
config = {'devices':
{'sensor_0502': {
'name': 'Test',
'packetid': '0a52080705020095220269',
'data_type': 'Temperature'}}}
devices = []
def add_dev_callback(devs):
"""Add a callback to add devices."""
for dev in devs:
devices.append(dev)
rfxtrx.setup_platform(self.hass, config, add_dev_callback)
self.assertEqual(1, len(devices))
entity = devices[0]
self.assertEqual('Test', entity.name)
self.assertEqual(TEMP_CELCIUS, entity.unit_of_measurement)
self.assertEqual(14.9, entity.state)
self.assertEqual({'Humidity status': 'normal', 'Temperature': 14.9,
'Rssi numeric': 6, 'Humidity': 34,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
def test_several_sensors(self):
"""Test with 3 sensors."""
config = {'devices':
{'sensor_0502': {
'name': 'Test',
'packetid': '0a52080705020095220269',
'data_type': 'Temperature'},
'sensor_0601': {
'name': 'Bath_Humidity',
'packetid': '0a520802060100ff0e0269',
'data_type': 'Humidity'},
'sensor_0601 2': {
'name': 'Bath',
'packetid': '0a520802060100ff0e0269'}}}
devices = []
def add_dev_callback(devs):
"""Add a callback to add devices."""
for dev in devs:
devices.append(dev)
rfxtrx.setup_platform(self.hass, config, add_dev_callback)
self.assertEqual(3, len(devices))
self.assertEqual(3, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for entity in devices:
if entity.name == 'Bath_Humidity':
device_num = device_num + 1
self.assertEqual('%', entity.unit_of_measurement)
self.assertEqual(14, entity.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 25.5,
'Humidity': 14, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
entity.device_state_attributes)
self.assertEqual('Bath_Humidity', entity.__str__())
elif entity.name == 'Bath':
device_num = device_num + 1
self.assertEqual(TEMP_CELCIUS, entity.unit_of_measurement)
self.assertEqual(25.5, entity.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 25.5,
'Humidity': 14, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
entity.device_state_attributes)
self.assertEqual('Bath', entity.__str__())
elif entity.name == 'Test':
device_num = device_num + 1
self.assertEqual(TEMP_CELCIUS, entity.unit_of_measurement)
self.assertEqual(14.9, entity.state)
self.assertEqual({'Humidity status': 'normal',
'Temperature': 14.9,
'Rssi numeric': 6, 'Humidity': 34,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('Test', entity.__str__())
self.assertEqual(3, device_num)
def test_discover_sensor(self):
"""Test with discovery of sensor."""
config = {'devices': {}}
devices = []
def add_dev_callback(devs):
"""Add a callback to add devices."""
for dev in devs:
devices.append(dev)
rfxtrx.setup_platform(self.hass, config, add_dev_callback)
event = rfxtrx_core.get_rfx_object('0a520801070100b81b0279')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = devices[0]
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual(1, len(devices))
self.assertEqual({'Humidity status': 'normal',
'Temperature': 18.4,
'Rssi numeric': 7, 'Humidity': 27,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('sensor_0701 : 0a520801070100b81b0279',
entity.__str__())
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual(1, len(devices))
event = rfxtrx_core.get_rfx_object('0a52080405020095240279')
event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = devices[1]
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual(2, len(devices))
self.assertEqual({'Humidity status': 'normal',
'Temperature': 14.9,
'Rssi numeric': 7, 'Humidity': 36,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('sensor_0502 : 0a52080405020095240279',
entity.__str__())
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = devices[0]
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual(2, len(devices))
self.assertEqual({'Humidity status': 'normal',
'Temperature': 17.9,
'Rssi numeric': 7, 'Humidity': 27,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('sensor_0701 : 0a520801070100b81b0279',
entity.__str__())
# trying to add a switch
event = rfxtrx_core.get_rfx_object('0b1100cd0213c7f210010f70')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual(2, len(devices))
def test_discover_sensor_noautoadd(self):
"""Test with discover of sensor when auto add is False."""
config = {'automatic_add': False, 'devices': {}}
devices = []
def add_dev_callback(devs):
"""Add a callback to add devices."""
for dev in devs:
devices.append(dev)
rfxtrx.setup_platform(self.hass, config, add_dev_callback)
event = rfxtrx_core.get_rfx_object('0a520801070100b81b0279')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual(0, len(devices))
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual(0, len(devices))
event = rfxtrx_core.get_rfx_object('0a52080405020095240279')
event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual(0, len(devices))
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual(0, len(devices))
def test_update_of_sensors(self):
"""Test with 3 sensors."""
config = {'devices':
{'sensor_0502': {
'name': 'Test',
'packetid': '0a52080705020095220269',
'data_type': 'Temperature'},
'sensor_0601': {
'name': 'Bath_Humidity',
'packetid': '0a520802060100ff0e0269',
'data_type': 'Humidity'},
'sensor_0601 2': {
'name': 'Bath',
'packetid': '0a520802060100ff0e0269'}}}
devices = []
def add_dev_callback(devs):
"""Add a callback to add devices."""
for dev in devs:
devices.append(dev)
rfxtrx.setup_platform(self.hass, config, add_dev_callback)
self.assertEqual(3, len(devices))
self.assertEqual(3, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for entity in devices:
if entity.name == 'Bath_Humidity':
device_num = device_num + 1
self.assertEqual('%', entity.unit_of_measurement)
self.assertEqual(14, entity.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 25.5,
'Humidity': 14, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
entity.device_state_attributes)
self.assertEqual('Bath_Humidity', entity.__str__())
elif entity.name == 'Bath':
device_num = device_num + 1
self.assertEqual(TEMP_CELCIUS, entity.unit_of_measurement)
self.assertEqual(25.5, entity.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 25.5,
'Humidity': 14, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
entity.device_state_attributes)
self.assertEqual('Bath', entity.__str__())
elif entity.name == 'Test':
device_num = device_num + 1
self.assertEqual(TEMP_CELCIUS, entity.unit_of_measurement)
self.assertEqual(14.9, entity.state)
self.assertEqual({'Humidity status': 'normal',
'Temperature': 14.9,
'Rssi numeric': 6, 'Humidity': 34,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('Test', entity.__str__())
self.assertEqual(3, device_num)
event = rfxtrx_core.get_rfx_object('0a520802060101ff0f0269')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = devices[0]
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
event = rfxtrx_core.get_rfx_object('0a52080705020085220269')
event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(3, len(devices))
self.assertEqual(3, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for entity in devices:
if entity.name == 'Bath_Humidity':
device_num = device_num + 1
self.assertEqual('%', entity.unit_of_measurement)
self.assertEqual(15, entity.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 51.1,
'Humidity': 15, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
entity.device_state_attributes)
self.assertEqual('Bath_Humidity', entity.__str__())
elif entity.name == 'Bath':
device_num = device_num + 1
self.assertEqual(TEMP_CELCIUS, entity.unit_of_measurement)
self.assertEqual(51.1, entity.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 51.1,
'Humidity': 15, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
entity.device_state_attributes)
self.assertEqual('Bath', entity.__str__())
elif entity.name == 'Test':
device_num = device_num + 1
self.assertEqual(TEMP_CELCIUS, entity.unit_of_measurement)
self.assertEqual(13.3, entity.state)
self.assertEqual({'Humidity status': 'normal',
'Temperature': 13.3,
'Rssi numeric': 6, 'Humidity': 34,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('Test', entity.__str__())
self.assertEqual(3, device_num)
self.assertEqual(3, len(devices))
self.assertEqual(3, len(rfxtrx_core.RFX_DEVICES))
|
arbrandes/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/tests/test_video_mongo.py
|
4
|
"""
Video xmodule tests in mongo.
"""
import json
import shutil
from collections import OrderedDict
from tempfile import mkdtemp
from uuid import uuid4
from unittest.mock import MagicMock, Mock, patch
import pytest
import ddt
from django.conf import settings
from django.core.files import File
from django.core.files.base import ContentFile
from django.test import TestCase
from django.test.utils import override_settings
from edxval.api import (
ValCannotCreateError,
ValVideoNotFoundError,
create_or_update_video_transcript,
create_profile,
create_video,
create_video_transcript,
get_video_info,
get_video_transcript,
get_video_transcript_data
)
from edxval.utils import create_file_in_fs
from fs.osfs import OSFS
from fs.path import combine
from lxml import etree
from path import Path as path
from waffle.testutils import override_flag
from lms.djangoapps.courseware.tests.helpers import get_context_dict_from_string
from openedx.core.djangoapps.video_pipeline.config.waffle import DEPRECATE_YOUTUBE, waffle_flags
from openedx.core.djangoapps.waffle_utils.models import WaffleFlagCourseOverrideModel
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.tests.django_utils import TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE
from xmodule.tests.test_import import DummySystem
from xmodule.tests.test_video import VideoBlockTestBase
from xmodule.video_module import VideoBlock, bumper_utils, video_utils
from xmodule.video_module.transcripts_utils import Transcript, save_to_store, subs_filename
from xmodule.video_module.video_module import EXPORT_IMPORT_COURSE_DIR, EXPORT_IMPORT_STATIC_DIR
from xmodule.x_module import PUBLIC_VIEW, STUDENT_VIEW
from .test_video_handlers import BaseTestVideoXBlock, TestVideo
from .test_video_xml import SOURCE_XML
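# Map each modulestore type to the test settings that configure it.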
MODULESTORES = {
ModuleStoreEnum.Type.mongo: TEST_DATA_MONGO_MODULESTORE,
ModuleStoreEnum.Type.split: TEST_DATA_SPLIT_MODULESTORE,
}
TRANSCRIPT_FILE_SRT_DATA = """
1
00:00:14,370 --> 00:00:16,530
I am overwatch.
2
00:00:16,500 --> 00:00:18,600
可以用“我不太懂艺术 但我知道我喜欢什么”做比喻.
"""
TRANSCRIPT_FILE_SJSON_DATA = """{\n "start": [10],\n "end": [100],\n "text": ["Hi, welcome to edxval."]\n}"""
class TestVideoYouTube(TestVideo): # lint-amnesty, pylint: disable=missing-class-docstring, test-inherits-tests
METADATA = {}
def test_video_constructor(self):
"""Make sure that all parameters extracted correctly from xml"""
context = self.item_descriptor.render(STUDENT_VIEW).content
sources = ['example.mp4', 'example.webm']
expected_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': 'A Name',
'download_video_link': 'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(OrderedDict({
'autoAdvance': False,
'saveStateEnabled': True,
'saveStateUrl': self.item_descriptor.ajax_url + '/save_user_state',
'autoplay': False,
'streams': '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg',
'sources': sources,
'duration': None,
'poster': None,
'captionDataDir': None,
'showCaptions': 'true',
'generalSpeed': 1.0,
'speed': None,
'savedVideoPosition': 0.0,
'start': 3603.0,
'end': 3610.0,
'transcriptLanguage': 'en',
'transcriptLanguages': OrderedDict({'en': 'English', 'uk': 'Українська'}),
'ytMetadataEndpoint': '',
'ytTestTimeout': 1500,
'ytApiUrl': 'https://www.youtube.com/iframe_api',
'lmsRootURL': settings.LMS_ROOT_URL,
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'autohideHtml5': False,
'recordedYoutubeIsAvailable': True,
'completionEnabled': False,
'completionPercentage': 0.95,
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'prioritizeHls': False,
})),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
assert get_context_dict_from_string(context) ==\
get_context_dict_from_string(self.item_descriptor.xmodule_runtime.render_template('video.html',
expected_context))
class TestVideoNonYouTube(TestVideo): # pylint: disable=test-inherits-tests
"""Integration tests: web client + mongo."""
DATA = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson"
download_video="true"
start_time="3603.0" end_time="3610.0"
>
<source src="example.mp4"/>
<source src="example.webm"/>
</video>
"""
MODEL_DATA = {
'data': DATA,
}
METADATA = {}
def test_video_constructor(self):
"""Make sure that if the 'youtube' attribute is omitted in XML, then
the template generates an empty string for the YouTube streams.
"""
context = self.item_descriptor.render(STUDENT_VIEW).content
sources = ['example.mp4', 'example.webm']
expected_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': 'A Name',
'download_video_link': 'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(OrderedDict({
'autoAdvance': False,
'saveStateEnabled': True,
'saveStateUrl': self.item_descriptor.ajax_url + '/save_user_state',
'autoplay': False,
'streams': '1.00:3_yD_cEKoCk',
'sources': sources,
'duration': None,
'poster': None,
'captionDataDir': None,
'showCaptions': 'true',
'generalSpeed': 1.0,
'speed': None,
'savedVideoPosition': 0.0,
'start': 3603.0,
'end': 3610.0,
'transcriptLanguage': 'en',
'transcriptLanguages': OrderedDict({'en': 'English'}),
'ytMetadataEndpoint': '',
'ytTestTimeout': 1500,
'ytApiUrl': 'https://www.youtube.com/iframe_api',
'lmsRootURL': settings.LMS_ROOT_URL,
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'autohideHtml5': False,
'recordedYoutubeIsAvailable': True,
'completionEnabled': False,
'completionPercentage': 0.95,
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'prioritizeHls': False,
})),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
expected_result = get_context_dict_from_string(
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
)
assert get_context_dict_from_string(context) == expected_result
assert expected_result['download_video_link'] == 'example.mp4'
assert expected_result['display_name'] == 'A Name'
@ddt.ddt
class TestGetHtmlMethod(BaseTestVideoXBlock):
'''
Make sure that `get_html` works correctly.
'''
maxDiff = None
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def setUp(self):
super().setUp()
self.setup_course()
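# Baseline player metadata shared by these tests; individual cases override fields as needed.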
self.default_metadata_dict = OrderedDict({
'autoAdvance': False,
'saveStateEnabled': True,
'saveStateUrl': '',
'autoplay': settings.FEATURES.get('AUTOPLAY_VIDEOS', True),
'streams': '1.00:3_yD_cEKoCk',
'sources': '[]',
'duration': 111.0,
'poster': None,
'captionDataDir': None,
'showCaptions': 'true',
'generalSpeed': 1.0,
'speed': None,
'savedVideoPosition': 0.0,
'start': 3603.0,
'end': 3610.0,
'transcriptLanguage': 'en',
'transcriptLanguages': OrderedDict({'en': 'English'}),
'ytMetadataEndpoint': '',
'ytTestTimeout': 1500,
'ytApiUrl': 'https://www.youtube.com/iframe_api',
'lmsRootURL': settings.LMS_ROOT_URL,
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'autohideHtml5': False,
'recordedYoutubeIsAvailable': True,
'completionEnabled': False,
'completionPercentage': 0.95,
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'prioritizeHls': False,
})
def get_handler_url(self, handler, suffix):
"""
Return the URL for the specified handler on the block represented by
self.item_descriptor.
"""
return self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, handler, suffix
).rstrip('/?')
def test_get_html_track(self):
# pylint: disable=invalid-name
# lint-amnesty, pylint: disable=redefined-outer-name
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="{sub}" download_track="{download_track}"
start_time="3603.0" end_time="3610.0" download_video="true"
>
<source src="example.mp4"/>
<source src="example.webm"/>
{track}
{transcripts}
</video>
"""
cases = [
{
'download_track': 'true',
'track': '<track src="http://www.example.com/track"/>',
'sub': 'a_sub_file.srt.sjson',
'expected_track_url': 'http://www.example.com/track',
'transcripts': '',
},
{
'download_track': 'true',
'track': '',
'sub': 'a_sub_file.srt.sjson',
'expected_track_url': 'a_sub_file.srt.sjson',
'transcripts': '',
},
{
'download_track': 'true',
'track': '',
'sub': '',
'expected_track_url': None,
'transcripts': '',
},
{
'download_track': 'false',
'track': '<track src="http://www.example.com/track"/>',
'sub': 'a_sub_file.srt.sjson',
'expected_track_url': None,
'transcripts': '',
},
{
'download_track': 'true',
'track': '',
'sub': '',
'expected_track_url': 'a_sub_file.srt.sjson',
'transcripts': '<transcript language="uk" src="ukrainian.srt" />',
},
]
sources = ['example.mp4', 'example.webm']
expected_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': 'A Name',
'download_video_link': 'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': '',
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
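# Render each case and compare the resulting context against the expected template context.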
for data in cases:
metadata = self.default_metadata_dict
metadata['sources'] = sources
metadata['duration'] = None
DATA = SOURCE_XML.format(
download_track=data['download_track'],
track=data['track'],
sub=data['sub'],
transcripts=data['transcripts'],
)
self.initialize_block(data=DATA)
track_url = self.get_handler_url('transcript', 'download')
context = self.item_descriptor.render(STUDENT_VIEW).content
metadata.update({
'transcriptLanguages': {"en": "English"} if not data['transcripts'] else {"uk": 'Українська'},
'transcriptLanguage': 'en' if not data['transcripts'] or data.get('sub') else 'uk',
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'saveStateUrl': self.item_descriptor.ajax_url + '/save_user_state',
})
expected_context.update({
'transcript_download_format': (
None if self.item_descriptor.track and self.item_descriptor.download_track else 'srt'
),
'track': (
track_url if data['expected_track_url'] == 'a_sub_file.srt.sjson' else data['expected_track_url']
),
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(metadata)
})
assert get_context_dict_from_string(context) ==\
get_context_dict_from_string(self.item_descriptor.xmodule_runtime.render_template('video.html',
expected_context))
def test_get_html_source(self):
# lint-amnesty, pylint: disable=invalid-name, redefined-outer-name
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="3603.0" end_time="3610.0"
>
{sources}
</video>
"""
cases = [
# self.download_video == True
{
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'result': {
'download_video_link': 'example.mp4',
'sources': ['example.mp4', 'example.webm'],
},
},
{
'download_video': 'true',
'source': '',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'result': {
'download_video_link': 'example.mp4',
'sources': ['example.mp4', 'example.webm'],
},
},
{
'download_video': 'true',
'source': '',
'sources': [],
'result': {},
},
# self.download_video == False
{
'download_video': 'false',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'result': {
'sources': ['example.mp4', 'example.webm'],
},
},
]
initial_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': 'A Name',
'download_video_link': 'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': self.default_metadata_dict,
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
initial_context['metadata']['duration'] = None
for data in cases:
DATA = SOURCE_XML.format( # lint-amnesty, pylint: disable=invalid-name
download_video=data['download_video'],
source=data['source'],
sources=data['sources']
)
self.initialize_block(data=DATA)
context = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'saveStateUrl': self.item_descriptor.ajax_url + '/save_user_state',
'sources': data['result'].get('sources', []),
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result'].get('download_video_link'),
'metadata': json.dumps(expected_context['metadata'])
})
assert get_context_dict_from_string(context) ==\
get_context_dict_from_string(self.item_descriptor.xmodule_runtime.render_template('video.html',
expected_context))
def test_get_html_with_non_existent_edx_video_id(self):
"""
Tests the VideoBlock get_html where an edx_video_id is given but no video is found
"""
# pylint: disable=invalid-name
# lint-amnesty, pylint: disable=redefined-outer-name
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="3603.0" end_time="3610.0"
edx_video_id="{edx_video_id}"
>
{sources}
</video>
"""
no_video_data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': "meow",
'result': {
'download_video_link': 'example.mp4',
'sources': ['example.mp4', 'example.webm'],
}
}
DATA = SOURCE_XML.format(
download_video=no_video_data['download_video'],
source=no_video_data['source'],
sources=no_video_data['sources'],
edx_video_id=no_video_data['edx_video_id']
)
self.initialize_block(data=DATA)
# Referencing a non-existent VAL ID in courseware won't cause an error --
# it'll just fall back to the values in the VideoBlock.
assert 'example.mp4' in self.item_descriptor.render(STUDENT_VIEW).content
def test_get_html_with_mocked_edx_video_id(self):
# lint-amnesty, pylint: disable=invalid-name, redefined-outer-name
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="3603.0" end_time="3610.0"
edx_video_id="{edx_video_id}"
>
{sources}
</video>
"""
data = {
# test with download_video set to false and make sure download_video_link is not set (is None)
'download_video': 'false',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': "mock item",
'result': {
'download_video_link': None,
# make sure the desktop_mp4 url is included as part of the alternative sources.
'sources': ['example.mp4', 'example.webm', 'http://www.meowmix.com'],
}
}
# Video found for edx_video_id
metadata = self.default_metadata_dict
metadata['autoplay'] = False
metadata['sources'] = ""
initial_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': 'A Name',
'download_video_link': 'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
'metadata': metadata
}
DATA = SOURCE_XML.format( # lint-amnesty, pylint: disable=invalid-name
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id']
)
self.initialize_block(data=DATA)
with patch('edxval.api.get_video_info') as mock_get_video_info:
mock_get_video_info.return_value = {
'url': '/edxval/video/example',
'edx_video_id': 'example',
'duration': 111.0,
'client_video_id': 'The example video',
'encoded_videos': [
{
'url': 'http://www.meowmix.com',
'file_size': 25556,
'bitrate': 9600,
'profile': 'desktop_mp4'
}
]
}
context = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'saveStateUrl': self.item_descriptor.ajax_url + '/save_user_state',
'sources': data['result']['sources'],
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result']['download_video_link'],
'metadata': json.dumps(expected_context['metadata'])
})
assert get_context_dict_from_string(context) ==\
get_context_dict_from_string(self.item_descriptor.xmodule_runtime.render_template('video.html',
expected_context))
def test_get_html_with_existing_edx_video_id(self):
"""
Tests the `VideoBlock` `get_html` where `edx_video_id` is given and the related video is found
"""
edx_video_id = 'thundercats'
# create video with provided edx_video_id and return encoded_videos
encoded_videos = self.encode_and_create_video(edx_video_id)
# data to be used to retrieve video by edxval API
data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': edx_video_id,
'result': {
'download_video_link': f'http://fake-video.edx.org/{edx_video_id}.mp4',
'sources': ['example.mp4', 'example.webm'] + [video['url'] for video in encoded_videos],
},
}
# context returned by get_html when provided with above data
# expected_context, a dict to assert with context
context, expected_context = self.helper_get_html_with_edx_video_id(data)
assert get_context_dict_from_string(context) ==\
get_context_dict_from_string(self.item_descriptor.xmodule_runtime.render_template('video.html',
expected_context))
def test_get_html_with_existing_unstripped_edx_video_id(self):
"""
Tests the `VideoBlock` `get_html` where an `edx_video_id` containing an unwanted tab (\t)
is given and the related video is found
"""
edx_video_id = 'thundercats'
# create video with provided edx_video_id and return encoded_videos
encoded_videos = self.encode_and_create_video(edx_video_id)
# data to be used to retrieve video by edxval API
# unstripped edx_video_id is provided here
data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="example.mp4"/>
<source src="example.webm"/>
""",
'edx_video_id': f"{edx_video_id}\t",
'result': {
'download_video_link': f'http://fake-video.edx.org/{edx_video_id}.mp4',
'sources': ['example.mp4', 'example.webm'] + [video['url'] for video in encoded_videos],
},
}
# context returned by get_html when provided with above data
# expected_context, a dict to assert with context
context, expected_context = self.helper_get_html_with_edx_video_id(data)
assert get_context_dict_from_string(context) ==\
get_context_dict_from_string(self.item_descriptor.xmodule_runtime.render_template('video.html',
expected_context))
def encode_and_create_video(self, edx_video_id):
"""
Create and encode video to be used for tests
"""
encoded_videos = []
for profile, extension in [("desktop_webm", "webm"), ("desktop_mp4", "mp4")]:
create_profile(profile)
encoded_videos.append(
dict(
url=f"http://fake-video.edx.org/{edx_video_id}.{extension}",
file_size=9000,
bitrate=42,
profile=profile,
)
)
result = create_video(
dict(
client_video_id='A Client Video id',
duration=111.0,
edx_video_id=edx_video_id,
status='test',
encoded_videos=encoded_videos,
)
)
assert result == edx_video_id
return encoded_videos
def helper_get_html_with_edx_video_id(self, data):
"""
Create expected context and get actual context returned by `get_html` method.
"""
# make sure the urls for the various encodings are included as part of the alternative sources.
# lint-amnesty, pylint: disable=invalid-name, redefined-outer-name
SOURCE_XML = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
start_time="3603.0" end_time="3610.0"
edx_video_id="{edx_video_id}"
>
{sources}
</video>
"""
# Video found for edx_video_id
metadata = self.default_metadata_dict
metadata['sources'] = ""
initial_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': 'A Name',
'download_video_link': 'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
'metadata': metadata,
}
# pylint: disable=invalid-name
DATA = SOURCE_XML.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id']
)
self.initialize_block(data=DATA)
# context returned by get_html
context = self.item_descriptor.render(STUDENT_VIEW).content
# expected_context, expected context to be returned by get_html
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'saveStateUrl': self.item_descriptor.ajax_url + '/save_user_state',
'sources': data['result']['sources'],
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result']['download_video_link'],
'metadata': json.dumps(expected_context['metadata'])
})
return context, expected_context
# pylint: disable=invalid-name
@patch('xmodule.video_module.video_module.BrandingInfoConfig')
@patch('xmodule.video_module.video_module.rewrite_video_url')
def test_get_html_cdn_source(self, mocked_get_video, mock_BrandingInfoConfig):
"""
Test that video sources are rewritten to their CDN URLs.
"""
mock_BrandingInfoConfig.get_config.return_value = {
"CN": {
'url': 'http://www.xuetangx.com',
'logo_src': 'http://www.xuetangx.com/static/images/logo.png',
'logo_tag': 'Video hosted by XuetangX.com'
}
}
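# Simulate the CDN rewriter: known URLs map to their CDN equivalents, unknown URLs return None.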
def side_effect(*args, **kwargs): # lint-amnesty, pylint: disable=unused-argument
cdn = {
'http://example.com/example.mp4': 'http://cdn-example.com/example.mp4',
'http://example.com/example.webm': 'http://cdn-example.com/example.webm',
}
return cdn.get(args[1])
mocked_get_video.side_effect = side_effect
source_xml = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
edx_video_id="{edx_video_id}"
start_time="3603.0" end_time="3610.0"
>
{sources}
</video>
"""
case_data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="http://example.com/example.mp4"/>
<source src="http://example.com/example.webm"/>
""",
'result': {
'download_video_link': 'http://example.com/example.mp4',
'sources': [
'http://cdn-example.com/example.mp4',
'http://cdn-example.com/example.webm'
],
},
}
# Only videos with a video id should have their URLs rewritten
# based on CDN settings
cases = [
dict(case_data, edx_video_id="vid-v1:12345"),
]
initial_context = {
'autoadvance_enabled': False,
'branding_info': {
'logo_src': 'http://www.xuetangx.com/static/images/logo.png',
'logo_tag': 'Video hosted by XuetangX.com',
'url': 'http://www.xuetangx.com'
},
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': 'A Name',
'download_video_link': None,
'handout': None,
'id': None,
'metadata': self.default_metadata_dict,
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
initial_context['metadata']['duration'] = None
for data in cases:
DATA = source_xml.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id'],
)
self.initialize_block(data=DATA)
self.item_descriptor.xmodule_runtime.user_location = 'CN'
context = self.item_descriptor.render('student_view').content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'saveStateUrl': self.item_descriptor.ajax_url + '/save_user_state',
'sources': data['result'].get('sources', []),
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result'].get('download_video_link'),
'metadata': json.dumps(expected_context['metadata'])
})
assert get_context_dict_from_string(context) ==\
get_context_dict_from_string(self.item_descriptor.xmodule_runtime.render_template('video.html',
expected_context))
# pylint: disable=invalid-name
def test_get_html_cdn_source_external_video(self):
"""
Test that a video from an external source loads successfully.
For a third-party video, which has 'external' status
in VAL, the URL rewrite does not happen and the URL
remains unchanged in the get_html() method.
"""
source_xml = """
<video show_captions="true"
display_name="A Name"
sub="a_sub_file.srt.sjson" source="{source}"
download_video="{download_video}"
edx_video_id="{edx_video_id}"
start_time="3603.0" end_time="3610.0"
>
{sources}
</video>
"""
case_data = {
'download_video': 'true',
'source': 'example_source.mp4',
'sources': """
<source src="http://example.com/example.mp4"/>
""",
'result': {
'download_video_link': 'http://example.com/example.mp4',
'sources': [
'http://example.com/example.mp4',
],
},
}
cases = [
dict(case_data, edx_video_id="vid-v1:12345"),
]
initial_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': 'null',
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': 'A Name',
'download_video_link': None,
'handout': None,
'id': None,
'metadata': self.default_metadata_dict,
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null',
}
initial_context['metadata']['duration'] = None
for data in cases:
DATA = source_xml.format(
download_video=data['download_video'],
source=data['source'],
sources=data['sources'],
edx_video_id=data['edx_video_id'],
)
self.initialize_block(data=DATA)
# Mocking the edxval API call because if not done,
# the method throws exception as no VAL entry is found
# for the corresponding edx-video-id
with patch('edxval.api.get_video_info') as mock_get_video_info:
mock_get_video_info.return_value = {
'url': 'http://example.com/example.mp4',
'edx_video_id': 'vid-v1:12345',
'status': 'external',
'duration': None,
'client_video_id': 'external video',
'encoded_videos': {}
}
context = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = dict(initial_context)
expected_context['metadata'].update({
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'saveStateUrl': self.item_descriptor.ajax_url + '/save_user_state',
'sources': data['result'].get('sources', []),
})
expected_context.update({
'id': self.item_descriptor.location.html_id(),
'download_video_link': data['result'].get('download_video_link'),
'metadata': json.dumps(expected_context['metadata'])
})
assert get_context_dict_from_string(context) ==\
get_context_dict_from_string(self.item_descriptor.xmodule_runtime.render_template('video.html',
expected_context))
@ddt.data(
(True, ['youtube', 'desktop_webm', 'desktop_mp4', 'hls']),
(False, ['youtube', 'desktop_webm', 'desktop_mp4'])
)
@ddt.unpack
def test_get_html_on_toggling_hls_feature(self, hls_feature_enabled, expected_val_profiles):
"""
Verify val profiles on toggling HLS Playback feature.
"""
with patch('xmodule.video_module.video_module.edxval_api.get_urls_for_profiles') as get_urls_for_profiles:
get_urls_for_profiles.return_value = {
'desktop_webm': 'https://webm.com/dw.webm',
'hls': 'https://hls.com/hls.m3u8',
'youtube': 'https://yt.com/?v=v0TFmdO4ZP0',
'desktop_mp4': 'https://mp4.com/dm.mp4'
}
with patch('xmodule.video_module.video_module.HLSPlaybackEnabledFlag.feature_enabled') as feature_enabled:
feature_enabled.return_value = hls_feature_enabled
video_xml = '<video display_name="Video" download_video="true" edx_video_id="12345-67890">[]</video>'
self.initialize_block(data=video_xml)
self.item_descriptor.render(STUDENT_VIEW)
get_urls_for_profiles.assert_called_with(
self.item_descriptor.edx_video_id,
expected_val_profiles,
)
@patch('xmodule.video_module.video_module.HLSPlaybackEnabledFlag.feature_enabled', Mock(return_value=True))
@patch('xmodule.video_module.video_module.edxval_api.get_urls_for_profiles')
def test_get_html_hls(self, get_urls_for_profiles):
"""
Verify that hls profile functionality works as expected.
* HLS source should be added into list of available sources
* HLS source should not be used as the download URL; the mp4 rendition is used instead
"""
video_xml = '<video display_name="Video" download_video="true" edx_video_id="12345-67890">[]</video>'
get_urls_for_profiles.return_value = {
'desktop_webm': 'https://webm.com/dw.webm',
'hls': 'https://hls.com/hls.m3u8',
'youtube': 'https://yt.com/?v=v0TFmdO4ZP0',
'desktop_mp4': 'https://mp4.com/dm.mp4'
}
self.initialize_block(data=video_xml)
context = self.item_descriptor.render(STUDENT_VIEW).content
assert "'download_video_link': 'https://mp4.com/dm.mp4'" in context
assert '"streams": "1.00:https://yt.com/?v=v0TFmdO4ZP0"' in context
assert sorted(['https://webm.com/dw.webm', 'https://mp4.com/dm.mp4', 'https://hls.com/hls.m3u8']) ==\
sorted(get_context_dict_from_string(context)['metadata']['sources'])
def test_get_html_hls_no_video_id(self):
"""
Verify that `download_video_link` is set to None for HLS videos when there is no video id
"""
video_xml = """
<video display_name="Video" download_video="true" source="https://hls.com/hls.m3u8">
["https://hls.com/hls2.m3u8", "https://hls.com/hls3.m3u8"]
</video>
"""
self.initialize_block(data=video_xml)
context = self.item_descriptor.render(STUDENT_VIEW).content
assert "'download_video_link': None" in context
def test_html_student_public_view(self):
"""
Test the student and public views
"""
video_xml = """
<video display_name="Video" download_video="true" source="https://hls.com/hls.m3u8">
["https://hls.com/hls2.m3u8", "https://hls.com/hls3.m3u8"]
</video>
"""
self.initialize_block(data=video_xml)
context = self.item_descriptor.render(STUDENT_VIEW).content
assert '"saveStateEnabled": true' in context
context = self.item_descriptor.render(PUBLIC_VIEW).content
assert '"saveStateEnabled": false' in context
@patch('xmodule.video_module.video_module.edxval_api.get_course_video_image_url')
def test_poster_image(self, get_course_video_image_url):
"""
Verify that poster image functionality works as expected.
"""
video_xml = '<video display_name="Video" download_video="true" edx_video_id="12345-67890">[]</video>'
get_course_video_image_url.return_value = '/media/video-images/poster.png'
self.initialize_block(data=video_xml)
context = self.item_descriptor.render(STUDENT_VIEW).content
assert '"poster": "/media/video-images/poster.png"' in context
@patch('xmodule.video_module.video_module.edxval_api.get_course_video_image_url')
def test_poster_image_without_edx_video_id(self, get_course_video_image_url):
"""
Verify that poster image is set to None and there is no crash when no edx_video_id.
"""
video_xml = '<video display_name="Video" download_video="true" edx_video_id="null">[]</video>'
get_course_video_image_url.return_value = '/media/video-images/poster.png'
self.initialize_block(data=video_xml)
context = self.item_descriptor.render(STUDENT_VIEW).content
assert "'poster': 'null'" in context
@patch('xmodule.video_module.video_module.HLSPlaybackEnabledFlag.feature_enabled', Mock(return_value=False))
def test_hls_primary_playback_on_toggling_hls_feature(self):
"""
Verify that `prioritize_hls` is set to `False` if `HLSPlaybackEnabledFlag` is disabled.
"""
video_xml = '<video display_name="Video" download_video="true" edx_video_id="12345-67890">[]</video>'
self.initialize_block(data=video_xml)
context = self.item_descriptor.render(STUDENT_VIEW).content
assert '"prioritizeHls": false' in context
@ddt.data(
{
'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.on,
'waffle_enabled': False,
'youtube': '3_yD_cEKoCk',
'hls': ['https://hls.com/hls.m3u8'],
'result': 'true'
},
{
'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.on,
'waffle_enabled': False,
'youtube': '',
'hls': ['https://hls.com/hls.m3u8'],
'result': 'false'
},
{
'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.on,
'waffle_enabled': False,
'youtube': '',
'hls': [],
'result': 'false'
},
{
'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.on,
'waffle_enabled': False,
'youtube': '3_yD_cEKoCk',
'hls': [],
'result': 'false'
},
{
'course_override': WaffleFlagCourseOverrideModel.ALL_CHOICES.off,
'waffle_enabled': True,
'youtube': '3_yD_cEKoCk',
'hls': ['https://hls.com/hls.m3u8'],
'result': 'false'
},
)
@patch('xmodule.video_module.video_module.HLSPlaybackEnabledFlag.feature_enabled', Mock(return_value=True))
def test_deprecate_youtube_course_waffle_flag(self, data):
"""
Tests various combinations of a `prioritize_hls` flag being set in waffle and overridden for a course.
"""
metadata = {
'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'] + data['hls'],
}
video_xml = '<video display_name="Video" edx_video_id="12345-67890" youtube_id_1_0="{}">[]</video>'.format(
data['youtube']
)
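# Look up the course waffle flag that controls deprecating YouTube playback in favour of HLS.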
DEPRECATE_YOUTUBE_FLAG = waffle_flags()[DEPRECATE_YOUTUBE]
with patch.object(WaffleFlagCourseOverrideModel, 'override_value', return_value=data['course_override']):
with override_flag(DEPRECATE_YOUTUBE_FLAG.name, active=data['waffle_enabled']):
self.initialize_block(data=video_xml, metadata=metadata)
context = self.item_descriptor.render(STUDENT_VIEW).content
assert '"prioritizeHls": {}'.format(data['result']) in context
@ddt.ddt
class TestVideoBlockInitialization(BaseTestVideoXBlock):
"""
Make sure that module initialization works correctly.
"""
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def setUp(self):
super().setUp()
self.setup_course()
@ddt.data(
(
{
'youtube': 'v0TFmdO4ZP0',
'hls': 'https://hls.com/hls.m3u8',
'desktop_mp4': 'https://mp4.com/dm.mp4',
'desktop_webm': 'https://webm.com/dw.webm',
},
['https://www.youtube.com/watch?v=v0TFmdO4ZP0']
),
(
{
'youtube': None,
'hls': 'https://hls.com/hls.m3u8',
'desktop_mp4': 'https://mp4.com/dm.mp4',
'desktop_webm': 'https://webm.com/dw.webm',
},
['https://www.youtube.com/watch?v=3_yD_cEKoCk']
),
(
{
'youtube': None,
'hls': None,
'desktop_mp4': None,
'desktop_webm': None,
},
['https://www.youtube.com/watch?v=3_yD_cEKoCk']
),
)
@ddt.unpack
@patch('xmodule.video_module.video_module.HLSPlaybackEnabledFlag.feature_enabled', Mock(return_value=True))
def test_val_encoding_in_context(self, val_video_encodings, video_url):
"""
Tests that the val encodings correctly override the video url when the edx video id is set and
one or more encodings are present.
Accepted order of source priority is:
VAL's youtube source > external youtube source > hls > mp4 > webm.
Note that `https://www.youtube.com/watch?v=3_yD_cEKoCk` is the default youtube source with which
a video component is initialized. Current implementation considers this youtube source as a valid
external youtube source.
"""
with patch('xmodule.video_module.video_module.edxval_api.get_urls_for_profiles') as get_urls_for_profiles:
get_urls_for_profiles.return_value = val_video_encodings
self.initialize_block(
data='<video display_name="Video" download_video="true" edx_video_id="12345-67890">[]</video>'
)
context = self.item_descriptor.get_context()
assert context['transcripts_basic_tab_metadata']['video_url']['value'] == video_url
@ddt.data(
(
{
'youtube': None,
'hls': 'https://hls.com/hls.m3u8',
'desktop_mp4': 'https://mp4.com/dm.mp4',
'desktop_webm': 'https://webm.com/dw.webm',
},
['https://hls.com/hls.m3u8']
),
(
{
'youtube': 'v0TFmdO4ZP0',
'hls': 'https://hls.com/hls.m3u8',
'desktop_mp4': None,
'desktop_webm': 'https://webm.com/dw.webm',
},
['https://www.youtube.com/watch?v=v0TFmdO4ZP0']
),
)
@ddt.unpack
@patch('xmodule.video_module.video_module.HLSPlaybackEnabledFlag.feature_enabled', Mock(return_value=True))
def test_val_encoding_in_context_without_external_youtube_source(self, val_video_encodings, video_url):
"""
Tests that the val encodings correctly override the video url when the edx video id is set and
one or more encodings are present. In this scenario no external youtube source is provided.
Accepted order of source priority is:
VAL's youtube source > external youtube source > hls > mp4 > webm.
"""
with patch('xmodule.video_module.video_module.edxval_api.get_urls_for_profiles') as get_urls_for_profiles:
get_urls_for_profiles.return_value = val_video_encodings
# pylint: disable=line-too-long
self.initialize_block(
data='<video display_name="Video" youtube_id_1_0="" download_video="true" edx_video_id="12345-67890">[]</video>'
)
context = self.item_descriptor.get_context()
assert context['transcripts_basic_tab_metadata']['video_url']['value'] == video_url
@ddt.ddt
class TestEditorSavedMethod(BaseTestVideoXBlock):
"""
Make sure that the `editor_saved` method works correctly.
"""
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def setUp(self):
super().setUp()
self.setup_course()
self.metadata = {
'source': 'http://youtu.be/3_yD_cEKoCk',
'html5_sources': ['http://example.org/video.mp4'],
}
# path to subs_3_yD_cEKoCk.srt.sjson file
self.file_name = 'subs_3_yD_cEKoCk.srt.sjson'
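# Walk up from this test module to the repository root to reach the shared upload fixtures.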
self.test_dir = path(__file__).abspath().dirname().dirname().dirname().dirname().dirname()
self.file_path = self.test_dir + '/common/test/data/uploads/' + self.file_name
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_editor_saved_when_html5_sub_not_exist(self, default_store):
"""
When a youtube sub exists but no html5 sub is present for
html5_sources, the editor_saved function generates a new html5 sub
for the video.
"""
self.MODULESTORE = MODULESTORES[default_store] # pylint: disable=invalid-name
self.initialize_block(metadata=self.metadata)
item = self.store.get_item(self.item_descriptor.location)
with open(self.file_path, "rb") as myfile: # lint-amnesty, pylint: disable=bad-option-value, open-builtin
save_to_store(myfile.read(), self.file_name, 'text/sjson', item.location)
item.sub = "3_yD_cEKoCk"
# subs_video.srt.sjson does not exist before calling editor_saved function
with pytest.raises(NotFoundError):
Transcript.get_asset(item.location, 'subs_video.srt.sjson')
old_metadata = own_metadata(item)
# calling editor_saved will generate new file subs_video.srt.sjson for html5_sources
item.editor_saved(self.user, old_metadata, None)
assert isinstance(Transcript.get_asset(item.location, 'subs_3_yD_cEKoCk.srt.sjson'), StaticContent)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_editor_saved_when_youtube_and_html5_subs_exist(self, default_store):
"""
When both the youtube sub and the html5 sub already exist, no new
sub is generated by the editor_saved function.
"""
self.MODULESTORE = MODULESTORES[default_store]
self.initialize_block(metadata=self.metadata)
item = self.store.get_item(self.item_descriptor.location)
with open(self.file_path, "rb") as myfile: # lint-amnesty, pylint: disable=bad-option-value, open-builtin
save_to_store(myfile.read(), self.file_name, 'text/sjson', item.location)
save_to_store(myfile.read(), 'subs_video.srt.sjson', 'text/sjson', item.location)
item.sub = "3_yD_cEKoCk"
# subs_3_yD_cEKoCk.srt.sjson and subs_video.srt.sjson already exist
assert isinstance(Transcript.get_asset(item.location, self.file_name), StaticContent)
assert isinstance(Transcript.get_asset(item.location, 'subs_video.srt.sjson'), StaticContent)
old_metadata = own_metadata(item)
with patch('xmodule.video_module.video_module.manage_video_subtitles_save') as manage_video_subtitles_save:
item.editor_saved(self.user, old_metadata, None)
assert not manage_video_subtitles_save.called
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_editor_saved_with_unstripped_video_id(self, default_store):
"""
Verify editor saved when video id contains spaces/tabs.
"""
self.MODULESTORE = MODULESTORES[default_store]
stripped_video_id = str(uuid4())
unstripped_video_id = '{video_id}{tabs}'.format(video_id=stripped_video_id, tabs='\t\t\t')
self.metadata.update({
'edx_video_id': unstripped_video_id
})
self.initialize_block(metadata=self.metadata)
item = self.store.get_item(self.item_descriptor.location)
assert item.edx_video_id == unstripped_video_id
# Now, modifying and saving the video module should strip the video id.
old_metadata = own_metadata(item)
item.display_name = 'New display name'
item.editor_saved(self.user, old_metadata, None)
assert item.edx_video_id == stripped_video_id
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
@patch('xmodule.video_module.video_module.edxval_api.get_url_for_profile', Mock(return_value='test_yt_id'))
def test_editor_saved_with_yt_val_profile(self, default_store):
"""
Verify that editor_saved overrides `youtube_id_1_0` when a youtube VAL profile exists
for the given `edx_video_id`.
"""
self.MODULESTORE = MODULESTORES[default_store]
self.initialize_block(metadata=self.metadata)
item = self.store.get_item(self.item_descriptor.location)
assert item.youtube_id_1_0 == '3_yD_cEKoCk'
# Now, modify `edx_video_id` and save should override `youtube_id_1_0`.
old_metadata = own_metadata(item)
item.edx_video_id = str(uuid4())
item.editor_saved(self.user, old_metadata, None)
assert item.youtube_id_1_0 == 'test_yt_id'
@ddt.ddt
class TestVideoBlockStudentViewJson(BaseTestVideoXBlock, CacheIsolationTestCase):
"""
Tests for the student_view_data method on VideoBlock.
"""
TEST_DURATION = 111.0
TEST_PROFILE = "mobile"
TEST_SOURCE_URL = "http://www.example.com/source.mp4"
TEST_LANGUAGE = "ge"
TEST_ENCODED_VIDEO = {
'profile': TEST_PROFILE,
'bitrate': 333,
'url': 'http://example.com/video',
'file_size': 222,
}
TEST_EDX_VIDEO_ID = 'test_edx_video_id'
TEST_YOUTUBE_ID = 'test_youtube_id'
TEST_YOUTUBE_EXPECTED_URL = 'https://www.youtube.com/watch?v=test_youtube_id'
def setUp(self):
super().setUp()
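# Build a video block with a YouTube id, one html5 source and a transcript in TEST_LANGUAGE.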
video_declaration = (
"<video display_name='Test Video' edx_video_id='123' youtube_id_1_0=\'" + self.TEST_YOUTUBE_ID + "\'>"
)
sample_xml = ''.join([
video_declaration,
"<source src='", self.TEST_SOURCE_URL, "'/> ",
"<transcript language='", self.TEST_LANGUAGE, "' src='german_translation.srt' /> ",
"</video>"]
)
self.transcript_url = "transcript_url"
self.initialize_block(data=sample_xml)
self.video = self.item_descriptor
self.video.runtime.handler_url = Mock(return_value=self.transcript_url)
self.video.runtime.course_id = MagicMock()
def setup_val_video(self, associate_course_in_val=False):
"""
Creates a video entry in VAL.
Arguments:
associate_course_in_val - If True, associates the test course with the video in VAL.
"""
create_profile('mobile')
create_video({
'edx_video_id': self.TEST_EDX_VIDEO_ID,
'client_video_id': 'test_client_video_id',
'duration': self.TEST_DURATION,
'status': 'dummy',
'encoded_videos': [self.TEST_ENCODED_VIDEO],
'courses': [str(self.video.location.course_key)] if associate_course_in_val else [],
})
self.val_video = get_video_info(self.TEST_EDX_VIDEO_ID) # pylint: disable=attribute-defined-outside-init
def get_result(self, allow_cache_miss=True):
"""
Returns the result from calling the video's student_view_data method.
Arguments:
allow_cache_miss is passed in the context to the student_view_data method.
"""
context = {
"profiles": [self.TEST_PROFILE],
"allow_cache_miss": "True" if allow_cache_miss else "False"
}
return self.video.student_view_data(context)
def verify_result_with_fallback_and_youtube(self, result):
"""
Verifies the result is as expected when returning "fallback" video data (not from VAL).
"""
self.assertDictEqual(
result,
{
"only_on_web": False,
"duration": None,
"transcripts": {self.TEST_LANGUAGE: self.transcript_url},
"encoded_videos": {
"fallback": {"url": self.TEST_SOURCE_URL, "file_size": 0},
"youtube": {"url": self.TEST_YOUTUBE_EXPECTED_URL, "file_size": 0}
},
"all_sources": [self.TEST_SOURCE_URL],
}
)
def verify_result_with_youtube_url(self, result):
"""
Verifies the result is as expected when returning YouTube-only video data (not from VAL).
"""
self.assertDictEqual(
result,
{
"only_on_web": False,
"duration": None,
"transcripts": {self.TEST_LANGUAGE: self.transcript_url},
"encoded_videos": {"youtube": {"url": self.TEST_YOUTUBE_EXPECTED_URL, "file_size": 0}},
"all_sources": [],
}
)
def verify_result_with_val_profile(self, result):
"""
Verifies the result is as expected when returning video data from VAL.
"""
self.assertDictContainsSubset(
result.pop("encoded_videos")[self.TEST_PROFILE],
self.TEST_ENCODED_VIDEO,
)
self.assertDictEqual(
result,
{
"only_on_web": False,
"duration": self.TEST_DURATION,
"transcripts": {self.TEST_LANGUAGE: self.transcript_url},
'all_sources': [self.TEST_SOURCE_URL],
}
)
def test_only_on_web(self):
self.video.only_on_web = True
result = self.get_result()
self.assertDictEqual(result, {"only_on_web": True})
def test_no_edx_video_id(self):
result = self.get_result()
self.verify_result_with_fallback_and_youtube(result)
def test_no_edx_video_id_and_no_fallback(self):
video_declaration = f"<video display_name='Test Video' youtube_id_1_0=\'{self.TEST_YOUTUBE_ID}\'>"
# the video has no source listed, only a youtube link, so no fallback url will be provided
sample_xml = ''.join([
video_declaration,
"<transcript language='", self.TEST_LANGUAGE, "' src='german_translation.srt' /> ",
"</video>"
])
self.transcript_url = "transcript_url"
self.initialize_block(data=sample_xml)
self.video = self.item_descriptor
self.video.runtime.handler_url = Mock(return_value=self.transcript_url)
self.video.runtime.course_id = MagicMock()
result = self.get_result()
self.verify_result_with_youtube_url(result)
@ddt.data(True, False)
def test_with_edx_video_id_video_associated_in_val(self, allow_cache_miss):
"""
Tests retrieving a video that is stored in VAL and associated with a course in VAL.
"""
self.video.edx_video_id = self.TEST_EDX_VIDEO_ID
self.setup_val_video(associate_course_in_val=True)
# the video is associated in VAL, so no cache miss should ever happen; still, test retrieval in both contexts
result = self.get_result(allow_cache_miss)
self.verify_result_with_val_profile(result)
@ddt.data(True, False)
def test_with_edx_video_id_video_unassociated_in_val(self, allow_cache_miss):
"""
Tests retrieving a video that is stored in VAL but not associated with a course in VAL.
"""
self.video.edx_video_id = self.TEST_EDX_VIDEO_ID
self.setup_val_video(associate_course_in_val=False)
result = self.get_result(allow_cache_miss)
if allow_cache_miss:
self.verify_result_with_val_profile(result)
else:
self.verify_result_with_fallback_and_youtube(result)
@ddt.data(True, False)
def test_with_edx_video_id_video_not_in_val(self, allow_cache_miss):
"""
Tests retrieving a video that is not stored in VAL.
"""
self.video.edx_video_id = self.TEST_EDX_VIDEO_ID
# The video is not in VAL so in contexts that do and don't allow cache misses we should always get a fallback
result = self.get_result(allow_cache_miss)
self.verify_result_with_fallback_and_youtube(result)
@ddt.data(
({}, '', [], ['en']),
({}, '', ['de'], ['de']),
({}, '', ['en', 'de'], ['en', 'de']),
({}, 'en-subs', ['de'], ['en', 'de']),
({'uk': 1}, 'en-subs', ['de'], ['en', 'uk', 'de']),
({'uk': 1, 'de': 1}, 'en-subs', ['de', 'en'], ['en', 'uk', 'de']),
)
@ddt.unpack
@patch('xmodule.video_module.transcripts_utils.edxval_api.get_available_transcript_languages')
def test_student_view_with_val_transcripts_enabled(self, transcripts, english_sub, val_transcripts,
expected_transcripts, mock_get_transcript_languages):
"""
Test `student_view_data` with edx-val transcripts enabled.
"""
mock_get_transcript_languages.return_value = val_transcripts
self.video.transcripts = transcripts
self.video.sub = english_sub
student_view_response = self.get_result()
self.assertCountEqual(list(student_view_response['transcripts'].keys()), expected_transcripts)
@ddt.ddt
class VideoBlockTest(TestCase, VideoBlockTestBase):
"""
Tests for the video descriptor that require access to Django settings.
"""
def setUp(self):
super().setUp()
self.descriptor.runtime.handler_url = MagicMock()
self.descriptor.runtime.course_id = MagicMock()
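# Export/import tests below write transcript files into a temporary OLX-style course directory.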
self.temp_dir = mkdtemp()
file_system = OSFS(self.temp_dir)
self.file_system = file_system.makedir(EXPORT_IMPORT_COURSE_DIR, recreate=True)
self.addCleanup(shutil.rmtree, self.temp_dir)
def get_video_transcript_data(self, video_id, language_code='en', file_format='srt', provider='Custom'):
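"""Build the transcript attribute dict used to assert against VAL transcript records."""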
return dict(
video_id=video_id,
language_code=language_code,
provider=provider,
file_format=file_format,
)
def test_get_context(self):
""""
Test get_context.
This test is located here and not in xmodule.tests because get_context calls editable_metadata_fields.
Which, in turn, uses settings.LANGUAGES from django setttings.
"""
correct_tabs = [
{
'name': "Basic",
'template': "video/transcripts.html",
'current': True
},
{
'name': 'Advanced',
'template': 'tabs/metadata-edit-tab.html'
}
]
rendered_context = self.descriptor.get_context()
self.assertListEqual(rendered_context['tabs'], correct_tabs)
# Assert that the Video ID field is present in basic tab metadata context.
assert rendered_context['transcripts_basic_tab_metadata']['edx_video_id'] ==\
self.descriptor.editable_metadata_fields['edx_video_id']
def test_export_val_data_with_internal(self):
"""
Tests that VAL video data is exported as expected.
"""
language_code = 'ar'
transcript_file_name = 'test_edx_video_id-ar.srt'
expected_transcript_path = combine(
combine(self.temp_dir, EXPORT_IMPORT_COURSE_DIR),
combine(EXPORT_IMPORT_STATIC_DIR, transcript_file_name)
)
self.descriptor.edx_video_id = 'test_edx_video_id'
create_profile('mobile')
create_video({
'edx_video_id': self.descriptor.edx_video_id,
'client_video_id': 'test_client_video_id',
'duration': 111.0,
'status': 'dummy',
'encoded_videos': [{
'profile': 'mobile',
'url': 'http://example.com/video',
'file_size': 222,
'bitrate': 333,
}],
})
create_or_update_video_transcript(
video_id=self.descriptor.edx_video_id,
language_code=language_code,
metadata={
'provider': 'Cielo24',
'file_format': 'srt'
},
file_data=ContentFile(TRANSCRIPT_FILE_SRT_DATA)
)
actual = self.descriptor.definition_to_xml(resource_fs=self.file_system)
expected_str = """
<video url_name="SampleProblem" transcripts='{transcripts}'>
<video_asset client_video_id="test_client_video_id" duration="111.0" image="">
<encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/>
<transcripts>
<transcript file_format="srt" language_code="{language_code}" provider="Cielo24"/>
</transcripts>
</video_asset>
<transcript language="{language_code}" src="{transcript_file}"/>
</video>
""".format(
language_code=language_code,
transcript_file=transcript_file_name,
transcripts=json.dumps({language_code: transcript_file_name})
)
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
# Verify transcript file is created.
assert [transcript_file_name] == self.file_system.listdir(EXPORT_IMPORT_STATIC_DIR)
# Also verify the content of created transcript file.
expected_transcript_content = File(open(expected_transcript_path)).read()
transcript = get_video_transcript_data(video_id=self.descriptor.edx_video_id, language_code=language_code)
assert transcript['content'].decode('utf-8') == expected_transcript_content
@ddt.data(
(['en', 'da'], 'test_sub', ''),
(['da'], 'test_sub', 'test_sub')
)
@ddt.unpack
def test_export_val_transcripts_backward_compatibility(self, languages, sub, expected_sub):
"""
Tests new transcripts export for backward compatibility.
"""
self.descriptor.edx_video_id = 'test_video_id'
self.descriptor.sub = sub
# Setup VAL encode profile, video and transcripts
create_profile('mobile')
create_video({
'edx_video_id': self.descriptor.edx_video_id,
'client_video_id': 'test_client_video_id',
'duration': 111.0,
'status': 'dummy',
'encoded_videos': [{
'profile': 'mobile',
'url': 'http://example.com/video',
'file_size': 222,
'bitrate': 333,
}],
})
for language in languages:
create_video_transcript(
video_id=self.descriptor.edx_video_id,
language_code=language,
file_format=Transcript.SRT,
content=ContentFile(TRANSCRIPT_FILE_SRT_DATA)
)
# Export the video module into xml
video_xml = self.descriptor.definition_to_xml(resource_fs=self.file_system)
# Assert `sub` and `transcripts` attribute in the xml
assert video_xml.get('sub') == expected_sub
expected_transcripts = {
language: "{edx_video_id}-{language}.srt".format(
edx_video_id=self.descriptor.edx_video_id,
language=language
)
for language in languages
}
self.assertDictEqual(json.loads(video_xml.get('transcripts')), expected_transcripts)
# Assert transcript content from course OLX
for language in languages:
expected_transcript_path = combine(
combine(self.temp_dir, EXPORT_IMPORT_COURSE_DIR),
combine(EXPORT_IMPORT_STATIC_DIR, expected_transcripts[language])
)
expected_transcript_content = File(open(expected_transcript_path)).read()
transcript = get_video_transcript_data(video_id=self.descriptor.edx_video_id, language_code=language)
assert transcript['content'].decode('utf-8') == expected_transcript_content
def test_export_val_data_not_found(self):
"""
Tests that external video export works as expected.
"""
self.descriptor.edx_video_id = 'nonexistent'
actual = self.descriptor.definition_to_xml(resource_fs=self.file_system)
expected_str = """<video url_name="SampleProblem"/>"""
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
@patch('xmodule.video_module.transcripts_utils.get_video_ids_info')
def test_export_no_video_ids(self, mock_get_video_ids_info):
"""
Tests export when there is no video id; `export_to_xml` only exports VAL data when a video id is present.
"""
mock_get_video_ids_info.return_value = True, []
actual = self.descriptor.definition_to_xml(resource_fs=self.file_system)
expected_str = '<video url_name="SampleProblem"></video>'
parser = etree.XMLParser(remove_blank_text=True)
expected = etree.XML(expected_str, parser=parser)
self.assertXmlEqual(expected, actual)
def test_import_val_data_internal(self):
"""
Test that importing internal VAL data works as expected.
"""
create_profile('mobile')
module_system = DummySystem(load_error_modules=True)
edx_video_id = 'test_edx_video_id'
sub_id = '0CzPOIIdUsA'
external_transcript_name = 'The_Flash.srt'
external_transcript_language_code = 'ur'
val_transcript_language_code = 'ar'
val_transcript_provider = 'Cielo24'
external_transcripts = {
external_transcript_language_code: external_transcript_name
}
# Create static directory in import file system and place transcript files inside it.
module_system.resources_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)
# Create VAL transcript.
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
'test_edx_video_id-ar.srt',
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
# Create self.sub and self.transcripts transcript.
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
subs_filename(sub_id, self.descriptor.transcript_language),
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
external_transcript_name,
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
xml_data = """
<video edx_video_id='{edx_video_id}' sub='{sub_id}' transcripts='{transcripts}'>
<video_asset client_video_id="test_client_video_id" duration="111.0">
<encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/>
<transcripts>
<transcript file_format="srt" language_code="{val_transcript_language_code}" provider="{val_transcript_provider}"/>
</transcripts>
</video_asset>
</video>
""".format(
edx_video_id=edx_video_id,
sub_id=sub_id,
transcripts=json.dumps(external_transcripts),
val_transcript_language_code=val_transcript_language_code,
val_transcript_provider=val_transcript_provider
)
id_generator = Mock()
id_generator.target_course_id = "test_course_id"
video = self.descriptor.from_xml(xml_data, module_system, id_generator)
assert video.edx_video_id == 'test_edx_video_id'
video_data = get_video_info(video.edx_video_id)
assert video_data['client_video_id'] == 'test_client_video_id'
assert video_data['duration'] == 111.0
assert video_data['status'] == 'imported'
assert video_data['courses'] == [{id_generator.target_course_id: None}]
assert video_data['encoded_videos'][0]['profile'] == 'mobile'
assert video_data['encoded_videos'][0]['url'] == 'http://example.com/video'
assert video_data['encoded_videos'][0]['file_size'] == 222
assert video_data['encoded_videos'][0]['bitrate'] == 333
# Verify that VAL transcript is imported.
self.assertDictContainsSubset(
self.get_video_transcript_data(
edx_video_id,
language_code=val_transcript_language_code,
provider=val_transcript_provider
),
get_video_transcript(video.edx_video_id, val_transcript_language_code)
)
# Verify that transcript from sub field is imported.
self.assertDictContainsSubset(
self.get_video_transcript_data(
edx_video_id,
language_code=self.descriptor.transcript_language
),
get_video_transcript(video.edx_video_id, self.descriptor.transcript_language)
)
# Verify that transcript from transcript field is imported.
self.assertDictContainsSubset(
self.get_video_transcript_data(
edx_video_id,
language_code=external_transcript_language_code
),
get_video_transcript(video.edx_video_id, external_transcript_language_code)
)
def test_import_no_video_id(self):
"""
        Test that importing a video with no video id creates a new external video.
"""
xml_data = """<video><video_asset></video_asset></video>"""
module_system = DummySystem(load_error_modules=True)
id_generator = Mock()
# Verify edx_video_id is empty before.
assert self.descriptor.edx_video_id == ''
video = self.descriptor.from_xml(xml_data, module_system, id_generator)
# Verify edx_video_id is populated after the import.
assert video.edx_video_id != ''
video_data = get_video_info(video.edx_video_id)
assert video_data['client_video_id'] == 'External Video'
assert video_data['duration'] == 0.0
assert video_data['status'] == 'external'
def test_import_val_transcript(self):
"""
        Test that importing a video with a VAL transcript creates a new transcript record.
"""
edx_video_id = 'test_edx_video_id'
val_transcript_language_code = 'es'
val_transcript_provider = 'Cielo24'
xml_data = """
<video edx_video_id='{edx_video_id}'>
<video_asset client_video_id="test_client_video_id" duration="111.0">
<transcripts>
<transcript file_format="srt" language_code="{val_transcript_language_code}" provider="{val_transcript_provider}"/>
</transcripts>
</video_asset>
</video>
""".format(
edx_video_id=edx_video_id,
val_transcript_language_code=val_transcript_language_code,
val_transcript_provider=val_transcript_provider
)
module_system = DummySystem(load_error_modules=True)
id_generator = Mock()
# Create static directory in import file system and place transcript files inside it.
module_system.resources_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)
# Create VAL transcript.
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
'test_edx_video_id-es.srt',
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
# Verify edx_video_id is empty before.
assert self.descriptor.edx_video_id == ''
video = self.descriptor.from_xml(xml_data, module_system, id_generator)
# Verify edx_video_id is populated after the import.
assert video.edx_video_id != ''
video_data = get_video_info(video.edx_video_id)
assert video_data['status'] == 'external'
# Verify that VAL transcript is imported.
self.assertDictContainsSubset(
self.get_video_transcript_data(
edx_video_id,
language_code=val_transcript_language_code,
provider=val_transcript_provider
),
get_video_transcript(video.edx_video_id, val_transcript_language_code)
)
@ddt.data(
(
'test_sub_id',
{'en': 'The_Flash.srt'},
'<transcripts><transcript file_format="srt" language_code="en" provider="Cielo24"/></transcripts>',
# VAL transcript takes priority
{
'video_id': 'test_edx_video_id',
'language_code': 'en',
'file_format': 'srt',
'provider': 'Cielo24'
}
),
(
'',
{'en': 'The_Flash.srt'},
'<transcripts><transcript file_format="srt" language_code="en" provider="Cielo24"/></transcripts>',
# VAL transcript takes priority
{
'video_id': 'test_edx_video_id',
'language_code': 'en',
'file_format': 'srt',
'provider': 'Cielo24'
}
),
(
'test_sub_id',
{},
'<transcripts><transcript file_format="srt" language_code="en" provider="Cielo24"/></transcripts>',
# VAL transcript takes priority
{
'video_id': 'test_edx_video_id',
'language_code': 'en',
'file_format': 'srt',
'provider': 'Cielo24'
}
),
(
'test_sub_id',
{'en': 'The_Flash.srt'},
'',
# self.sub transcript takes priority
{
'video_id': 'test_edx_video_id',
'language_code': 'en',
'file_format': 'sjson',
'provider': 'Custom'
}
),
(
'',
{'en': 'The_Flash.srt'},
'',
# self.transcripts would be saved.
{
'video_id': 'test_edx_video_id',
'language_code': 'en',
'file_format': 'srt',
'provider': 'Custom'
}
)
)
@ddt.unpack
def test_import_val_transcript_priority(self, sub_id, external_transcripts, val_transcripts, expected_transcript):
"""
        Test that importing a video with different types of transcripts for the
        same language creates the expected transcript record
        (priority: VAL transcript > self.sub > self.transcripts).
"""
edx_video_id = 'test_edx_video_id'
language_code = 'en'
module_system = DummySystem(load_error_modules=True)
id_generator = Mock()
# Create static directory in import file system and place transcript files inside it.
module_system.resources_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)
xml_data = "<video edx_video_id='test_edx_video_id'"
# Prepare self.sub transcript data.
if sub_id:
create_file_in_fs(
TRANSCRIPT_FILE_SJSON_DATA,
subs_filename(sub_id, language_code),
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
xml_data += " sub='{sub_id}'".format(
sub_id=sub_id
)
        # Prepare self.transcripts data.
if external_transcripts:
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
external_transcripts['en'],
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
xml_data += " transcripts='{transcripts}'".format(
transcripts=json.dumps(external_transcripts),
)
xml_data += '><video_asset client_video_id="test_client_video_id" duration="111.0">'
# Prepare VAL transcripts data.
if val_transcripts:
create_file_in_fs(
TRANSCRIPT_FILE_SRT_DATA,
'{edx_video_id}-{language_code}.srt'.format(
edx_video_id=edx_video_id,
language_code=language_code
),
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR
)
xml_data += val_transcripts
xml_data += '</video_asset></video>'
# Verify edx_video_id is empty before import.
assert self.descriptor.edx_video_id == ''
video = self.descriptor.from_xml(xml_data, module_system, id_generator)
# Verify edx_video_id is not empty after import.
assert video.edx_video_id != ''
video_data = get_video_info(video.edx_video_id)
assert video_data['status'] == 'external'
# Verify that correct transcripts are imported.
self.assertDictContainsSubset(
expected_transcript,
get_video_transcript(video.edx_video_id, language_code)
)
def test_import_val_data_invalid(self):
create_profile('mobile')
module_system = DummySystem(load_error_modules=True)
# Negative file_size is invalid
xml_data = """
<video edx_video_id="test_edx_video_id">
<video_asset client_video_id="test_client_video_id" duration="111.0">
<encoded_video profile="mobile" url="http://example.com/video" file_size="-222" bitrate="333"/>
</video_asset>
</video>
"""
with pytest.raises(ValCannotCreateError):
VideoBlock.from_xml(xml_data, module_system, id_generator=Mock())
with pytest.raises(ValVideoNotFoundError):
get_video_info("test_edx_video_id")
class TestVideoWithBumper(TestVideo): # pylint: disable=test-inherits-tests
"""
Tests rendered content in presence of video bumper.
"""
CATEGORY = "video"
METADATA = {}
# Use temporary FEATURES in this test without affecting the original
FEATURES = dict(settings.FEATURES)
@patch('xmodule.video_module.bumper_utils.get_bumper_settings')
def test_is_bumper_enabled(self, get_bumper_settings):
"""
        Check that the bumper is shown when ENABLE_VIDEO_BUMPER is True and not shown when it is False.
Assume that bumper settings are correct.
"""
self.FEATURES.update({
"SHOW_BUMPER_PERIODICITY": 1,
"ENABLE_VIDEO_BUMPER": True,
})
get_bumper_settings.return_value = {
"video_id": "edx_video_id",
"transcripts": {},
}
with override_settings(FEATURES=self.FEATURES):
assert bumper_utils.is_bumper_enabled(self.item_descriptor)
self.FEATURES.update({"ENABLE_VIDEO_BUMPER": False})
with override_settings(FEATURES=self.FEATURES):
assert not bumper_utils.is_bumper_enabled(self.item_descriptor)
@patch('xmodule.video_module.bumper_utils.is_bumper_enabled')
@patch('xmodule.video_module.bumper_utils.get_bumper_settings')
@patch('edxval.api.get_urls_for_profiles')
def test_bumper_metadata(self, get_url_for_profiles, get_bumper_settings, is_bumper_enabled):
"""
Test content with rendered bumper metadata.
"""
get_url_for_profiles.return_value = {
'desktop_mp4': 'http://test_bumper.mp4',
'desktop_webm': '',
}
get_bumper_settings.return_value = {
'video_id': 'edx_video_id',
'transcripts': {},
}
is_bumper_enabled.return_value = True
content = self.item_descriptor.render(STUDENT_VIEW).content
sources = ['example.mp4', 'example.webm']
expected_context = {
'autoadvance_enabled': False,
'branding_info': None,
'license': None,
'bumper_metadata': json.dumps(OrderedDict({
'saveStateUrl': self.item_descriptor.ajax_url + '/save_user_state',
'showCaptions': 'true',
'sources': ['http://test_bumper.mp4'],
'streams': '',
'transcriptLanguage': 'en',
'transcriptLanguages': {'en': 'English'},
'transcriptTranslationUrl': video_utils.set_query_parameter(
self.get_handler_url('transcript', 'translation/__lang__'), 'is_bumper', 1
),
'transcriptAvailableTranslationsUrl': video_utils.set_query_parameter(
self.get_handler_url('transcript', 'available_translations'), 'is_bumper', 1
),
"publishCompletionUrl": video_utils.set_query_parameter(
self.get_handler_url('publish_completion', ''), 'is_bumper', 1
),
})),
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': 'A Name',
'download_video_link': 'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'metadata': json.dumps(OrderedDict({
'autoAdvance': False,
'saveStateEnabled': True,
'saveStateUrl': self.item_descriptor.ajax_url + '/save_user_state',
'autoplay': False,
'streams': '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg',
'sources': sources,
'poster': None,
'duration': None,
'captionDataDir': None,
'showCaptions': 'true',
'generalSpeed': 1.0,
'speed': None,
'savedVideoPosition': 0.0,
'start': 3603.0,
'end': 3610.0,
'transcriptLanguage': 'en',
'transcriptLanguages': OrderedDict({'en': 'English', 'uk': 'Українська'}),
'ytMetadataEndpoint': '',
'ytTestTimeout': 1500,
'ytApiUrl': 'https://www.youtube.com/iframe_api',
'lmsRootURL': settings.LMS_ROOT_URL,
'transcriptTranslationUrl': self.get_handler_url('transcript', 'translation/__lang__'),
'transcriptAvailableTranslationsUrl': self.get_handler_url('transcript', 'available_translations'),
'autohideHtml5': False,
'recordedYoutubeIsAvailable': True,
'completionEnabled': False,
'completionPercentage': 0.95,
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'prioritizeHls': False,
})),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': json.dumps(OrderedDict({
'url': 'http://img.youtube.com/vi/ZwkTiUPN0mg/0.jpg',
'type': 'youtube'
}))
}
expected_content = self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
assert get_context_dict_from_string(content) == get_context_dict_from_string(expected_content)
@ddt.ddt
class TestAutoAdvanceVideo(TestVideo): # lint-amnesty, pylint: disable=test-inherits-tests
"""
Tests the server side of video auto-advance.
"""
maxDiff = None
CATEGORY = "video"
METADATA = {}
# Use temporary FEATURES in this test without affecting the original
FEATURES = dict(settings.FEATURES)
def prepare_expected_context(self, autoadvanceenabled_flag, autoadvance_flag):
"""
Build a dictionary with data expected by some operations in this test.
        Only the parameters related to auto-advance vary; the rest is fixed.
"""
context = {
'autoadvance_enabled': autoadvanceenabled_flag,
'branding_info': None,
'license': None,
'cdn_eval': False,
'cdn_exp_group': None,
'display_name': 'A Name',
'download_video_link': 'example.mp4',
'handout': None,
'id': self.item_descriptor.location.html_id(),
'bumper_metadata': 'null',
'metadata': json.dumps(OrderedDict({
'autoAdvance': autoadvance_flag,
'saveStateEnabled': True,
'saveStateUrl': self.item_descriptor.ajax_url + '/save_user_state',
'autoplay': False,
'streams': '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg',
'sources': ['example.mp4', 'example.webm'],
'duration': None,
'poster': None,
'captionDataDir': None,
'showCaptions': 'true',
'generalSpeed': 1.0,
'speed': None,
'savedVideoPosition': 0.0,
'start': 3603.0,
'end': 3610.0,
'transcriptLanguage': 'en',
'transcriptLanguages': OrderedDict({'en': 'English', 'uk': 'Українська'}),
'ytMetadataEndpoint': '',
'ytTestTimeout': 1500,
'ytApiUrl': 'https://www.youtube.com/iframe_api',
'lmsRootURL': settings.LMS_ROOT_URL,
'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'translation/__lang__'
).rstrip('/?'),
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url(
self.item_descriptor, 'transcript', 'available_translations'
).rstrip('/?'),
'autohideHtml5': False,
'recordedYoutubeIsAvailable': True,
'completionEnabled': False,
'completionPercentage': 0.95,
'publishCompletionUrl': self.get_handler_url('publish_completion', ''),
'prioritizeHls': False,
})),
'track': None,
'transcript_download_format': 'srt',
'transcript_download_formats_list': [
{'display_name': 'SubRip (.srt) file', 'value': 'srt'},
{'display_name': 'Text (.txt) file', 'value': 'txt'}
],
'poster': 'null'
}
return context
def assert_content_matches_expectations(self, autoadvanceenabled_must_be, autoadvance_must_be):
"""
Check (assert) that loading video.html produces content that corresponds
to the passed context.
Helper function to avoid code repetition.
"""
with override_settings(FEATURES=self.FEATURES):
content = self.item_descriptor.render(STUDENT_VIEW).content
expected_context = self.prepare_expected_context(
autoadvanceenabled_flag=autoadvanceenabled_must_be,
autoadvance_flag=autoadvance_must_be,
)
with override_settings(FEATURES=self.FEATURES):
expected_content = self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context)
assert get_context_dict_from_string(content) == get_context_dict_from_string(expected_content)
def change_course_setting_autoadvance(self, new_value):
"""
Change the .video_auto_advance course setting (a.k.a. advanced setting).
This avoids doing .save(), and instead modifies the instance directly.
Based on test code for video_bumper setting.
"""
# This first render is done to initialize the instance
self.item_descriptor.render(STUDENT_VIEW)
self.item_descriptor.video_auto_advance = new_value
self.item_descriptor._reset_dirty_field(self.item_descriptor.fields['video_auto_advance']) # pylint: disable=protected-access
# After this step, render() should see the new value
# e.g. use self.item_descriptor.render(STUDENT_VIEW).content
@ddt.data(
(False, False),
(False, True),
(True, False),
(True, True),
)
@ddt.unpack
def test_is_autoadvance_available_and_enabled(self, global_setting, course_setting):
"""
        Check that auto-advance is unavailable when it is disabled via the feature
        flag (ENABLE_AUTOADVANCE_VIDEOS set to False) or by the course setting.
        It checks that:
        - the controls are visible only when the feature flag and the course
          setting are both True
        - when the controls are visible, the video auto-advances (because that
          is the default); in all other cases it does not
"""
self.FEATURES.update({"ENABLE_AUTOADVANCE_VIDEOS": global_setting})
self.change_course_setting_autoadvance(new_value=course_setting)
self.assert_content_matches_expectations(
autoadvanceenabled_must_be=(global_setting and course_setting),
autoadvance_must_be=(global_setting and course_setting),
)
|
sgoodm/python-distance-rasters
|
refs/heads/master
|
examples/cdist_example.py
|
1
|
import os
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
from distancerasters import build_distance_array, rasterize, export_raster
from distancerasters import distance
from distancerasters.utils import convert_index_to_coords, calc_haversine_distance
import fiona
import numpy as np
import math
# -------------------------------------
shp_path = "{0}/data/line_test.shp".format(base)
out_name = "line_test"
# -------------------------------------
rasterized_feature_output_path = "{0}/data/{1}_binary.tif".format(base, out_name)
output_raster_path = "{0}/data/{1}_distance.tif".format(base, out_name)
# 0.1 is probably too coarse for quality
# 0.001 might be more than we need for quality
# test with central america rivers @ 30s res
# run time for rasterization was reasonable
# distance processes may be too slow at this fine scale though
# testing with 0.01 for now
pixel_size = 0.01
shp = fiona.open(shp_path, "r")
rv_array, affine = rasterize(shp, pixel_size=pixel_size, bounds=shp.bounds,
output=rasterized_feature_output_path)
# rv_array = fake_rasterize()
# max distance in cells
# for actual distance: max_dist * pixel_size
max_dist = 100
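# e.g. with pixel_size = 0.01 and max_dist = 100 the search window
# extends 100 * 0.01 = 1 decimal degree (roughly 111 km at the equator)
# in each direction from the current cell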
nrows, ncols = rv_array.shape
# print rv_array
print rv_array.shape
print nrows * ncols
# raise
z = np.empty(rv_array.shape, dtype=float)
# -----------------------------------------------------------------------------
import time
t_start = time.time()
# row_dur = 0
# row_count = 0
t1 = 0
t1c = 0
t111 = 0
t111c = 0
t2 = 0
t2c = 0
t22 = 0
t22c = 0
t3 = 0
t3c = 0
for r in range(nrows):
# for r in range(1000, 1100):
    # start the chunk timer at the beginning of every 50-row block; the
    # original condition "r+1 % 50 == 0" parsed as "r + (1 % 50) == 0"
    # and so never fired
    if r % 50 == 0:
        trow_start = time.time()
for c in range(ncols):
# for c in range(1000, 1100):
cur_index = (r, c)
# print "Current index (r, c): {0}".format(cur_index)
# print "Current coords (lon, lat): {0}".format(
# convert_index_to_coords(cur_index, affine))
t1s = time.time()
rmin = r - max_dist if r >= max_dist else 0
rmax = r + max_dist if r <= nrows - max_dist else nrows
cmin = c - max_dist if c >= max_dist else 0
cmax = c + max_dist if c <= ncols - max_dist else ncols
sub_raster = rv_array[rmin:rmax, cmin:cmax]
t1 += time.time() - t1s
t1c += 1
# print "\trmin: {0}, rmax: {1}, cmin: {2}, cmax: {3}, ".format(
# rmin, rmax, cmin, cmax)
# print sub_raster
t111s = time.time()
line_indexes = np.array(np.nonzero(sub_raster)).T
t111 += time.time() - t111s
t111c += 1
# print len(line_indexes)
# print line_indexes
if len(line_indexes) == 0:
# print "\tOut of range"
z[r][c] = -1
continue
t2s = time.time()
# convert current index to sub index range
sub_cur_index = (cur_index[0]-rmin, cur_index[1]-cmin)
dist_list = distance.cdist([sub_cur_index], line_indexes)[0]
min_dist = min(dist_list)
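        # distance.cdist returns the pairwise Euclidean distances in
        # index (cell) units, e.g. cdist([[0, 0]], [[3, 4]])[0] -> [5.0],
        # so min_dist here is measured in cells, not meters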
t2 += time.time() - t2s
t2c += 1
t22s = time.time()
dist_list_index = np.where(dist_list == min_dist)
# # handle multiple min matches
# for i in dist_list_index:
# print line_indexes[i]
# just take first if there are multiple min matches
sub_min_index = line_indexes[dist_list_index[0][0]]
# convert min_index from sub_raster to
# main raster index
min_index = (sub_min_index[0] + rmin,
sub_min_index[1] + cmin)
t22 += time.time() - t22s
t22c += 1
# print "\tsub_cur_index: {0}".format(sub_cur_index)
# print "\tMin coords (lon, lat): {0}".format(
# convert_index_to_coords(min_index, affine))
# print "\tsub_min_index: {0}".format(sub_min_index)
# print dist_list
# print "\tmin_dist: {0}".format(min_dist)
# print dist_list_index
# print "\tmin_index: {0}".format(min_index)
t3s = time.time()
        if cur_index[1] == min_index[1]:
            # columns are the same, so the nearest cell is either
            # directly north/south of the current cell or the cell
            # itself; no latitude correction is needed, just convert
            # decimal degrees to meters (~111.321 km per degree)
            dd_min_dist = min_dist * pixel_size
            m_min_dist = dd_min_dist * 111.321 * 10**3
# print "\tdd_min_dist: {0}".format(dd_min_dist)
# print "\tm_min_dist: {0}".format(m_min_dist)
val = m_min_dist
else:
val = calc_haversine_distance(
convert_index_to_coords(cur_index, affine),
convert_index_to_coords(min_index, affine)
) * 1000
t3 += time.time() - t3s
t3c += 1
# print "\tval: {0}".format(val)
z[r][c] = val
# raise
    if (r + 1) % 50 == 0:
        print "Row {0}-{1}/{2} ran in {3} seconds".format(r+1-50, r+1, nrows, time.time() - trow_start)
# row_dur += time.time() - trow_start
# row_count += 1
# if r == 200:
# print "Run time: {0} seconds for {1} rows ({2}s avg)".format(row_dur, row_count, row_dur/row_count)
# print "t1 total: {0}, count: {1}, avg: {2}".format(t1, t1c, t1/t1c)
# print "t11 total: {0}, count: {1}, avg: {2}".format(t11, t11c, t11/t11c)
# print "t111 total: {0}, count: {1}, avg: {2}".format(t111, t111c, t111/t111c)
# print "t2 total: {0}, count: {1}, avg: {2}".format(t2, t2c, t2/t2c)
# print "t22 total: {0}, count: {1}, avg: {2}".format(t22, t22c, t22/t22c)
# print "t3 total: {0}, count: {1}, avg: {2}".format(t3, t3c, t3/t3c)
# raise
# print rv_array
# print z
print rv_array.shape
print nrows * ncols
print "t1 total: {0}, count: {1}, avg: {2}".format(t1, t1c, t1/t1c)
print "t111 total: {0}, count: {1}, avg: {2}".format(t111, t111c, t111/t111c)
print "t2 total: {0}, count: {1}, avg: {2}".format(t2, t2c, t2/t2c)
print "t22 total: {0}, count: {1}, avg: {2}".format(t22, t22c, t22/t22c)
print "t3 total: {0}, count: {1}, avg: {2}".format(t3, t3c, t3/t3c)
dur = time.time() - t_start
print "Run time: {0} seconds".format(round(dur, 2))
export_raster(z, affine, output_raster_path)
|
kzganesan/you-get
|
refs/heads/develop
|
src/you_get/extractor/soundcloud.py
|
6
|
#!/usr/bin/env python
__all__ = ['soundcloud_download', 'soundcloud_download_by_id']
from ..common import *
def soundcloud_download_by_id(id, title = None, output_dir = '.', merge = True, info_only = False):
assert title
#if info["downloadable"]:
# url = 'https://api.soundcloud.com/tracks/' + id + '/download?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
url = 'https://api.soundcloud.com/tracks/' + id + '/stream?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
assert url
type, ext, size = url_info(url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge = merge)
def soundcloud_download(url, output_dir = '.', merge = True, info_only = False):
metadata = get_html('https://api.sndcdn.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28')
import json
info = json.loads(metadata)
title = info["title"]
id = str(info["id"])
soundcloud_download_by_id(id, title, output_dir, merge = merge, info_only = info_only)
site_info = "SoundCloud.com"
download = soundcloud_download
download_playlist = playlist_not_supported('soundcloud')
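# Example usage (hypothetical track URL; the client_id is the one
# hardcoded above):
#   soundcloud_download('https://soundcloud.com/some-artist/some-track',
#                       output_dir='.', info_only=True)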
|
spaceone/pyjs
|
refs/heads/master
|
examples/showcase/src/demos_widgets/namedFrame.py
|
12
|
"""
The ``ui.NamedFrame`` class is a variation of the ``ui.Frame`` which lets you
assign a name to the frame. Naming a frame allows you to refer to that frame
by name in JavaScript code, and as the target for a hyperlink.
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.NamedFrame import NamedFrame
from pyjamas.ui.HTML import HTML
class NamedFrameDemo(SimplePanel):
def __init__(self):
SimplePanel.__init__(self)
vPanel = VerticalPanel(Spacing=5)
frame = NamedFrame("myFrame",
Width="100%",
Height="200px")
vPanel.add(frame)
vPanel.add(HTML('<a href="http://google.com" target="myFrame">Google</a>'))
vPanel.add(HTML('<a href="http://yahoo.com" target="myFrame">Yahoo</a>'))
vPanel.add(HTML('<a href="http://pyjs.org" target="myFrame">Pyjamas</a>'))
self.add(vPanel)
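# A named frame can also be driven from JavaScript, e.g.:
#   window.frames["myFrame"].location = "http://pyjs.org"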
|
willingc/oh-mainline
|
refs/heads/master
|
vendor/packages/sphinx/sphinx/builders/manpage.py
|
16
|
# -*- coding: utf-8 -*-
"""
sphinx.builders.manpage
~~~~~~~~~~~~~~~~~~~~~~~
Manual pages builder.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from os import path
from docutils.io import FileOutput
from docutils.frontend import OptionParser
from sphinx import addnodes
from sphinx.errors import SphinxError
from sphinx.builders import Builder
from sphinx.environment import NoUri
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.console import bold, darkgreen
from sphinx.writers.manpage import ManualPageWriter, has_manpage_writer
class ManualPageBuilder(Builder):
"""
Builds groff output in manual page format.
"""
name = 'man'
format = 'man'
supported_image_types = []
def init(self):
if not has_manpage_writer:
raise SphinxError('The docutils manual page writer can\'t be '
'found; it is only available as of docutils 0.6.')
if not self.config.man_pages:
self.warn('no "man_pages" config value found; no manual pages '
'will be written')
def get_outdated_docs(self):
return 'all manpages' # for now
def get_target_uri(self, docname, typ=None):
if typ == 'token':
return ''
raise NoUri
def write(self, *ignored):
docwriter = ManualPageWriter(self)
docsettings = OptionParser(
defaults=self.env.settings,
components=(docwriter,),
read_config_files=True).get_default_values()
self.info(bold('writing... '), nonl=True)
for info in self.config.man_pages:
docname, name, description, authors, section = info
if isinstance(authors, basestring):
if authors:
authors = [authors]
else:
authors = []
targetname = '%s.%s' % (name, section)
self.info(darkgreen(targetname) + ' { ', nonl=True)
destination = FileOutput(
destination_path=path.join(self.outdir, targetname),
encoding='utf-8')
tree = self.env.get_doctree(docname)
docnames = set()
largetree = inline_all_toctrees(self, docnames, docname, tree,
darkgreen)
self.info('} ', nonl=True)
self.env.resolve_references(largetree, docname, self)
# remove pending_xref nodes
for pendingnode in largetree.traverse(addnodes.pending_xref):
pendingnode.replace_self(pendingnode.children)
largetree.settings = docsettings
largetree.settings.title = name
largetree.settings.subtitle = description
largetree.settings.authors = authors
largetree.settings.section = section
docwriter.write(largetree, destination)
self.info()
def finish(self):
pass
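# A typical conf.py entry consumed by this builder looks like the
# following (illustrative values only):
#
#   man_pages = [
#       ('index', 'myproject', u'My Project Documentation',
#        [u'Author Name'], 1),
#   ]
#
# i.e. (source start file, page name, description, authors, manual
# section), matching the unpacking of `info` in write() above.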
|
dbarbier/ot-svn
|
refs/heads/master
|
python/test/t_Waarts_concave.py
|
2
|
#! /usr/bin/env python
from openturns import *
#from math import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
#
# Physical model
#
EtatLimite = NumericalMathFunction(
['X1', 'X2'], ['G'], ["-0.5*(X1-X2)*(X1-X2) - (X1+X2)/(sqrt(2)) + 3"])
dim = EtatLimite.getInputDimension()
print dim
#
# probabilistic model
#
mean = [0.] * dim
sigma = [1.] * dim
R = IdentityMatrix(dim)
myDistribution = Normal(mean, sigma, R)
start = myDistribution.getMean()
Covariance = myDistribution.getCovariance()
#
# limit state
#
vect = RandomVector(myDistribution)
output = RandomVector(EtatLimite, vect)
myEvent = Event(output, Less(), 0.0)
#
# Calculation
#
#
# FORM/SORM Cobyla
myCobyla = Cobyla()
myCobyla.setSpecificParameters(CobylaSpecificParameters())
myCobyla.setMaximumIterationsNumber(100)
myCobyla.setMaximumAbsoluteError(1.0e-10)
myCobyla.setMaximumRelativeError(1.0e-10)
myCobyla.setMaximumResidualError(1.0e-10)
myCobyla.setMaximumConstraintError(1.0e-10)
myAlgoC = FORM(myCobyla, myEvent, start)
myAlgoC2 = SORM(myCobyla, myEvent, start)
myAlgoC.run()
myAlgoC2.run()
resultC = FORMResult(myAlgoC.getResult())
resultC2 = SORMResult(myAlgoC2.getResult())
#
# FORM/SORM Abdo Rackwitz
myAbdoRackwitz = AbdoRackwitz()
myAbdoRackwitz.setSpecificParameters(AbdoRackwitzSpecificParameters())
myAbdoRackwitz.setMaximumIterationsNumber(100)
myAbdoRackwitz.setMaximumAbsoluteError(1.0e-10)
myAbdoRackwitz.setMaximumRelativeError(1.0e-10)
myAbdoRackwitz.setMaximumResidualError(1.0e-10)
myAbdoRackwitz.setMaximumConstraintError(1.0e-10)
myAlgoAR = FORM(myAbdoRackwitz, myEvent, start)
myAlgoAR2 = SORM(myAbdoRackwitz, myEvent, start)
myAlgoAR.run()
# myAlgoAR2.run()
resultAR = FORMResult(myAlgoAR.getResult())
#resultAR2 = SORMResult(myAlgoAR2.getResult())
#
# Monte Carlo
CoV_MC = 0.5
myMC = MonteCarlo(myEvent)
myMC.setMaximumOuterSampling(100000)
myMC.setBlockSize(1)
myMC.setMaximumCoefficientOfVariation(CoV_MC)
myMC.run()
#
# LHS
CoV_LHS = 0.1
myLHS = LHS(myEvent)
myLHS.setMaximumOuterSampling(100000)
myLHS.setBlockSize(1)
myLHS.setMaximumCoefficientOfVariation(CoV_LHS)
myLHS.run()
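    #
    # Results extraction
    #
    # NOTE: the prints below reference PfC, u_starC, CurvC2, PFMC, etc.,
    # which are never assigned in this listing; the extraction block
    # appears to be missing. A minimal sketch of the FORM / Monte Carlo
    # part, assuming the classic OpenTURNS result API, would be:
    #
    #   PfC = resultC.getEventProbability()
    #   Beta_generalizedC = resultC.getGeneralisedReliabilityIndex()
    #   u_starC = resultC.getStandardSpaceDesignPoint()
    #   x_starC = resultC.getPhysicalSpaceDesignPoint()
    #   PtC = resultC.getIsStandardPointOriginInFailureSpace()
    #   gammaC = resultC.getImportanceFactors()
    #   beta_hasoferC = resultC.getHasoferReliabilityIndex()
    #   SensitivityC = resultC.getEventProbabilitySensitivity()
    #   (the AR / C2 / AR2 variables follow the same pattern from
    #   resultAR and resultC2, e.g.
    #   PFBreitC2 = resultC2.getEventProbabilityBreitung())
    #
    #   ResultMC = myMC.getResult()
    #   PFMC = ResultMC.getProbabilityEstimate()
    #   Variance_PF_MC = ResultMC.getVarianceEstimate()
    #   CVMC = ResultMC.getCoefficientOfVariation()
    #   length90MC = ResultMC.getConfidenceLength(0.90)
    #   (and likewise for myLHS)
    #
    # cleanScalar, used below on the SORM curvatures, is a small helper
    # from the original test suite that rounds near-zero values to 0.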
#
# Outputs
#
print ""
print ""
print "************************************************************************************************"
print "***************************************** FORM COBYLA *****************************************"
print "************************************************************************************************"
print "event probability = %.5e" % PfC
print "generalized reliability index = %.5f" % Beta_generalizedC
print "************************************************************************************************"
for i in range(u_starC.getDimension()):
print "standard space design point = %.5f" % u_starC[i]
print "************************************************************************************************"
for i in range(x_starC.getDimension()):
print "physical space design point = %.5f" % x_starC[i]
print "************************************************************************************************"
print "is standard point origin in failure space? ", PtC
print "************************************************************************************************"
for i in range(gammaC.getDimension()):
print "importance factors = %.5f" % gammaC[i]
print "************************************************************************************************"
print "Hasofer reliability index = %.5f" % beta_hasoferC
print "************************************************************************************************"
for i in range(SensitivityC.getSize()):
for j in range(SensitivityC[i].getDimension()):
print "Pf sensitivity = %.5f" % SensitivityC[i][j]
print "************************************************************************************************"
print ""
print "************************************************************************************************"
print "************************************** FORM ABDO RACKWITZ **************************************"
print "************************************************************************************************"
print "event probability = %.5e" % PfAR
print "generalized reliability index = %.5f" % Beta_generalizedAR
print "************************************************************************************************"
for i in range(u_starAR.getDimension()):
print "standard space design point = %.5f" % u_starAR[i]
print "************************************************************************************************"
for i in range(x_starAR.getDimension()):
print "physical space design point = %.5f" % x_starAR[i]
print "************************************************************************************************"
print "is standard point origin in failure space? ", PtAR
print "************************************************************************************************"
for i in range(gammaAR.getDimension()):
print "importance factors = %.5f" % gammaAR[i]
print "************************************************************************************************"
print "Hasofer reliability index = %.5f" % beta_hasoferAR
print "************************************************************************************************"
for i in range(SensitivityAR.getSize()):
for j in range(SensitivityAR[i].getDimension()):
print "Pf sensitivity = %.5f" % SensitivityAR[i][j]
print "************************************************************************************************"
print ""
print "************************************************************************************************"
print "***************************************** SORM COBYLA *****************************************"
print "************************************************************************************************"
print "Breitung event probability = %.5e" % PFBreitC2
print "Breitung generalized reliability index = %.5f" % BetaBreitC2
print "HohenBichler event probability = %.5e" % PFHBC2
print "HohenBichler generalized reliability index = %.5f" % BetaHBC2
print "Tvedt event probability = %.5e" % PFTvedtC2
print "Tvedt generalized reliability index = %.5f" % BetaTvedtC2
print "************************************************************************************************"
for i in range(CurvC2.getDimension()):
print "sorted curvatures = %.5f" % cleanScalar(CurvC2[i])
print "************************************************************************************************"
for i in range(u_starC2.getDimension()):
print "standard space design point = %.5f" % u_starC2[i]
print "************************************************************************************************"
for i in range(x_starC2.getDimension()):
print "physical space design point = %.5f" % x_starC2[i]
print "************************************************************************************************"
print "************************************************************************************************"
print "is standard point origin in failure space? ", PtC2
print "************************************************************************************************"
for i in range(gammaC2.getDimension()):
print "importance factors = %.5f" % gammaC2[i]
print "************************************************************************************************"
print "Hasofer reliability index = %.5f" % beta_hasoferC2
print "************************************************************************************************"
print ""
print "************************************************************************************************"
print "************************************** SORM ABDO RACKWITZ **************************************"
print "************************************************************************************************"
print "Breitung event probability = %.5e" % PFBreitAR2
print "Breitung generalized reliability index = %.5f" % BetaBreitAR2
print "HohenBichler event probability = %.5e" % PFHBAR2
print "HohenBichler generalized reliability index = %.5f" % BetaHBAR2
print "Tvedt event probability = %.5e" % PFTvedtAR2
print "Tvedt generalized reliability index = %.5f" % BetaTvedtAR2
print "************************************************************************************************"
for i in range(CurvAR2.getDimension()):
print "sorted curvatures = %.5f" % cleanScalar(CurvAR2[i])
print "************************************************************************************************"
for i in range(u_starAR2.getDimension()):
print "standard space design point = %.5f" % u_starAR2[i]
print "************************************************************************************************"
for i in range(x_starAR2.getDimension()):
print "physical space design point = %.5f" % x_starAR2[i]
print "************************************************************************************************"
print "************************************************************************************************"
print "is standard point origin in failure space? ", PtAR2
print "************************************************************************************************"
for i in range(gammaAR2.getDimension()):
print "importance factors = %.5f" % gammaAR2[i]
print "************************************************************************************************"
print "Hasofer reliability index = %.5f" % beta_hasoferAR2
print "************************************************************************************************"
print ""
print "************************************************************************************************"
print "**************************************** MONTE CARLO *******************************************"
print "************************************************************************************************"
print "Pf estimation = %.5e" % PFMC
print "Pf Variance estimation = %.5e" % Variance_PF_MC
print "CoV = %.5f" % CVMC
print "90% Confidence Interval =", "%.5e" % length90MC
print "CI at 90% =[", "%.5e" % (PFMC - 0.5 * length90MC), "; %.5e" % (PFMC + 0.5 * length90MC), "]"
print "************************************************************************************************"
print ""
print "************************************************************************************************"
print "******************************************* L H S **********************************************"
print "************************************************************************************************"
print "Pf estimation = %.5e" % PFLHS
print "Pf Variance estimation = %.5e" % Variance_PF_LHS
print "CoV = %.5f" % CVLHS
print "90% Confidence Interval =", "%.5e" % length90LHS
print "CI at 90% =[", "%.5e" % (PFLHS - 0.5 * length90LHS), "; %.5e" % (PFLHS + 0.5 * length90LHS), "]"
print "************************************************************************************************"
print ""
except:
import sys
print "t_Waarts_concave.py", sys.exc_type, sys.exc_value
|
richard-willowit/odoo
|
refs/heads/master
|
addons/website_rating/controllers/__init__.py
|
29
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import website_mail
|
amw2104/fireplace
|
refs/heads/master
|
fireplace/cards/tgt/druid.py
|
1
|
from ..utils import *
##
# Minions
# Darnassus Aspirant
class AT_038:
play = GainEmptyMana(CONTROLLER, 1)
deathrattle = GainMana(CONTROLLER, -1)
# Savage Combatant
class AT_039:
inspire = Buff(FRIENDLY_HERO, "AT_039e")
AT_039e = buff(atk=2)
# Wildwalker
class AT_040:
play = Buff(TARGET, "AT_040e")
AT_040e = buff(health=3)
# Knight of the Wild
class AT_041:
events = Summon(CONTROLLER, BEAST).on(Buff(SELF, "AT_041e"))
AT_041e = buff(cost=-1)
# Druid of the Saber
class AT_042:
choose = ("AT_042a", "AT_042b")
class AT_042a:
play = Morph(SELF, "AT_042t")
class AT_042b:
play = Morph(SELF, "AT_042t2")
# Aviana
class AT_045:
update = Refresh(FRIENDLY_HAND + MINION, {GameTag.COST: SET(1)})
##
# Spells
# Living Roots
class AT_037:
choose = ("AT_037a", "AT_037b")
class AT_037a:
play = Hit(TARGET, 2)
class AT_037b:
play = Summon(CONTROLLER, "AT_037t") * 2
# Astral Communion
class AT_043:
play = GainMana(CONTROLLER, 10), Discard(FRIENDLY_HAND)
# Mulch
class AT_044:
play = Destroy(TARGET), Give(OPPONENT, RandomMinion())
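# In the fireplace DSL (as used above): `play`, `deathrattle` and
# `inspire` attach actions to card events, `choose` lists the card's
# Choose One options, and buff() declares an enchantment with the given
# stat changes.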
|
Sendoushi/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/wptserve/tests/functional/test_server.py
|
299
|
import os
import unittest
import urllib2
import json
import wptserve
from base import TestUsingServer, doc_root
class TestFileHandler(TestUsingServer):
def test_not_handled(self):
with self.assertRaises(urllib2.HTTPError) as cm:
resp = self.request("/not_existing")
self.assertEquals(cm.exception.code, 404)
class TestRewriter(TestUsingServer):
def test_rewrite(self):
@wptserve.handlers.handler
def handler(request, response):
return request.request_path
route = ("GET", "/test/rewritten", handler)
self.server.rewriter.register("GET", "/test/original", route[1])
self.server.router.register(*route)
resp = self.request("/test/original")
self.assertEquals(200, resp.getcode())
self.assertEquals("/test/rewritten", resp.read())
class TestRequestHandler(TestUsingServer):
def test_exception(self):
@wptserve.handlers.handler
def handler(request, response):
raise Exception
route = ("GET", "/test/raises", handler)
self.server.router.register(*route)
with self.assertRaises(urllib2.HTTPError) as cm:
resp = self.request("/test/raises")
self.assertEquals(cm.exception.code, 500)
if __name__ == "__main__":
unittest.main()
|
danlrobertson/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/tests/element_send_keys/user_prompts.py
|
26
|
# META: timeout=long
import pytest
from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
from tests.support.inline import inline
def element_send_keys(session, element, text):
return session.transport.send(
"POST", "/session/{session_id}/element/{element_id}/value".format(
session_id=session.session_id,
element_id=element.id),
{"text": text})
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog):
def check_user_prompt_closed_without_exception(dialog_type, retval):
session.url = inline("<input type=text>")
element = session.find.css("input", all=False)
create_dialog(dialog_type, text=dialog_type)
response = element_send_keys(session, element, "foo")
assert_success(response)
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert element.property("value") == "foo"
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog):
def check_user_prompt_closed_with_exception(dialog_type, retval):
session.url = inline("<input type=text>")
element = session.find.css("input", all=False)
create_dialog(dialog_type, text=dialog_type)
response = element_send_keys(session, element, "foo")
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert element.property("value") == ""
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog):
def check_user_prompt_not_closed_but_exception(dialog_type):
session.url = inline("<input type=text>")
element = session.find.css("input", all=False)
create_dialog(dialog_type, text=dialog_type)
response = element_send_keys(session, element, "foo")
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
assert element.property("value") == ""
return check_user_prompt_not_closed_but_exception
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
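# Per the WebDriver spec the default unhandledPromptBehavior is
# "dismiss and notify", which is why test_default expects the prompt to
# be dismissed and an "unexpected alert open" error to be returned.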
|
sikarash/linux-pm
|
refs/heads/master
|
tools/perf/scripts/python/export-to-postgresql.py
|
617
|
# export-to-postgresql.py: export perf data to a postgresql database
# Copyright (c) 2014, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
import os
import sys
import struct
import datetime
from PySide.QtSql import *
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
PQfinish = libpq.PQfinish
PQstatus = libpq.PQstatus
PQexec = libpq.PQexec
PQexec.restype = c_void_p
PQresultStatus = libpq.PQresultStatus
PQputCopyData = libpq.PQputCopyData
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
perf_db_export_mode = True
perf_db_export_calls = False
def usage():
print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>]"
print >> sys.stderr, "where: columns 'all' or 'branches'"
print >> sys.stderr, " calls 'calls' => create calls table"
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
usage()
dbname = sys.argv[1]
if (len(sys.argv) >= 3):
columns = sys.argv[2]
else:
columns = "all"
if columns not in ("all", "branches"):
usage()
branches = (columns == "branches")
if (len(sys.argv) >= 4):
if (sys.argv[3] == "calls"):
perf_db_export_calls = True
else:
usage()
output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
os.mkdir(output_dir_name)
def do_query(q, s):
if (q.exec_(s)):
return
raise Exception("Query failed: " + q.lastError().text())
print datetime.datetime.today(), "Creating database..."
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
db.setDatabaseName('postgres')
db.open()
try:
do_query(query, 'CREATE DATABASE ' + dbname)
except:
os.rmdir(output_dir_name)
raise
query.finish()
query.clear()
db.close()
db.setDatabaseName(dbname)
db.open()
query = QSqlQuery(db)
do_query(query, 'SET client_min_messages TO WARNING')
do_query(query, 'CREATE TABLE selected_events ('
'id bigint NOT NULL,'
'name varchar(80))')
do_query(query, 'CREATE TABLE machines ('
'id bigint NOT NULL,'
'pid integer,'
'root_dir varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
'id bigint NOT NULL,'
'machine_id bigint,'
'process_id bigint,'
'pid integer,'
'tid integer)')
do_query(query, 'CREATE TABLE comms ('
'id bigint NOT NULL,'
'comm varchar(16))')
do_query(query, 'CREATE TABLE comm_threads ('
'id bigint NOT NULL,'
'comm_id bigint,'
'thread_id bigint)')
do_query(query, 'CREATE TABLE dsos ('
'id bigint NOT NULL,'
'machine_id bigint,'
'short_name varchar(256),'
'long_name varchar(4096),'
'build_id varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
'id bigint NOT NULL,'
'dso_id bigint,'
'sym_start bigint,'
'sym_end bigint,'
'binding integer,'
'name varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
'id integer NOT NULL,'
'name varchar(80))')
if branches:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean)')
else:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'period bigint,'
'weight bigint,'
'transaction bigint,'
'data_src bigint,'
'branch_type integer,'
'in_tx boolean)')
if perf_db_export_calls:
do_query(query, 'CREATE TABLE call_paths ('
'id bigint NOT NULL,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
do_query(query, 'CREATE TABLE calls ('
'id bigint NOT NULL,'
'thread_id bigint,'
'comm_id bigint,'
'call_path_id bigint,'
'call_time bigint,'
'return_time bigint,'
'branch_count bigint,'
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer)')
do_query(query, 'CREATE VIEW samples_view AS '
'SELECT '
'id,'
'time,'
'cpu,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
'to_hex(ip) AS ip_hex,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'sym_offset,'
'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
'to_hex(to_ip) AS to_ip_hex,'
'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx'
' FROM samples')
# binary COPY file header: 11-byte signature, 32-bit flags field,
# 32-bit header extension area length
file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
# file trailer: a 16-bit field count of -1 marks the end of data
file_trailer = "\377\377"
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
file = open(path_name, "w+")
file.write(file_header)
return file
def close_output_file(file):
file.write(file_trailer)
file.close()
def copy_output_file_direct(file, table_name):
close_output_file(file)
sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
do_query(query, sql)
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
conn = PQconnectdb("dbname = " + dbname)
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
res = PQexec(conn, sql)
	if (PQresultStatus(res) != 4): # 4 = PGRES_COPY_IN (ExecStatusType)
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
while (len(data)):
ret = PQputCopyData(conn, data, len(data))
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
data = file.read(65536)
ret = PQputCopyEnd(conn, None)
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
PQfinish(conn)
def remove_output_file(file):
name = file.name
file.close()
os.unlink(name)
evsel_file = open_output_file("evsel_table.bin")
machine_file = open_output_file("machine_table.bin")
thread_file = open_output_file("thread_table.bin")
comm_file = open_output_file("comm_table.bin")
comm_thread_file = open_output_file("comm_thread_table.bin")
dso_file = open_output_file("dso_table.bin")
symbol_file = open_output_file("symbol_table.bin")
branch_type_file = open_output_file("branch_type_table.bin")
sample_file = open_output_file("sample_table.bin")
if perf_db_export_calls:
call_path_file = open_output_file("call_path_table.bin")
call_file = open_output_file("call_table.bin")
def trace_begin():
print datetime.datetime.today(), "Writing to intermediate files..."
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
thread_table(0, 0, 0, -1, -1)
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls:
call_path_table(0, 0, 0, 0)
unhandled_count = 0
def trace_end():
print datetime.datetime.today(), "Copying to database..."
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
copy_output_file(comm_file, "comms")
copy_output_file(comm_thread_file, "comm_threads")
copy_output_file(dso_file, "dsos")
copy_output_file(symbol_file, "symbols")
copy_output_file(branch_type_file, "branch_types")
copy_output_file(sample_file, "samples")
if perf_db_export_calls:
copy_output_file(call_path_file, "call_paths")
copy_output_file(call_file, "calls")
print datetime.datetime.today(), "Removing intermediate files..."
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
remove_output_file(comm_file)
remove_output_file(comm_thread_file)
remove_output_file(dso_file)
remove_output_file(symbol_file)
remove_output_file(branch_type_file)
remove_output_file(sample_file)
if perf_db_export_calls:
remove_output_file(call_path_file)
remove_output_file(call_file)
os.rmdir(output_dir_name)
print datetime.datetime.today(), "Adding primary keys"
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
print datetime.datetime.today(), "Adding foreign keys"
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE comm_threads '
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE dsos '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)')
do_query(query, 'ALTER TABLE symbols '
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)')
do_query(query, 'ALTER TABLE samples '
'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),'
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths '
'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
do_query(query, 'ALTER TABLE calls '
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
if (unhandled_count):
print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
print datetime.datetime.today(), "Done"
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
unhandled_count += 1
def sched__sched_switch(*x):
pass
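# Each row in PostgreSQL binary COPY format (produced by the *_table
# functions below) is: a 16-bit big-endian field count, then for each
# field a 32-bit byte length followed by the raw bytes. For example,
#   struct.pack("!hiqi3s", 2, 8, 7, 3, "abc")
# encodes a two-field row: an 8-byte bigint (7) and a 3-byte string.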
def evsel_table(evsel_id, evsel_name, *x):
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
machine_file.write(value)
def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
comm_file.write(value)
def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
fmt = "!hiqiqiq"
value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, *x):
if branches:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiB", 17, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx)
else:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiB", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
fmt = "!hiqiqiqiq"
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
call_file.write(value)
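# Hedged illustration (not part of the original script): each *_table helper
# above appears to emit one tuple in PostgreSQL binary COPY layout -- a 2-byte
# field count, then each field as a 4-byte length followed by its value.
# Decoding a comm_table record by hand:
import struct

record = struct.pack("!hiqi4s", 2, 8, 42, 4, b"perf")
n_fields, = struct.unpack_from("!h", record, 0)   # field count (2)
id_len,   = struct.unpack_from("!i", record, 2)   # length of comm_id (8)
comm_id,  = struct.unpack_from("!q", record, 6)   # the id value itself
str_len,  = struct.unpack_from("!i", record, 14)  # length of the comm string
comm_str, = struct.unpack_from("!%ds" % str_len, record, 18)
assert (n_fields, id_len, comm_id, comm_str) == (2, 8, 42, b"perf")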
|
moazzemi/HAMEX
|
refs/heads/master
|
cpu/gem5/src/python/m5/util/orderdict.py
|
88
|
# Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
__all__ = [ 'orderdict' ]
from UserDict import DictMixin
class orderdict(dict, DictMixin):
def __init__(self, *args, **kwargs):
if len(args) > 1:
raise TypeError("expected at most one argument, got %d" % \
len(args))
self._keys = []
self.update(*args, **kwargs)
def __setitem__(self, key, item):
if key not in self:
self._keys.append(key)
super(orderdict, self).__setitem__(key, item)
def __delitem__(self, key):
super(orderdict, self).__delitem__(key)
self._keys.remove(key)
def clear(self):
super(orderdict, self).clear()
self._keys = []
def iterkeys(self):
for key in self._keys:
yield key
def itervalues(self):
for key in self._keys:
yield self[key]
def iteritems(self):
for key in self._keys:
yield key, self[key]
def keys(self):
return self._keys[:]
def values(self):
return [ self[key] for key in self._keys ]
def items(self):
        return [ (key, self[key]) for key in self._keys ]
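# Hedged usage sketch (not part of the original file): keys come back in
# insertion order, unlike a plain Python 2 dict.
if __name__ == '__main__':
    d = orderdict()
    d['banana'] = 1
    d['apple'] = 2
    assert d.keys() == ['banana', 'apple']
    assert d.items() == [('banana', 1), ('apple', 2)]
    del d['banana']
    assert d.keys() == ['apple']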
|
mozata/menpo
|
refs/heads/master
|
menpo/feature/__init__.py
|
1
|
from .features import (gradient, hog, lbp, es, igo, no_op, gaussian_filter,
daisy, features_selection_widget)
# If cyvlfeat is not installed, then access to vlfeat features should be blocked
try:
from .vlfeat import dsift
except ImportError:
pass
from .predefined import sparse_hog, double_igo
try:
from .predefined import fast_dsift
except ImportError:
pass
from .base import ndfeature, imgfeature
from .visualize import glyph, sum_channels
|
gujiawen/flask_web
|
refs/heads/master
|
venv/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py
|
9
|
# postgresql/psycopg2.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+psycopg2
:name: psycopg2
:dbapi: psycopg2
:connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2/
psycopg2 Connect Arguments
-----------------------------------
psycopg2-specific keyword arguments which are accepted by
:func:`.create_engine()` are:
* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL
statements which support this feature. What this essentially means from a
psycopg2 point of view is that the cursor is created using a name, e.g.
``connection.cursor('some name')``, which has the effect that result rows are
not immediately pre-fetched and buffered after statement execution, but are
instead left on the server and only retrieved as needed. SQLAlchemy's
:class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
behavior when this feature is enabled, such that groups of 100 rows at a
time are fetched over the wire to reduce conversational overhead.
Note that the ``stream_results=True`` execution option is a more targeted
way of enabling this mode on a per-execution basis.
* ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode
per connection. True by default.
* ``isolation_level``: This option, available for all Postgresql dialects,
includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
dialect. See :ref:`psycopg2_isolation_level`.
Unix Domain Connections
------------------------
psycopg2 supports connecting via Unix domain connections. When the ``host``
portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
which specifies Unix-domain communication rather than TCP/IP communication::
create_engine("postgresql+psycopg2://user:password@/dbname")
By default, the socket file used is the Unix-domain socket
in ``/tmp``, or whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
See also:
`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
Per-Statement/Connection Execution Options
-------------------------------------------
The following DBAPI-specific options are respected when used with
:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
* isolation_level - Set the transaction isolation level for the lifespan of a
:class:`.Connection` (can only be set on a connection, not a statement
or query). See :ref:`psycopg2_isolation_level`.
* stream_results - Enable or disable usage of psycopg2 server side cursors -
this feature makes use of "named" cursors in combination with special
result handling methods so that result rows are not fully buffered.
If ``None`` or not set, the ``server_side_cursors`` option of the
:class:`.Engine` is used.
Unicode
-------
By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
extension, such that the DBAPI receives and returns all strings as Python
Unicode objects directly - SQLAlchemy passes these values through without
change. Psycopg2 here will encode/decode string values based on the
current "client encoding" setting; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf-8``, as a more useful default::
#client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's ``set_client_encoding()``
method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding)
on all new connections based on the value passed to
:func:`.create_engine` using the ``client_encoding`` parameter::
engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8')
This overrides the encoding specified in the Postgresql client configuration.
.. versionadded:: 0.7.3
The psycopg2-specific ``client_encoding`` parameter to
:func:`.create_engine`.
SQLAlchemy can also be instructed to skip the usage of the psycopg2
``UNICODE`` extension and to instead utilize its own unicode encode/decode
services, which are normally reserved only for those DBAPIs that don't
fully support unicode directly. Passing ``use_native_unicode=False`` to
:func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``.
SQLAlchemy will instead encode data itself into Python bytestrings on the way
in and coerce from bytes on the way back,
using the value of the :func:`.create_engine` ``encoding`` parameter, which
defaults to ``utf-8``.
SQLAlchemy's own unicode encode/decode functionality is steadily becoming
obsolete as more DBAPIs support unicode fully along with the approach of
Python 3; in modern usage psycopg2 should be relied upon to handle unicode.
Transactions
------------
The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
.. _psycopg2_isolation_level:
Psycopg2 Transaction Isolation Level
-------------------------------------
As discussed in :ref:`postgresql_isolation_level`,
all Postgresql dialects support setting of transaction isolation level
both via the ``isolation_level`` parameter passed to :func:`.create_engine`,
as well as the ``isolation_level`` argument used by :meth:`.Connection.execution_options`.
When using the psycopg2 dialect, these options make use of
psycopg2's ``set_isolation_level()`` connection method, rather than
emitting a Postgresql directive; this is because psycopg2's API-level
setting is always emitted at the start of each transaction in any case.
The psycopg2 dialect supports these constants for isolation level:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
.. versionadded:: 0.8.2 support for AUTOCOMMIT isolation level when using
psycopg2.
NOTICE logging
---------------
The psycopg2 dialect will log Postgresql NOTICE messages via the
``sqlalchemy.dialects.postgresql`` logger::
import logging
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
HSTORE type
------------
The psycopg2 dialect will make use of the
``psycopg2.extensions.register_hstore()`` extension when using the HSTORE
type. This replaces SQLAlchemy's pure-Python HSTORE coercion which takes
effect for other DBAPIs.
"""
from __future__ import absolute_import
import re
import logging
from ... import util, exc
import decimal
from ... import processors
from ...engine import result as _result
from ...sql import expression
from ... import types as sqltypes
from .base import PGDialect, PGCompiler, \
PGIdentifierPreparer, PGExecutionContext, \
ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\
_INT_TYPES
from .hstore import HSTORE
logger = logging.getLogger('sqlalchemy.dialects.postgresql')
class _PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(decimal.Decimal)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype)
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype)
class _PGEnum(ENUM):
def __init__(self, *arg, **kw):
super(_PGEnum, self).__init__(*arg, **kw)
# Py2K
if self.convert_unicode:
self.convert_unicode = "force"
# end Py2K
class _PGArray(ARRAY):
def __init__(self, *arg, **kw):
super(_PGArray, self).__init__(*arg, **kw)
# Py2K
# FIXME: this check won't work for setups that
# have convert_unicode only on their create_engine().
if isinstance(self.item_type, sqltypes.String) and \
self.item_type.convert_unicode:
self.item_type.convert_unicode = "force"
# end Py2K
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).bind_processor(dialect)
def result_processor(self, dialect, coltype):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).result_processor(dialect, coltype)
# When we're handed literal SQL, ensure it's a SELECT-query. Since
# 8.3, combining cursors and "FOR UPDATE" has been fine.
SERVER_SIDE_CURSOR_RE = re.compile(
r'\s*SELECT',
re.I | re.UNICODE)
_server_side_id = util.counter()
class PGExecutionContext_psycopg2(PGExecutionContext):
def create_cursor(self):
# TODO: coverage for server side cursors + select.for_update()
if self.dialect.server_side_cursors:
is_server_side = \
self.execution_options.get('stream_results', True) and (
(self.compiled and isinstance(self.compiled.statement, expression.Selectable) \
or \
(
(not self.compiled or
isinstance(self.compiled.statement, expression.TextClause))
and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement))
)
)
else:
is_server_side = \
self.execution_options.get('stream_results', False)
self.__is_server_side = is_server_side
if is_server_side:
# use server-side cursors:
# http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
return self._dbapi_connection.cursor(ident)
else:
return self._dbapi_connection.cursor()
def get_result_proxy(self):
# TODO: ouch
if logger.isEnabledFor(logging.INFO):
self._log_notices(self.cursor)
if self.__is_server_side:
return _result.BufferedRowResultProxy(self)
else:
return _result.ResultProxy(self)
def _log_notices(self, cursor):
for notice in cursor.connection.notices:
# NOTICE messages have a
# newline character at the end
logger.info(notice.rstrip())
cursor.connection.notices[:] = []
class PGCompiler_psycopg2(PGCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return self.process(binary.left, **kw) + " %% " + \
self.process(binary.right, **kw)
def post_process_text(self, text):
return text.replace('%', '%%')
class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
return value.replace('%', '%%')
class PGDialect_psycopg2(PGDialect):
driver = 'psycopg2'
# Py2K
supports_unicode_statements = False
# end Py2K
default_paramstyle = 'pyformat'
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_psycopg2
statement_compiler = PGCompiler_psycopg2
preparer = PGIdentifierPreparer_psycopg2
psycopg2_version = (0, 0)
_has_native_hstore = False
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric: _PGNumeric,
ENUM: _PGEnum, # needs force_unicode
sqltypes.Enum: _PGEnum, # needs force_unicode
ARRAY: _PGArray, # needs force_unicode
HSTORE: _PGHStore,
}
)
def __init__(self, server_side_cursors=False, use_native_unicode=True,
client_encoding=None,
use_native_hstore=True,
**kwargs):
PGDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
self.use_native_unicode = use_native_unicode
self.use_native_hstore = use_native_hstore
self.supports_unicode_binds = use_native_unicode
self.client_encoding = client_encoding
if self.dbapi and hasattr(self.dbapi, '__version__'):
m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
int(x)
for x in m.group(1, 2, 3)
if x is not None)
def initialize(self, connection):
super(PGDialect_psycopg2, self).initialize(connection)
self._has_native_hstore = self.use_native_hstore and \
self._hstore_oids(connection.connection) \
is not None
@classmethod
def dbapi(cls):
import psycopg2
return psycopg2
@util.memoized_property
def _isolation_lookup(self):
extensions = __import__('psycopg2.extensions').extensions
return {
'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT,
'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED,
'READ UNCOMMITTED': extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
'REPEATABLE READ': extensions.ISOLATION_LEVEL_REPEATABLE_READ,
'SERIALIZABLE': extensions.ISOLATION_LEVEL_SERIALIZABLE
}
def set_isolation_level(self, connection, level):
try:
level = self._isolation_lookup[level.replace('_', ' ')]
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
connection.set_isolation_level(level)
def on_connect(self):
from psycopg2 import extras, extensions
fns = []
if self.client_encoding is not None:
def on_connect(conn):
conn.set_client_encoding(self.client_encoding)
fns.append(on_connect)
if self.isolation_level is not None:
def on_connect(conn):
self.set_isolation_level(conn, self.isolation_level)
fns.append(on_connect)
if self.dbapi and self.use_native_unicode:
def on_connect(conn):
extensions.register_type(extensions.UNICODE, conn)
fns.append(on_connect)
if self.dbapi and self.use_native_hstore:
def on_connect(conn):
hstore_oids = self._hstore_oids(conn)
if hstore_oids is not None:
oid, array_oid = hstore_oids
if util.py2k:
extras.register_hstore(conn, oid=oid,
array_oid=array_oid,
unicode=True)
else:
extras.register_hstore(conn, oid=oid,
array_oid=array_oid)
fns.append(on_connect)
if fns:
def on_connect(conn):
for fn in fns:
fn(conn)
return on_connect
else:
return None
@util.memoized_instancemethod
def _hstore_oids(self, conn):
if self.psycopg2_version >= (2, 4):
from psycopg2 import extras
oids = extras.HstoreAdapter.get_oids(conn)
if oids is not None and oids[0]:
return oids[0:2]
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if 'port' in opts:
opts['port'] = int(opts['port'])
opts.update(url.query)
return ([], opts)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
str_e = str(e).partition("\n")[0]
for msg in [
# these error messages from libpq: interfaces/libpq/fe-misc.c
# and interfaces/libpq/fe-secure.c.
# TODO: these are sent through gettext in libpq and we can't
# check within other locales - consider using connection.closed
'terminating connection',
'closed the connection',
'connection not open',
'could not receive data from server',
                # psycopg2 client errors, psycopg2/connection.h, psycopg2/cursor.h
'connection already closed',
'cursor already closed',
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
'losed the connection unexpectedly'
]:
idx = str_e.find(msg)
if idx >= 0 and '"' not in str_e[:idx]:
return True
return False
dialect = PGDialect_psycopg2
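# Hedged usage sketch (not part of the original module): wiring together the
# options documented in the docstring above; the URL and credentials are
# placeholders:
#
#   from sqlalchemy import create_engine
#   engine = create_engine(
#       "postgresql+psycopg2://user:password@localhost/dbname",
#       client_encoding='utf8',        # applied via set_client_encoding()
#       server_side_cursors=True,      # named (server-side) cursors for SELECTs
#       isolation_level='AUTOCOMMIT',  # psycopg2-specific isolation level
#   )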
|
g19-hs/personfinder
|
refs/heads/master
|
app/unidecode/x08a.py
|
253
|
data = (
'Yan ', # 0x00
'Yan ', # 0x01
'Ding ', # 0x02
'Fu ', # 0x03
'Qiu ', # 0x04
'Qiu ', # 0x05
'Jiao ', # 0x06
'Hong ', # 0x07
'Ji ', # 0x08
'Fan ', # 0x09
'Xun ', # 0x0a
'Diao ', # 0x0b
'Hong ', # 0x0c
'Cha ', # 0x0d
'Tao ', # 0x0e
'Xu ', # 0x0f
'Jie ', # 0x10
'Yi ', # 0x11
'Ren ', # 0x12
'Xun ', # 0x13
'Yin ', # 0x14
'Shan ', # 0x15
'Qi ', # 0x16
'Tuo ', # 0x17
'Ji ', # 0x18
'Xun ', # 0x19
'Yin ', # 0x1a
'E ', # 0x1b
'Fen ', # 0x1c
'Ya ', # 0x1d
'Yao ', # 0x1e
'Song ', # 0x1f
'Shen ', # 0x20
'Yin ', # 0x21
'Xin ', # 0x22
'Jue ', # 0x23
'Xiao ', # 0x24
'Ne ', # 0x25
'Chen ', # 0x26
'You ', # 0x27
'Zhi ', # 0x28
'Xiong ', # 0x29
'Fang ', # 0x2a
'Xin ', # 0x2b
'Chao ', # 0x2c
'She ', # 0x2d
'Xian ', # 0x2e
'Sha ', # 0x2f
'Tun ', # 0x30
'Xu ', # 0x31
'Yi ', # 0x32
'Yi ', # 0x33
'Su ', # 0x34
'Chi ', # 0x35
'He ', # 0x36
'Shen ', # 0x37
'He ', # 0x38
'Xu ', # 0x39
'Zhen ', # 0x3a
'Zhu ', # 0x3b
'Zheng ', # 0x3c
'Gou ', # 0x3d
'Zi ', # 0x3e
'Zi ', # 0x3f
'Zhan ', # 0x40
'Gu ', # 0x41
'Fu ', # 0x42
'Quan ', # 0x43
'Die ', # 0x44
'Ling ', # 0x45
'Di ', # 0x46
'Yang ', # 0x47
'Li ', # 0x48
'Nao ', # 0x49
'Pan ', # 0x4a
'Zhou ', # 0x4b
'Gan ', # 0x4c
'Yi ', # 0x4d
'Ju ', # 0x4e
'Ao ', # 0x4f
'Zha ', # 0x50
'Tuo ', # 0x51
'Yi ', # 0x52
'Qu ', # 0x53
'Zhao ', # 0x54
'Ping ', # 0x55
'Bi ', # 0x56
'Xiong ', # 0x57
'Qu ', # 0x58
'Ba ', # 0x59
'Da ', # 0x5a
'Zu ', # 0x5b
'Tao ', # 0x5c
'Zhu ', # 0x5d
'Ci ', # 0x5e
'Zhe ', # 0x5f
'Yong ', # 0x60
'Xu ', # 0x61
'Xun ', # 0x62
'Yi ', # 0x63
'Huang ', # 0x64
'He ', # 0x65
'Shi ', # 0x66
'Cha ', # 0x67
'Jiao ', # 0x68
'Shi ', # 0x69
'Hen ', # 0x6a
'Cha ', # 0x6b
'Gou ', # 0x6c
'Gui ', # 0x6d
'Quan ', # 0x6e
'Hui ', # 0x6f
'Jie ', # 0x70
'Hua ', # 0x71
'Gai ', # 0x72
'Xiang ', # 0x73
'Wei ', # 0x74
'Shen ', # 0x75
'Chou ', # 0x76
'Tong ', # 0x77
'Mi ', # 0x78
'Zhan ', # 0x79
'Ming ', # 0x7a
'E ', # 0x7b
'Hui ', # 0x7c
'Yan ', # 0x7d
'Xiong ', # 0x7e
'Gua ', # 0x7f
'Er ', # 0x80
'Beng ', # 0x81
'Tiao ', # 0x82
'Chi ', # 0x83
'Lei ', # 0x84
'Zhu ', # 0x85
'Kuang ', # 0x86
'Kua ', # 0x87
'Wu ', # 0x88
'Yu ', # 0x89
'Teng ', # 0x8a
'Ji ', # 0x8b
'Zhi ', # 0x8c
'Ren ', # 0x8d
'Su ', # 0x8e
'Lang ', # 0x8f
'E ', # 0x90
'Kuang ', # 0x91
'E ', # 0x92
'Shi ', # 0x93
'Ting ', # 0x94
'Dan ', # 0x95
'Bo ', # 0x96
'Chan ', # 0x97
'You ', # 0x98
'Heng ', # 0x99
'Qiao ', # 0x9a
'Qin ', # 0x9b
'Shua ', # 0x9c
'An ', # 0x9d
'Yu ', # 0x9e
'Xiao ', # 0x9f
'Cheng ', # 0xa0
'Jie ', # 0xa1
'Xian ', # 0xa2
'Wu ', # 0xa3
'Wu ', # 0xa4
'Gao ', # 0xa5
'Song ', # 0xa6
'Pu ', # 0xa7
'Hui ', # 0xa8
'Jing ', # 0xa9
'Shuo ', # 0xaa
'Zhen ', # 0xab
'Shuo ', # 0xac
'Du ', # 0xad
'Yasashi ', # 0xae
'Chang ', # 0xaf
'Shui ', # 0xb0
'Jie ', # 0xb1
'Ke ', # 0xb2
'Qu ', # 0xb3
'Cong ', # 0xb4
'Xiao ', # 0xb5
'Sui ', # 0xb6
'Wang ', # 0xb7
'Xuan ', # 0xb8
'Fei ', # 0xb9
'Chi ', # 0xba
'Ta ', # 0xbb
'Yi ', # 0xbc
'Na ', # 0xbd
'Yin ', # 0xbe
'Diao ', # 0xbf
'Pi ', # 0xc0
'Chuo ', # 0xc1
'Chan ', # 0xc2
'Chen ', # 0xc3
'Zhun ', # 0xc4
'Ji ', # 0xc5
'Qi ', # 0xc6
'Tan ', # 0xc7
'Zhui ', # 0xc8
'Wei ', # 0xc9
'Ju ', # 0xca
'Qing ', # 0xcb
'Jian ', # 0xcc
'Zheng ', # 0xcd
'Ze ', # 0xce
'Zou ', # 0xcf
'Qian ', # 0xd0
'Zhuo ', # 0xd1
'Liang ', # 0xd2
'Jian ', # 0xd3
'Zhu ', # 0xd4
'Hao ', # 0xd5
'Lun ', # 0xd6
'Shen ', # 0xd7
'Biao ', # 0xd8
'Huai ', # 0xd9
'Pian ', # 0xda
'Yu ', # 0xdb
'Die ', # 0xdc
'Xu ', # 0xdd
'Pian ', # 0xde
'Shi ', # 0xdf
'Xuan ', # 0xe0
'Shi ', # 0xe1
'Hun ', # 0xe2
'Hua ', # 0xe3
'E ', # 0xe4
'Zhong ', # 0xe5
'Di ', # 0xe6
'Xie ', # 0xe7
'Fu ', # 0xe8
'Pu ', # 0xe9
'Ting ', # 0xea
'Jian ', # 0xeb
'Qi ', # 0xec
'Yu ', # 0xed
'Zi ', # 0xee
'Chuan ', # 0xef
'Xi ', # 0xf0
'Hui ', # 0xf1
'Yin ', # 0xf2
'An ', # 0xf3
'Xian ', # 0xf4
'Nan ', # 0xf5
'Chen ', # 0xf6
'Feng ', # 0xf7
'Zhu ', # 0xf8
'Yang ', # 0xf9
'Yan ', # 0xfa
'Heng ', # 0xfb
'Xuan ', # 0xfc
'Ge ', # 0xfd
'Nuo ', # 0xfe
'Qi ', # 0xff
)
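# Hedged illustration (not part of the original file): the tuple maps the low
# byte of code points U+8A00..U+8AFF to romanized readings; index it with
# (codepoint & 0xFF):
assert data[ord(u'\u8a00') & 0xff] == 'Yan '  # U+8A00 -> data[0x00]
assert data[ord(u'\u8a9e') & 0xff] == 'Yu '   # U+8A9E -> data[0x9e]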
|
tanmaykm/thrift
|
refs/heads/julia1.0-thrift-0.11.0
|
contrib/zeromq/TZmqServer.py
|
43
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import zmq
import thrift.server.TServer
import thrift.transport.TTransport
class TZmqServer(thrift.server.TServer.TServer):
def __init__(self, processor, ctx, endpoint, sock_type):
thrift.server.TServer.TServer.__init__(self, processor, None)
self.zmq_type = sock_type
self.socket = ctx.socket(sock_type)
self.socket.bind(endpoint)
def serveOne(self):
msg = self.socket.recv()
itrans = thrift.transport.TTransport.TMemoryBuffer(msg)
otrans = thrift.transport.TTransport.TMemoryBuffer()
iprot = self.inputProtocolFactory.getProtocol(itrans)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
self.processor.process(iprot, oprot)
except Exception:
logging.exception("Exception while processing request")
# Fall through and send back a response, even if empty or incomplete.
if self.zmq_type == zmq.REP:
msg = otrans.getvalue()
self.socket.send(msg)
def serve(self):
while True:
self.serveOne()
class TZmqMultiServer(object):
def __init__(self):
self.servers = []
def serveOne(self, timeout=-1):
self._serveActive(self._setupPoll(), timeout)
def serveForever(self):
poll_info = self._setupPoll()
while True:
self._serveActive(poll_info, -1)
def _setupPoll(self):
server_map = {}
poller = zmq.Poller()
for server in self.servers:
server_map[server.socket] = server
poller.register(server.socket, zmq.POLLIN)
return (server_map, poller)
def _serveActive(self, poll_info, timeout):
(server_map, poller) = poll_info
ready = dict(poller.poll())
for sock, state in ready.items():
assert (state & zmq.POLLIN) != 0
server_map[sock].serveOne()
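# Hedged usage sketch (not part of the original file); `handler` and the
# generated Service module are placeholders:
#
#   ctx = zmq.Context()
#   processor = Service.Processor(handler)
#   server = TZmqServer(processor, ctx, "tcp://*:9090", zmq.REP)
#   server.serve()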
|
Smarsh/django
|
refs/heads/master
|
django/contrib/gis/geos/error.py
|
641
|
"""
This module houses the GEOS exceptions, specifically, GEOSException and
GEOSGeometryIndexError.
"""
class GEOSException(Exception):
"The base GEOS exception, indicates a GEOS-related error."
pass
class GEOSIndexError(GEOSException, KeyError):
"""
This exception is raised when an invalid index is encountered, and has
    the 'silent_variable_failure' attribute set to True. This ensures that
django's templates proceed to use the next lookup type gracefully when
an Exception is raised. Fixes ticket #4740.
"""
# "If, during the method lookup, a method raises an exception, the exception
# will be propagated, unless the exception has an attribute
# `silent_variable_failure` whose value is True." -- Django template docs.
silent_variable_failure = True
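# Hedged illustration (not part of the original module): because the class
# also subclasses KeyError and sets silent_variable_failure, templates swallow
# it silently while ordinary code can still catch it:
try:
    raise GEOSIndexError('invalid GEOS Geometry index: 5')
except KeyError as e:
    assert getattr(e, 'silent_variable_failure', False)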
|
idncom/odoo
|
refs/heads/8.0
|
addons/website_forum/tests/common.py
|
201
|
# -*- coding: utf-8 -*-
from openerp.tests import common
KARMA = {
'ask': 5, 'ans': 10,
'com_own': 5, 'com_all': 10,
'com_conv_all': 50,
'upv': 5, 'dwv': 10,
'edit_own': 10, 'edit_all': 20,
'close_own': 10, 'close_all': 20,
'unlink_own': 10, 'unlink_all': 20,
'gen_que_new': 1, 'gen_que_upv': 5, 'gen_que_dwv': -10,
'gen_ans_upv': 10, 'gen_ans_dwv': -20,
}
class TestForumCommon(common.SavepointCase):
@classmethod
def setUpClass(cls):
super(TestForumCommon, cls).setUpClass()
Forum = cls.env['forum.forum']
Post = cls.env['forum.post']
# Test users
TestUsersEnv = cls.env['res.users'].with_context({'no_reset_password': True})
group_employee_id = cls.env.ref('base.group_user').id
group_portal_id = cls.env.ref('base.group_portal').id
group_public_id = cls.env.ref('base.group_public').id
cls.user_employee = TestUsersEnv.create({
'name': 'Armande Employee',
'login': 'Armande',
'alias_name': 'armande',
'email': 'armande.employee@example.com',
'karma': 0,
'groups_id': [(6, 0, [group_employee_id])]
})
cls.user_portal = TestUsersEnv.create({
'name': 'Beatrice Portal',
'login': 'Beatrice',
'alias_name': 'beatrice',
'email': 'beatrice.employee@example.com',
'karma': 0,
'groups_id': [(6, 0, [group_portal_id])]
})
cls.user_public = TestUsersEnv.create({
'name': 'Cedric Public',
'login': 'Cedric',
'alias_name': 'cedric',
'email': 'cedric.employee@example.com',
'karma': 0,
'groups_id': [(6, 0, [group_public_id])]
})
# Test forum
cls.forum = Forum.create({
'name': 'TestForum',
'karma_ask': KARMA['ask'],
'karma_answer': KARMA['ans'],
'karma_comment_own': KARMA['com_own'],
'karma_comment_all': KARMA['com_all'],
'karma_answer_accept_own': 9999,
'karma_answer_accept_all': 9999,
'karma_upvote': KARMA['upv'],
'karma_downvote': KARMA['dwv'],
'karma_edit_own': KARMA['edit_own'],
'karma_edit_all': KARMA['edit_all'],
'karma_close_own': KARMA['close_own'],
'karma_close_all': KARMA['close_all'],
'karma_unlink_own': KARMA['unlink_own'],
'karma_unlink_all': KARMA['unlink_all'],
'karma_comment_convert_all': KARMA['com_conv_all'],
'karma_gen_question_new': KARMA['gen_que_new'],
'karma_gen_question_upvote': KARMA['gen_que_upv'],
'karma_gen_question_downvote': KARMA['gen_que_dwv'],
'karma_gen_answer_upvote': KARMA['gen_ans_upv'],
'karma_gen_answer_downvote': KARMA['gen_ans_dwv'],
'karma_gen_answer_accept': 9999,
'karma_gen_answer_accepted': 9999,
})
cls.post = Post.create({
'name': 'TestQuestion',
'content': 'I am not a bird.',
'forum_id': cls.forum.id,
'tag_ids': [(0, 0, {'name': 'Tag0', 'forum_id': cls.forum.id})]
})
cls.answer = Post.create({
'name': 'TestAnswer',
'content': 'I am an anteater.',
'forum_id': cls.forum.id,
'parent_id': cls.post.id,
})
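# Hedged usage sketch (not part of the original module): a test class built on
# these fixtures might look like this (the test name is hypothetical):
#
#   class TestForumFixtures(TestForumCommon):
#       def test_fixture_links(self):
#           self.assertEqual(self.post.forum_id, self.forum)
#           self.assertEqual(self.answer.parent_id, self.post)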
|
dennisobrien/bokeh
|
refs/heads/master
|
examples/plotting/file/bar_colormapped.py
|
9
|
from bokeh.io import show, output_file
from bokeh.models import ColumnDataSource
from bokeh.palettes import Spectral6
from bokeh.plotting import figure
from bokeh.transform import factor_cmap
output_file("bar_colormapped.html")
fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries']
counts = [5, 3, 4, 2, 4, 6]
source = ColumnDataSource(data=dict(fruits=fruits, counts=counts))
p = figure(x_range=fruits, plot_height=350, toolbar_location=None, title="Fruit Counts")
p.vbar(x='fruits', top='counts', width=0.9, source=source, legend="fruits",
line_color='white', fill_color=factor_cmap('fruits', palette=Spectral6, factors=fruits))
p.xgrid.grid_line_color = None
p.y_range.start = 0
p.y_range.end = 9
p.legend.orientation = "horizontal"
p.legend.location = "top_center"
show(p)
|
cmvelo/ansible
|
refs/heads/devel
|
lib/ansible/executor/module_common.py
|
4
|
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import imp
import json
import os
import shlex
import zipfile
from io import BytesIO
# from Ansible
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.utils.unicode import to_bytes, to_unicode
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.plugins import strategy
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
# we've moved the module_common relative to the snippets, so fix the path
_SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ZIPLOADER_TEMPLATE = u'''%(shebang)s
%(coding)s
ZIPLOADER_WRAPPER = True # For test-module script to tell this is a ZIPLOADER_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import base64
import shutil
import zipfile
import tempfile
import subprocess
if sys.version_info < (3,):
bytes = str
PY3 = False
else:
unicode = str
PY3 = True
try:
# Python-2.6+
from io import BytesIO as IOStream
except ImportError:
# Python < 2.6
from StringIO import StringIO as IOStream
ZIPDATA = """%(zipdata)s"""
def invoke_module(module, modlib_path, json_params):
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((modlib_path, pythonpath))
else:
os.environ['PYTHONPATH'] = modlib_path
p = subprocess.Popen([%(interpreter)s, module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate(json_params)
if not isinstance(stderr, (bytes, unicode)):
stderr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
stdout = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ziploader
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
script_path = os.path.join(basedir, 'ansible_module_%(ansible_module)s.py')
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'w')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'w')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# This differs slightly from default Ansible execution of Python modules
# as it passes the arguments to the module via a file instead of stdin.
# Set pythonpath to the debug dir
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((basedir, pythonpath))
else:
os.environ['PYTHONPATH'] = basedir
p = subprocess.Popen([%(interpreter)s, script_path, args_path], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if not isinstance(stderr, (bytes, unicode)):
stderr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
stdout = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
elif command == 'excommunicate':
# This attempts to run the module in-process (by importing a main
# function and then calling it). It is not the way ansible generally
# invokes the module so it won't work in every case. It is here to
# aid certain debuggers which work better when the code doesn't change
# from one process to another but there may be problems that occur
# when using this that are only artifacts of how we're invoking here,
# not actual bugs (as they don't affect the real way that we invoke
# ansible modules)
# stub the args and python path
sys.argv = ['%(ansible_module)s', args_path]
sys.path.insert(0, basedir)
from ansible_module_%(ansible_module)s import main
main()
print('WARNING: Module returned to wrapper instead of exiting')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
if __name__ == '__main__':
#
# See comments in the debug() method for information on debugging
#
ZIPLOADER_PARAMS = %(params)s
if PY3:
ZIPLOADER_PARAMS = ZIPLOADER_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
temp_path = tempfile.mkdtemp(prefix='ansible_')
zipped_mod = os.path.join(temp_path, 'ansible_modlib.zip')
modlib = open(zipped_mod, 'wb')
modlib.write(base64.b64decode(ZIPDATA))
modlib.close()
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ZIPLOADER_PARAMS)
else:
z = zipfile.ZipFile(zipped_mod)
module = os.path.join(temp_path, 'ansible_module_%(ansible_module)s.py')
f = open(module, 'wb')
f.write(z.read('ansible_module_%(ansible_module)s.py'))
f.close()
exitcode = invoke_module(module, zipped_mod, ZIPLOADER_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except OSError:
# tempdir creation probably failed
pass
sys.exit(exitcode)
'''
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
if C.DEFAULT_KEEP_REMOTE_FILES:
# Keep comments when KEEP_REMOTE_FILES is set. That way users will see
# the comments with some nice usage instructions
ACTIVE_ZIPLOADER_TEMPLATE = ZIPLOADER_TEMPLATE
else:
# ZIPLOADER_TEMPLATE stripped of comments for smaller over the wire size
ACTIVE_ZIPLOADER_TEMPLATE = _strip_comments(ZIPLOADER_TEMPLATE)
class ModuleDepFinder(ast.NodeVisitor):
# Caveats:
# This code currently does not handle:
# * relative imports from py2.6+ from . import urls
IMPORT_PREFIX_SIZE = len('ansible.module_utils.')
def __init__(self, *args, **kwargs):
"""
Walk the ast tree for the python module.
Save submodule[.submoduleN][.identifier] into self.submodules
self.submodules will end up with tuples like:
- ('basic',)
- ('urls', 'fetch_url')
- ('database', 'postgres')
- ('database', 'postgres', 'quote')
It's up to calling code to determine whether the final element of the
dotted strings are module names or something else (function, class, or
variable names)
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
self.submodules = set()
def visit_Import(self, node):
# import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
for alias in (a for a in node.names if a.name.startswith('ansible.module_utils.')):
py_mod = alias.name[self.IMPORT_PREFIX_SIZE:]
self.submodules.add((py_mod,))
self.generic_visit(node)
def visit_ImportFrom(self, node):
if node.module.startswith('ansible.module_utils'):
where_from = node.module[self.IMPORT_PREFIX_SIZE:]
if where_from:
# from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
py_mod = tuple(where_from.split('.'))
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
else:
# from ansible.module_utils import MODLIB [,MODLIB2] [as asname]
for alias in node.names:
self.submodules.add((alias.name,))
self.generic_visit(node)
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
fd = open(path, 'rb')
data = fd.read()
fd.close()
return data
def _get_shebang(interpreter, task_vars, args=tuple()):
"""
Note not stellar API:
Returns None instead of always returning a shebang line. Doing it this
way allows the caller to decide to use the shebang it read from the
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter).strip()
if interpreter_config not in task_vars:
return (None, interpreter)
interpreter = task_vars[interpreter_config].strip()
shebang = u'#!' + interpreter
if args:
shebang = shebang + u' ' + u' '.join(args)
return (shebang, interpreter)
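# Hedged illustration (not part of the original file): the interpreter can be
# overridden per host via an `ansible_<basename>_interpreter` task variable:
#
#   _get_shebang(u'/usr/bin/python', {})
#   -> (None, u'/usr/bin/python')
#   _get_shebang(u'/usr/bin/python',
#                {u'ansible_python_interpreter': u'/opt/py/bin/python'})
#   -> (u'#!/opt/py/bin/python', u'/opt/py/bin/python')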
def recursive_finder(name, data, py_module_names, py_module_cache, zf):
"""
Using ModuleDepFinder, make sure we have all of the module_utils files that
    the module and its module_utils files need.
"""
# Parse the module and find the imports of ansible.module_utils
tree = ast.parse(data)
finder = ModuleDepFinder()
finder.visit(tree)
#
    # Determine which of the imports we've found are modules (vs. class,
    # function, or variable names) for packages
#
normalized_modules = set()
# Loop through the imports that we've found to normalize them
# Exclude paths that match with paths we've already processed
# (Have to exclude them a second time once the paths are processed)
for py_module_name in finder.submodules.difference(py_module_names):
module_info = None
# Check whether either the last or the second to last identifier is
# a module name
for idx in (1, 2):
if len(py_module_name) < idx:
break
try:
module_info = imp.find_module(py_module_name[-idx],
[os.path.join(_SNIPPET_PATH, *py_module_name[:-idx])])
break
except ImportError:
continue
# Could not find the module. Construct a helpful error message.
if module_info is None:
msg = ['Could not find imported module support code for %s. Looked for' % name]
if idx == 2:
msg.append('either %s or %s' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# If not already processed then we've got work to do
if py_module_name not in py_module_names:
# If not in the cache, then read the file into the cache
# We already have a file handle for the module open so it makes
# sense to read it now
if py_module_name not in py_module_cache:
if module_info[2][2] == imp.PKG_DIRECTORY:
# Read the __init__.py instead of the module file as this is
# a python package
py_module_cache[py_module_name + ('__init__',)] = _slurp(os.path.join(os.path.join(_SNIPPET_PATH, *py_module_name), '__init__.py'))
normalized_modules.add(py_module_name + ('__init__',))
else:
py_module_cache[py_module_name] = module_info[0].read()
module_info[0].close()
normalized_modules.add(py_module_name)
# Make sure that all the packages that this module is a part of
# are also added
for i in range(1, len(py_module_name)):
py_pkg_name = py_module_name[:-i] + ('__init__',)
if py_pkg_name not in py_module_names:
normalized_modules.add(py_pkg_name)
py_module_cache[py_pkg_name] = _slurp('%s.py' % os.path.join(_SNIPPET_PATH, *py_pkg_name))
#
# iterate through all of the ansible.module_utils* imports that we haven't
# already checked for new imports
#
# set of modules that we haven't added to the zipfile
unprocessed_py_module_names = normalized_modules.difference(py_module_names)
for py_module_name in unprocessed_py_module_names:
py_module_path = os.path.join(*py_module_name)
py_module_file_name = '%s.py' % py_module_path
zf.writestr(os.path.join("ansible/module_utils",
py_module_file_name), py_module_cache[py_module_name])
# Add the names of the files we're scheduling to examine in the loop to
# py_module_names so that we don't re-examine them in the next pass
# through recursive_finder()
py_module_names.update(unprocessed_py_module_names)
for py_module_file in unprocessed_py_module_names:
recursive_finder(py_module_file, py_module_cache[py_module_file], py_module_names, py_module_cache, zf)
# Save memory; the file won't have to be read again for this ansible module.
del py_module_cache[py_module_file]
def _is_binary(module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = module_data[:1024]
return bool(start.translate(None, textchars))
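# Hedged illustration (not part of the original file): the heuristic flags
# data whose first 1024 bytes contain bytes outside the usual text range:
#
#   _is_binary(b'#!/usr/bin/python\nprint("hi")\n')  # -> False (plain text)
#   _is_binary(b'\x00\x01\x02\x03ELF')               # -> True  (binary header)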
def _find_snippet_imports(module_name, module_data, module_path, module_args, task_vars, module_compression):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
# us what we have to look to substitute in the module files and whether
# we're using module replacer or ziploader to format the module itself.
if _is_binary(module_data):
module_substyle = module_style = 'binary'
elif REPLACER in module_data:
# Do REPLACER before from ansible.module_utils because we need make sure
# we substitute "from ansible.module_utils basic" for REPLACER
module_style = 'new'
module_substyle = 'python'
module_data = module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
elif b'from ansible.module_utils.' in module_data:
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in module_data:
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
return module_data, module_style, shebang
output = BytesIO()
py_module_names = set()
if module_substyle == 'python':
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
python_repred_params = to_bytes(repr(json.dumps(params)), errors='strict')
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ziploader_cache')
cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ZIPLOADER: using cached module: %s' % cached_module_filename)
zipdata = open(cached_module_filename, 'rb').read()
# Fool the check later... I think we should just remove the check
py_module_names.add(('basic',))
else:
if module_name in strategy.action_write_locks:
display.debug('ZIPLOADER: Using lock for %s' % module_name)
lock = strategy.action_write_locks[module_name]
else:
# If the action plugin directly invokes the module (instead of
# going through a strategy) then we don't have a cross-process
# Lock specifically for this module. Use the "unexpected
# module" lock instead
display.debug('ZIPLOADER: Using generic lock for %s' % module_name)
lock = strategy.action_write_locks[None]
display.debug('ZIPLOADER: Acquiring lock')
with lock:
display.debug('ZIPLOADER: Lock acquired: %s' % id(lock))
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ZIPLOADER: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
zf.writestr('ansible/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\ntry:\n from ansible.release import __version__,__author__\nexcept ImportError:\n __version__="' + to_bytes(__version__) + b'"\n __author__="' + to_bytes(__author__) + b'"\n')
zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')
zf.writestr('ansible_module_%s.py' % module_name, module_data)
py_module_cache = { ('__init__',): b'' }
recursive_finder(module_name, module_data, py_module_names, py_module_cache, zf)
zf.close()
zipdata = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
if not os.path.exists(lookup_path):
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.mkdir(lookup_path)
display.debug('ZIPLOADER: Writing module')
with open(cached_module_filename + '-part', 'w') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
display.debug('ZIPLOADER: Renaming module')
os.rename(cached_module_filename + '-part', cached_module_filename)
display.debug('ZIPLOADER: Done creating module')
if zipdata is None:
display.debug('ZIPLOADER: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
zipdata = open(cached_module_filename, 'rb').read()
except IOError:
raise AnsibleError('A different worker process failed to create module file. Look at traceback for that process for debugging information.')
# Fool the check later... I think we should just remove the check
py_module_names.add(('basic',))
shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars)
if shebang is None:
shebang = u'#!/usr/bin/python'
executable = interpreter.split(u' ', 1)
if len(executable) == 2 and executable[0].endswith(u'env'):
# Handle /usr/bin/env python style interpreter settings
interpreter = u"'{0}', '{1}'".format(*executable)
else:
# Still have to enclose the parts of the interpreter in quotes
# because we're substituting it into the template as a python
# string
interpreter = u"'{0}'".format(interpreter)
output.write(to_bytes(ACTIVE_ZIPLOADER_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
params=python_repred_params,
shebang=shebang,
interpreter=interpreter,
coding=ENCODING_STRING,
)))
module_data = output.getvalue()
# Sanity check from 1.x days. Maybe too strict. Some custom python
# modules that use ziploader may implement their own helpers and not
# need basic.py. All the constants that we substituted into basic.py
# for module_replacer are now available in other, better ways.
if ('basic',) not in py_module_names:
raise AnsibleError("missing required import in %s: Did not import ansible.module_utils.basic for boilerplate helper code" % module_path)
elif module_substyle == 'powershell':
# Module replacer for jsonargs and windows
lines = module_data.split(b'\n')
for line in lines:
if REPLACER_WINDOWS in line:
ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
output.write(ps_data)
py_module_names.add((b'powershell',))
continue
output.write(line + b'\n')
module_data = output.getvalue()
module_args_json = to_bytes(json.dumps(module_args))
module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)
# Sanity check from 1.x days. This is currently useless as we only
# get here if we are going to substitute powershell.ps1 into the
# module anyway. Leaving it for when/if we add other powershell
# module_utils files.
if (b'powershell',) not in py_module_names:
raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)
elif module_substyle == 'jsonargs':
module_args_json = to_bytes(json.dumps(module_args))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
# python modules (which has been replaced with something else in
# ziploader) If we remove them from jsonargs-style module replacer
# then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
module_data = module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)
module_data = module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
# The main event -- substitute the JSON args string into the module
module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='strict')
module_data = module_data.replace(b'syslog.LOG_USER', facility)
return (module_data, module_style, shebang)
# ******************************************************************************
def modify_module(module_name, module_path, module_args, task_vars=dict(), module_compression='ZIP_STORED'):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
All modules are required to import at least basic, though there will also
be other snippets.
For powershell, there's equivalent conventions like this:
# POWERSHELL_COMMON
which results in the inclusion of the common code from powershell.ps1
"""
with open(module_path, 'rb') as f:
# read in the module source
module_data = f.read()
(module_data, module_style, shebang) = _find_snippet_imports(module_name, module_data, module_path, module_args, task_vars, module_compression)
if module_style == 'binary':
return (module_data, module_style, shebang)
elif shebang is None:
lines = module_data.split(b"\n", 1)
if lines[0].startswith(b"#!"):
shebang = lines[0].strip()
args = shlex.split(str(shebang[2:]))
interpreter = args[0]
interpreter = to_bytes(interpreter)
new_shebang = to_bytes(_get_shebang(interpreter, task_vars, args[1:])[0], errors='strict', nonstring='passthru')
if new_shebang:
lines[0] = shebang = new_shebang
if os.path.basename(interpreter).startswith(b'python'):
lines.insert(1, to_bytes(ENCODING_STRING))
else:
# No shebang, assume a binary module?
pass
module_data = b"\n".join(lines)
else:
shebang = to_bytes(shebang, errors='strict')
return (module_data, module_style, shebang)
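# A minimal usage sketch for modify_module() (illustrative only; the module
# name, path and arguments below are hypothetical):
#
#   (data, style, shebang) = modify_module(
#       'ping', '/path/to/library/ping.py',
#       {'data': 'pong'}, task_vars={}, module_compression='ZIP_STORED')
#
# 'data' is then the fully wrapped module payload ready for transfer and
# 'shebang' the interpreter line chosen for the remote host.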
|
xpansa/hr
|
refs/heads/8.0
|
hr_job_categories/__openerp__.py
|
1
|
# -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': 'Job Categories',
'version': '1.0',
'category': 'Generic Modules/Human Resources',
'description': """
Attach Categories (Tags) to Employees Based on Job Position
===========================================================
This module is useful for tagging employees based on their job positions.
For example, all Supervisors could be attached to the Supervisors category.
Define which categories a job belongs to in the configuration for the job.
When an employee is assigned a particular job the categories attached to that
job will be attached to the employee record as well.
""",
'author': 'Michael Telahun Makonnen <mmakonnen@gmail.com>',
'website': 'http://miketelahun.wordpress.com',
'license': 'AGPL-3',
'depends': [
'hr_contract',
],
'data': [
'hr_view.xml',
],
'test': [
],
'installable': True,
}
|
xydinesh/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/zdf.py
|
108
|
# coding: utf-8
from __future__ import unicode_literals
import functools
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
OnDemandPagedList,
)
def extract_from_xml_url(ie, video_id, xml_url):
doc = ie._download_xml(
xml_url, video_id,
note='Downloading video info',
errnote='Failed to download video info')
title = doc.find('.//information/title').text
description = doc.find('.//information/detail').text
duration = int(doc.find('.//details/lengthSec').text)
uploader_node = doc.find('.//details/originChannelTitle')
uploader = None if uploader_node is None else uploader_node.text
uploader_id_node = doc.find('.//details/originChannelId')
uploader_id = None if uploader_id_node is None else uploader_id_node.text
upload_date = unified_strdate(doc.find('.//details/airtime').text)
def xml_to_format(fnode):
video_url = fnode.find('url').text
is_available = 'http://www.metafilegenerator' not in video_url
format_id = fnode.attrib['basetype']
format_m = re.match(r'''(?x)
(?P<vcodec>[^_]+)_(?P<acodec>[^_]+)_(?P<container>[^_]+)_
(?P<proto>[^_]+)_(?P<index>[^_]+)_(?P<indexproto>[^_]+)
''', format_id)
ext = format_m.group('container')
proto = format_m.group('proto').lower()
quality = fnode.find('./quality').text
abr = int(fnode.find('./audioBitrate').text) // 1000
vbr_node = fnode.find('./videoBitrate')
vbr = None if vbr_node is None else int(vbr_node.text) // 1000
width_node = fnode.find('./width')
width = None if width_node is None else int_or_none(width_node.text)
height_node = fnode.find('./height')
height = None if height_node is None else int_or_none(height_node.text)
format_note = None  # no per-format note is derived for these streams
return {
'format_id': format_id + '-' + quality,
'url': video_url,
'ext': ext,
'acodec': format_m.group('acodec'),
'vcodec': format_m.group('vcodec'),
'abr': abr,
'vbr': vbr,
'width': width,
'height': height,
'filesize': int_or_none(fnode.find('./filesize').text),
'format_note': format_note,
'protocol': proto,
'_available': is_available,
}
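# Illustrative decomposition by the basetype regex above (assumed example
# id 'h264_aac_mp4_http_na_na'): vcodec='h264', acodec='aac',
# container='mp4', proto='http', index='na', indexproto='na'.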
format_nodes = doc.findall('.//formitaeten/formitaet')
formats = list(filter(
lambda f: f['_available'],
map(xml_to_format, format_nodes)))
ie._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
'upload_date': upload_date,
'formats': formats,
}
class ZDFIE(InfoExtractor):
_VALID_URL = r'(?:zdf:|zdf:video:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/(.*beitrag/(?:video/)?))(?P<id>[0-9]+)(?:/[^/?]+)?(?:\?.*)?'
_TEST = {
'url': 'http://www.zdf.de/ZDFmediathek/beitrag/video/2037704/ZDFspezial---Ende-des-Machtpokers--?bc=sts;stt',
'info_dict': {
'id': '2037704',
'ext': 'webm',
'title': 'ZDFspezial - Ende des Machtpokers',
'description': 'Union und SPD haben sich auf einen Koalitionsvertrag geeinigt. Aber was bedeutet das für die Bürger? Sehen Sie hierzu das ZDFspezial "Ende des Machtpokers - Große Koalition für Deutschland".',
'duration': 1022,
'uploader': 'spezial',
'uploader_id': '225948',
'upload_date': '20131127',
},
'skip': 'Videos on ZDF.de are depublicised in short order',
}
def _real_extract(self, url):
video_id = self._match_id(url)
xml_url = 'http://www.zdf.de/ZDFmediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
return extract_from_xml_url(self, video_id, xml_url)
class ZDFChannelIE(InfoExtractor):
_VALID_URL = r'(?:zdf:topic:|https?://www\.zdf\.de/ZDFmediathek(?:#)?/.*kanaluebersicht/)(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.zdf.de/ZDFmediathek#/kanaluebersicht/1586442/sendung/Titanic',
'info_dict': {
'id': '1586442',
},
'playlist_count': 3,
}
_PAGE_SIZE = 50
def _fetch_page(self, channel_id, page):
offset = page * self._PAGE_SIZE
xml_url = (
'http://www.zdf.de/ZDFmediathek/xmlservice/web/aktuellste?ak=web&offset=%d&maxLength=%d&id=%s'
% (offset, self._PAGE_SIZE, channel_id))
doc = self._download_xml(
xml_url, channel_id,
note='Downloading channel info',
errnote='Failed to download channel info')
title = doc.find('.//information/title').text
description = doc.find('.//information/detail').text
for asset in doc.findall('.//teasers/teaser'):
a_type = asset.find('./type').text
a_id = asset.find('./details/assetId').text
if a_type not in ('video', 'topic'):
continue
yield {
'_type': 'url',
'playlist_title': title,
'playlist_description': description,
'url': 'zdf:%s:%s' % (a_type, a_id),
}
def _real_extract(self, url):
channel_id = self._match_id(url)
entries = OnDemandPagedList(
functools.partial(self._fetch_page, channel_id), self._PAGE_SIZE)
return {
'_type': 'playlist',
'id': channel_id,
'entries': entries,
}
|
PhiInnovations/mdp28-linux-bsp
|
refs/heads/master
|
bitbake/lib/bb/fetch2/gitsm.py
|
2
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' git submodules implementation
"""
# Copyright (C) 2013 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import bb
from bb import data
from bb.fetch2.git import Git
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
class GitSM(Git):
def supports(self, url, ud, d):
"""
Check to see if a given url can be fetched with git.
"""
return ud.type in ['gitsm']
def uses_submodules(self, ud, d):
for name in ud.names:
try:
runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True)
return True
except bb.fetch.FetchError:
pass
return False
def update_submodules(self, u, ud, d):
# We have to convert bare -> full repo, do the submodule bit, then convert back
tmpclonedir = ud.clonedir + ".tmp"
gitdir = tmpclonedir + os.sep + ".git"
bb.utils.remove(tmpclonedir, True)
os.mkdir(tmpclonedir)
os.rename(ud.clonedir, gitdir)
runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*true/bare = false/'", d)
os.chdir(tmpclonedir)
runfetchcmd("git reset --hard", d)
runfetchcmd("git submodule init", d)
runfetchcmd("git submodule update", d)
runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*false/bare = true/'", d)
os.rename(gitdir, ud.clonedir)
bb.utils.remove(tmpclonedir, True)
def download(self, loc, ud, d):
Git.download(self, loc, ud, d)
os.chdir(ud.clonedir)
submodules = self.uses_submodules(ud, d)
if submodules:
self.update_submodules(loc, ud, d)
def unpack(self, ud, destdir, d):
Git.unpack(self, ud, destdir, d)
os.chdir(ud.destdir)
submodules = self.uses_submodules(ud, d)
if submodules:
runfetchcmd("cp -r " + ud.clonedir + "/modules " + ud.destdir + "/.git/", d)
runfetchcmd("git submodule init", d)
runfetchcmd("git submodule update", d)
|
erickt/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/conf/locale/sl/formats.py
|
200
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j. M. Y'
SHORT_DATETIME_FORMAT = 'j.n.Y. H:i'
FIRST_DAY_OF_WEEK = 0
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d-%m-%Y', # '25-10-2006'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
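# Illustrative check of one of the DATE_INPUT_FORMATS above (plain Python,
# no Django setup required):
#   >>> from datetime import datetime
#   >>> datetime.strptime('25. 10. 2006', '%d. %m. %Y')
#   datetime.datetime(2006, 10, 25, 0, 0)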
|
weinbe58/QuSpin
|
refs/heads/master
|
docs/downloads/8ebdaf354c80ef927ecd6a3597c6b0f6/example5.py
|
3
|
from __future__ import print_function, division
import sys,os
# line 4 and line 5 below are for development purposes and can be removed
qspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,qspin_path)
#####################################################################
# example 5 #
# In this script we demonstrate how to use QuSpin's to build #
# the Hamiltonian of the SSH model in real and momentum space. #
# Along the way, we showcase the block tools which allow the #
# user to create block-diagonal Hamiltonians. Last, we show #
# how to time-evolve free fermion states like the Fermi sea #
# and measure correlators. #
#####################################################################
from quspin.operators import hamiltonian,exp_op # Hamiltonians and operators
from quspin.basis import spinless_fermion_basis_1d # Hilbert space fermion basis
from quspin.tools.block_tools import block_diag_hamiltonian # block diagonalisation
import numpy as np # generic math functions
import matplotlib.pyplot as plt # plotting library
try: # import python 3 zip function in python 2 and pass if already using python 3
    from itertools import izip as zip
except ImportError:
pass
##### define model parameters #####
L=100 # system size
J=1.0 # uniform hopping
deltaJ=0.1 # bond dimerisation
Delta=0.5 # staggered potential
beta=100.0 # inverse temperature for Fermi-Dirac distribution
##### construct single-particle Hamiltonian #####
# define site-coupling lists
hop_pm=[[-J-deltaJ*(-1)**i,i,(i+1)%L] for i in range(L)] # PBC
hop_mp=[[+J+deltaJ*(-1)**i,i,(i+1)%L] for i in range(L)] # PBC
stagg_pot=[[Delta*(-1)**i,i] for i in range(L)]
# define static and dynamic lists
static=[["+-",hop_pm],["-+",hop_mp],['n',stagg_pot]]
dynamic=[]
# define basis
basis=spinless_fermion_basis_1d(L,Nf=1)
# build real-space Hamiltonian
H=hamiltonian(static,dynamic,basis=basis,dtype=np.float64)
# diagonalise real-space Hamiltonian
E,V=H.eigh()
##### compute Fourier transform and momentum-space Hamiltonian #####
# define momentum blocks and basis arguments
blocks=[dict(Nf=1,kblock=i,a=2) for i in range(L//2)] # only L//2 distinct momenta
basis_args = (L,)
# construct block-diagonal Hamiltonian
FT,Hblock = block_diag_hamiltonian(blocks,static,dynamic,spinless_fermion_basis_1d,
basis_args,np.complex128,get_proj_kwargs=dict(pcon=True))
# diagonalise momentum-space Hamiltonian
Eblock,Vblock=Hblock.eigh()
##### prepare the density observables and initial states #####
# grab single-particle states and treat them as initial states
psi0=Vblock
# construct operator n_1 = $n_{j=0}$
n_1_static=[['n',[[1.0,0]]]]
n_1=hamiltonian(n_1_static,[],basis=basis,dtype=np.float64,
check_herm=False,check_pcon=False)
# construct operator n_2 = $n_{j=L/2}$
n_2_static=[['n',[[1.0,L//2]]]]
n_2=hamiltonian(n_2_static,[],basis=basis,dtype=np.float64,
check_herm=False,check_pcon=False)
# transform n_j operators to momentum space
n_1=n_1.rotate_by(FT,generator=False)
n_2=n_2.rotate_by(FT,generator=False)
##### evaluate nonequal time correlator <FS|n_2(t) n_1(0)|FS> #####
# define time vector
t=np.linspace(0.0,90.0,901)
# calculate the state acted on by n_1
n_psi0=n_1.dot(psi0)
# construct time-evolution operator using exp_op class (sometimes faster)
U = exp_op(Hblock,a=-1j,start=t.min(),stop=t.max(),num=len(t),iterate=True)
# evolve states
psi_t=U.dot(psi0)
n_psi_t = U.dot(n_psi0)
# alternative method for time evolution using Hamiltonian class
#psi_t=Hblock.evolve(psi0,0.0,t,iterate=True)
#n_psi_t=Hblock.evolve(n_psi0,0.0,t,iterate=True)
# preallocate variable
correlators=np.zeros(t.shape+psi0.shape[1:])
# loop over the time-evolved states
for i, (psi,n_psi) in enumerate( zip(psi_t,n_psi_t) ):
correlators[i,:]=n_2.matrix_ele(psi,n_psi,diagonal=True).real
# evaluate correlator at finite temperature
n_FD=1.0/(np.exp(beta*E)+1.0)
correlator = (n_FD*correlators).sum(axis=-1)
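# (Illustratively, the line above evaluates
#  C_{0,L/2}(t,beta) = sum_n f_FD(E_n) * <n| n_2(t) n_1(0) |n>,
#  a thermal average of the per-state correlators with Fermi-Dirac
#  weights f_FD(E_n) = 1/(exp(beta*E_n)+1) computed just above.)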
##### plot spectra
plt.plot(np.arange(H.Ns),E/L,
marker='o',color='b',label='real space')
plt.plot(np.arange(Hblock.Ns),Eblock/L,
marker='x',color='r',markersize=2,label='momentum space')
plt.xlabel('state number',fontsize=16)
plt.ylabel('energy',fontsize=16)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.legend(fontsize=16)
plt.grid()
plt.tight_layout()
plt.savefig('example5a.pdf', bbox_inches='tight')
#plt.show()
plt.close()
##### plot correlator
plt.plot(t,correlator,linewidth=2)
plt.xlabel('$t$',fontsize=16)
plt.ylabel('$C_{0,L/2}(t,\\beta)$',fontsize=16)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.grid()
plt.tight_layout()
plt.savefig('example5b.pdf', bbox_inches='tight')
#plt.show()
plt.close()
|
multikatt/CouchPotatoServer
|
refs/heads/master
|
libs/pyutil/_version.py
|
92
|
# This is the version of this tree, as created by setup.py darcsver from the darcs patch
# information: the main version number is taken from the most recent release
# tag. If some patches have been added since the last release, this will have a
# -NN "build number" suffix, or else a -rNN "revision number" suffix. Please see
# pyutil.version_class for a description of what the different fields mean.
__pkgname__ = "pyutil"
verstr = "1.9.7"
try:
from pyutil.version_class import Version as pyutil_Version
__version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
# Maybe there is no pyutil installed, or this may be an older version of
# pyutil.version_class which does not support SVN-alike revision numbers.
from distutils.version import LooseVersion as distutils_Version
__version__ = distutils_Version(verstr)
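# Illustrative version strings under the scheme described in the header
# comment (hypothetical values):
#   "1.9.7"     -> a tagged release
#   "1.9.7-12"  -> 12 patches added since the 1.9.7 release tag
#   "1.9.7-r34" -> a build carrying an SVN-alike revision-number suffix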
|
Tithen-Firion/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/aenetworks.py
|
23
|
from __future__ import unicode_literals
import re
from .theplatform import ThePlatformIE
from ..utils import (
smuggle_url,
update_url_query,
unescapeHTML,
extract_attributes,
get_element_by_attribute,
)
from ..compat import (
compat_urlparse,
)
class AENetworksBaseIE(ThePlatformIE):
_THEPLATFORM_KEY = 'crazyjava'
_THEPLATFORM_SECRET = 's3cr3t'
class AENetworksIE(AENetworksBaseIE):
IE_NAME = 'aenetworks'
IE_DESC = 'A+E Networks: A&E, Lifetime, History.com, FYI Network'
_VALID_URL = r'''(?x)
https?://
(?:www\.)?
(?P<domain>
(?:history|aetv|mylifetime|lifetimemovieclub)\.com|
fyi\.tv
)/
(?:
shows/(?P<show_path>[^/]+(?:/[^/]+){0,2})|
movies/(?P<movie_display_id>[^/]+)(?:/full-movie)?|
specials/(?P<special_display_id>[^/]+)/full-special
)
'''
_TESTS = [{
'url': 'http://www.history.com/shows/mountain-men/season-1/episode-1',
'md5': 'a97a65f7e823ae10e9244bc5433d5fe6',
'info_dict': {
'id': '22253814',
'ext': 'mp4',
'title': 'Winter Is Coming',
'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
'timestamp': 1338306241,
'upload_date': '20120529',
'uploader': 'AENE-NEW',
},
'add_ie': ['ThePlatform'],
}, {
'url': 'http://www.history.com/shows/ancient-aliens/season-1',
'info_dict': {
'id': '71889446852',
},
'playlist_mincount': 5,
}, {
'url': 'http://www.mylifetime.com/shows/atlanta-plastic',
'info_dict': {
'id': 'SERIES4317',
'title': 'Atlanta Plastic',
},
'playlist_mincount': 2,
}, {
'url': 'http://www.aetv.com/shows/duck-dynasty/season-9/episode-1',
'only_matching': True
}, {
'url': 'http://www.fyi.tv/shows/tiny-house-nation/season-1/episode-8',
'only_matching': True
}, {
'url': 'http://www.mylifetime.com/shows/project-runway-junior/season-1/episode-6',
'only_matching': True
}, {
'url': 'http://www.mylifetime.com/movies/center-stage-on-pointe/full-movie',
'only_matching': True
}, {
'url': 'https://www.lifetimemovieclub.com/movies/a-killer-among-us',
'only_matching': True
}, {
'url': 'http://www.history.com/specials/sniper-into-the-kill-zone/full-special',
'only_matching': True
}]
_DOMAIN_TO_REQUESTOR_ID = {
'history.com': 'HISTORY',
'aetv.com': 'AETV',
'mylifetime.com': 'LIFETIME',
'lifetimemovieclub.com': 'LIFETIMEMOVIECLUB',
'fyi.tv': 'FYI',
}
def _real_extract(self, url):
domain, show_path, movie_display_id, special_display_id = re.match(self._VALID_URL, url).groups()
display_id = show_path or movie_display_id or special_display_id
webpage = self._download_webpage(url, display_id)
if show_path:
url_parts = show_path.split('/')
url_parts_len = len(url_parts)
if url_parts_len == 1:
entries = []
for season_url_path in re.findall(r'(?s)<li[^>]+data-href="(/shows/%s/season-\d+)"' % url_parts[0], webpage):
entries.append(self.url_result(
compat_urlparse.urljoin(url, season_url_path), 'AENetworks'))
if entries:
return self.playlist_result(
entries, self._html_search_meta('aetn:SeriesId', webpage),
self._html_search_meta('aetn:SeriesTitle', webpage))
else:
# single season
url_parts_len = 2
if url_parts_len == 2:
entries = []
for episode_item in re.findall(r'(?s)<[^>]+class="[^"]*(?:episode|program)-item[^"]*"[^>]*>', webpage):
episode_attributes = extract_attributes(episode_item)
episode_url = compat_urlparse.urljoin(
url, episode_attributes['data-canonical'])
entries.append(self.url_result(
episode_url, 'AENetworks',
episode_attributes.get('data-videoid') or episode_attributes.get('data-video-id')))
return self.playlist_result(
entries, self._html_search_meta('aetn:SeasonId', webpage))
query = {
'mbr': 'true',
'assetTypes': 'high_video_s3'
}
video_id = self._html_search_meta('aetn:VideoID', webpage)
media_url = self._search_regex(
[r"media_url\s*=\s*'(?P<url>[^']+)'",
r'data-media-url=(?P<url>(?:https?:)?//[^\s>]+)',
r'data-media-url=(["\'])(?P<url>(?:(?!\1).)+?)\1'],
webpage, 'video url', group='url')
theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
r'https?://link.theplatform.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
info = self._parse_theplatform_metadata(theplatform_metadata)
if theplatform_metadata.get('AETN$isBehindWall'):
requestor_id = self._DOMAIN_TO_REQUESTOR_ID[domain]
resource = self._get_mvpd_resource(
requestor_id, theplatform_metadata['title'],
theplatform_metadata.get('AETN$PPL_pplProgramId') or theplatform_metadata.get('AETN$PPL_pplProgramId_OLD'),
theplatform_metadata['ratings'][0]['rating'])
query['auth'] = self._extract_mvpd_auth(
url, video_id, requestor_id, resource)
info.update(self._search_json_ld(webpage, video_id, fatal=False))
media_url = update_url_query(media_url, query)
media_url = self._sign_url(media_url, self._THEPLATFORM_KEY, self._THEPLATFORM_SECRET)
formats, subtitles = self._extract_theplatform_smil(media_url, video_id)
self._sort_formats(formats)
info.update({
'id': video_id,
'formats': formats,
'subtitles': subtitles,
})
return info
class HistoryTopicIE(AENetworksBaseIE):
IE_NAME = 'history:topic'
IE_DESC = 'History.com Topic'
_VALID_URL = r'https?://(?:www\.)?history\.com/topics/(?:[^/]+/)?(?P<topic_id>[^/]+)(?:/[^/]+(?:/(?P<video_display_id>[^/?#]+))?)?'
_TESTS = [{
'url': 'http://www.history.com/topics/valentines-day/history-of-valentines-day/videos/bet-you-didnt-know-valentines-day?m=528e394da93ae&s=undefined&f=1&free=false',
'info_dict': {
'id': '40700995724',
'ext': 'mp4',
'title': "Bet You Didn't Know: Valentine's Day",
'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
'timestamp': 1375819729,
'upload_date': '20130806',
'uploader': 'AENE-NEW',
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['ThePlatform'],
}, {
'url': 'http://www.history.com/topics/world-war-i/world-war-i-history/videos',
'info_dict':
{
'id': 'world-war-i-history',
'title': 'World War I History',
},
'playlist_mincount': 23,
}, {
'url': 'http://www.history.com/topics/world-war-i-history/videos',
'only_matching': True,
}, {
'url': 'http://www.history.com/topics/world-war-i/world-war-i-history',
'only_matching': True,
}, {
'url': 'http://www.history.com/topics/world-war-i/world-war-i-history/speeches',
'only_matching': True,
}]
def theplatform_url_result(self, theplatform_url, video_id, query):
return {
'_type': 'url_transparent',
'id': video_id,
'url': smuggle_url(
update_url_query(theplatform_url, query),
{
'sig': {
'key': self._THEPLATFORM_KEY,
'secret': self._THEPLATFORM_SECRET,
},
'force_smil_url': True
}),
'ie_key': 'ThePlatform',
}
def _real_extract(self, url):
topic_id, video_display_id = re.match(self._VALID_URL, url).groups()
if video_display_id:
webpage = self._download_webpage(url, video_display_id)
release_url, video_id = re.search(r"_videoPlayer.play\('([^']+)'\s*,\s*'[^']+'\s*,\s*'(\d+)'\)", webpage).groups()
release_url = unescapeHTML(release_url)
return self.theplatform_url_result(
release_url, video_id, {
'mbr': 'true',
'switch': 'hls',
'assetTypes': 'high_video_ak',
})
else:
webpage = self._download_webpage(url, topic_id)
entries = []
for episode_item in re.findall(r'<a.+?data-release-url="[^"]+"[^>]*>', webpage):
video_attributes = extract_attributes(episode_item)
entries.append(self.theplatform_url_result(
video_attributes['data-release-url'], video_attributes['data-id'], {
'mbr': 'true',
'switch': 'hls',
'assetTypes': 'high_video_ak',
}))
return self.playlist_result(entries, topic_id, get_element_by_attribute('class', 'show-title', webpage))
|
2014c2g23/2015cda-w17
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/browser/indexed_db.py
|
632
|
class EventListener:
def __init__(self, events=None):
    # avoid the shared mutable default-argument pitfall
    self._events = events if events is not None else []
def append(self, event):
self._events.append(event)
def fire(self, e):
for _event in self._events:
_event(e)
class IndexedDB:
def __init__(self):
if not __BRYTHON__.has_indexedDB:
raise NotImplementedError("Your browser doesn't support indexedDB")
self._indexedDB=__BRYTHON__.indexedDB()
self._db=None
self._version=None
def _onsuccess(self, event):
self._db=event.target.result
def open(self, name, onsuccess, version=1.0, onerror=None,
onupgradeneeded=None):
self._version=version
_result=self._indexedDB.open(name, version)
_success=EventListener([self._onsuccess, onsuccess])
_result.onsuccess=_success.fire
_result.onupgradeneeded=onupgradeneeded
#if onerror is None:
def onerror(e):
print("onerror: %s:%s" % (e.type, e.target.result))
def onblocked(e):
print("blocked: %s:%s" % (e.type, e.result))
_result.onerror=onerror
_result.onblocked=onblocked
def transaction(self, entities, mode='read'):
return Transaction(self._db.transaction(entities, mode))
class Transaction:
def __init__(self, transaction):
self._transaction=transaction
def objectStore(self, name):
return ObjectStore(self._transaction.objectStore(name))
class ObjectStore:
def __init__(self, objectStore):
self._objectStore=objectStore
self._data=[]
def clear(self, onsuccess=None, onerror=None):
_result=self._objectStore.clear()
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def _helper(self, func, obj, onsuccess=None, onerror=None):
    _result=func(obj)
if onsuccess is not None:
_result.onsuccess=onsuccess
if onerror is not None:
_result.onerror=onerror
def put(self, obj, key=None, onsuccess=None, onerror=None):
_r = self._objectStore.put(obj, key)
_r.onsuccess = onsuccess
_r.onerror = onerror
def add(self, obj, key, onsuccess=None, onerror=None):
_r = self._objectStore.add(obj, key)
_r.onsuccess = onsuccess
_r.onerror = onerror
#self._helper(self._objectStore.add, obj, onsuccess, onerror)
def delete(self, index, onsuccess=None, onerror=None):
self._helper(self._objectStore.delete, index, onsuccess, onerror)
def query(self, *args):
self._data=[]
def onsuccess(event):
cursor=event.target.result
if cursor is not None:
self._data.append(cursor.value)
getattr(cursor,"continue")() # cursor.continue() is illegal
self._objectStore.openCursor(args).onsuccess=onsuccess
def fetchall(self):
yield self._data
def get(self, key, onsuccess=None, onerror=None):
self._helper(self._objectStore.get, key, onsuccess, onerror)
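# A minimal usage sketch (hypothetical names; browser-side Brython only.
# open() completes asynchronously, so in real code the transaction is
# created from inside the onsuccess callback):
#   db = IndexedDB()
#   def ready(event):
#       store = db.transaction(['notes'], mode='readwrite').objectStore('notes')
#       store.put({'id': 1, 'text': 'hello'}, key=1)
#   db.open('notes-db', ready)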
|
lyft/incubator-airflow
|
refs/heads/master
|
tests/contrib/utils/test_weekday.py
|
5
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from enum import Enum
from airflow.contrib.utils.weekday import WeekDay
class TestWeekDay(unittest.TestCase):
def test_weekday_enum_length(self):
self.assertEqual(len(WeekDay), 7)
def test_weekday_name_value(self):
weekdays = "MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY SUNDAY"
weekdays = weekdays.split()
for i, weekday in enumerate(weekdays, start=1):
weekday_enum = WeekDay(i)
self.assertEqual(weekday_enum, i)
self.assertEqual(int(weekday_enum), i)
self.assertEqual(weekday_enum.name, weekday)
self.assertTrue(weekday_enum in WeekDay)
self.assertTrue(0 < weekday_enum < 8)
self.assertIsInstance(weekday_enum, WeekDay)
self.assertIsInstance(weekday_enum, int)
self.assertIsInstance(weekday_enum, Enum)
|
furthz/colegio
|
refs/heads/master
|
src/utils/apps.py
|
15
|
from django.apps import AppConfig
class UtilsConfig(AppConfig):
name = 'utils'
|
sharad/calibre
|
refs/heads/master
|
src/calibre/ebooks/rtf/preprocess.py
|
10
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2010, Gerendi Sandor Attila'
__docformat__ = 'restructuredtext en'
"""
RTF tokenizer and token parser. v.1.0 (1/17/2010)
Author: Gerendi Sandor Attila
At this point this will tokenize an RTF file, then rebuild it from the tokens.
In the process the UTF8 tokens are altered so they are supported by RTF2XML while remaining compliant with the RTF specification.
"""
class tokenDelimitatorStart():
def __init__(self):
pass
def toRTF(self):
return b'{'
def __repr__(self):
return '{'
class tokenDelimitatorEnd():
def __init__(self):
pass
def toRTF(self):
return b'}'
def __repr__(self):
return '}'
class tokenControlWord():
def __init__(self, name, separator = ''):
self.name = name
self.separator = separator
def toRTF(self):
return self.name + self.separator
def __repr__(self):
return self.name + self.separator
class tokenControlWordWithNumericArgument():
def __init__(self, name, argument, separator = ''):
self.name = name
self.argument = argument
self.separator = separator
def toRTF(self):
return self.name + repr(self.argument) + self.separator
def __repr__(self):
return self.name + repr(self.argument) + self.separator
class tokenControlSymbol():
def __init__(self, name):
self.name = name
def toRTF(self):
return self.name
def __repr__(self):
return self.name
class tokenData():
def __init__(self, data):
self.data = data
def toRTF(self):
return self.data
def __repr__(self):
return self.data
class tokenBinN():
def __init__(self, data, separator = ''):
self.data = data
self.separator = separator
def toRTF(self):
return "\\bin" + repr(len(self.data)) + self.separator + self.data
def __repr__(self):
return "\\bin" + repr(len(self.data)) + self.separator + self.data
class token8bitChar():
def __init__(self, data):
self.data = data
def toRTF(self):
return "\\'" + self.data
def __repr__(self):
return "\\'" + self.data
class tokenUnicode():
def __init__(self, data, separator = '', current_ucn = 1, eqList = []):
self.data = data
self.separator = separator
self.current_ucn = current_ucn
self.eqList = eqList
def toRTF(self):
result = '\\u' + repr(self.data) + ' '
ucn = self.current_ucn
if len(self.eqList) < ucn:
ucn = len(self.eqList)
result = tokenControlWordWithNumericArgument('\\uc', ucn).toRTF() + result
i = 0
for eq in self.eqList:
if i >= ucn:
break
result = result + eq.toRTF()
return result
def __repr__(self):
return '\\u' + repr(self.data)
def isAsciiLetter(value):
return ((value >= 'a') and (value <= 'z')) or ((value >= 'A') and (value <= 'Z'))
def isDigit(value):
return (value >= '0') and (value <= '9')
def isChar(value, char):
return value == char
def isString(buffer, string):
return buffer == string
class RtfTokenParser():
def __init__(self, tokens):
self.tokens = tokens
self.process()
self.processUnicode()
def process(self):
i = 0
newTokens = []
while i < len(self.tokens):
if isinstance(self.tokens[i], tokenControlSymbol):
if isString(self.tokens[i].name, "\\'"):
i = i + 1
if not isinstance(self.tokens[i], tokenData):
raise Exception('Error: token8bitChar without data.')
if len(self.tokens[i].data) < 2:
raise Exception('Error: token8bitChar without data.')
newTokens.append(token8bitChar(self.tokens[i].data[0:2]))
if len(self.tokens[i].data) > 2:
newTokens.append(tokenData(self.tokens[i].data[2:]))
i = i + 1
continue
newTokens.append(self.tokens[i])
i = i + 1
self.tokens = list(newTokens)
def processUnicode(self):
i = 0
newTokens = []
ucNbStack = [1]
while i < len(self.tokens):
if isinstance(self.tokens[i], tokenDelimitatorStart):
ucNbStack.append(ucNbStack[len(ucNbStack) - 1])
newTokens.append(self.tokens[i])
i = i + 1
continue
if isinstance(self.tokens[i], tokenDelimitatorEnd):
ucNbStack.pop()
newTokens.append(self.tokens[i])
i = i + 1
continue
if isinstance(self.tokens[i], tokenControlWordWithNumericArgument):
if isString(self.tokens[i].name, '\\uc'):
ucNbStack[len(ucNbStack) - 1] = self.tokens[i].argument
newTokens.append(self.tokens[i])
i = i + 1
continue
if isString(self.tokens[i].name, '\\u'):
x = i
j = 0
i = i + 1
replace = []
partialData = None
ucn = ucNbStack[len(ucNbStack) - 1]
while (i < len(self.tokens)) and (j < ucn):
if isinstance(self.tokens[i], tokenDelimitatorStart):
break
if isinstance(self.tokens[i], tokenDelimitatorEnd):
break
if isinstance(self.tokens[i], tokenData):
if len(self.tokens[i].data) >= ucn - j:
replace.append(tokenData(self.tokens[i].data[0 : ucn - j]))
if len(self.tokens[i].data) > ucn - j:
partialData = tokenData(self.tokens[i].data[ucn - j:])
i = i + 1
break
else:
replace.append(self.tokens[i])
j = j + len(self.tokens[i].data)
i = i + 1
continue
if isinstance(self.tokens[i], token8bitChar) or isinstance(self.tokens[i], tokenBinN):
replace.append(self.tokens[i])
i = i + 1
j = j + 1
continue
raise Exception('Error: incorrect utf replacement.')
#calibre rtf2xml does not support utfreplace
replace = []
newTokens.append(tokenUnicode(self.tokens[x].argument, self.tokens[x].separator, ucNbStack[len(ucNbStack) - 1], replace))
if partialData != None:
newTokens.append(partialData)
continue
newTokens.append(self.tokens[i])
i = i + 1
self.tokens = list(newTokens)
def toRTF(self):
result = []
for token in self.tokens:
result.append(token.toRTF())
return "".join(result)
class RtfTokenizer():
def __init__(self, rtfData):
self.rtfData = []
self.tokens = []
self.rtfData = rtfData
self.tokenize()
def tokenize(self):
i = 0
lastDataStart = -1
while i < len(self.rtfData):
if isChar(self.rtfData[i], '{'):
if lastDataStart > -1:
self.tokens.append(tokenData(self.rtfData[lastDataStart : i]))
lastDataStart = -1
self.tokens.append(tokenDelimitatorStart())
i = i + 1
continue
if isChar(self.rtfData[i], '}'):
if lastDataStart > -1:
self.tokens.append(tokenData(self.rtfData[lastDataStart : i]))
lastDataStart = -1
self.tokens.append(tokenDelimitatorEnd())
i = i + 1
continue
if isChar(self.rtfData[i], '\\'):
if i + 1 >= len(self.rtfData):
raise Exception('Error: Control character found at the end of the document.')
if lastDataStart > -1:
self.tokens.append(tokenData(self.rtfData[lastDataStart : i]))
lastDataStart = -1
tokenStart = i
i = i + 1
#Control Words
if isAsciiLetter(self.rtfData[i]):
#consume <ASCII Letter Sequence>
consumed = False
while i < len(self.rtfData):
if not isAsciiLetter(self.rtfData[i]):
tokenEnd = i
consumed = True
break
i = i + 1
if not consumed:
raise Exception('Error (at:%d): Control Word without end.'%(tokenStart))
#we have numeric argument before delimiter
if isChar(self.rtfData[i], '-') or isDigit(self.rtfData[i]):
#consume the numeric argument
consumed = False
l = 0
while i < len(self.rtfData):
if not isDigit(self.rtfData[i]):
consumed = True
break
l = l + 1
i = i + 1
if l > 10 :
raise Exception('Error (at:%d): Too many digits in control word numeric argument.' % (tokenStart,))
if not consumed:
raise Exception('Error (at:%d): Control Word without numeric argument end.' % (tokenStart,))
separator = ''
if isChar(self.rtfData[i], ' '):
separator = ' '
controlWord = self.rtfData[tokenStart: tokenEnd]
if tokenEnd < i:
value = int(self.rtfData[tokenEnd: i])
if isString(controlWord, "\\bin"):
i = i + value
self.tokens.append(tokenBinN(self.rtfData[tokenStart:i], separator))
else:
self.tokens.append(tokenControlWordWithNumericArgument(controlWord, value, separator))
else:
self.tokens.append(tokenControlWord(controlWord, separator))
#space delimiter, we should discard it
if self.rtfData[i] == ' ':
i = i + 1
#Control Symbol
else:
self.tokens.append(tokenControlSymbol(self.rtfData[tokenStart : i + 1]))
i = i + 1
continue
if lastDataStart < 0:
lastDataStart = i
i = i + 1
def toRTF(self):
result = []
for token in self.tokens:
result.append(token.toRTF())
return "".join(result)
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print ("Usage %prog rtfFileToConvert")
sys.exit()
f = open(sys.argv[1], 'rb')
data = f.read()
f.close()
tokenizer = RtfTokenizer(data)
parsedTokens = RtfTokenParser(tokenizer.tokens)
data = parsedTokens.toRTF()
f = open(sys.argv[1], 'w')
f.write(data)
f.close()
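# Illustrative round-trip sketch (Python 2 string semantics assumed, as in
# the __main__ block above; the input data is hypothetical):
#   tokenizer = RtfTokenizer("{\\rtf1 Hello}")
#   parsed = RtfTokenParser(tokenizer.tokens)
#   rebuilt = parsed.toRTF()  # equivalent RTF text rebuilt from the tokens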
|
dysya92/monkeys
|
refs/heads/master
|
flask/lib/python2.7/site-packages/whoosh/lang/dmetaphone.py
|
96
|
# coding= utf-8
# This script implements the Double Metaphone algorithm (c) 1998, 1999 by
# Lawrence Philips. It was translated to Python from the C source written by
# Kevin Atkinson (http://aspell.net/metaphone/) by Andrew Collins on January 12,
# 2007, who claims no rights to this work.
# http://atomboy.isa-geek.com:8080/plone/Members/acoil/programing/double-metaphone
import re
from whoosh.compat import u
vowels = frozenset("AEIOUY")
slavo_germ_exp = re.compile("W|K|CZ|WITZ")
silent_starts = re.compile("GN|KN|PN|WR|PS")
def double_metaphone(text):
text = text.upper()
slavo_germanic = bool(slavo_germ_exp.search(text))
length = len(text)
text = "--" + text + " "
first = pos = 2
last = first + length - 1
primary = secondary = ""
if silent_starts.match(text, pos):
pos += 1
while pos < length + 2:
ch = text[pos]
if ch in vowels:
# all init vowels now map to 'A'
if pos != first:
next = (None, 1)
else:
next = ("A", 1)
elif ch == "B":
#"-mb", e.g", "dumb", already skipped over... see 'M' below
if text[pos + 1] == "B":
next = ("P", 2)
else:
next = ("P", 1)
elif ch == "C":
# various germanic
if (pos > (first + 1) and text[pos - 2] not in vowels and text[pos - 1:pos + 2] == 'ACH' and \
(text[pos + 2] not in ['I', 'E'] or text[pos - 2:pos + 4] in ['BACHER', 'MACHER'])):
next = ('K', 2)
# special case 'CAESAR'
elif pos == first and text[first:first + 6] == 'CAESAR':
next = ('S', 2)
elif text[pos:pos + 4] == 'CHIA': # italian 'chianti'
next = ('K', 2)
elif text[pos:pos + 2] == 'CH':
# find 'michael'
if pos > first and text[pos:pos + 4] == 'CHAE':
next = ('K', 'X', 2)
elif pos == first and (text[pos + 1:pos + 6] in ['HARAC', 'HARIS'] or \
text[pos + 1:pos + 4] in ["HOR", "HYM", "HIA", "HEM"]) and text[first:first + 5] != 'CHORE':
next = ('K', 2)
# germanic, greek, or otherwise 'ch' for 'kh' sound
elif text[first:first + 4] in ['VAN ', 'VON '] or text[first:first + 3] == 'SCH' \
or text[pos - 2:pos + 4] in ["ORCHES", "ARCHIT", "ORCHID"] \
or text[pos + 2] in ['T', 'S'] \
or ((text[pos - 1] in ["A", "O", "U", "E"] or pos == first) \
and text[pos + 2] in ["L", "R", "N", "M", "B", "H", "F", "V", "W", " "]):
next = ('K', 1)
else:
if pos > first:
if text[first:first + 2] == 'MC':
next = ('K', 2)
else:
next = ('X', 'K', 2)
else:
next = ('X', 2)
# e.g, 'czerny'
elif text[pos:pos + 2] == 'CZ' and text[pos - 2:pos + 2] != 'WICZ':
next = ('S', 'X', 2)
# e.g., 'focaccia'
elif text[pos + 1:pos + 4] == 'CIA':
next = ('X', 3)
# double 'C', but not if e.g. 'McClellan'
elif text[pos:pos + 2] == 'CC' and not (pos == (first + 1) and text[first] == 'M'):
# 'bellocchio' but not 'bacchus'
if text[pos + 2] in ["I", "E", "H"] and text[pos + 2:pos + 4] != 'HU':
# 'accident', 'accede' 'succeed'
if (pos == (first + 1) and text[first] == 'A') or \
text[pos - 1:pos + 4] in ['UCCEE', 'UCCES']:
next = ('KS', 3)
# 'bacci', 'bertucci', other italian
else:
next = ('X', 3)
else:
next = ('K', 2)
elif text[pos:pos + 2] in ["CK", "CG", "CQ"]:
next = ('K', 'K', 2)
elif text[pos:pos + 2] in ["CI", "CE", "CY"]:
# italian vs. english
if text[pos:pos + 3] in ["CIO", "CIE", "CIA"]:
next = ('S', 'X', 2)
else:
next = ('S', 2)
else:
# name sent in 'mac caffrey', 'mac gregor
if text[pos + 1:pos + 3] in [" C", " Q", " G"]:
next = ('K', 3)
else:
if text[pos + 1] in ["C", "K", "Q"] and text[pos + 1:pos + 3] not in ["CE", "CI"]:
next = ('K', 2)
else: # default for 'C'
next = ('K', 1)
elif ch == u('\xc7'):
next = ('S', 1)
elif ch == 'D':
if text[pos:pos + 2] == 'DG':
if text[pos + 2] in ['I', 'E', 'Y']: # e.g. 'edge'
next = ('J', 3)
else:
next = ('TK', 2)
elif text[pos:pos + 2] in ['DT', 'DD']:
next = ('T', 2)
else:
next = ('T', 1)
elif ch == 'F':
if text[pos + 1] == 'F':
next = ('F', 2)
else:
next = ('F', 1)
elif ch == 'G':
if text[pos + 1] == 'H':
if pos > first and text[pos - 1] not in vowels:
next = ('K', 2)
elif pos < (first + 3):
if pos == first: # 'ghislane', ghiradelli
if text[pos + 2] == 'I':
next = ('J', 2)
else:
next = ('K', 2)
# Parker's rule (with some further refinements) - e.g., 'hugh'
elif (pos > (first + 1) and text[pos - 2] in ['B', 'H', 'D']) \
or (pos > (first + 2) and text[pos - 3] in ['B', 'H', 'D']) \
or (pos > (first + 3) and text[pos - 4] in ['B', 'H']):
next = (None, 2)
else:
# e.g., 'laugh', 'McLaughlin', 'cough', 'gough', 'rough', 'tough'
if pos > (first + 2) and text[pos - 1] == 'U' \
and text[pos - 3] in ["C", "G", "L", "R", "T"]:
next = ('F', 2)
else:
if pos > first and text[pos - 1] != 'I':
next = ('K', 2)
elif text[pos + 1] == 'N':
if pos == (first + 1) and text[first] in vowels and not slavo_germanic:
next = ('KN', 'N', 2)
else:
# not e.g. 'cagney'
if text[pos + 2:pos + 4] != 'EY' and text[pos + 1] != 'Y' and not slavo_germanic:
next = ('N', 'KN', 2)
else:
next = ('KN', 2)
# 'tagliaro'
elif text[pos + 1:pos + 3] == 'LI' and not slavo_germanic:
next = ('KL', 'L', 2)
# -ges-,-gep-,-gel-, -gie- at beginning
elif pos == first and (text[pos + 1] == 'Y' \
or text[pos + 1:pos + 3] in ["ES", "EP", "EB", "EL", "EY", "IB", "IL", "IN", "IE", "EI", "ER"]):
next = ('K', 'J', 2)
# -ger-, -gy- (the slice must span two characters for 'ER' to ever match)
elif (text[pos + 1:pos + 3] == 'ER' or text[pos + 1] == 'Y') \
and text[first:first + 6] not in ["DANGER", "RANGER", "MANGER"] \
and text[pos - 1] not in ['E', 'I'] and text[pos - 1:pos + 2] not in ['RGY', 'OGY']:
next = ('K', 'J', 2)
# italian e.g, 'biaggi'
elif text[pos + 1] in ['E', 'I', 'Y'] or text[pos - 1:pos + 3] in ["AGGI", "OGGI"]:
# obvious germanic
if text[first:first + 4] in ['VON ', 'VAN '] or text[first:first + 3] == 'SCH' \
or text[pos + 1:pos + 3] == 'ET':
next = ('K', 2)
else:
# always soft if french ending
if text[pos + 1:pos + 5] == 'IER ':
next = ('J', 2)
else:
next = ('J', 'K', 2)
elif text[pos + 1] == 'G':
next = ('K', 2)
else:
next = ('K', 1)
elif ch == 'H':
# only keep if first & before vowel or btw. 2 vowels
if (pos == first or text[pos - 1] in vowels) and text[pos + 1] in vowels:
next = ('H', 2)
else: # (also takes care of 'HH')
next = (None, 1)
elif ch == 'J':
# obvious spanish, 'jose', 'san jacinto'
if text[pos:pos + 4] == 'JOSE' or text[first:first + 4] == 'SAN ':
if (pos == first and text[pos + 4] == ' ') or text[first:first + 4] == 'SAN ':
next = ('H',)
else:
next = ('J', 'H')
elif pos == first and text[pos:pos + 4] != 'JOSE':
next = ('J', 'A') # Yankelovich/Jankelowicz
else:
# spanish pron. of e.g. 'bajador'
if text[pos - 1] in vowels and not slavo_germanic \
and text[pos + 1] in ['A', 'O']:
next = ('J', 'H')
else:
if pos == last:
next = ('J', ' ')
else:
if text[pos + 1] not in ["L", "T", "K", "S", "N", "M", "B", "Z"] \
and text[pos - 1] not in ["S", "K", "L"]:
next = ('J',)
else:
next = (None,)
if text[pos + 1] == 'J':
next = next + (2,)
else:
next = next + (1,)
elif ch == 'K':
if text[pos + 1] == 'K':
next = ('K', 2)
else:
next = ('K', 1)
elif ch == 'L':
if text[pos + 1] == 'L':
# spanish e.g. 'cabrillo', 'gallegos'
if (pos == (last - 2) and text[pos - 1:pos + 3] in ["ILLO", "ILLA", "ALLE"]) \
or ((text[last - 1:last + 1] in ["AS", "OS"] or text[last] in ["A", "O"]) \
and text[pos - 1:pos + 3] == 'ALLE'):
next = ('L', '', 2)
else:
next = ('L', 2)
else:
next = ('L', 1)
elif ch == 'M':
if text[pos + 1:pos + 4] == 'UMB' \
and (pos + 1 == last or text[pos + 2:pos + 4] == 'ER') \
or text[pos + 1] == 'M':
next = ('M', 2)
else:
next = ('M', 1)
elif ch == 'N':
if text[pos + 1] == 'N':
next = ('N', 2)
else:
next = ('N', 1)
elif ch == u('\xd1'):
next = ('N', 1)
elif ch == 'P':
if text[pos + 1] == 'H':
next = ('F', 2)
elif text[pos + 1] in ['P', 'B']: # also account for "campbell", "raspberry"
next = ('P', 2)
else:
next = ('P', 1)
elif ch == 'Q':
if text[pos + 1] == 'Q':
next = ('K', 2)
else:
next = ('K', 1)
elif ch == 'R':
# french e.g. 'rogier', but exclude 'hochmeier'
if pos == last and not slavo_germanic \
and text[pos - 2:pos] == 'IE' and text[pos - 4:pos - 2] not in ['ME', 'MA']:
next = ('', 'R')
else:
next = ('R',)
if text[pos + 1] == 'R':
next = next + (2,)
else:
next = next + (1,)
elif ch == 'S':
# special cases 'island', 'isle', 'carlisle', 'carlysle'
if text[pos - 1:pos + 2] in ['ISL', 'YSL']:
next = (None, 1)
# special case 'sugar-'
elif pos == first and text[first:first + 5] == 'SUGAR':
next = ('X', 'S', 1)
elif text[pos:pos + 2] == 'SH':
# germanic
if text[pos + 1:pos + 5] in ["HEIM", "HOEK", "HOLM", "HOLZ"]:
next = ('S', 2)
else:
next = ('X', 2)
# italian & armenian
elif text[pos:pos + 3] in ["SIO", "SIA"] or text[pos:pos + 4] == 'SIAN':
if not slavo_germanic:
next = ('S', 'X', 3)
else:
next = ('S', 3)
# german & anglicisations, e.g. 'smith' match 'schmidt', 'snider' match 'schneider'
# also, -sz- in Slavic languages, although in Hungarian it is pronounced 's'
elif (pos == first and text[pos + 1] in ["M", "N", "L", "W"]) or text[pos + 1] == 'Z':
next = ('S', 'X')
if text[pos + 1] == 'Z':
next = next + (2,)
else:
next = next + (1,)
elif text[pos:pos + 2] == 'SC':
# Schlesinger's rule
if text[pos + 2] == 'H':
# dutch origin, e.g. 'school', 'schooner'
if text[pos + 3:pos + 5] in ["OO", "ER", "EN", "UY", "ED", "EM"]:
# 'schermerhorn', 'schenker'
if text[pos + 3:pos + 5] in ['ER', 'EN']:
next = ('X', 'SK', 3)
else:
next = ('SK', 3)
else:
if pos == first and text[first + 3] not in vowels and text[first + 3] != 'W':
next = ('X', 'S', 3)
else:
next = ('X', 3)
elif text[pos + 2] in ['I', 'E', 'Y']:
next = ('S', 3)
else:
next = ('SK', 3)
# french e.g. 'resnais', 'artois'
elif pos == last and text[pos - 2:pos] in ['AI', 'OI']:
next = ('', 'S', 1)
else:
next = ('S',)
if text[pos + 1] in ['S', 'Z']:
next = next + (2,)
else:
next = next + (1,)
elif ch == 'T':
if text[pos:pos + 4] == 'TION':
next = ('X', 3)
elif text[pos:pos + 3] in ['TIA', 'TCH']:
next = ('X', 3)
elif text[pos:pos + 2] == 'TH' or text[pos:pos + 3] == 'TTH':
# special case 'thomas', 'thames' or germanic
if text[pos + 2:pos + 4] in ['OM', 'AM'] or text[first:first + 4] in ['VON ', 'VAN '] \
or text[first:first + 3] == 'SCH':
next = ('T', 2)
else:
next = ('0', 'T', 2)
elif text[pos + 1] in ['T', 'D']:
next = ('T', 2)
else:
next = ('T', 1)
elif ch == 'V':
if text[pos + 1] == 'V':
next = ('F', 2)
else:
next = ('F', 1)
elif ch == 'W':
# can also be in middle of word
if text[pos:pos + 2] == 'WR':
next = ('R', 2)
elif pos == first and (text[pos + 1] in vowels or text[pos:pos + 2] == 'WH'):
# Wasserman should match Vasserman
if text[pos + 1] in vowels:
next = ('A', 'F', 1)
else:
next = ('A', 1)
# Arnow should match Arnoff
elif (pos == last and text[pos - 1] in vowels) \
or text[pos - 1:pos + 5] in ["EWSKI", "EWSKY", "OWSKI", "OWSKY"] \
or text[first:first + 3] == 'SCH':
next = ('', 'F', 1)
# polish e.g. 'filipowicz'
elif text[pos:pos + 4] in ["WICZ", "WITZ"]:
next = ('TS', 'FX', 4)
else: # default is to skip it
next = (None, 1)
elif ch == 'X':
# french e.g. breaux
next = (None,)
if not(pos == last and (text[pos - 3:pos] in ["IAU", "EAU"] \
or text[pos - 2:pos] in ['AU', 'OU'])):
next = ('KS',)
if text[pos + 1] in ['C', 'X']:
next = next + (2,)
else:
next = next + (1,)
elif ch == 'Z':
# chinese pinyin e.g. 'zhao'
if text[pos + 1] == 'H':
next = ('J',)
elif text[pos + 1:pos + 3] in ["ZO", "ZI", "ZA"] \
or (slavo_germanic and pos > first and text[pos - 1] != 'T'):
next = ('S', 'TS')
else:
next = ('S',)
if text[pos + 1] == 'Z':
next = next + (2,)
else:
next = next + (1,)
else:
next = (None, 1)
if len(next) == 2:
if next[0]:
primary += next[0]
secondary += next[0]
pos += next[1]
elif len(next) == 3:
if next[0]:
primary += next[0]
if next[1]:
secondary += next[1]
pos += next[2]
if primary == secondary:
return (primary, None)
else:
return (primary, secondary)
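# Illustrative outputs, hand-traced through the rules above (treat these as
# examples rather than reference values):
#   double_metaphone("Smith")   -> ('SM0', 'XMT')
#   double_metaphone("Schmidt") -> ('XMT', 'SMT')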
|
cybem/graphite-web-iow
|
refs/heads/master
|
webapp/graphite/account/urls.py
|
8
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from django.conf.urls import *
urlpatterns = patterns('graphite.account.views',
('^login/?$', 'loginView'),
('^logout/?$', 'logoutView'),
('^edit/?$', 'editProfile'),
('^update/?$','updateProfile'),
)
|
vdrey/Project-Programs
|
refs/heads/master
|
Python/caesarCipher.py
|
1
|
# Caesar Cipher
# the string to be encrypted/decrypted
message = input('Message: ')
# the encryption/decryption key
key = int(input('Key (0 to 25): '))
# tells the program to encrypt or decrypt
mode = input('Encrypt or Decrypt: ')
mode = mode.lower()
# every possible symbol that can be encrypted
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# stores the encrypted/decrypted form of the message
translated = ''
# capitalize the string in message
message = message.upper()
# run the encryption/decryption code on each symbol in the message string
for symbol in message:
if symbol in LETTERS:
# get the encrypted (or decrypted) number for this symbol
num = LETTERS.find(symbol) # get the number of the symbol
if mode == 'encrypt':
num = num + key
elif mode == 'decrypt':
num = num - key
# handle the wrap-around if num is larger than the length of
# LETTERS or less than 0
if num >= len(LETTERS):
num = num - len(LETTERS)
elif num < 0:
num = num + len(LETTERS)
# add encrypted/decrypted number's symbol at the end of translated
translated = translated + LETTERS[num]
else:
# just add the symbol without encrypting/decrypting
translated = translated + symbol
# print the encrypted/decrypted string to the screen
print(translated)
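# Note: the wrap-around handling above is plain modular arithmetic. An
# equivalent one-liner for the encrypt case (same LETTERS alphabet):
#   translated = translated + LETTERS[(LETTERS.find(symbol) + key) % len(LETTERS)]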
|
IsCoolEntertainment/debpkg_python-fabric
|
refs/heads/master
|
tests/test_server.py
|
35
|
"""
Tests for the test server itself.
Not intended to be run by the greater test suite, only by specifically
targeting it on the command-line. Rationale: not really testing Fabric itself,
no need to pollute Fab's own test suite. (Yes, if these tests fail, it's likely
that the Fabric tests using the test server may also have issues, but still.)
"""
__test__ = False
from nose.tools import eq_, ok_
from fabric.network import ssh
from server import FakeSFTPServer
class AttrHolder(object):
pass
def test_list_folder():
for desc, file_map, arg, expected in (
(
"Single file",
{'file.txt': 'contents'},
'',
['file.txt']
),
(
"Single absolute file",
{'/file.txt': 'contents'},
'/',
['file.txt']
),
(
"Multiple files",
{'file1.txt': 'contents', 'file2.txt': 'contents2'},
'',
['file1.txt', 'file2.txt']
),
(
"Single empty folder",
{'folder': None},
'',
['folder']
),
(
"Empty subfolders",
{'folder': None, 'folder/subfolder': None},
'',
['folder']
),
(
"Non-empty sub-subfolder",
{'folder/subfolder/subfolder2/file.txt': 'contents'},
"folder/subfolder/subfolder2",
['file.txt']
),
(
"Mixed files, folders empty and non-empty, in homedir",
{
'file.txt': 'contents',
'file2.txt': 'contents2',
'folder/file3.txt': 'contents3',
'empty_folder': None
},
'',
['file.txt', 'file2.txt', 'folder', 'empty_folder']
),
(
"Mixed files, folders empty and non-empty, in subdir",
{
'file.txt': 'contents',
'file2.txt': 'contents2',
'folder/file3.txt': 'contents3',
'folder/subfolder/file4.txt': 'contents4',
'empty_folder': None
},
"folder",
['file3.txt', 'subfolder']
),
):
# Pass in fake server obj. (Can't easily clean up API to be more
# testable since it's all implementing 'ssh' interface stuff.)
server = AttrHolder()
server.files = file_map
interface = FakeSFTPServer(server)
results = interface.list_folder(arg)
# In this particular suite of tests, all results should be a file list,
# not "no files found"
ok_(results != ssh.SFTP_NO_SUCH_FILE)
# Grab filename from SFTPAttribute objects in result
output = map(lambda x: x.filename, results)
# Yield test generator
eq_.description = "list_folder: %s" % desc
yield eq_, set(expected), set(output)
del eq_.description
|
balloob/home-assistant
|
refs/heads/dev
|
homeassistant/components/freebox/switch.py
|
12
|
"""Support for Freebox Delta, Revolution and Mini 4K."""
import logging
from typing import Dict
from aiofreepybox.exceptions import InsufficientPermissionsError
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN
from .router import FreeboxRouter
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the switch."""
router = hass.data[DOMAIN][entry.unique_id]
async_add_entities([FreeboxWifiSwitch(router)], True)
class FreeboxWifiSwitch(SwitchEntity):
"""Representation of a freebox wifi switch."""
def __init__(self, router: FreeboxRouter) -> None:
"""Initialize the Wifi switch."""
self._name = "Freebox WiFi"
self._state = None
self._router = router
self._unique_id = f"{self._router.mac} {self._name}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def name(self) -> str:
"""Return the name of the switch."""
return self._name
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._state
@property
    def device_info(self) -> Dict[str, Any]:
"""Return the device information."""
return self._router.device_info
async def _async_set_state(self, enabled: bool):
"""Turn the switch on or off."""
wifi_config = {"enabled": enabled}
try:
await self._router.wifi.set_global_config(wifi_config)
except InsufficientPermissionsError:
_LOGGER.warning(
"Home Assistant does not have permissions to modify the Freebox settings. Please refer to documentation"
)
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
await self._async_set_state(True)
async def async_turn_off(self, **kwargs):
"""Turn the switch off."""
await self._async_set_state(False)
async def async_update(self):
"""Get the state and update it."""
        config = await self._router.wifi.get_global_config()
        self._state = bool(config["enabled"])
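# Hypothetical smoke test (not part of Home Assistant): exercise the
# on/off flow with in-memory stubs; StubWifi and StubRouter are
# assumptions standing in for the real FreeboxRouter.
if __name__ == "__main__":
    import asyncio

    class StubWifi:
        def __init__(self):
            self._config = {"enabled": False}

        async def set_global_config(self, config):
            self._config.update(config)

        async def get_global_config(self):
            return self._config

    class StubRouter:
        mac = "00:11:22:33:44:55"
        wifi = StubWifi()

    async def _demo():
        switch = FreeboxWifiSwitch(StubRouter())
        await switch.async_turn_on()
        await switch.async_update()
        assert switch.is_on

    asyncio.run(_demo())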
|
shawnlawson/The_Force
|
refs/heads/gh-pages
|
pythonBridge/websocketUDPBridge.py
|
1
|
import sys, os
from twisted.python import log
from twisted.internet import reactor, ssl
from twisted.application import service
from twisted.internet.protocol import DatagramProtocol, Protocol, Factory
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerProtocol, \
WebSocketServerFactory, \
listenWS
from autobahn.twisted.resource import WebSocketResource, \
HTTPChannelHixie76Aware
# constants
# Me to receive
SERVER_IP = '127.0.0.1'
SERVER_UDP_PORT = 7600
SERVER_WS_PORT = 8000
SERVER_HTTP_PORT = 9000
SERVER_HTTP_RESOURCES = 'web'
# To someone/thing else
# to Bela
CLIENT_IP = '192.168.7.2'
CLIENT_UDP_PORT = 7562
#remote
# CLIENT_IP = '10.0.0.1'
# CLIENT_UDP_PORT = 7500
#local
# CLIENT_IP = '127.0.0.1'
# CLIENT_UDP_PORT = 8888
# [HTTP] > [CLIENT WS] > [SERVER WS] > bridge > [SERVER UDP] > [CLIENT UDP]
class Bridge():
def __init__(self):
self.udpServer = None
self.wsServer = None
def setUdpServer(self, udpServer):
self.udpServer = udpServer
def setWebsocketServer(self, wsServer):
self.wsServer = wsServer
def udpToWebsocket(self, data):
if self.wsServer is not None:
self.wsServer.sendMessage(data, True)
def websocketToUdp(self, data):
if self.udpServer is not None:
self.udpServer.transport.write(data, (CLIENT_IP, CLIENT_UDP_PORT))
# udp server
class UDPServer(DatagramProtocol):
def __init__(self, bridge):
self.bridge = bridge
self.bridge.setUdpServer(self)
def datagramReceived(self, data, (host, port)):
self.bridge.udpToWebsocket(data)
#print data
# websocket server
class BridgedWebSocketServerFactory(WebSocketServerFactory):
def __init__(self, url, debug, debugCodePaths, bridge):
WebSocketServerFactory.__init__(self, url, debug = debug, debugCodePaths = debugCodePaths)
self.bridge = bridge
class WebSocketServer(WebSocketServerProtocol):
def onOpen(self):
print 'WebSocket connection open.'
def onConnect(self, request):
self.factory.bridge.setWebsocketServer(self)
print 'Client connecting: {0}'.format(request.peer)
def onMessage(self, payload, isBinary):
# print payload
self.factory.bridge.websocketToUdp(payload)
def onClose(self, wasClean, code, reason):
print 'WebSocket connection closed: {0}'.format(reason)
# initialize servers
if __name__ == '__main__':
bridge = Bridge()
log.startLogging(sys.stdout)
# websocket setup
wsAddress = 'wss://%s:%d' % (SERVER_IP, SERVER_WS_PORT)
contextFactory = ssl.DefaultOpenSSLContextFactory('/etc/apache2/ssl/localhost.key',
'/etc/apache2/ssl/localhost.crt')
factory = BridgedWebSocketServerFactory(wsAddress, False, False, bridge)
factory.protocol = WebSocketServer
reactor.listenSSL(SERVER_WS_PORT, factory, contextFactory)
# reactor.listenTCP(SERVER_WS_PORT, factory)
# http setup
webdir = os.path.abspath(SERVER_HTTP_RESOURCES)
site = Site(File(webdir))
site.protocol = HTTPChannelHixie76Aware
reactor.listenTCP(SERVER_HTTP_PORT, site)
# udp setup
reactor.listenUDP(SERVER_UDP_PORT, UDPServer(bridge))
# start session
reactor.run()
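# Hypothetical smoke test (run from another shell, not from this script):
#   python -c "import socket; s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM); s.sendto('hi', ('127.0.0.1', 7600))"
# The datagram lands in UDPServer.datagramReceived and is forwarded to the
# connected websocket client via Bridge.udpToWebsocket.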
|
mrrrgn/AutobahnPython
|
refs/heads/master
|
examples/asyncio/wamp/basic/session/series/__init__.py
|
561
|
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
|
mathspace/python-social-auth
|
refs/heads/master
|
social/apps/cherrypy_app/views.py
|
77
|
import cherrypy
from social.utils import setting_name, module_member
from social.actions import do_auth, do_complete, do_disconnect
from social.apps.cherrypy_app.utils import psa
class CherryPyPSAViews(object):
@cherrypy.expose
@psa('/complete/%(backend)s')
def login(self, backend):
return do_auth(self.backend)
@cherrypy.expose
@psa('/complete/%(backend)s')
def complete(self, backend, *args, **kwargs):
login = cherrypy.config.get(setting_name('LOGIN_METHOD'))
do_login = module_member(login) if login else self.do_login
user = getattr(cherrypy.request, 'user', None)
return do_complete(self.backend, do_login, user=user, *args, **kwargs)
@cherrypy.expose
@psa()
def disconnect(self, backend, association_id=None):
user = getattr(cherrypy.request, 'user', None)
return do_disconnect(self.backend, user, association_id)
def do_login(self, backend, user, social_user):
backend.strategy.session_set('user_id', user.id)
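# Hypothetical wiring sketch (not part of this module): mount the view
# tree at the site root with CherryPy's standard quickstart; a real
# deployment would also load the PSA settings (backends, session storage)
# into cherrypy.config before this call.
if __name__ == '__main__':
    cherrypy.quickstart(CherryPyPSAViews(), '/')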
|
liyu1990/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/image_ops_test.py
|
5
|
"""Tests for tensorflow.ops.image_ops."""
import math
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import googletest
class FlipTest(test_util.TensorFlowTestCase):
def testIdempotentLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testIdempotentUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testIdempotentTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(image_ops.transpose_image(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
class RandomFlipTest(test_util.TensorFlowTestCase):
def testRandomLeftRight(self):
x_np = np.array([0, 1], dtype=np.uint8).reshape([1, 2, 1])
num_iterations = 500
hist = [0, 0]
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf)
for _ in xrange(num_iterations):
y_np = y.eval().flatten()[0]
hist[y_np] += 1
# Ensure that each entry is observed within 4 standard deviations.
four_stddev = 4.0 * np.sqrt(num_iterations / 2.0)
self.assertAllClose(hist, [num_iterations / 2.0] * 2, atol=four_stddev)
def testRandomUpDown(self):
x_np = np.array([0, 1], dtype=np.uint8).reshape([2, 1, 1])
num_iterations = 500
hist = [0, 0]
with self.test_session():
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf)
for _ in xrange(num_iterations):
y_np = y.eval().flatten()[0]
hist[y_np] += 1
# Ensure that each entry is observed within 4 standard deviations.
four_stddev = 4.0 * np.sqrt(num_iterations / 2.0)
self.assertAllClose(hist, [num_iterations / 2.0] * 2, atol=four_stddev)
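  # For n = 500 fair flips the expected count per bin is 250 and the
  # binomial stddev is sqrt(n * 0.25) ~= 11.2, so the atol of
  # 4 * sqrt(n / 2) ~= 63 used above is a deliberately loose bound that
  # keeps the test non-flaky.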
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor, min_value, max_value):
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x,
contrast_factor,
min_value=min_value,
max_value=max_value)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 63, 169, 255, 29, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np,
y_np,
contrast_factor=2.0,
min_value=None,
max_value=None)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float).reshape(x_shape)
y_data = [0, 0, 0, 62.75, 169.25, 255, 28.75, 0, 255, 134.75, 255, 0]
y_np = np.array(y_data, dtype=np.float).reshape(x_shape)
self._testContrast(x_np,
y_np,
contrast_factor=2.0,
min_value=0,
max_value=255)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [23, 53, 66, 50, 118, 172, 41, 54, 176, 68, 178, 60]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np,
y_np,
contrast_factor=0.5,
min_value=None,
max_value=None)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 11, 0, 255, 117, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np,
y_np,
contrast_factor=2.0,
min_value=None,
max_value=None)
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta, min_value, max_value):
with self.test_session():
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x,
delta,
min_value=min_value,
max_value=max_value)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10.0, min_value=None, max_value=None)
def testPositiveDeltaFloat(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10.0, min_value=None, max_value=None)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [5, 5, 5, 44, 125, 216, 27, 5, 224, 80, 245, 5]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10.0, min_value=5, max_value=None)
class RandomCropTest(test_util.TensorFlowTestCase):
def testNoOp(self):
# No random cropping is performed since the target width and height
    # match the image dimensions.
height = 4
width = 5
x_shape = [height, width, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=np.int32).reshape(x_shape)
target_shape_np = np.array([height, width], dtype=np.int64)
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
target_shape = constant_op.constant(target_shape_np, shape=[2])
y = image_ops.random_crop(x, target_shape)
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testRandomization(self):
    # Run a 1x1 crop num_samples times over the image and check that each
    # pixel is sampled roughly 1/num_pixels of the time.
num_samples = 1000
height = 5
width = 4
num_pixels = height * width
data = np.arange(num_pixels).reshape([height, width, 1])
x_np = np.array(data).astype(np.int32)
target_shape_np = np.array([1, 1], dtype=np.int64)
y = []
with self.test_session():
x = constant_op.constant(x_np, shape=x_np.shape)
target_shape = constant_op.constant(target_shape_np, shape=[2])
y_tf = image_ops.random_crop(x, target_shape)
for _ in xrange(num_samples):
y_np = y_tf.eval()
self.assertAllEqual(y_np.shape, [1, 1, 1])
y.extend(y_np.flatten())
# Calculate the mean and 4 * standard deviation.
mean = [num_samples / num_pixels] * num_pixels
four_stddev = 4.0 * np.sqrt(mean)
# Ensure that each entry is observed in 1/num_pixels of the samples
# within 4 standard deviations.
counts = np.bincount(y)
self.assertAllClose(counts, mean, atol=four_stddev)
class PerImageWhiteningTest(test_util.TensorFlowTestCase):
def _NumpyPerImageWhitening(self, x):
num_pixels = np.prod(x.shape)
x2 = np.square(x).astype(np.float32)
mn = np.mean(x)
vr = np.mean(x2) - (mn * mn)
stddev = max(math.sqrt(vr), 1.0 / math.sqrt(num_pixels))
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
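  # The reference above mirrors tf.image.per_image_whitening:
  #   y = (x - mean(x)) / max(stddev(x), 1 / sqrt(num_pixels))
  # where the 1/sqrt(N) floor keeps the divisor away from zero for
  # near-constant images.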
def testBasic(self):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=np.int32).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.per_image_whitening(x)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, atol=1e-4)
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def testNoOp(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
target_height = x_shape[0]
target_width = x_shape[1]
y = image_ops.crop_to_bounding_box(x, 0, 0, target_height, target_width)
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testCropping(self):
x_np = np.arange(0, 30, dtype=np.int32).reshape([6, 5, 1])
offset_height = 1
after_height = 2
offset_width = 0
after_width = 3
target_height = x_np.shape[0] - offset_height - after_height
target_width = x_np.shape[1] - offset_width - after_width
y_np = x_np[offset_height:offset_height + target_height,
offset_width:offset_width + target_width, :]
with self.test_session():
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.crop_to_bounding_box(x, offset_height, offset_width,
target_height, target_width)
y_tf = y.eval()
self.assertAllEqual(y_tf.flatten(), y_np.flatten())
class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
def testNoOp(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
target_height = x_shape[0]
target_width = x_shape[1]
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.pad_to_bounding_box(x, 0, 0, target_height, target_width)
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testPadding(self):
x_shape = [3, 4, 1]
x_np = np.ones(x_shape, dtype=np.float32)
offset_height = 2
after_height = 3
offset_width = 1
after_width = 4
target_height = x_shape[0] + offset_height + after_height
target_width = x_shape[1] + offset_width + after_width
    # Note the paddings are along height, width and depth.
paddings = ((offset_height, after_height),
(offset_width, after_width),
(0, 0))
y_np = np.pad(x_np, paddings, 'constant')
with self.test_session():
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.pad_to_bounding_box(x, offset_height, offset_width,
target_height, target_width)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
class ResizeImagesTest(test_util.TensorFlowTestCase):
OPTIONS = [image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC,
image_ops.ResizeMethod.AREA]
def testNoOp(self):
img_shape = [1, 6, 4, 1]
data = [128, 128, 64, 64,
128, 128, 64, 64,
64, 64, 128, 128,
64, 64, 128, 128,
50, 50, 100, 100,
50, 50, 100, 100]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 6
target_width = 4
for opt in self.OPTIONS:
with self.test_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, target_height, target_width, opt)
resized = y.eval()
self.assertAllClose(resized, img_np, atol=1e-5)
def testResizeDown(self):
data = [128, 128, 64, 64,
128, 128, 64, 64,
64, 64, 128, 128,
64, 64, 128, 128,
50, 50, 100, 100,
50, 50, 100, 100]
expected_data = [128, 64,
64, 128,
50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, target_height, target_width, opt)
expected = np.array(expected_data).reshape(target_shape)
resized = y.eval()
self.assertAllClose(resized, expected, atol=1e-5)
def testResizeUp(self):
img_shape = [1, 3, 2, 1]
data = [128, 64,
64, 128,
50, 100]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
128.0, 96.0, 64.0, 64.0,
96.0, 96.0, 96.0, 96.0,
64.0, 96.0, 128.0, 128.0,
57.0, 85.5, 114.0, 114.0,
50.0, 75.0, 100.0, 100.0,
50.0, 75.0, 100.0, 100.0]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
128.0, 128.0, 64.0, 64.0,
128.0, 128.0, 64.0, 64.0,
64.0, 64.0, 128.0, 128.0,
64.0, 64.0, 128.0, 128.0,
50.0, 50.0, 100.0, 100.0,
50.0, 50.0, 100.0, 100.0]
expected_data[image_ops.ResizeMethod.AREA] = [
128.0, 128.0, 64.0, 64.0,
128.0, 128.0, 64.0, 64.0,
64.0, 64.0, 128.0, 128.0,
64.0, 64.0, 128.0, 128.0,
50.0, 50.0, 100.0, 100.0,
50.0, 50.0, 100.0, 100.0]
for opt in [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.AREA]:
with self.test_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, target_height, target_width, opt)
resized = y.eval()
expected = np.array(expected_data[opt]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [128, 128, 64, 64, 128, 128, 64, 64,
64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [128, 135, 96, 55, 64, 114, 134, 128,
78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84,
74, 70, 95, 122, 115, 69, 49, 55,
100, 105, 75, 43, 50, 89, 105, 100,
57, 54, 74, 96, 91, 65, 55, 58,
70, 69, 75, 81, 80, 72, 69, 70,
105, 112, 75, 36, 45, 92, 111, 105]
with self.test_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, target_height, target_width,
image_ops.ResizeMethod.BICUBIC)
resized = y.eval()
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [128, 64, 32, 16, 8, 4,
4, 8, 16, 32, 64, 128,
128, 64, 32, 16, 8, 4,
5, 10, 15, 20, 25, 30,
30, 25, 20, 15, 10, 5,
5, 10, 15, 20, 25, 30]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [73, 33, 23, 39,
73, 33, 23, 39,
14, 16, 19, 21,
14, 16, 19, 21]
with self.test_session():
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, target_height, target_width,
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = y.eval()
self.assertAllClose(resized, expected, atol=1)
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, original, original_shape,
expected, expected_shape):
x_np = np.array(original, dtype=np.uint8).reshape(original_shape)
y_np = np.array(expected).reshape(expected_shape)
target_height = expected_shape[0]
target_width = expected_shape[1]
with self.test_session():
image = constant_op.constant(x_np, shape=original_shape)
y = image_ops.resize_image_with_crop_or_pad(image,
target_height,
target_width)
resized = y.eval()
self.assertAllClose(resized, y_np, atol=1e-5)
def testBasic(self):
# Basic no-op.
original = [1, 2, 3, 4,
5, 6, 7, 8]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
original, [2, 4, 1])
def testPad(self):
# Pad even along col.
original = [1, 2, 3, 4, 5, 6, 7, 8]
expected = [0, 1, 2, 3, 4, 0,
0, 5, 6, 7, 8, 0]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
expected, [2, 6, 1])
# Pad odd along col.
original = [1, 2, 3, 4,
5, 6, 7, 8]
expected = [0, 1, 2, 3, 4, 0, 0,
0, 5, 6, 7, 8, 0, 0]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
expected, [2, 7, 1])
# Pad even along row.
original = [1, 2, 3, 4,
5, 6, 7, 8]
expected = [0, 0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
0, 0, 0, 0]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
expected, [4, 4, 1])
# Pad odd along row.
original = [1, 2, 3, 4,
5, 6, 7, 8]
expected = [0, 0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
0, 0, 0, 0,
0, 0, 0, 0]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
expected, [5, 4, 1])
def testCrop(self):
# Crop even along col.
original = [1, 2, 3, 4,
5, 6, 7, 8]
expected = [2, 3,
6, 7]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
expected, [2, 2, 1])
# Crop odd along col.
original = [1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12]
expected = [2, 3, 4,
8, 9, 10]
self._ResizeImageWithCropOrPad(original, [2, 6, 1],
expected, [2, 3, 1])
# Crop even along row.
original = [1, 2,
3, 4,
5, 6,
7, 8]
expected = [3, 4,
5, 6]
self._ResizeImageWithCropOrPad(original, [4, 2, 1],
expected, [2, 2, 1])
# Crop odd along row.
original = [1, 2,
3, 4,
5, 6,
7, 8,
9, 10,
11, 12,
13, 14,
15, 16]
expected = [3, 4,
5, 6,
7, 8,
9, 10,
11, 12]
self._ResizeImageWithCropOrPad(original, [8, 2, 1],
expected, [5, 2, 1])
def testCropAndPad(self):
# Pad along row but crop along col.
original = [1, 2, 3, 4,
5, 6, 7, 8]
expected = [0, 0,
2, 3,
6, 7,
0, 0]
self._ResizeImageWithCropOrPad(original, [2, 4, 1],
expected, [4, 2, 1])
# Crop along row but pad along col.
original = [1, 2,
3, 4,
5, 6,
7, 8]
expected = [0, 3, 4, 0,
0, 5, 6, 0]
self._ResizeImageWithCropOrPad(original, [4, 2, 1],
expected, [2, 4, 1])
def _SimpleColorRamp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / float(np.prod(image0.shape))
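  # averageError above is the mean absolute per-channel difference, so the
  # thresholds used below (0.8, 0.6, 0.02) are in uint8 intensity units.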
def testExisting(self):
# Read a real jpeg and verify shape
path = ('tensorflow/core/lib/jpeg/testdata/'
'jpeg_merge_test1.jpg')
with self.test_session() as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = sess.run([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 0.8)
def testSynthetic(self):
with self.test_session() as sess:
      # Encode the ramp, decode it, then re-encode and re-decode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0)
image2 = image_ops.decode_jpeg(image_ops.encode_jpeg(image1))
jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testShape(self):
with self.test_session() as sess:
jpeg = constant_op.constant('nonsense')
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = 'tensorflow/core/lib/png/testdata/'
inputs = (1, 'lena_gray.png'), (4, 'lena_rgba.png')
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.test_session() as sess:
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = sess.run([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, image1.eval())
def testSynthetic(self):
with self.test_session() as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = sess.run([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testShape(self):
with self.test_session() as sess:
png = constant_op.constant('nonsense')
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
if __name__ == '__main__':
googletest.main()
|
luiseduardohdbackup/buck
|
refs/heads/master
|
third-party/py/twitter-commons/src/python/twitter/common/python/http/http.py
|
23
|
import contextlib
import hashlib
import os
import socket
import struct
import time
from ..common import safe_delete, safe_mkdir, safe_mkdtemp
from ..compatibility import PY2, PY3
from .tracer import TRACER
if PY3:
from http.client import parse_headers, HTTPException
from queue import Queue, Empty
import urllib.error as urllib_error
import urllib.parse as urlparse
import urllib.request as urllib_request
from urllib.request import addinfourl
else:
from httplib import HTTPMessage, HTTPException
from Queue import Queue, Empty
from urllib import addinfourl
import urllib2 as urllib_request
import urllib2 as urllib_error
import urlparse
class Timeout(Exception):
pass
class FetchError(Exception):
"""
Error occurred while fetching via HTTP
We raise this when we catch urllib or httplib errors because we don't want
to leak those implementation details to callers.
"""
def deadline(fn, *args, **kw):
"""Helper function to prevent fn(*args, **kw) from running more than
a specified timeout.
Takes timeout= kwarg in seconds, which defaults to 150ms (0.150)
"""
DEFAULT_TIMEOUT_SECS = 0.150
from threading import Thread
q = Queue(maxsize=1)
timeout = kw.pop('timeout', DEFAULT_TIMEOUT_SECS)
class AnonymousThread(Thread):
def run(self):
q.put(fn(*args, **kw))
AnonymousThread().start()
try:
return q.get(timeout=timeout)
except Empty:
raise Timeout
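# Hypothetical usage sketch of deadline() (kept as a comment so importing
# this module stays side-effect free): bound a deliberately slow call to
# 50ms; the worker thread keeps running, but the caller gets Timeout.
#
#   try:
#       deadline(time.sleep, 1, timeout=0.05)
#   except Timeout:
#       pass  # the call exceeded its deadline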
class Web(object):
NS_TIMEOUT_SECS = 5.0
CONN_TIMEOUT = 1.0
SCHEME_TO_PORT = {
'ftp': 21,
'http': 80,
'https': 443
}
def _resolves(self, fullurl):
try:
return socket.gethostbyname(fullurl.hostname)
except socket.gaierror:
return ''
def _reachable(self, fullurl, conn_timeout=None):
port = fullurl.port if fullurl.port else self.SCHEME_TO_PORT.get(fullurl.scheme, 80)
try:
conn = socket.create_connection(
(fullurl.hostname, port), timeout=(conn_timeout or self.CONN_TIMEOUT))
conn.close()
return True
except (socket.error, socket.timeout):
TRACER.log('Failed to connect to %s within deadline' % urlparse.urlunparse(fullurl))
return False
def reachable(self, url, conn_timeout=None):
"""Do we think this URL is reachable?
If this isn't here, it takes 5-30s to timeout on DNS resolution for
certain hosts, so we prefetch DNS at a cost of 5-8ms but cap
resolution at something sane, e.g. 5s.
"""
fullurl = urlparse.urlparse(url)
if not fullurl.scheme or not fullurl.netloc:
return True
try:
with TRACER.timed('Resolving %s' % fullurl.hostname, V=2):
if not deadline(self._resolves, fullurl, timeout=self.NS_TIMEOUT_SECS):
TRACER.log('Failed to resolve %s' % url)
return False
except Timeout:
TRACER.log('Timed out resolving %s' % fullurl.hostname)
return False
with TRACER.timed('Connecting to %s' % fullurl.hostname, V=2):
return self._reachable(fullurl, conn_timeout=conn_timeout)
def maybe_local_url(self, url):
full_url = urlparse.urlparse(url)
if full_url.scheme == '':
return 'file://' + os.path.realpath(url)
return url
def open(self, url, conn_timeout=None, **kw):
"""
Wrapper in front of urlopen that more gracefully handles odd network environments.
"""
url = self.maybe_local_url(url)
with TRACER.timed('Fetching %s' % url, V=1):
if not self.reachable(url, conn_timeout=conn_timeout):
raise FetchError('Could not reach %s within deadline.' % url)
try:
return urllib_request.urlopen(url, **kw)
except (urllib_error.URLError, HTTPException) as exc:
raise FetchError(exc)
class CachedWeb(object):
"""
A basic http cache.
Can act as a failsoft cache: If an object has expired but the fetch fails,
will fall back to the cached object if failsoft set to True.
"""
def __init__(self, cache=None, failsoft=True, clock=time, opener=None):
self._failsoft = failsoft
self._cache = cache or safe_mkdtemp()
safe_mkdir(self._cache)
self._clock = clock
self._opener = opener or Web()
super(CachedWeb, self).__init__()
def __contains__(self, url):
age = self.age(url)
return age is not None and age > 0
def translate_url(self, url):
return os.path.join(self._cache, hashlib.md5(url.encode('utf8')).hexdigest())
def translate_all(self, url):
return ('%(tgt)s %(tgt)s.tmp %(tgt)s.headers %(tgt)s.headers.tmp' % {
'tgt': self.translate_url(url)
}).split()
def age(self, url):
"""Return the age of an object in seconds, or None if object is not in cache."""
cached_object = self.translate_url(url)
if not os.path.exists(cached_object):
return None
return self._clock.time() - os.path.getmtime(cached_object)
def expired(self, url, ttl=None):
age = self.age(url)
if age is None:
return True
if ttl is None:
return False
return age > ttl
def really_open(self, url, conn_timeout=None):
try:
return self._opener.open(url, conn_timeout=conn_timeout)
except urllib_error.HTTPError as fp:
# HTTPError is a valid addinfourl -- use this instead of raising
return fp
def encode_url(self, url, conn_timeout=None):
target, target_tmp, headers, headers_tmp = self.translate_all(url)
with contextlib.closing(self.really_open(url, conn_timeout=conn_timeout)) as http_fp:
# File urls won't have a response code, they'll either open or raise.
if http_fp.getcode() and http_fp.getcode() != 200:
raise urllib_error.URLError('Non-200 response code from %s' % url)
with TRACER.timed('Caching %s' % url, V=2):
with open(target_tmp, 'wb') as disk_fp:
disk_fp.write(http_fp.read())
with open(headers_tmp, 'wb') as headers_fp:
headers_fp.write(struct.pack('>h', http_fp.code or 0))
headers_fp.write(str(http_fp.headers).encode('utf8'))
os.rename(target_tmp, target)
os.rename(headers_tmp, headers)
def decode_url(self, url):
target, _, headers, _ = self.translate_all(url)
headers_fp = open(headers, 'rb')
code, = struct.unpack('>h', headers_fp.read(2))
def make_headers(fp):
return HTTPMessage(fp) if PY2 else parse_headers(fp)
return addinfourl(open(target, 'rb'), make_headers(headers_fp), url, code)
def clear_url(self, url):
for path in self.translate_all(url):
safe_delete(path)
def cache(self, url, conn_timeout=None):
"""cache the contents of a url."""
try:
self.encode_url(url, conn_timeout=conn_timeout)
except urllib_error.URLError:
self.clear_url(url)
raise
def open(self, url, ttl=None, conn_timeout=None):
"""Return a file-like object with the content of the url."""
expired = self.expired(url, ttl=ttl)
with TRACER.timed('Opening %s' % ('(cached)' if not expired else '(uncached)'), V=1):
if expired:
try:
self.cache(url, conn_timeout=conn_timeout)
except (urllib_error.URLError, HTTPException) as exc:
if not self._failsoft or url not in self:
raise FetchError(exc)
return self.decode_url(url)
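# Hypothetical usage sketch (illustrative URL; kept as a comment so the
# module has no import-time side effects): fetch through the failsoft
# cache with a 30-minute TTL. On a network error a previously cached copy
# is served as long as failsoft=True.
#
#   web = CachedWeb()
#   with contextlib.closing(web.open('http://example.com/', ttl=1800)) as fp:
#       data = fp.read()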
|
bawerd/mongrel2
|
refs/heads/master
|
examples/tornado/authdemo.py
|
101
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.auth
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", MainHandler),
(r"/auth/login", AuthHandler),
]
settings = dict(
cookie_secret="32oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
login_url="/auth/login",
)
tornado.web.Application.__init__(self, handlers, **settings)
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
user_json = self.get_secure_cookie("user")
if not user_json: return None
return tornado.escape.json_decode(user_json)
class MainHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
name = tornado.escape.xhtml_escape(self.current_user["name"])
self.write("Hello, " + name)
class AuthHandler(BaseHandler, tornado.auth.GoogleMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Google auth failed")
self.set_secure_cookie("user", tornado.escape.json_encode(user))
self.redirect("/")
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
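# Hypothetical walkthrough (not part of the demo): start the server and
# visit http://localhost:8888/ -- @tornado.web.authenticated redirects
# unauthenticated users to login_url ("/auth/login"), AuthHandler bounces
# through Google's OpenID endpoint, and _on_auth stores the "user" secure
# cookie before redirecting back to "/".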
|
amandolo/ansible-modules-core
|
refs/heads/devel
|
cloud/openstack/_quantum_network.py
|
127
|
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_network
version_added: "1.4"
deprecated: Deprecated in 2.0. Use os_network instead
short_description: Creates/Removes networks from OpenStack
description:
- Add or Remove network from OpenStack.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
tenant_name:
description:
- The name of the tenant for whom the network is created
required: false
default: None
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
      - Name to be assigned to the network
required: true
default: None
provider_network_type:
description:
      - The type of network to be created (gre, vlan, local). Available types depend on the plugin; the Quantum service decides if not specified.
required: false
default: None
provider_physical_network:
description:
- The physical network which would realize the virtual network for flat and vlan networks.
required: false
default: None
provider_segmentation_id:
description:
      - The ID to be assigned to the network; for vlan networks this is the VLAN ID, and for gre networks the tunnel ID
required: false
default: None
router_external:
description:
      - If 'yes', specifies that the virtual network is an external network (public).
required: false
default: false
shared:
description:
- Whether this network is shared or not
required: false
default: false
admin_state_up:
description:
- Whether the state should be marked as up or down
required: false
default: true
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
# Create a GRE backed Quantum network with tunnel id 1 for tenant1
- quantum_network: name=t1network tenant_name=tenant1 state=present
provider_network_type=gre provider_segmentation_id=1
login_username=admin login_password=admin login_tenant_name=admin
# Create an external network
- quantum_network: name=external_network state=present
provider_network_type=local router_external=yes
login_username=admin login_password=admin login_tenant_name=admin
'''
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s " %e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = " Error in connecting to neutron: %s " %e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
tenant_name = module.params['login_tenant_name']
else:
tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_net_id(neutron, module):
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['name'],
}
try:
networks = neutron.list_networks(**kwargs)
except Exception, e:
module.fail_json(msg = "Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _create_network(module, neutron):
neutron.format = 'json'
network = {
'name': module.params.get('name'),
'tenant_id': _os_tenant_id,
'provider:network_type': module.params.get('provider_network_type'),
'provider:physical_network': module.params.get('provider_physical_network'),
'provider:segmentation_id': module.params.get('provider_segmentation_id'),
'router:external': module.params.get('router_external'),
'shared': module.params.get('shared'),
'admin_state_up': module.params.get('admin_state_up'),
}
if module.params['provider_network_type'] == 'local':
network.pop('provider:physical_network', None)
network.pop('provider:segmentation_id', None)
if module.params['provider_network_type'] == 'flat':
network.pop('provider:segmentation_id', None)
if module.params['provider_network_type'] == 'gre':
network.pop('provider:physical_network', None)
if module.params['provider_network_type'] is None:
network.pop('provider:network_type', None)
network.pop('provider:physical_network', None)
network.pop('provider:segmentation_id', None)
try:
net = neutron.create_network({'network':network})
except Exception, e:
module.fail_json(msg = "Error in creating network: %s" % e.message)
return net['network']['id']
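# Sketch of the pruning above with illustrative values: for
# provider_network_type='gre' the request body reduces to
#   {'network': {'name': ..., 'tenant_id': ...,
#                'provider:network_type': 'gre',
#                'provider:segmentation_id': ..., 'router:external': ...,
#                'shared': ..., 'admin_state_up': ...}}
# so only the keys Neutron accepts for that network type are sent.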
def _delete_network(module, net_id, neutron):
try:
id = neutron.delete_network(net_id)
except Exception, e:
module.fail_json(msg = "Error in deleting the network: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
tenant_name = dict(default=None),
provider_network_type = dict(default=None, choices=['local', 'vlan', 'flat', 'gre']),
provider_physical_network = dict(default=None),
provider_segmentation_id = dict(default=None),
router_external = dict(default=False, type='bool'),
shared = dict(default=False, type='bool'),
admin_state_up = dict(default=True, type='bool'),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
if module.params['provider_network_type'] in ['vlan' , 'flat']:
if not module.params['provider_physical_network']:
module.fail_json(msg = " for vlan and flat networks, variable provider_physical_network should be set.")
if module.params['provider_network_type'] in ['vlan', 'gre']:
if not module.params['provider_segmentation_id']:
module.fail_json(msg = " for vlan & gre networks, variable provider_segmentation_id should be set.")
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
network_id = _get_net_id(neutron, module)
if not network_id:
network_id = _create_network(module, neutron)
module.exit_json(changed = True, result = "Created", id = network_id)
else:
module.exit_json(changed = False, result = "Success", id = network_id)
if module.params['state'] == 'absent':
network_id = _get_net_id(neutron, module)
if not network_id:
module.exit_json(changed = False, result = "Success")
else:
_delete_network(module, network_id, neutron)
module.exit_json(changed = True, result = "Deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
Dhivyap/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/netscaler/netscaler_lb_monitor.py
|
114
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netscaler_lb_monitor
short_description: Manage load balancing monitors
description:
- Manage load balancing monitors.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
version_added: "2.4"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
monitorname:
description:
- >-
Name for the monitor. Must begin with an ASCII alphanumeric or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore, hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters.
- "Minimum length = 1"
type:
choices:
- 'PING'
- 'TCP'
- 'HTTP'
- 'TCP-ECV'
- 'HTTP-ECV'
- 'UDP-ECV'
- 'DNS'
- 'FTP'
- 'LDNS-PING'
- 'LDNS-TCP'
- 'LDNS-DNS'
- 'RADIUS'
- 'USER'
- 'HTTP-INLINE'
- 'SIP-UDP'
- 'SIP-TCP'
- 'LOAD'
- 'FTP-EXTENDED'
- 'SMTP'
- 'SNMP'
- 'NNTP'
- 'MYSQL'
- 'MYSQL-ECV'
- 'MSSQL-ECV'
- 'ORACLE-ECV'
- 'LDAP'
- 'POP3'
- 'CITRIX-XML-SERVICE'
- 'CITRIX-WEB-INTERFACE'
- 'DNS-TCP'
- 'RTSP'
- 'ARP'
- 'CITRIX-AG'
- 'CITRIX-AAC-LOGINPAGE'
- 'CITRIX-AAC-LAS'
- 'CITRIX-XD-DDC'
- 'ND6'
- 'CITRIX-WI-EXTENDED'
- 'DIAMETER'
- 'RADIUS_ACCOUNTING'
- 'STOREFRONT'
- 'APPC'
- 'SMPP'
- 'CITRIX-XNC-ECV'
- 'CITRIX-XDM'
- 'CITRIX-STA-SERVICE'
- 'CITRIX-STA-SERVICE-NHOP'
description:
- "Type of monitor that you want to create."
action:
choices:
- 'NONE'
- 'LOG'
- 'DOWN'
description:
- >-
Action to perform when the response to an inline monitor (a monitor of type C(HTTP-INLINE)) indicates
that the service is down. A service monitored by an inline monitor is considered C(DOWN) if the response
code is not one of the codes that have been specified for the Response Code parameter.
- "Available settings function as follows:"
- >-
* C(NONE) - Do not take any action. However, the show service command and the show lb monitor command
indicate the total number of responses that were checked and the number of consecutive error
responses received after the last successful probe.
- "* C(LOG) - Log the event in NSLOG or SYSLOG."
- >-
* C(DOWN) - Mark the service as being down, and then do not direct any traffic to the service until the
configured down time has expired. Persistent connections to the service are terminated as soon as the
service is marked as C(DOWN). Also, log the event in NSLOG or SYSLOG.
respcode:
description:
- >-
Response codes for which to mark the service as UP. For any other response code, the action performed
depends on the monitor type. C(HTTP) monitors and C(RADIUS) monitors mark the service as C(DOWN), while
C(HTTP-INLINE) monitors perform the action indicated by the Action parameter.
httprequest:
description:
- "HTTP request to send to the server (for example, C(\\"HEAD /file.html\\"))."
rtsprequest:
description:
- "RTSP request to send to the server (for example, C(\\"OPTIONS *\\"))."
customheaders:
description:
- "Custom header string to include in the monitoring probes."
maxforwards:
description:
- >-
Maximum number of hops that the SIP request used for monitoring can traverse to reach the server.
Applicable only to monitors of type C(SIP-UDP).
- "Minimum value = C(0)"
- "Maximum value = C(255)"
sipmethod:
choices:
- 'OPTIONS'
- 'INVITE'
- 'REGISTER'
description:
- "SIP method to use for the query. Applicable only to monitors of type C(SIP-UDP)."
sipuri:
description:
- >-
SIP URI string to send to the service (for example, C(sip:sip.test)). Applicable only to monitors of
type C(SIP-UDP).
- "Minimum length = 1"
sipreguri:
description:
- >-
SIP user to be registered. Applicable only if the monitor is of type C(SIP-UDP) and the SIP Method
parameter is set to C(REGISTER).
- "Minimum length = 1"
send:
description:
- "String to send to the service. Applicable to C(TCP-ECV), C(HTTP-ECV), and C(UDP-ECV) monitors."
recv:
description:
- >-
String expected from the server for the service to be marked as UP. Applicable to C(TCP-ECV), C(HTTP-ECV),
and C(UDP-ECV) monitors.
query:
description:
- "Domain name to resolve as part of monitoring the DNS service (for example, C(example.com))."
querytype:
choices:
- 'Address'
- 'Zone'
- 'AAAA'
description:
- >-
Type of DNS record for which to send monitoring queries. Set to C(Address) for querying A records, C(AAAA)
for querying AAAA records, and C(Zone) for querying the SOA record.
scriptname:
description:
- >-
Path and name of the script to execute. The script must be available on the NetScaler appliance, in
the /nsconfig/monitors/ directory.
- "Minimum length = 1"
scriptargs:
description:
- "String of arguments for the script. The string is copied verbatim into the request."
dispatcherip:
description:
- "IP address of the dispatcher to which to send the probe."
dispatcherport:
description:
- "Port number on which the dispatcher listens for the monitoring probe."
username:
description:
- >-
User name with which to probe the C(RADIUS), C(NNTP), C(FTP), C(FTP-EXTENDED), C(MYSQL), C(MSSQL), C(POP3), C(CITRIX-AG),
C(CITRIX-XD-DDC), C(CITRIX-WI-EXTENDED), C(CITRIX-XNC) or C(CITRIX-XDM) server.
- "Minimum length = 1"
password:
description:
- >-
Password that is required for logging on to the C(RADIUS), C(NNTP), C(FTP), C(FTP-EXTENDED), C(MYSQL), C(MSSQL), C(POP3),
C(CITRIX-AG), C(CITRIX-XD-DDC), C(CITRIX-WI-EXTENDED), C(CITRIX-XNC-ECV) or C(CITRIX-XDM) server. Used in
conjunction with the user name specified for the C(username) parameter.
- "Minimum length = 1"
secondarypassword:
description:
- >-
Secondary password that users might have to provide to log on to the Access Gateway server.
Applicable to C(CITRIX-AG) monitors.
logonpointname:
description:
- >-
Name of the logon point that is configured for the Citrix Access Gateway Advanced Access Control
software. Required if you want to monitor the associated login page or Logon Agent. Applicable to
C(CITRIX-AAC-LAS) and C(CITRIX-AAC-LOGINPAGE) monitors.
lasversion:
description:
- >-
Version number of the Citrix Advanced Access Control Logon Agent. Required by the C(CITRIX-AAC-LAS)
monitor.
radkey:
description:
- >-
Authentication key (shared secret text string) for RADIUS clients and servers to exchange. Applicable
to monitors of type C(RADIUS) and C(RADIUS_ACCOUNTING).
- "Minimum length = 1"
radnasid:
description:
- "NAS-Identifier to send in the Access-Request packet. Applicable to monitors of type C(RADIUS)."
- "Minimum length = 1"
radnasip:
description:
- >-
Network Access Server (NAS) IP address to use as the source IP address when monitoring a RADIUS
server. Applicable to monitors of type C(RADIUS) and C(RADIUS_ACCOUNTING).
radaccounttype:
description:
- "Account Type to be used in Account Request Packet. Applicable to monitors of type C(RADIUS_ACCOUNTING)."
- "Minimum value = 0"
- "Maximum value = 15"
radframedip:
description:
- "Source ip with which the packet will go out . Applicable to monitors of type C(RADIUS_ACCOUNTING)."
radapn:
description:
- >-
Called Station Id to be used in Account Request Packet. Applicable to monitors of type
C(RADIUS_ACCOUNTING).
- "Minimum length = 1"
radmsisdn:
description:
- >-
                Calling Station Id to be used in Account Request Packet. Applicable to monitors of type
C(RADIUS_ACCOUNTING).
- "Minimum length = 1"
radaccountsession:
description:
- >-
Account Session ID to be used in Account Request Packet. Applicable to monitors of type
C(RADIUS_ACCOUNTING).
- "Minimum length = 1"
lrtm:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Calculate the least response times for bound services. If this parameter is not enabled, the
appliance does not learn the response times of the bound services. Also used for LRTM load balancing.
deviation:
description:
- >-
Time value added to the learned average response time in dynamic response time monitoring (DRTM).
When a deviation is specified, the appliance learns the average response time of bound services and
adds the deviation to the average. The final value is then continually adjusted to accommodate
response time variations over time. Specified in milliseconds, seconds, or minutes.
- "Minimum value = C(0)"
- "Maximum value = C(20939)"
units1:
choices:
- 'SEC'
- 'MSEC'
- 'MIN'
description:
- "Unit of measurement for the Deviation parameter. Cannot be changed after the monitor is created."
interval:
description:
- "Time interval between two successive probes. Must be greater than the value of Response Time-out."
- "Minimum value = C(1)"
- "Maximum value = C(20940)"
units3:
choices:
- 'SEC'
- 'MSEC'
- 'MIN'
description:
- "monitor interval units."
resptimeout:
description:
- >-
Amount of time for which the appliance must wait before it marks a probe as FAILED. Must be less than
the value specified for the Interval parameter.
- >-
Note: For C(UDP-ECV) monitors for which a receive string is not configured, response timeout does not
apply. For C(UDP-ECV) monitors with no receive string, probe failure is indicated by an ICMP port
unreachable error received from the service.
- "Minimum value = C(1)"
- "Maximum value = C(20939)"
units4:
choices:
- 'SEC'
- 'MSEC'
- 'MIN'
description:
- "monitor response timeout units."
resptimeoutthresh:
description:
- >-
Response time threshold, specified as a percentage of the Response Time-out parameter. If the
response to a monitor probe has not arrived when the threshold is reached, the appliance generates an
SNMP trap called monRespTimeoutAboveThresh. After the response time returns to a value below the
threshold, the appliance generates a monRespTimeoutBelowThresh SNMP trap. For the traps to be
generated, the "MONITOR-RTO-THRESHOLD" alarm must also be enabled.
- "Minimum value = C(0)"
- "Maximum value = C(100)"
retries:
description:
- >-
Maximum number of probes to send to establish the state of a service for which a monitoring probe
failed.
- "Minimum value = C(1)"
- "Maximum value = C(127)"
failureretries:
description:
- >-
Number of retries that must fail, out of the number specified for the Retries parameter, for a
service to be marked as DOWN. For example, if the Retries parameter is set to 10 and the Failure
Retries parameter is set to 6, out of the ten probes sent, at least six probes must fail if the
service is to be marked as DOWN. The default value of 0 means that all the retries must fail if the
service is to be marked as DOWN.
- "Minimum value = C(0)"
- "Maximum value = C(32)"
alertretries:
description:
- >-
Number of consecutive probe failures after which the appliance generates an SNMP trap called
monProbeFailed.
- "Minimum value = C(0)"
- "Maximum value = C(32)"
successretries:
description:
- "Number of consecutive successful probes required to transition a service's state from DOWN to UP."
- "Minimum value = C(1)"
- "Maximum value = C(32)"
downtime:
description:
- >-
Time duration for which to wait before probing a service that has been marked as DOWN. Expressed in
milliseconds, seconds, or minutes.
- "Minimum value = C(1)"
- "Maximum value = C(20939)"
units2:
choices:
- 'SEC'
- 'MSEC'
- 'MIN'
description:
- "Unit of measurement for the Down Time parameter. Cannot be changed after the monitor is created."
destip:
description:
- >-
IP address of the service to which to send probes. If the parameter is set to 0, the IP address of
the server to which the monitor is bound is considered the destination IP address.
destport:
description:
- >-
TCP or UDP port to which to send the probe. If the parameter is set to 0, the port number of the
service to which the monitor is bound is considered the destination port. For a monitor of type C(USER),
however, the destination port is the port number that is included in the HTTP request sent to the
dispatcher. Does not apply to monitors of type C(PING).
state:
choices:
- 'enabled'
- 'disabled'
description:
- >-
State of the monitor. The C(disabled) setting disables not only the monitor being configured, but all
monitors of the same type, until the parameter is set to C(enabled). If the monitor is bound to a
service, the state of the monitor is not taken into account when the state of the service is
determined.
reverse:
description:
- >-
Mark a service as DOWN, instead of UP, when probe criteria are satisfied, and as UP instead of DOWN
when probe criteria are not satisfied.
type: bool
transparent:
description:
- >-
The monitor is bound to a transparent device such as a firewall or router. The state of a transparent
device depends on the responsiveness of the services behind it. If a transparent device is being
monitored, a destination IP address must be specified. The probe is sent to the specified IP address
by using the MAC address of the transparent device.
type: bool
iptunnel:
description:
- >-
Send the monitoring probe to the service through an IP tunnel. A destination IP address must be
specified.
type: bool
tos:
description:
- "Probe the service by encoding the destination IP address in the IP TOS (6) bits."
type: bool
tosid:
description:
- "The TOS ID of the specified destination IP. Applicable only when the TOS parameter is set."
- "Minimum value = C(1)"
- "Maximum value = C(63)"
secure:
description:
- >-
Use a secure SSL connection when monitoring a service. Applicable only to TCP based monitors. The
secure option cannot be used with a C(CITRIX-AG) monitor, because a CITRIX-AG monitor uses a secure
connection by default.
type: bool
validatecred:
description:
- >-
Validate the credentials of the Xen Desktop DDC server user. Applicable to monitors of type
C(CITRIX-XD-DDC).
type: bool
domain:
description:
- >-
Domain in which the XenDesktop Desktop Delivery Controller (DDC) servers or Web Interface servers are
present. Required by C(CITRIX-XD-DDC) and C(CITRIX-WI-EXTENDED) monitors for logging on to the DDC servers
and Web Interface servers, respectively.
ipaddress:
description:
- >-
Set of IP addresses expected in the monitoring response from the DNS server, if the record type is A
or AAAA. Applicable to C(DNS) monitors.
- "Minimum length = 1"
group:
description:
- >-
Name of a newsgroup available on the NNTP service that is to be monitored. The appliance periodically
generates an NNTP query for the name of the newsgroup and evaluates the response. If the newsgroup is
found on the server, the service is marked as UP. If the newsgroup does not exist or if the search
fails, the service is marked as DOWN. Applicable to NNTP monitors.
- "Minimum length = 1"
filename:
description:
- >-
Name of a file on the FTP server. The appliance monitors the FTP service by periodically checking the
existence of the file on the server. Applicable to C(FTP-EXTENDED) monitors.
- "Minimum length = 1"
basedn:
description:
- >-
The base distinguished name of the LDAP service, from where the LDAP server can begin the search for
the attributes in the monitoring query. Required for C(LDAP) service monitoring.
- "Minimum length = 1"
binddn:
description:
- >-
The distinguished name with which an LDAP monitor can perform the Bind operation on the LDAP server.
Optional. Applicable to C(LDAP) monitors.
- "Minimum length = 1"
filter:
description:
- "Filter criteria for the LDAP query. Optional."
- "Minimum length = 1"
attribute:
description:
- >-
Attribute to evaluate when the LDAP server responds to the query. Success or failure of the
monitoring probe depends on whether the attribute exists in the response. Optional.
- "Minimum length = 1"
database:
description:
- "Name of the database to connect to during authentication."
- "Minimum length = 1"
oraclesid:
description:
- "Name of the service identifier that is used to connect to the Oracle database during authentication."
- "Minimum length = 1"
sqlquery:
description:
- >-
SQL query for a C(MYSQL-ECV) or C(MSSQL-ECV) monitor. Sent to the database server after the server
authenticates the connection.
- "Minimum length = 1"
evalrule:
description:
- >-
                Default syntax expression that evaluates the database server's response to a C(MYSQL-ECV) or
                C(MSSQL-ECV) monitoring query. Must produce a Boolean result. The result determines the state
                of the server. If the expression returns TRUE, the probe succeeds.
- >-
For example, if you want the appliance to evaluate the error message to determine the state of the
                server, use the rule C(MYSQL.RES.ROW(10).TEXT_ELEM(2).EQ("MySQL")).
mssqlprotocolversion:
choices:
- '70'
- '2000'
- '2000SP1'
- '2005'
- '2008'
- '2008R2'
- '2012'
- '2014'
description:
- "Version of MSSQL server that is to be monitored."
Snmpoid:
description:
- "SNMP OID for C(SNMP) monitors."
- "Minimum length = 1"
snmpcommunity:
description:
- "Community name for C(SNMP) monitors."
- "Minimum length = 1"
snmpthreshold:
description:
- "Threshold for C(SNMP) monitors."
- "Minimum length = 1"
snmpversion:
choices:
- 'V1'
- 'V2'
description:
- "SNMP version to be used for C(SNMP) monitors."
metrictable:
description:
- "Metric table to which to bind metrics."
- "Minimum length = 1"
- "Maximum length = 99"
application:
description:
- >-
Name of the application used to determine the state of the service. Applicable to monitors of type
C(CITRIX-XML-SERVICE).
- "Minimum length = 1"
sitepath:
description:
- >-
URL of the logon page. For monitors of type C(CITRIX-WEB-INTERFACE), to monitor a dynamic page under the
site path, terminate the site path with a slash C(/). Applicable to C(CITRIX-WEB-INTERFACE),
C(CITRIX-WI-EXTENDED) and C(CITRIX-XDM) monitors.
- "Minimum length = 1"
storename:
description:
- >-
                Store Name. For monitors of type C(STOREFRONT), C(storename) is an optional argument defining
                the storefront service store name. Applicable to C(STOREFRONT) monitors.
- "Minimum length = 1"
storefrontacctservice:
description:
- >-
                Enable/Disable probing for Account Service. Applicable only to C(STOREFRONT) monitors. For
                multi-tenancy configurations, users may skip the account service.
type: bool
hostname:
description:
- "Hostname in the FQDN format (Example: C(porche.cars.org)). Applicable to C(STOREFRONT) monitors."
- "Minimum length = 1"
netprofile:
description:
- "Name of the network profile."
- "Minimum length = 1"
- "Maximum length = 127"
originhost:
description:
- >-
Origin-Host value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
- "Minimum length = 1"
originrealm:
description:
- >-
Origin-Realm value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
- "Minimum length = 1"
hostipaddress:
description:
- >-
Host-IP-Address value for the Capabilities-Exchange-Request (CER) message to use for monitoring
Diameter servers. If Host-IP-Address is not specified, the appliance inserts the mapped IP (MIP)
address or subnet IP (SNIP) address from which the CER request (the monitoring probe) is sent.
- "Minimum length = 1"
vendorid:
description:
- >-
Vendor-Id value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
productname:
description:
- >-
Product-Name value for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
- "Minimum length = 1"
firmwarerevision:
description:
- >-
Firmware-Revision value for the Capabilities-Exchange-Request (CER) message to use for monitoring
Diameter servers.
authapplicationid:
description:
- >-
List of Auth-Application-Id attribute value pairs (AVPs) for the Capabilities-Exchange-Request (CER)
message to use for monitoring Diameter servers. A maximum of eight of these AVPs are supported in a
monitoring CER message.
- "Minimum value = C(0)"
- "Maximum value = C(4294967295)"
acctapplicationid:
description:
- >-
List of Acct-Application-Id attribute value pairs (AVPs) for the Capabilities-Exchange-Request (CER)
message to use for monitoring Diameter servers. A maximum of eight of these AVPs are supported in a
monitoring message.
- "Minimum value = C(0)"
- "Maximum value = C(4294967295)"
inbandsecurityid:
choices:
- 'NO_INBAND_SECURITY'
- 'TLS'
description:
- >-
Inband-Security-Id for the Capabilities-Exchange-Request (CER) message to use for monitoring Diameter
servers.
supportedvendorids:
description:
- >-
List of Supported-Vendor-Id attribute value pairs (AVPs) for the Capabilities-Exchange-Request (CER)
                message to use for monitoring Diameter servers. A maximum of eight of these AVPs are supported
                in a monitoring message.
- "Minimum value = C(1)"
- "Maximum value = C(4294967295)"
vendorspecificvendorid:
description:
- >-
Vendor-Id to use in the Vendor-Specific-Application-Id grouped attribute-value pair (AVP) in the
monitoring CER message. To specify Auth-Application-Id or Acct-Application-Id in
Vendor-Specific-Application-Id, use vendorSpecificAuthApplicationIds or
vendorSpecificAcctApplicationIds, respectively. Only one Vendor-Id is supported for all the
Vendor-Specific-Application-Id AVPs in a CER monitoring message.
- "Minimum value = 1"
vendorspecificauthapplicationids:
description:
- >-
List of Vendor-Specific-Auth-Application-Id attribute value pairs (AVPs) for the
Capabilities-Exchange-Request (CER) message to use for monitoring Diameter servers. A maximum of
eight of these AVPs are supported in a monitoring message. The specified value is combined with the
value of vendorSpecificVendorId to obtain the Vendor-Specific-Application-Id AVP in the CER
monitoring message.
- "Minimum value = C(0)"
- "Maximum value = C(4294967295)"
vendorspecificacctapplicationids:
description:
- >-
List of Vendor-Specific-Acct-Application-Id attribute value pairs (AVPs) to use for monitoring
Diameter servers. A maximum of eight of these AVPs are supported in a monitoring message. The
specified value is combined with the value of vendorSpecificVendorId to obtain the
Vendor-Specific-Application-Id AVP in the CER monitoring message.
- "Minimum value = C(0)"
- "Maximum value = C(4294967295)"
kcdaccount:
description:
- "KCD Account used by C(MSSQL) monitor."
- "Minimum length = 1"
- "Maximum length = 32"
storedb:
choices:
- 'enabled'
- 'disabled'
description:
- >-
                Store the database list populated with the responses to monitor probes. Used in
                database-specific load balancing if a C(MSSQL-ECV)/C(MYSQL-ECV) monitor is configured.
storefrontcheckbackendservices:
description:
- >-
                This option enables monitoring of services running on the Storefront server. Storefront
                services are monitored by probing a Windows service that runs on the Storefront server and
                exposes details of which Storefront services are running.
type: bool
trofscode:
description:
- "Code expected when the server is under maintenance."
trofsstring:
description:
- >-
                String expected from the server for the service to be marked as trofs. Applicable to
                C(HTTP-ECV)/C(TCP-ECV) monitors.
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Set lb monitor
local_action:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
validate_certs: no
module: netscaler_lb_monitor
state: present
monitorname: monitor_1
type: HTTP-INLINE
action: DOWN
respcode: ['400']
'''
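# A minimal additional sketch (not part of the original EXAMPLES) showing how
# the dynamic response time monitoring options documented above might be
# combined; the address, credentials and monitor name are placeholders:
#
# - name: Set lb monitor with dynamic response time monitoring
#   local_action:
#     nsip: 172.18.0.2
#     nitro_user: nsroot
#     nitro_pass: nsroot
#     module: netscaler_lb_monitor
#     state: present
#     monitorname: monitor_drtm
#     type: HTTP
#     lrtm: enabled
#     deviation: 100
#     units1: MSEC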
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: { 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import (
ConfigProxy,
get_nitro_client,
netscaler_common_arguments,
log,
loglines,
ensure_feature_is_enabled,
get_immutables_intersection
)
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor import lbmonitor
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
def lbmonitor_exists(client, module):
log('Checking if monitor exists')
if lbmonitor.count_filtered(client, 'monitorname:%s' % module.params['monitorname']) > 0:
return True
else:
return False
def lbmonitor_identical(client, module, lbmonitor_proxy):
log('Checking if monitor is identical')
count = lbmonitor.count_filtered(client, 'monitorname:%s' % module.params['monitorname'])
if count == 0:
return False
lbmonitor_list = lbmonitor.get_filtered(client, 'monitorname:%s' % module.params['monitorname'])
diff_dict = lbmonitor_proxy.diff_object(lbmonitor_list[0])
    # Skip hashed fields since they cannot be compared directly
hashed_fields = [
'password',
'secondarypassword',
'radkey',
]
for key in hashed_fields:
if key in diff_dict:
del diff_dict[key]
if diff_dict == {}:
return True
else:
return False
def diff_list(client, module, lbmonitor_proxy):
monitor_list = lbmonitor.get_filtered(client, 'monitorname:%s' % module.params['monitorname'])
return lbmonitor_proxy.diff_object(monitor_list[0])
def main():
module_specific_arguments = dict(
monitorname=dict(type='str'),
type=dict(
type='str',
choices=[
'PING',
'TCP',
'HTTP',
'TCP-ECV',
'HTTP-ECV',
'UDP-ECV',
'DNS',
'FTP',
'LDNS-PING',
'LDNS-TCP',
'LDNS-DNS',
'RADIUS',
'USER',
'HTTP-INLINE',
'SIP-UDP',
'SIP-TCP',
'LOAD',
'FTP-EXTENDED',
'SMTP',
'SNMP',
'NNTP',
'MYSQL',
'MYSQL-ECV',
'MSSQL-ECV',
'ORACLE-ECV',
'LDAP',
'POP3',
'CITRIX-XML-SERVICE',
'CITRIX-WEB-INTERFACE',
'DNS-TCP',
'RTSP',
'ARP',
'CITRIX-AG',
'CITRIX-AAC-LOGINPAGE',
'CITRIX-AAC-LAS',
'CITRIX-XD-DDC',
'ND6',
'CITRIX-WI-EXTENDED',
'DIAMETER',
'RADIUS_ACCOUNTING',
'STOREFRONT',
'APPC',
'SMPP',
'CITRIX-XNC-ECV',
'CITRIX-XDM',
'CITRIX-STA-SERVICE',
'CITRIX-STA-SERVICE-NHOP',
]
),
action=dict(
type='str',
choices=[
'NONE',
'LOG',
'DOWN',
]
),
respcode=dict(type='list'),
httprequest=dict(type='str'),
rtsprequest=dict(type='str'),
customheaders=dict(type='str'),
maxforwards=dict(type='float'),
sipmethod=dict(
type='str',
choices=[
'OPTIONS',
'INVITE',
'REGISTER',
]
),
sipuri=dict(type='str'),
sipreguri=dict(type='str'),
send=dict(type='str'),
recv=dict(type='str'),
query=dict(type='str'),
querytype=dict(
type='str',
choices=[
'Address',
'Zone',
'AAAA',
]
),
scriptname=dict(type='str'),
scriptargs=dict(type='str'),
dispatcherip=dict(type='str'),
dispatcherport=dict(type='int'),
username=dict(type='str'),
password=dict(type='str'),
secondarypassword=dict(type='str'),
logonpointname=dict(type='str'),
lasversion=dict(type='str'),
radkey=dict(type='str'),
radnasid=dict(type='str'),
radnasip=dict(type='str'),
radaccounttype=dict(type='float'),
radframedip=dict(type='str'),
radapn=dict(type='str'),
radmsisdn=dict(type='str'),
radaccountsession=dict(type='str'),
lrtm=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
deviation=dict(type='float'),
units1=dict(
type='str',
choices=[
'SEC',
'MSEC',
'MIN',
]
),
interval=dict(type='int'),
units3=dict(
type='str',
choices=[
'SEC',
'MSEC',
'MIN',
]
),
resptimeout=dict(type='int'),
units4=dict(
type='str',
choices=[
'SEC',
'MSEC',
'MIN',
]
),
resptimeoutthresh=dict(type='float'),
retries=dict(type='int'),
failureretries=dict(type='int'),
alertretries=dict(type='int'),
successretries=dict(type='int'),
downtime=dict(type='int'),
units2=dict(
type='str',
choices=[
'SEC',
'MSEC',
'MIN',
]
),
destip=dict(type='str'),
destport=dict(type='int'),
reverse=dict(type='bool'),
transparent=dict(type='bool'),
iptunnel=dict(type='bool'),
tos=dict(type='bool'),
tosid=dict(type='float'),
secure=dict(type='bool'),
validatecred=dict(type='bool'),
domain=dict(type='str'),
ipaddress=dict(type='list'),
group=dict(type='str'),
filename=dict(type='str'),
basedn=dict(type='str'),
binddn=dict(type='str'),
filter=dict(type='str'),
attribute=dict(type='str'),
database=dict(type='str'),
oraclesid=dict(type='str'),
sqlquery=dict(type='str'),
evalrule=dict(type='str'),
mssqlprotocolversion=dict(
type='str',
choices=[
'70',
'2000',
'2000SP1',
'2005',
'2008',
'2008R2',
'2012',
'2014',
]
),
Snmpoid=dict(type='str'),
snmpcommunity=dict(type='str'),
snmpthreshold=dict(type='str'),
snmpversion=dict(
type='str',
choices=[
'V1',
'V2',
]
),
application=dict(type='str'),
sitepath=dict(type='str'),
storename=dict(type='str'),
storefrontacctservice=dict(type='bool'),
hostname=dict(type='str'),
netprofile=dict(type='str'),
originhost=dict(type='str'),
originrealm=dict(type='str'),
hostipaddress=dict(type='str'),
vendorid=dict(type='float'),
productname=dict(type='str'),
firmwarerevision=dict(type='float'),
authapplicationid=dict(type='list'),
acctapplicationid=dict(type='list'),
inbandsecurityid=dict(
type='str',
choices=[
'NO_INBAND_SECURITY',
'TLS',
]
),
supportedvendorids=dict(type='list'),
vendorspecificvendorid=dict(type='float'),
vendorspecificauthapplicationids=dict(type='list'),
vendorspecificacctapplicationids=dict(type='list'),
storedb=dict(
type='str',
choices=[
'enabled',
'disabled',
]
),
storefrontcheckbackendservices=dict(type='bool'),
trofscode=dict(type='float'),
trofsstring=dict(type='str'),
)
hand_inserted_arguments = dict()
argument_spec = dict()
argument_spec.update(module_specific_arguments)
argument_spec.update(netscaler_common_arguments)
argument_spec.update(hand_inserted_arguments)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines,
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk', **module_result)
# Fallthrough to rest of execution
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
# Instantiate lb monitor object
readwrite_attrs = [
'monitorname',
'type',
'action',
'respcode',
'httprequest',
'rtsprequest',
'customheaders',
'maxforwards',
'sipmethod',
'sipuri',
'sipreguri',
'send',
'recv',
'query',
'querytype',
'scriptname',
'scriptargs',
'dispatcherip',
'dispatcherport',
'username',
'password',
'secondarypassword',
'logonpointname',
'lasversion',
'radkey',
'radnasid',
'radnasip',
'radaccounttype',
'radframedip',
'radapn',
'radmsisdn',
'radaccountsession',
'lrtm',
'deviation',
'units1',
'interval',
'units3',
'resptimeout',
'units4',
'resptimeoutthresh',
'retries',
'failureretries',
'alertretries',
'successretries',
'downtime',
'units2',
'destip',
'destport',
'reverse',
'transparent',
'iptunnel',
'tos',
'tosid',
'secure',
'validatecred',
'domain',
'ipaddress',
'group',
'filename',
'basedn',
'binddn',
'filter',
'attribute',
'database',
'oraclesid',
'sqlquery',
'evalrule',
'mssqlprotocolversion',
'Snmpoid',
'snmpcommunity',
'snmpthreshold',
'snmpversion',
'application',
'sitepath',
'storename',
'storefrontacctservice',
'netprofile',
'originhost',
'originrealm',
'hostipaddress',
'vendorid',
'productname',
'firmwarerevision',
'authapplicationid',
'acctapplicationid',
'inbandsecurityid',
'supportedvendorids',
'vendorspecificvendorid',
'vendorspecificauthapplicationids',
'vendorspecificacctapplicationids',
'storedb',
'storefrontcheckbackendservices',
'trofscode',
'trofsstring',
]
readonly_attrs = [
'lrtmconf',
'lrtmconfstr',
'dynamicresponsetimeout',
'dynamicinterval',
'multimetrictable',
'dup_state',
'dup_weight',
'weight',
]
immutable_attrs = [
'monitorname',
'type',
'units1',
'units3',
'units4',
'units2',
'Snmpoid',
'hostname',
'servicename',
'servicegroupname',
]
transforms = {
'storefrontcheckbackendservices': ['bool_yes_no'],
'secure': ['bool_yes_no'],
'tos': ['bool_yes_no'],
'validatecred': ['bool_yes_no'],
'storefrontacctservice': ['bool_yes_no'],
'iptunnel': ['bool_yes_no'],
'transparent': ['bool_yes_no'],
'reverse': ['bool_yes_no'],
'lrtm': [lambda v: v.upper()],
'storedb': [lambda v: v.upper()],
}
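    # For example, with these transforms a module value of reverse=True is
    # serialized as 'YES' for NITRO, and lrtm='enabled' is upcased to
    # 'ENABLED' before being sent to the appliance.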
lbmonitor_proxy = ConfigProxy(
actual=lbmonitor(),
client=client,
attribute_values_dict=module.params,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
immutable_attrs=immutable_attrs,
transforms=transforms,
)
try:
ensure_feature_is_enabled(client, 'LB')
if module.params['state'] == 'present':
log('Applying actions for state present')
if not lbmonitor_exists(client, module):
if not module.check_mode:
log('Adding monitor')
lbmonitor_proxy.add()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
elif not lbmonitor_identical(client, module, lbmonitor_proxy):
# Check if we try to change value of immutable attributes
immutables_changed = get_immutables_intersection(lbmonitor_proxy, diff_list(client, module, lbmonitor_proxy).keys())
if immutables_changed != []:
diff = diff_list(client, module, lbmonitor_proxy)
msg = 'Cannot update immutable attributes %s' % (immutables_changed,)
module.fail_json(msg=msg, diff=diff, **module_result)
if not module.check_mode:
log('Updating monitor')
lbmonitor_proxy.update()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
log('Doing nothing for monitor')
module_result['changed'] = False
# Sanity check for result
log('Sanity checks for state present')
if not module.check_mode:
if not lbmonitor_exists(client, module):
module.fail_json(msg='lb monitor does not exist', **module_result)
if not lbmonitor_identical(client, module, lbmonitor_proxy):
module.fail_json(
msg='lb monitor is not configured correctly',
diff=diff_list(client, module, lbmonitor_proxy),
**module_result
)
elif module.params['state'] == 'absent':
log('Applying actions for state absent')
if lbmonitor_exists(client, module):
if not module.check_mode:
lbmonitor_proxy.delete()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for result
log('Sanity checks for state absent')
if not module.check_mode:
if lbmonitor_exists(client, module):
module.fail_json(msg='lb monitor still exists', **module_result)
module_result['actual_attributes'] = lbmonitor_proxy.get_actual_rw_attributes(filter='monitorname')
except nitro_exception as e:
msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg, **module_result)
client.logout()
module.exit_json(**module_result)
if __name__ == "__main__":
main()
|
apanda/modeling
|
refs/heads/master
|
processing_tools/number_of_tenants.py
|
1
|
import sys
from collections import defaultdict
def Process (fnames):
tenant_time = defaultdict(lambda: defaultdict(lambda: 0.0))
tenant_run = defaultdict(lambda: defaultdict(lambda:0))
for fname in fnames:
f = open(fname)
for l in f:
if l.startswith("tenant"):
continue
parts = l.strip().split()
tenants = int(parts[0])
priv = int(parts[1])
pub = int(parts[2])
num_machines = tenants * priv * pub
int_checks = (tenants * tenants * priv * (priv - 1)) / 2
int_time = int_checks * float(parts[3])
ext_checks = (tenants * priv) * ((tenants - 1) * pub)
ext_time = ext_checks * float(parts[4])
oext_check = (tenants * priv) * (tenants * pub)
oext_time = oext_check * float(parts[5])
total = int_time + ext_time + oext_time
tenant_time[(priv, pub)][tenants] += total
tenant_run[(priv, pub)][tenants] += 1
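    # Worked example (illustrative values): with tenants=2, priv=2, pub=1
    # the loop above computes
    #   int_checks = (2*2*2*(2-1))/2 = 4
    #   ext_checks = (2*2)*((2-1)*1) = 4
    #   oext_check = (2*2)*(2*1)     = 8
    # and each count is weighted by its per-check time from the input row.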
for k in sorted(tenant_run.keys()):
print "# ----%s------"%(str(k))
for k2 in sorted(tenant_run[k].keys()):
print "%d %d %f"%(k2, tenant_run[k][k2], \
tenant_time[k][k2]/float(tenant_run[k][k2]))
print
print
#print "%d %d %f"%(k, runs[k], machines[k]/float(runs[k]))
if __name__ == "__main__":
Process(sys.argv[1:])
|
abhiatgithub/shogun-toolbox
|
refs/heads/master
|
examples/undocumented/python_static/classifier_mpdsvm.py
|
22
|
from tools.load import LoadMatrix
from sg import sg
lm=LoadMatrix()
traindat=lm.load_numbers('../data/fm_train_real.dat')
testdat=lm.load_numbers('../data/fm_test_real.dat')
train_label=lm.load_labels('../data/label_train_twoclass.dat')
parameter_list=[[traindat,testdat, train_label,10,2.1,1.2,1e-5,False],
[traindat,testdat,train_label,10,2.1,1.3,1e-4,False]]
def classifier_mpdsvm (fm_train_real=traindat,fm_test_real=testdat,
label_train_twoclass=train_label,
size_cache=10, width=2.1,C=1.2,
epsilon=1e-5,use_bias=False):
sg('set_features', 'TRAIN', fm_train_real)
sg('set_kernel', 'GAUSSIAN', 'REAL', size_cache, width)
sg('set_labels', 'TRAIN', label_train_twoclass)
sg('new_classifier', 'MPDSVM')
sg('svm_epsilon', epsilon)
sg('c', C)
sg('svm_use_bias', use_bias)
sg('train_classifier')
sg('set_features', 'TEST', fm_test_real)
result=sg('classify')
kernel_matrix = sg('get_kernel_matrix', 'TEST')
return result, kernel_matrix
if __name__=='__main__':
print('MPDSVM')
classifier_mpdsvm(*parameter_list[0])
|
philsch/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/ovs/openvswitch_port.py
|
42
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=C0111
# (c) 2013, David Stygstra <david.stygstra@gmail.com>
#
# Portions copyright @ 2015 VMware, Inc.
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: openvswitch_port
version_added: 1.4
author: "David Stygstra (@stygstra)"
short_description: Manage Open vSwitch ports
requirements: [ ovs-vsctl ]
description:
- Manage Open vSwitch ports
options:
bridge:
required: true
description:
- Name of bridge to manage
port:
required: true
description:
- Name of port to manage on the bridge
tag:
version_added: 2.2
required: false
description:
- VLAN tag for this port. Must be a value between
0 and 4095.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the port should exist
timeout:
required: false
default: 5
description:
- How long to wait for ovs-vswitchd to respond
external_ids:
version_added: 2.0
required: false
default: {}
description:
- Dictionary of external_ids applied to a port.
set:
version_added: 2.0
required: false
default: None
description:
- Set a single property on a port.
'''
EXAMPLES = '''
# Creates port eth2 on bridge br-ex
- openvswitch_port:
bridge: br-ex
port: eth2
state: present
# Creates port eth6
- openvswitch_port:
bridge: bridge-loop
port: eth6
state: present
set: Interface eth6
# Creates port vlan10 with tag 10 on bridge br-ex
- openvswitch_port:
bridge: br-ex
port: vlan10
tag: 10
state: present
set: Interface vlan10
# Assign interface id server1-vifeth6 and mac address 00:00:5E:00:53:23
# to port vifeth6 and setup port to be managed by a controller.
- openvswitch_port:
bridge: br-int
port: vifeth6
state: present
args:
external_ids:
iface-id: '{{ inventory_hostname }}-vifeth6'
attached-mac: '00:00:5E:00:53:23'
vm-id: '{{ inventory_hostname }}'
iface-status: active
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.pycompat24 import get_exception
def _external_ids_to_dict(text):
text = text.strip()
if text == '{}':
return None
else:
d = {}
for kv in text[1:-1].split(','):
kv = kv.strip()
k, v = kv.split('=')
d[k] = v
return d
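# A rough illustration of the parsing above (values are kept verbatim,
# including any quoting ovs-vsctl may emit):
#   '{iface-id=vif1, iface-status=active}'
#   -> {'iface-id': 'vif1', 'iface-status': 'active'}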
def _tag_to_str(text):
text = text.strip()
if text == '[]':
return None
else:
return text
def map_obj_to_commands(want, have, module):
commands = list()
if module.params['state'] == 'absent':
if have:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s del-port"
" %(bridge)s %(port)s")
command = templatized_command % module.params
commands.append(command)
else:
if have:
if want['tag'] != have['tag']:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" set port %(port)s tag=%(tag)s")
command = templatized_command % module.params
commands.append(command)
if want['external_ids'] != have['external_ids']:
for k, v in iteritems(want['external_ids']):
if (not have['external_ids']
or k not in have['external_ids']
or want['external_ids'][k] != have['external_ids'][k]):
if v is None:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" remove port %(port)s"
" external_ids " + k)
command = templatized_command % module.params
commands.append(command)
else:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" set port %(port)s"
" external_ids:")
command = templatized_command % module.params
command += k + "=" + v
commands.append(command)
else:
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s add-port"
" %(bridge)s %(port)s")
command = templatized_command % module.params
if want['tag']:
templatized_command = " tag=%(tag)s"
command += templatized_command % module.params
if want['set']:
templatized_command = " -- set %(set)s"
command += templatized_command % module.params
commands.append(command)
if want['external_ids']:
for k, v in iteritems(want['external_ids']):
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s"
" set port %(port)s external_ids:")
command = templatized_command % module.params
command += k + "=" + v
commands.append(command)
return commands
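# Illustrative output of the mapping above for a new port with a tag and a
# set clause (default timeout, ovs-vsctl path resolved via get_bin_path):
#   ovs-vsctl -t 5 add-port br-ex vlan10 tag=10 -- set Interface vlan10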
def map_config_to_obj(module):
templatized_command = "%(ovs-vsctl)s -t %(timeout)s list-ports %(bridge)s"
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
if rc != 0:
module.fail_json(msg=err)
obj = {}
if module.params['port'] in out.splitlines():
obj['bridge'] = module.params['bridge']
obj['port'] = module.params['port']
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s get"
" Port %(port)s tag")
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
obj['tag'] = _tag_to_str(out)
templatized_command = ("%(ovs-vsctl)s -t %(timeout)s get"
" Port %(port)s external_ids")
command = templatized_command % module.params
rc, out, err = module.run_command(command, check_rc=True)
obj['external_ids'] = _external_ids_to_dict(out)
return obj
def map_params_to_obj(module):
obj = {
'bridge': module.params['bridge'],
'port': module.params['port'],
'tag': module.params['tag'],
'external_ids': module.params['external_ids'],
'set': module.params['set']
}
return obj
# pylint: disable=E0602
def main():
""" Entry point. """
argument_spec={
'bridge': {'required': True},
'port': {'required': True},
'state': {'default': 'present', 'choices': ['present', 'absent']},
'timeout': {'default': 5, 'type': 'int'},
'external_ids': {'default': None, 'type': 'dict'},
'tag': {'default': None},
'set': {'required': False, 'default': None}
}
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
# We add ovs-vsctl to module_params to later build up templatized commands
module.params["ovs-vsctl"] = module.get_bin_path("ovs-vsctl", True)
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
for c in commands:
module.run_command(c, check_rc=True)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
ettm2012/MissionPlanner
|
refs/heads/master
|
Lib/MimeWriter.py
|
67
|
"""Generic MIME writer.
This module defines the class MimeWriter. The MimeWriter class implements
a basic formatter for creating MIME multi-part files. It doesn't seek around
the output file nor does it use large amounts of buffer space. You must write
the parts out in the order that they should occur in the final file.
MimeWriter does buffer the headers you add, allowing you to rearrange their
order.
"""
import mimetools
__all__ = ["MimeWriter"]
import warnings
warnings.warn("the MimeWriter module is deprecated; use the email package instead",
DeprecationWarning, 2)
class MimeWriter:
"""Generic MIME writer.
Methods:
__init__()
addheader()
flushheaders()
startbody()
startmultipartbody()
nextpart()
lastpart()
A MIME writer is much more primitive than a MIME parser. It
doesn't seek around on the output file, and it doesn't use large
amounts of buffer space, so you have to write the parts in the
order they should occur on the output file. It does buffer the
headers you add, allowing you to rearrange their order.
General usage is:
f = <open the output file>
w = MimeWriter(f)
...call w.addheader(key, value) 0 or more times...
followed by either:
f = w.startbody(content_type)
...call f.write(data) for body data...
or:
w.startmultipartbody(subtype)
for each part:
subwriter = w.nextpart()
...use the subwriter's methods to create the subpart...
w.lastpart()
The subwriter is another MimeWriter instance, and should be
treated in the same way as the toplevel MimeWriter. This way,
writing recursive body parts is easy.
Warning: don't forget to call lastpart()!
XXX There should be more state so calls made in the wrong order
are detected.
Some special cases:
- startbody() just returns the file passed to the constructor;
but don't use this knowledge, as it may be changed.
- startmultipartbody() actually returns a file as well;
this can be used to write the initial 'if you can read this your
mailer is not MIME-aware' message.
- If you call flushheaders(), the headers accumulated so far are
written out (and forgotten); this is useful if you don't need a
body part at all, e.g. for a subpart of type message/rfc822
that's (mis)used to store some header-like information.
- Passing a keyword argument 'prefix=<flag>' to addheader(),
start*body() affects where the header is inserted; 0 means
append at the end, 1 means insert at the start; default is
append for addheader(), but insert for start*body(), which use
it to determine where the Content-Type header goes.
"""
def __init__(self, fp):
self._fp = fp
self._headers = []
def addheader(self, key, value, prefix=0):
"""Add a header line to the MIME message.
        The key is the name of the header and the value provides its
        content. The optional argument prefix determines
where the header is inserted; 0 means append at the end, 1 means
insert at the start. The default is to append.
"""
lines = value.split("\n")
while lines and not lines[-1]: del lines[-1]
while lines and not lines[0]: del lines[0]
for i in range(1, len(lines)):
lines[i] = " " + lines[i].strip()
value = "\n".join(lines) + "\n"
line = key + ": " + value
if prefix:
self._headers.insert(0, line)
else:
self._headers.append(line)
def flushheaders(self):
"""Writes out and forgets all headers accumulated so far.
This is useful if you don't need a body part at all; for example,
for a subpart of type message/rfc822 that's (mis)used to store some
header-like information.
"""
self._fp.writelines(self._headers)
self._headers = []
def startbody(self, ctype, plist=[], prefix=1):
"""Returns a file-like object for writing the body of the message.
The content-type is set to the provided ctype, and the optional
parameter, plist, provides additional parameters for the
content-type declaration. The optional argument prefix determines
where the header is inserted; 0 means append at the end, 1 means
insert at the start. The default is to insert at the start.
"""
for name, value in plist:
ctype = ctype + ';\n %s=\"%s\"' % (name, value)
self.addheader("Content-Type", ctype, prefix=prefix)
self.flushheaders()
self._fp.write("\n")
return self._fp
def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
"""Returns a file-like object for writing the body of the message.
Additionally, this method initializes the multi-part code, where the
subtype parameter provides the multipart subtype, the boundary
parameter may provide a user-defined boundary specification, and the
plist parameter provides optional parameters for the subtype. The
optional argument, prefix, determines where the header is inserted;
0 means append at the end, 1 means insert at the start. The default
is to insert at the start. Subparts should be created using the
nextpart() method.
"""
self._boundary = boundary or mimetools.choose_boundary()
return self.startbody("multipart/" + subtype,
[("boundary", self._boundary)] + plist,
prefix=prefix)
def nextpart(self):
"""Returns a new instance of MimeWriter which represents an
individual part in a multipart message.
This may be used to write the part as well as used for creating
recursively complex multipart messages. The message must first be
initialized with the startmultipartbody() method before using the
nextpart() method.
"""
self._fp.write("\n--" + self._boundary + "\n")
return self.__class__(self._fp)
def lastpart(self):
"""This is used to designate the last part of a multipart message.
It should always be used when writing multipart messages.
"""
self._fp.write("\n--" + self._boundary + "--\n")
if __name__ == '__main__':
import test.test_MimeWriter
|
iraquitan/political-advisor
|
refs/heads/master
|
web/myapp/tests.py
|
1
|
from django.test import TestCase
# Create your tests here.
class BaseTests(TestCase):
def test_base(self):
self.assertEqual(True, True)
|
aldryn/djangocms-cascade
|
refs/heads/master
|
cmsplugin_cascade/gs960/__init__.py
|
14224
|
# -*- coding: utf-8 -*-
|
jeffbryner/MozDef
|
refs/heads/master
|
tests/loginput/loginput_test_suite.py
|
3
|
import os
from mozdef_util.utilities.dot_dict import DotDict
import mock
from configlib import OptionParser
from tests.http_test_suite import HTTPTestSuite
class LoginputTestSuite(HTTPTestSuite):
def setup(self):
sample_config = DotDict()
sample_config.configfile = os.path.join(os.path.dirname(__file__), 'index.conf')
OptionParser.parse_args = mock.Mock(return_value=(sample_config, {}))
from loginput import index as loginput_index
self.application = loginput_index.application
super().setup()
|
pfmoore/pip
|
refs/heads/main
|
tests/data/src/extension/setup.py
|
4
|
from setuptools import Extension, setup
module = Extension('extension', sources=['extension.c'])
setup(name='extension', version='0.0.1', ext_modules = [module])
|
arnicas/eyeo_nlp
|
refs/heads/master
|
python/get_sentiment_chunks.py
|
1
|
# Usage: python get_sentiment_chunks.py [filepath] [optional_chunk_size]
import json
import sys
NEGWORDS = "../data/sentiment_wordlists/negative-words.txt"
POSWORDS = "../data/sentiment_wordlists/positive-words.txt"
def load_words(path):
with open(path) as handle:
words = handle.readlines()
words = [w.strip() for w in words if w[0] != ';']
words = [word for word in words if word] # get rid of empty string
return words
negwords = load_words(NEGWORDS)
poswords = load_words(POSWORDS)
def read_lowercase(filename):
""" Read and lowercase the text of the source file """
with open(filename) as debate:
text = debate.readlines() # doesn't scale, should do with generator
text = [t.lower() for t in text] # lowercase it all
alltext = ' '.join(text)
return alltext
def get_chunks(filetext, words=50):
""" Breaks up the file into chunks of size words """
from nltk import tokenize
filewords = tokenize.word_tokenize(filetext)
return [filewords[i:i+words] for i in xrange(0, len(filewords), words)]
def get_overlap(list1, list2):
from collections import Counter
list1_multiset = Counter(list1)
list2_multiset = Counter(list2)
overlap = list((list1_multiset & list2_multiset).elements())
totals = []
for word in overlap:
totals.append((word, list1_multiset[word]))
return totals
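# e.g. get_overlap(['good', 'good', 'vile'], ['good', 'great']) returns
# [('good', 2)]: 'good' appears in both lists, and twice in the first.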
def get_sentiment_counts(chunks, poswords=poswords, negwords=negwords):
from collections import Counter
counts = []
for i, chunk in enumerate(chunks):
overlap_pos = get_overlap(chunk, poswords)
overlap_neg = get_overlap(chunk, negwords)
counts.append({
"index": i,
"pos": sum(Counter(dict(overlap_pos)).values()),
"poswords": list(overlap_pos),
"neg": sum(Counter(dict(overlap_neg)).values()),
"negwords": list(overlap_neg),
"words": len(chunk)
})
for count in counts:
count['netpos'] = count['pos'] - count['neg']
return counts
def main():
if len(sys.argv) < 2:
print 'Usage: python get_sentiment_chunks [filepath] {optional chunk size}'
return
input_file = sys.argv[1]
if len(sys.argv) == 3:
chunk_size = int(sys.argv[2])
else:
chunk_size = 50
print "Using chunk size of: " + str(chunk_size)
text = read_lowercase(input_file)
chunks = get_chunks(text, words=chunk_size)
jsonversion = json.dumps(get_sentiment_counts(chunks))
with open('sentiment.json', 'w') as handle:
handle.write(jsonversion)
print "Writing out sentiment.json! Put it where your html can find it."
if __name__ == "__main__":
main()
|
grschafer/BejeweledBot
|
refs/heads/master
|
gfx/environment.py
|
1
|
__author__ = 'Tom Schaul, tom@idsia.ch'
import random
import copy
import numpy as np
from scipy import zeros
from pprint import pformat, pprint
import pygame
from pygame.locals import *
from pybrain.utilities import Named
from pybrain.rl.environments.environment import Environment
# TODO: mazes can have any number of dimensions?
BOARDWIDTH = 8
BOARDHEIGHT = 8
WINDOWWIDTH = 4
WINDOWHEIGHT = 4
NUMGEMTYPES = 7
assert NUMGEMTYPES >= 5, "numgemtypes must be >= 5, for unique gem drop rule"
GEMTYPES = range(NUMGEMTYPES)
EMPTY_SPACE = -1
ROWABOVEBOARD = 'row above board'
MAX_ITERS = 100
FPS = 30
GUI_WIDTH = 600
GUI_HEIGHT = 600
GEMIMAGESIZE = 64
MOVERATE = 25 # 1 to 100, larger num means faster animations
# R G B
PURPLE = (255, 0, 255)
LIGHTBLUE = (170, 190, 255)
BLUE = ( 0, 0, 255)
RED = (255, 100, 100)
BLACK = ( 0, 0, 0)
BROWN = ( 85, 65, 0)
HIGHLIGHTCOLOR = PURPLE # color of the selected gem's border
BGCOLOR = LIGHTBLUE # background color on the screen
GRIDCOLOR = BLUE # color of the game board
GAMEOVERCOLOR = RED # color of the "Game over" text.
GAMEOVERBGCOLOR = BLACK # background color of the "Game over" text.
SCORECOLOR = BROWN # color of the text for the player's score
# The amount of space to the sides of the board to the edge of the window
# is used several times, so calculate it once here and store in variables.
XMARGIN = int((GUI_WIDTH - GEMIMAGESIZE * BOARDWIDTH) / 2)
YMARGIN = int((GUI_HEIGHT - GEMIMAGESIZE * BOARDHEIGHT) / 2)
DISPLAYSURF = None
FPSCLOCK = None
GEMIMAGES = []
BASICFONT = None
BOARDRECTS = []
pos = 0
got = 0
opti = 0
# constants for direction values (used for pygame animations)
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
class BejeweledBoard(Environment, Named):
board = None
score = 0
gameover = False
def main(self):
global FPSCLOCK, DISPLAYSURF, GEMIMAGES, BASICFONT, BOARDRECTS
# Initial set up.
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((GUI_WIDTH, GUI_HEIGHT))
pygame.display.set_caption('Gemgem')
BASICFONT = pygame.font.Font('freesansbold.ttf', 36)
# Load the images
GEMIMAGES = []
for i in range(1, NUMGEMTYPES+1):
gemImage = pygame.image.load('gfx/gem%s.png' % i)
if gemImage.get_size() != (GEMIMAGESIZE, GEMIMAGESIZE):
gemImage = pygame.transform.smoothscale(gemImage, (GEMIMAGESIZE, GEMIMAGESIZE))
GEMIMAGES.append(gemImage)
# Create pygame.Rect objects for each board space to
# do board-coordinate-to-pixel-coordinate conversions.
BOARDRECTS = []
for x in range(BOARDWIDTH):
BOARDRECTS.append([])
for y in range(BOARDHEIGHT):
r = pygame.Rect((XMARGIN + (x * GEMIMAGESIZE),
YMARGIN + (y * GEMIMAGESIZE),
GEMIMAGESIZE,
GEMIMAGESIZE))
BOARDRECTS[x].append(r)
def __init__(self, boardsize, numgemtypes, animspeed, **args):
global BOARDWIDTH, BOARDHEIGHT, NUMGEMTYPES, GEMTYPES, MOVERATE, GUI_WIDTH, GUI_HEIGHT
assert boardsize >= 4, "board must be at least 4x4"
        assert numgemtypes >= 5, "numgemtypes must be >= 5, for unique gem drop rule"
BOARDWIDTH = BOARDHEIGHT = boardsize
NUMGEMTYPES = numgemtypes
GEMTYPES = range(NUMGEMTYPES)
MOVERATE = animspeed
GUI_WIDTH = 88 + GEMIMAGESIZE * BOARDWIDTH
GUI_HEIGHT = 88 + GEMIMAGESIZE * BOARDWIDTH
self.setArgs(**args)
self.main()
self.reset()
def reset(self):
""" return to initial position (stochastically): """
self.board = self._getBlankBoard()
self._fillBoard(self.board, [], 0, True)
while not self._canMakeMove(self.board):
self.board = self._getBlankBoard()
self._fillBoard(self.board, [], 0, True)
self.score = 0
self.gameover = False
def _score(self, match, inboard):
score = 0
board = copy.deepcopy(inboard)
firstSelectedGem = {'x': match[0][0], 'y': match[0][1]}
clickedSpace = {'x': match[1][0], 'y': match[1][1]}
# Two gems have been clicked on and selected. Swap the gems.
firstSwappingGem, secondSwappingGem = self._getSwappingGems(board, firstSelectedGem, clickedSpace)
# Swap the gems in the board data structure.
board[firstSwappingGem['x']][firstSwappingGem['y']] = secondSwappingGem['imageNum']
board[secondSwappingGem['x']][secondSwappingGem['y']] = firstSwappingGem['imageNum']
matchedGems = self._findMatchingGems(board)
# This was a matching move.
while matchedGems != []:
# Remove matched gems, then pull down the board.
points = []
for gemSet in matchedGems:
score += (10 + (len(gemSet) - 3) * 10)
for gem in gemSet:
board[gem[0]][gem[1]] = EMPTY_SPACE
# Drop the new gems.
self._fillBoard(board, [], 0, False)
# Check if there are any new matches.
matchedGems = self._findMatchingGems(board)
return score
def _findOptimalMoves(self, board):
matches = self._possibleMoves(board)
scores = [self._score(match, board) for match in matches]
tup = zip(matches, scores)
maxVal = max(scores)
maxMoves = filter(lambda x: x[1] == maxVal, tup)
return [x[0] for x in maxMoves], maxVal
def performAction(self, action):
movePos = self._canMakeMove(self.board)
optiMoves, optiValue = self._findOptimalMoves(self.board)
scoreAdd = 0
action = self._actionIndexToSwapTuple(action)
#print 'optiMove', optiMoves, 'worth', optiValue, 'action', action, 'same?', list([action[0], action[1]]) in optiMoves
firstSelectedGem = {'x': action[0][0], 'y': action[0][1]}
clickedSpace = {'x': action[1][0], 'y': action[1][1]}
# Two gems have been clicked on and selected. Swap the gems.
firstSwappingGem, secondSwappingGem = self._getSwappingGems(self.board, firstSelectedGem, clickedSpace)
if firstSwappingGem == None and secondSwappingGem == None:
# If both are None, then the gems were not adjacent
print 'gems not adjacent'
firstSelectedGem = None # deselect the first gem
self.lastReward = -10
return 0
#print self
#print firstSwappingGem, secondSwappingGem
# Show the swap animation on the screen.
boardCopy = self._getBoardCopyMinusGems(self.board, (firstSwappingGem, secondSwappingGem))
animateMovingGems(boardCopy, [firstSwappingGem, secondSwappingGem], [], self.score)
# Swap the gems in the board data structure.
self.board[firstSwappingGem['x']][firstSwappingGem['y']] = secondSwappingGem['imageNum']
self.board[secondSwappingGem['x']][secondSwappingGem['y']] = firstSwappingGem['imageNum']
# See if this is a matching move.
matchedGems = self._findMatchingGems(self.board)
if matchedGems == []:
#print 'did not cause a match'
# Was not a matching move; swap the gems back
animateMovingGems(boardCopy, [firstSwappingGem, secondSwappingGem], [], self.score)
self.board[firstSwappingGem['x']][firstSwappingGem['y']] = firstSwappingGem['imageNum']
self.board[secondSwappingGem['x']][secondSwappingGem['y']] = secondSwappingGem['imageNum']
self.lastReward = -10
else:
# This was a matching move.
while matchedGems != []:
# Remove matched gems, then pull down the board.
points = []
for gemSet in matchedGems:
scoreAdd += (10 + (len(gemSet) - 3) * 10)
for gem in gemSet:
self.board[gem[0]][gem[1]] = EMPTY_SPACE
points.append({'points': scoreAdd,
'x': gem[0] * GEMIMAGESIZE + XMARGIN,
'y': gem[1] * GEMIMAGESIZE + YMARGIN})
self.score += scoreAdd
# Drop the new gems.
self._fillBoard(self.board, points, self.score, True)
# Check if there are any new matches.
matchedGems = self._findMatchingGems(self.board)
# TODO: set last reward before combos? otherwise it will get confused
# when it gets extra reward
# combos allowed from pieces already on the board falling into
# more matches, but not allowed for pieces newly falling into board
self.lastReward = scoreAdd
#print scoreAdd
firstSelectedGem = None
# Draw the board.
DISPLAYSURF.fill(BGCOLOR)
drawBoard(self.board)
if firstSelectedGem != None:
highlightSpace(firstSelectedGem['x'], firstSelectedGem['y'])
if self.gameover:
            # Render the game-over text each time and center it on the screen.
            clickContinueTextSurf = BASICFONT.render('Final Score: %s (Click to continue)' % (self.score), 1, GAMEOVERCOLOR, GAMEOVERBGCOLOR)
            clickContinueTextRect = clickContinueTextSurf.get_rect()
            clickContinueTextRect.center = int(GUI_WIDTH / 2), int(GUI_HEIGHT / 2)
DISPLAYSURF.blit(clickContinueTextSurf, clickContinueTextRect)
drawScore(self.score)
pygame.display.update()
FPSCLOCK.tick(FPS)
global pos
global got
global opti
if movePos:
pos += 1
if scoreAdd > 0:
got += 1
if list([action[0], action[1]]) in optiMoves:
opti += 1
print 'found match:', got, '/', pos, '=', \
float(got) / pos, 'found optimal:', \
opti, '/', pos, '=', float(opti) / pos
if not self._canMakeMove(self.board):
#print 'game ended, no more moves available'
self.gameover = True
# TODO: tie gameover into episodic learning stuff?
self.reset()
return 0
def getSensors(self):
indices = []
board = np.array(self.board)
for i in range(BOARDHEIGHT - WINDOWHEIGHT + 1):
for j in range(BOARDWIDTH - WINDOWWIDTH + 1):
indices.append(self._boardToIndices(board[i:i + WINDOWHEIGHT, j:j + WINDOWWIDTH]))
return indices
def getLastReward(self):
return self.lastReward
# ====================================================================
# ==================== BEJEWELED HELPER FUNCTIONS ====================
# ====================================================================
# TODO: add rotation/mirroring support
def _actionIndexToSwapTuple(self, action):
""" Converts from action index to tuple of coords of gems to swap """
# TODO: explain indexing scheme better
action, loc = action
action = int(action[0]) # remove action number from its array
loc, color = divmod(loc, NUMGEMTYPES)
row, col = divmod(loc, BOARDWIDTH - WINDOWWIDTH + 1)
swapTuple = []
if action > 11: # vertical swap
swapTuple.append(divmod(action - 12, 4))
swapTuple.append((swapTuple[0][0] + 1, swapTuple[0][1]))
else: # horizontal swap
swapTuple.append(divmod(action, 3))
swapTuple.append((swapTuple[0][0], swapTuple[0][1] + 1))
translated = [(x[0] + row, x[1] + col) for x in swapTuple]
return tuple(translated)
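        # Decoding sketch with the module defaults (NUMGEMTYPES=7,
        # BOARDWIDTH=8, WINDOWWIDTH=4): action=([0], 0) unpacks to action
        # index 0 at window offset (0,0) with color 0, i.e. a horizontal
        # swap of board cells (0,0) and (0,1). Actions 0-11 decode as
        # horizontal swaps and 12-23 as vertical swaps within the window.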
def _boardToIndices(self, board):
""" Converts board to state index for each color (EXPLAIN MORE)
Also: ROTATIONS/REFLECTIONS? """
# TODO: explain indexing scheme better
b = np.array(board)
indices = []
for color in GEMTYPES:
tmp = np.array(b == color, dtype=int)
binstr = ''.join((str(i) for i in tmp.flatten()))
index = int(binstr, base=2)
indices.append([index]) # TODO: lame that this has to be in a list
return np.array(indices)
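        # Illustration (assuming the 4x4 observation window): if a color
        # occupies only the top-left cell, the flattened bitmap is
        # '1' followed by fifteen '0's, so its index is
        # int('1000000000000000', 2) == 32768.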
def _indicesToBoard(self, indices):
board = np.zeros((4,4))
for color, index in enumerate(indices):
s = bin(index[0])[2:]
s = '0' * (16 - len(s)) + s
coords = [divmod(i, 4) for i in range(len(s)) if s[i] == '1']
for c in coords:
board[c] = color
return board
def _getBlankBoard(self):
# TODO: change to numpy.array
board = []
for x in range(BOARDWIDTH):
board.append([EMPTY_SPACE] * BOARDHEIGHT)
return board
def _getSwappingGems(self, board, firstXY, secondXY):
# If the gems at the (X, Y) coordinates of the two gems are adjacent,
# then their 'direction' keys are set to the appropriate direction
# value to be swapped with each other.
# Otherwise, (None, None) is returned.
firstGem = {'imageNum': board[firstXY['x']][firstXY['y']],
'x': firstXY['x'],
'y': firstXY['y']}
secondGem = {'imageNum': board[secondXY['x']][secondXY['y']],
'x': secondXY['x'],
'y': secondXY['y']}
highlightedGem = None
if firstGem['x'] == secondGem['x'] + 1 and firstGem['y'] == secondGem['y']:
firstGem['direction'] = LEFT
secondGem['direction'] = RIGHT
elif firstGem['x'] == secondGem['x'] - 1 and firstGem['y'] == secondGem['y']:
firstGem['direction'] = RIGHT
secondGem['direction'] = LEFT
elif firstGem['y'] == secondGem['y'] + 1 and firstGem['x'] == secondGem['x']:
firstGem['direction'] = UP
secondGem['direction'] = DOWN
elif firstGem['y'] == secondGem['y'] - 1 and firstGem['x'] == secondGem['x']:
firstGem['direction'] = DOWN
secondGem['direction'] = UP
else:
# These gems are not adjacent and can't be swapped.
return None, None
return firstGem, secondGem
def _canMakeMove(self, board):
return len(self._possibleMoves(board)) > 0
def _possibleMoves(self, board):
        # Return a list of possible matching moves on the board
        # (an empty list if no move can be made).
# The patterns in oneOffPatterns represent gems that are configured
# in a way where it only takes one move to make a triplet.
oneOffPatterns = (((0,1), (1,0), (2,0), ((0,0), (0,1))),
((0,1), (1,1), (2,0), ((2,0), (2,1))),
((0,0), (1,1), (2,0), ((1,0), (1,1))),
((0,1), (1,0), (2,1), ((1,0), (1,1))),
((0,0), (1,0), (2,1), ((2,0), (2,1))),
((0,0), (1,1), (2,1), ((0,0), (0,1))),
((0,0), (0,2), (0,3), ((0,0), (0,1))),
((0,0), (0,1), (0,3), ((0,2), (0,3))))
# The x and y variables iterate over each space on the board.
# If we use + to represent the currently iterated space on the
        # board, then this pattern: ((0,1), (1,0), (2,0)) refers to identical
# gems being set up like this:
#
# +A
# B
# C
#
# That is, gem A is offset from the + by (0,1), gem B is offset
# by (1,0), and gem C is offset by (2,0). In this case, gem A can
# be swapped to the left to form a vertical three-in-a-row triplet.
#
# There are eight possible ways for the gems to be one move
        # away from forming a triple, hence oneOffPatterns has 8 patterns.
moves = []
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
for pat in oneOffPatterns:
# check each possible pattern of "match in next move" to
# see if a possible move can be made.
if (self._getGemAt(board, x+pat[0][0], y+pat[0][1]) == \
self._getGemAt(board, x+pat[1][0], y+pat[1][1]) == \
self._getGemAt(board, x+pat[2][0], y+pat[2][1]) != None):
moves.append(map(lambda z: (z[0] + x, z[1] + y), pat[3]))
if (self._getGemAt(board, x+pat[0][1], y+pat[0][0]) == \
self._getGemAt(board, x+pat[1][1], y+pat[1][0]) == \
self._getGemAt(board, x+pat[2][1], y+pat[2][0]) != None):
moves.append(map(lambda z: (z[1] + x, z[0] + y), pat[3]))
return moves
def _pullDownAllGems(self, board):
# pulls down gems on the board to the bottom to fill in any gaps
for x in range(BOARDWIDTH):
gemsInColumn = []
for y in range(BOARDHEIGHT):
if board[x][y] != EMPTY_SPACE:
gemsInColumn.append(board[x][y])
board[x] = ([EMPTY_SPACE] * (BOARDHEIGHT - len(gemsInColumn))) + gemsInColumn
def _getGemAt(self, board, x, y):
if x < 0 or y < 0 or x >= BOARDWIDTH or y >= BOARDHEIGHT:
return None
else:
return board[x][y]
def _getDropSlots(self, board):
# Creates a "drop slot" for each column and fills the slot with a
# number of gems that that column is lacking. This function assumes
# that the gems have been gravity dropped already.
boardCopy = copy.deepcopy(board)
self._pullDownAllGems(boardCopy)
dropSlots = []
for i in range(BOARDWIDTH):
dropSlots.append([])
# TODO: remove restriction that there can be no combos from new gems?
# count the number of empty spaces in each column on the board
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT-1, -1, -1): # start from bottom, going up
if boardCopy[x][y] == EMPTY_SPACE:
possibleGems = list(range(len(GEMTYPES)))
for offsetX, offsetY in ((0, -1), (1, 0), (0, 1), (-1, 0)):
# Narrow down the possible gems we should put in the
                        # blank space so we don't end up putting two of
# the same gems next to each other when they drop.
neighborGem = self._getGemAt(boardCopy, x + offsetX, y + offsetY)
if neighborGem != None and neighborGem in possibleGems:
possibleGems.remove(neighborGem)
newGem = random.choice(possibleGems)
boardCopy[x][y] = newGem
dropSlots[x].append(newGem)
return dropSlots
def _findMatchingGems(self, board):
gemsToRemove = [] # a list of lists of gems in matching triplets that should be removed
boardCopy = copy.deepcopy(board)
# loop through each space, checking for 3 adjacent identical gems
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
# TODO: make 3x3 L/T-shape matches work
# look for horizontal matches
if self._getGemAt(boardCopy, x, y) == self._getGemAt(boardCopy, x + 1, y) == self._getGemAt(boardCopy, x + 2, y) and self._getGemAt(boardCopy, x, y) != EMPTY_SPACE:
targetGem = boardCopy[x][y]
offset = 0
removeSet = []
while self._getGemAt(boardCopy, x + offset, y) == targetGem:
# keep checking, in case there are more than 3 matching gems in a row
removeSet.append((x + offset, y))
boardCopy[x + offset][y] = EMPTY_SPACE
offset += 1
gemsToRemove.append(removeSet)
# look for vertical matches
if self._getGemAt(boardCopy, x, y) == self._getGemAt(boardCopy, x, y + 1) == self._getGemAt(boardCopy, x, y + 2) and self._getGemAt(boardCopy, x, y) != EMPTY_SPACE:
targetGem = boardCopy[x][y]
offset = 0
removeSet = []
while self._getGemAt(boardCopy, x, y + offset) == targetGem:
# keep checking, in case there are more than 3 matching gems in a column
removeSet.append((x, y + offset))
boardCopy[x][y + offset] = EMPTY_SPACE
offset += 1
gemsToRemove.append(removeSet)
return gemsToRemove
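# Illustrative return value (assumed coordinates, not original code):
# a horizontal run of four identical gems starting at (2, 5) plus a
# separate vertical triplet in column 0 would come back as
# [[(2, 5), (3, 5), (4, 5), (5, 5)], [(0, 0), (0, 1), (0, 2)]].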
def _getDroppingGems(self, board):
# Find all the gems that have an empty space below them
boardCopy = copy.deepcopy(board)
droppingGems = []
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT - 2, -1, -1):
if boardCopy[x][y + 1] == EMPTY_SPACE and boardCopy[x][y] != EMPTY_SPACE:
# This gem drops if its space is not empty but the space below it is
droppingGems.append( {'imageNum': boardCopy[x][y], 'x': x, 'y': y, 'direction': DOWN} )
boardCopy[x][y] = EMPTY_SPACE
return droppingGems
def _moveGems(self, board, movingGems):
# movingGems is a list of dicts with keys x, y, direction, imageNum
for gem in movingGems:
if gem['y'] != ROWABOVEBOARD:
board[gem['x']][gem['y']] = EMPTY_SPACE
movex = 0
movey = 0
if gem['direction'] == LEFT:
movex = -1
elif gem['direction'] == RIGHT:
movex = 1
elif gem['direction'] == DOWN:
movey = 1
elif gem['direction'] == UP:
movey = -1
board[gem['x'] + movex][gem['y'] + movey] = gem['imageNum']
else:
# gem is located above the board (where new gems come from)
board[gem['x']][0] = gem['imageNum'] # move to top row
def _fillBoard(self, board, points, score, animate):
dropSlots = self._getDropSlots(board)
while dropSlots != [[]] * BOARDWIDTH:
# do the dropping animation as long as there are more gems to drop
movingGems = self._getDroppingGems(board)
for x in range(len(dropSlots)):
if len(dropSlots[x]) != 0:
# cause the lowest gem in each slot to begin moving in the DOWN direction
movingGems.append({'imageNum': dropSlots[x][0], 'x': x, 'y': ROWABOVEBOARD, 'direction': DOWN})
boardCopy = self._getBoardCopyMinusGems(board, movingGems)
if animate:
animateMovingGems(boardCopy, movingGems, points, score)
self._moveGems(board, movingGems)
# Move the lowest gem of each drop slot onto the board's top
# row, then delete it from the slot so the next gem becomes the lowest.
for x in range(len(dropSlots)):
if len(dropSlots[x]) == 0:
continue
board[x][0] = dropSlots[x][0]
del dropSlots[x][0]
def _getBoardCopyMinusGems(self, board, gems):
# Creates and returns a copy of the passed board data structure,
# with the gems in the "gems" list removed from it.
#
# Gems is a list of dicts, with keys x, y, direction, imageNum
boardCopy = copy.deepcopy(board)
# Remove some of the gems from this board data structure copy.
for gem in gems:
if gem['y'] != ROWABOVEBOARD:
boardCopy[gem['x']][gem['y']] = EMPTY_SPACE
return boardCopy
def __str__(self):
""" Ascii representation of the maze, with the current state """
return pformat(self.board)
def drawMovingGem(gem, progress):
# Draw a gem sliding in the direction that its 'direction' key
# indicates. The progress parameter is a number from 0 (just
# starting) to 100 (slide complete).
movex = 0
movey = 0
progress *= 0.01
if gem['direction'] == UP:
movey = -int(progress * GEMIMAGESIZE)
elif gem['direction'] == DOWN:
movey = int(progress * GEMIMAGESIZE)
elif gem['direction'] == RIGHT:
movex = int(progress * GEMIMAGESIZE)
elif gem['direction'] == LEFT:
movex = -int(progress * GEMIMAGESIZE)
basex = gem['x']
basey = gem['y']
if basey == ROWABOVEBOARD:
basey = -1
pixelx = XMARGIN + (basex * GEMIMAGESIZE)
pixely = YMARGIN + (basey * GEMIMAGESIZE)
r = pygame.Rect( (pixelx + movex, pixely + movey, GEMIMAGESIZE, GEMIMAGESIZE) )
DISPLAYSURF.blit(GEMIMAGES[gem['imageNum']], r)
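# Worked example (illustrative values, not original code): with
# GEMIMAGESIZE = 64 and progress = 50, a gem moving RIGHT is drawn
# int(0.5 * 64) = 32 pixels to the right of its base cell, so
# successive frames slide it smoothly from one cell into the next.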
def highlightSpace(x, y):
pygame.draw.rect(DISPLAYSURF, HIGHLIGHTCOLOR, BOARDRECTS[x][y], 4)
def animateMovingGems(board, gems, pointsText, score):
# pointsText is a dictionary with keys 'x', 'y', and 'points'
progress = 0 # progress at 0 represents beginning, 100 means finished.
while progress < 100: # animation loop
DISPLAYSURF.fill(BGCOLOR)
drawBoard(board)
for gem in gems: # Draw each gem.
drawMovingGem(gem, progress)
drawScore(score)
for pointText in pointsText:
pointsSurf = BASICFONT.render(str(pointText['points']), 1, SCORECOLOR)
pointsRect = pointsSurf.get_rect()
pointsRect.center = (pointText['x'], pointText['y'])
DISPLAYSURF.blit(pointsSurf, pointsRect)
pygame.display.update()
FPSCLOCK.tick(FPS)
progress += MOVERATE # progress the animation a little bit more for the next frame
def drawBoard(board):
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
pygame.draw.rect(DISPLAYSURF, GRIDCOLOR, BOARDRECTS[x][y], 1)
gemToDraw = board[x][y]
if gemToDraw != EMPTY_SPACE:
DISPLAYSURF.blit(GEMIMAGES[gemToDraw], BOARDRECTS[x][y])
def drawScore(score):
scoreImg = BASICFONT.render(str(score), 1, SCORECOLOR)
scoreRect = scoreImg.get_rect()
scoreRect.bottomleft = (10, WINDOWHEIGHT - 6)
DISPLAYSURF.blit(scoreImg, scoreRect)
|
zenodo/invenio
|
refs/heads/zenodo-master
|
invenio/legacy/bibcirculation/web/admin/bibcirculationadmin.py
|
13
|
# This file is part of Invenio.
# Copyright (C) 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio BibCirculation Administrator (URLs) Interface."""
__revision__ = ""
import invenio.legacy.bibcirculation.adminlib as bal
from invenio.config import CFG_SITE_LANG
from invenio.utils.url import wash_url_argument
def index(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py
"""
return bal.index(req, ln)
def borrower_search(req, empty_barcode=None, redirect_to_new_request=False,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/borrower_search
"""
return bal.borrower_search(req, empty_barcode,
redirect_to_new_request=redirect_to_new_request, ln=ln)
def item_search(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/item_search
"""
return bal.item_search(req, [], ln)
def borrower_notification(req, borrower_id=None, template=None,
message=None, load_msg_template=None,
subject=None, send_message=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/borrower_notification
"""
return bal.borrower_notification(req, borrower_id, template,
message, load_msg_template,
subject, send_message, ln)
def get_pending_requests(req, request_id=None, print_data=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_pending_requests
"""
return bal.get_pending_requests(req, request_id, print_data, ln)
def item_search_result(req, p=None, f=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/item_search_result
"""
return bal.item_search_result(req, p, f, ln)
def loan_return(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/loan_return
"""
return bal.loan_return(req, ln)
def loan_on_desk_step1(req, key=None, string=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/loan_on_desk_step1
"""
return bal.loan_on_desk_step1(req, key, string, ln)
def loan_on_desk_step2(req, user_info=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/loan_on_desk_step2
"""
user_info = user_info.split(',')
return bal.loan_on_desk_step2(req, user_info, ln)
def loan_on_desk_step3(req, user_info=None, barcode=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/loan_on_desk_step3
"""
user_info = eval(user_info)
return bal.loan_on_desk_step3(req, user_info, barcode, ln)
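# A note on the eval() calls in this module (an observation added here,
# not original code): eval() executes arbitrary Python taken from a
# request argument. A safer sketch would parse the serialized value
# with the standard library instead, e.g.:
#
#   import ast
#   user_info = ast.literal_eval(user_info)  # accepts Python literals only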
def loan_on_desk_step4(req, list_of_books=None, user_info=None, due_date=None,
note=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/loan_on_desk_step4
"""
user_info = eval(user_info)
list_of_books = eval(list_of_books)
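# wash_url_argument coerces the request argument to the requested type (a list here)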
due_date = wash_url_argument(due_date, 'list')
return bal.loan_on_desk_step4(req, list_of_books, user_info,
due_date, note, ln)
def loan_on_desk_confirm(req, barcode=None, borrower_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/loan_on_desk_confirm
"""
return bal.loan_on_desk_confirm(req, barcode, borrower_id, ln)
def register_new_loan(req, barcode=None, borrower_id=None, request_id=None,
new_note=None, print_data=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_new_loan
"""
return bal.register_new_loan(req, barcode, borrower_id, request_id,
new_note, print_data, ln)
def loan_return_confirm(req, barcode=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/loan_return_confirm
"""
return bal.loan_return_confirm(req, barcode, ln)
def get_next_waiting_loan_request(req, recid=None, barcode=None, check_id=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_next_waiting_loan_request
"""
return bal.get_next_waiting_loan_request(req, recid, barcode, check_id, ln)
def make_new_loan_from_request(req, check_id=None, barcode=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/make_new_loan_from_request
"""
return bal.make_new_loan_from_request(req, check_id,
barcode, ln)
def all_requests(req, request_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/all_requests
"""
return bal.all_requests(req, request_id, ln)
def get_item_req_historical_overview(req, recid=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_item_req_historical_overview
"""
return bal.get_item_req_historical_overview(req, recid, ln)
def get_item_loans_historical_overview(req, recid=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_item_loans_historical_overview
"""
return bal.get_item_loans_historical_overview(req, recid, ln)
#def all_loans_test(req, ln=CFG_SITE_LANG):
# """
# http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/all_loans
# """
# return bal.all_loans_test(req, ln)
def all_loans(req, msg=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/all_loans
"""
return bal.all_loans(req, msg=msg, ln=ln)
def bor_loans_historical_overview(req, borrower_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/bor_loans_historical_overview
"""
return bal.bor_loans_historical_overview(req, borrower_id, ln)
def bor_requests_historical_overview(req, borrower_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/bor_requests_historical_overview
"""
return bal.bor_requests_historical_overview(req, borrower_id, ln)
def get_item_requests_details(req, recid=None, request_id=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_item_requests_details
"""
return bal.get_item_requests_details(req, recid, request_id, ln)
def get_item_loans_details(req, recid=None, barcode=None, loan_id=None,
force=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_item_loans_details
"""
return bal.get_item_loans_details(req, recid, barcode, loan_id, force, ln)
def get_borrower_details(req, borrower_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_borrower_details
"""
return bal.get_borrower_details(req, borrower_id, ln)
def get_item_details(req, recid=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_item_details
"""
return bal.get_item_details(req, recid, ln)
def get_library_details(req, library_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_library_details
"""
return bal.get_library_details(req, library_id, ln)
def get_borrower_requests_details(req, borrower_id=None, request_id=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_borrower_requests_details
"""
return bal.get_borrower_requests_details(req, borrower_id, request_id, ln)
def get_borrower_loans_details(req, recid=None, barcode=None, borrower_id=None,
renewall=None, force=None, loan_id=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_borrower_loans_details
"""
return bal.get_borrower_loans_details(req, recid, barcode, borrower_id,
renewall, force, loan_id, ln)
def borrower_search_result(req, column, string, redirect_to_new_request=False,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/borrower_search_result
"""
return bal.borrower_search_result(req, column, string,
redirect_to_new_request=redirect_to_new_request, ln=ln)
def associate_barcode(req, request_id=None, recid=None, borrower_id=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/associate_barcode
"""
return bal.associate_barcode(req, request_id, recid, borrower_id, ln)
def get_borrower_notes(req, borrower_id=None, delete_key=None,
library_notes=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_borrower_notes
"""
return bal.get_borrower_notes(req, borrower_id, delete_key,
library_notes, ln)
def get_loans_notes(req, loan_id=None, recid=None, delete_key=None,
library_notes=None, back="", ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_loans_notes
"""
return bal.get_loans_notes(req, loan_id, delete_key,
library_notes, back, ln)
def get_item_loans_notes(req, loan_id=None, recid=None,
add_notes=None, new_note=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_item_loans_notes
"""
return bal.get_item_loans_notes(req, loan_id, recid, add_notes,
new_note, ln)
def new_item(req, isbn=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/new_item
"""
return bal.new_item(req, isbn, ln)
def add_new_borrower_step1(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_borrower_step1
"""
return bal.add_new_borrower_step1(req, ln)
def add_new_borrower_step2(req, name=None, email=None, phone=None, address=None,
mailbox=None, notes=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_borrower_step2
"""
return bal.add_new_borrower_step2(req, name, email, phone, address,
mailbox, notes, ln)
def add_new_borrower_step3(req, tup_infos=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_borrower_step3
"""
tup_infos = eval(tup_infos)
return bal.add_new_borrower_step3(req, tup_infos, ln)
def update_borrower_info_step1(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_borrower_info_step1
"""
return bal.update_borrower_info_step1(req, ln)
def update_borrower_info_step2(req, column=None, string=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_borrower_info_step2
"""
return bal.update_borrower_info_step2(req, column, string, ln)
def update_borrower_info_step3(req, borrower_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_borrower_info_step3
"""
return bal.update_borrower_info_step3(req, borrower_id, ln)
def update_borrower_info_step4(req, name=None, email=None, phone=None,
address=None, mailbox=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_borrower_info_step4
"""
return bal.update_borrower_info_step4(req, name, email, phone, address,
mailbox, ln)
def add_new_library_step1(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_library_step1
"""
return bal.add_new_library_step1(req, ln)
def add_new_library_step2(req, name=None, email=None, phone=None, address=None,
type=None, notes=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_library_step2
"""
return bal.add_new_library_step2(req, name, email, phone, address,
type, notes, ln)
def add_new_library_step3(req, tup_infos=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_library_step3
"""
tup_infos = eval(tup_infos)
return bal.add_new_library_step3(req, tup_infos, ln)
def update_library_info_step1(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_library_info_step1
"""
return bal.update_library_info_step1(req, ln)
def update_library_info_step2(req, column=None, string=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_library_info_step2
"""
return bal.update_library_info_step2(req, column, string, ln)
def update_library_info_step3(req, library_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_library_info_step3
"""
return bal.update_library_info_step3(req, library_id, ln)
def update_library_info_step4(req, name=None, email=None, phone=None,
address=None, library_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_library_info_step4
"""
return bal.update_library_info_step4(req, name, email, phone, address,
library_id, ln)
def update_library_info_step5(req, tup_infos, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_library_info_step5
"""
tup_infos = eval(tup_infos)
return bal.update_library_info_step5(req, tup_infos, ln)
def new_book_step1(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/new_book_step1
"""
return bal.new_book_step1(req, ln)
def new_book_step2(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/new_book_step2
"""
return bal.new_book_step2(req, ln)
def add_new_copy_step1(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_copy_step1
"""
return bal.add_new_copy_step1(req, ln)
def add_new_copy_step2(req, p=None, f=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_copy_step2
"""
return bal.add_new_copy_step2(req, p, f, ln)
def add_new_copy_step3(req, recid=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_copy_step3
"""
return bal.add_new_copy_step3(req, recid, ln)
def add_new_copy_step4(req, barcode=None, library=None, location=None,
collection=None, description=None, loan_period=None,
status=None, recid=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_copy_step4
"""
return bal.add_new_copy_step4(req, barcode, library, location, collection,
description, loan_period, status, recid, ln)
def add_new_copy_step5(req, tup_infos=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_copy_step5
"""
tup_infos = eval(tup_infos)
return bal.add_new_copy_step5(req, tup_infos, ln)
def update_item_info_step1(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_item_info_step1
"""
return bal.update_item_info_step1(req, ln)
def update_item_info_step2(req, p, f, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_item_info_step2
"""
return bal.update_item_info_step2(req, p, f, ln)
def update_item_info_step3(req, recid, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_item_info_step3
"""
return bal.update_item_info_step3(req, recid, ln)
def update_item_info_step4(req, barcode, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_item_info_step4
"""
return bal.update_item_info_step4(req, barcode, ln)
def update_item_info_step5(req, barcode, library, location, collection,
description, loan_period, status, recid,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_item_info_step5
"""
return bal.update_item_info_step5(req, barcode, library, location,
collection, description, loan_period,
status, recid, ln)
def update_item_info_step6(req, tup_infos, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_item_info_step6
"""
tup_infos = eval(tup_infos)
return bal.update_item_info_step6(req, tup_infos, ln)
def search_library_step1(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/search_library_step1
"""
return bal.search_library_step1(req=req, ln=ln)
def search_library_step2(req, column, string, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/search_library_step2
"""
return bal.search_library_step2(req, column, string, ln)
def get_library_notes(req, library_id=None, delete_key=None, library_notes=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_library_notes
"""
return bal.get_library_notes(req, library_id, delete_key, library_notes, ln)
def change_due_date_step1(req, loan_id=None, borrower_id=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/change_due_date_step1
"""
return bal.change_due_date_step1(req, loan_id, borrower_id, ln)
def change_due_date_step2(req, due_date=None, loan_id=None, borrower_id=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/change_due_date_step2
"""
return bal.change_due_date_step2(req, due_date, loan_id, borrower_id, ln)
def claim_book_return(req, borrower_id=None, recid=None, loan_id=None,
template=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/claim_book_return
"""
return bal.claim_book_return(req, borrower_id, recid, loan_id, template, ln)
def all_expired_loans(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/all_expired_loans
"""
return bal.all_expired_loans(req, ln)
def get_waiting_requests(req, request_id=None, print_data=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_waiting_requests
"""
return bal.get_waiting_requests(req, request_id, print_data, ln)
def create_new_loan_step1(req, borrower_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/create_new_loan_step1
"""
return bal.create_new_loan_step1(req, borrower_id, ln)
def create_new_loan_step2(req, borrower_id=None, barcode=None, notes=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/create_new_loan_step2
"""
return bal.create_new_loan_step2(req, borrower_id, barcode, notes, ln)
def create_new_request_step1(req, borrower_id=None, p=None, f=None, search=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/create_new_request_step1
"""
return bal.create_new_request_step1(req, borrower_id, p, f, search, ln)
def create_new_request_step2(req, recid=None, borrower_id=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/create_new_request_step2
"""
return bal.create_new_request_step2(req, recid, borrower_id, ln)
def create_new_request_step3(req, borrower_id=None, barcode=None, recid=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/create_new_request_step3
"""
return bal.create_new_request_step3(req, borrower_id, barcode, recid, ln)
def create_new_request_step4(req, period_from=None, period_to=None,
barcode=None, borrower_id=None, recid=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/create_new_request_step4
"""
return bal.create_new_request_step4(req, period_from, period_to, barcode,
borrower_id, recid, ln)
def place_new_request_step1(req, barcode=None, recid=None, key=None,
string=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/place_new_request_step1
"""
return bal.place_new_request_step1(req, barcode, recid, key, string, ln)
def place_new_request_step2(req, barcode=None, recid=None, user_info=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/place_new_request_step2
"""
if user_info is not None:
user_info = user_info.split(',')
return bal.place_new_request_step2(req, barcode, recid, user_info, ln)
def place_new_request_step3(req, barcode=None, recid=None, user_info=None,
period_from=None, period_to=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/place_new_request_step3
"""
user_info = eval(user_info)
return bal.place_new_request_step3(req, barcode, recid, user_info,
period_from, period_to, ln)
def place_new_loan_step1(req, barcode=None, recid=None, key=None,
string=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/place_new_loan_step1
"""
return bal.place_new_loan_step1(req, barcode, recid, key, string, ln)
def place_new_loan_step2(req, barcode=None, recid=None, user_info=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/place_new_loan_step2
"""
return bal.place_new_loan_step2(req, barcode, recid, user_info, ln)
def place_new_loan_step3(req, barcode=None, recid=None, ccid=None, name=None,
email=None, phone=None, address=None, mailbox=None,
due_date=None, notes=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/place_new_loan_step3
"""
return bal.place_new_loan_step3(req, barcode, recid, ccid, name, email,
phone, address, mailbox, due_date, notes,
ln)
def order_new_copy_step1(req, recid=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/order_new_copy_step1
"""
return bal.order_new_copy_step1(req, recid, ln)
def order_new_copy_step2(req, recid=None, barcode=None, vendor_id=None,
cost=None, currency=None, status=None,
order_date=None, expected_date=None,
library_id=None, notes=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/order_new_copy_step2
"""
return bal.order_new_copy_step2(req, recid, barcode, vendor_id, cost,
currency, status, order_date, expected_date,
library_id, notes, ln)
def order_new_copy_step3(req, order_info=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/order_new_copy_step3
"""
order_info = eval(order_info)
return bal.order_new_copy_step3(req, order_info, ln)
def ordered_books(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/ordered_books
"""
return bal.list_ordered_books(req, ln)
def get_purchase_notes(req, purchase_id=None, delete_key=None,
library_notes=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_purchase_notes
"""
return bal.get_purchase_notes(req, purchase_id, delete_key,
library_notes, ln)
def register_ill_request_step0(req, recid=None, key=None, string=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_request_step0
"""
return bal.register_ill_request_step0(req, recid, key, string, ln)
def register_ill_request_step1(req, recid=None, user_info=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_request_step1
"""
return bal.register_ill_request_step1(req, recid, user_info, ln)
def register_ill_request_step2(req, recid=None, user_info=None,
period_of_interest_from=None,
period_of_interest_to=None, notes=None,
only_edition=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_request_step2
"""
return bal.register_ill_request_step2(req, recid, user_info,
period_of_interest_from,
period_of_interest_to,
notes, only_edition, ln)
def register_ill_request_step3(req, borrower_id=None, request_info=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_request_step3
"""
request_info = eval(request_info)
return bal.register_ill_request_step3(req, borrower_id, request_info, ln)
def list_ill_request(req, status=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/list_ill_request
"""
return bal.list_ill_request(req, status, ln)
def ill_request_details_step1(req, delete_key=None, ill_request_id=None,
new_status=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/ill_request_details_step1
"""
return bal.ill_request_details_step1(req, delete_key, ill_request_id,
new_status, ln)
def ill_request_details_step2(req, delete_key=None, ill_request_id=None,
new_status=None, library_id=None,
request_date=None, expected_date=None,
arrival_date=None, due_date=None,
return_date=None, cost=None,
currency=None, barcode=None, library_notes=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/ill_request_details_step2
"""
return bal.ill_request_details_step2(req, delete_key, ill_request_id,
new_status, library_id,
request_date, expected_date,
arrival_date, due_date,
return_date, cost, currency,
barcode, library_notes, ln)
def ill_request_details_step3(req, request_info=None, ill_status=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/ill_request_details_step3
"""
request_info = eval(request_info)
return bal.ill_request_details_step3(req, request_info, ill_status, ln)
def ordered_books_details_step1(req, purchase_id=None, delete_key=None,
ln=CFG_SITE_LANG):
"""
"""
return bal.ordered_books_details_step1(req, purchase_id, delete_key, ln)
def ordered_books_details_step2(req, purchase_id=None, recid=None,
vendor_id=None, cost=None, currency=None,
status=None, order_date=None,
expected_date=None, purchase_notes=None,
library_notes=None,
ln=CFG_SITE_LANG):
"""
"""
return bal.ordered_books_details_step2(req, purchase_id, recid, vendor_id,
cost, currency, status, order_date,
expected_date,
purchase_notes, library_notes, ln)
def ordered_books_details_step3(req, purchase_id=None, recid=None,
vendor_id=None, cost=None, currency=None,
status=None, order_date=None, expected_date=None,
purchase_notes=None, library_notes=None,
ln=CFG_SITE_LANG):
"""
"""
return bal.ordered_books_details_step3(req, purchase_id, recid, vendor_id,
cost, currency, status, order_date,
expected_date, purchase_notes,
library_notes, ln)
def add_new_vendor_step1(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_vendor_step1
"""
return bal.add_new_vendor_step1(req, ln)
def add_new_vendor_step2(req, name=None, email=None, phone=None, address=None,
notes=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_vendor_step2
"""
return bal.add_new_vendor_step2(req, name, email, phone, address,
notes, ln)
def add_new_vendor_step3(req, tup_infos=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/add_new_vendor_step3
"""
tup_infos = eval(tup_infos)
return bal.add_new_vendor_step3(req, tup_infos, ln)
def update_vendor_info_step1(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_vendor_info_step1
"""
return bal.update_vendor_info_step1(req, ln)
def update_vendor_info_step2(req, column=None, string=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_vendor_info_step2
"""
return bal.update_vendor_info_step2(req, column, string, ln)
def update_vendor_info_step3(req, vendor_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_vendor_info_step3
"""
return bal.update_vendor_info_step3(req, vendor_id, ln)
def update_vendor_info_step4(req, name=None, email=None, phone=None,
address=None, vendor_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_vendor_info_step4
"""
return bal.update_vendor_info_step4(req, name, email, phone, address,
vendor_id, ln)
def update_vendor_info_step5(req, tup_infos, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/update_vendor_info_step5
"""
tup_infos = eval(tup_infos)
return bal.update_vendor_info_step5(req, tup_infos, ln)
def search_vendor_step1(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/search_vendor_step1
"""
return bal.search_vendor_step1(req, ln)
def search_vendor_step2(req, column, string, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/search_vendor_step2
"""
return bal.search_vendor_step2(req, column, string, ln)
def get_vendor_details(req, vendor_id=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_vendor_details
"""
return bal.get_vendor_details(req, vendor_id, ln)
def get_vendor_notes(req, vendor_id=None, add_notes=None, new_note=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/get_vendor_notes
"""
return bal.get_vendor_notes(req, vendor_id, add_notes, new_note, ln)
def register_ill_request_with_no_recid_step1(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_request_with_no_recid_step1
"""
return bal.register_ill_request_with_no_recid_step1(req, ln)
def register_ill_request_with_no_recid_step2(req, title=None, authors=None,
place=None, publisher=None, year=None,
edition=None, isbn=None,
period_of_interest_from=None,
period_of_interest_to=None,
additional_comments=None,
only_edition=None, key=None, string=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_request_with_no_recid_step2
"""
return bal.register_ill_request_with_no_recid_step2(req, title, authors,
place, publisher, year, edition, isbn,
period_of_interest_from,
period_of_interest_to,
additional_comments, only_edition,
key, string, ln)
def register_ill_request_with_no_recid_step3(req, book_info=None,
user_info=None,
request_details=None,
ln=CFG_SITE_LANG):
"""
"""
if type(book_info) is str:
book_info = eval(book_info)
if type(request_details) is str:
request_details = eval(request_details)
if type(user_info) is str:
user_info = user_info.split(',')
return bal.register_ill_request_with_no_recid_step3(req, book_info,
user_info,
request_details, ln)
def register_ill_request_with_no_recid_step4(req, book_info=None,
user_info=None,
request_details=None,
ln=CFG_SITE_LANG):
"""
"""
if type(book_info) is str:
book_info = eval(book_info)
if type(request_details) is str:
request_details = eval(request_details)
if type(user_info) is str:
user_info = eval(user_info)
return bal.register_ill_request_with_no_recid_step4(req, book_info,
user_info,
request_details, ln)
def get_borrower_ill_details(req, borrower_id=None, ill_id=None,
ln=CFG_SITE_LANG):
"""
"""
return bal.get_borrower_ill_details(req, borrower_id, ill_id, ln)
def get_ill_library_notes(req, ill_id=None, delete_key=None, library_notes=None,
ln=CFG_SITE_LANG):
"""
"""
return bal.get_ill_library_notes(req, ill_id, delete_key, library_notes, ln)
def get_expired_loans_with_requests(req, request_id=None, ln=CFG_SITE_LANG):
"""
"""
return bal.get_expired_loans_with_requests(req, request_id, ln)
def register_ill_book_request(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_book_request
"""
return bal.register_ill_book_request(req, ln)
def register_ill_book_request_result(req, p=None, f=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_book_request_result
"""
return bal.register_ill_book_request_result(req, p, f, ln)
def register_ill_book_request_from_borrower_page(req, borrower_id=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_book_request_from_borrower_page
"""
return bal.register_ill_book_request_from_borrower_page(req, borrower_id,
ln)
def register_ill_book_request_from_borrower_page_result(req, borrower_id=None,
p=None, f=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_book_request_from_borrower_page_result
"""
return bal.register_ill_book_request_from_borrower_page_result(req,
borrower_id, p, f, ln)
def register_ill_request_from_borrower_page_step1(req, borrower_id=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_request_from_borrower_page_step1
"""
return bal.register_ill_request_from_borrower_page_step1(req, borrower_id,
ln)
def register_ill_request_from_borrower_page_step2(req, borrower_id=None,
title=None, authors=None, place=None,
publisher=None, year=None, edition=None,
isbn=None, period_of_interest_from=None,
period_of_interest_to=None,
additional_comments=None,
only_edition=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_request_from_borrower_page_step2
"""
return bal.register_ill_request_from_borrower_page_step2(req, borrower_id,
title, authors, place,
publisher, year, edition, isbn,
period_of_interest_from,
period_of_interest_to,
additional_comments,
only_edition, ln)
def register_ill_article_request_step1(req, ln=CFG_SITE_LANG):
"""
"""
return bal.register_ill_article_request_step1(req, ln)
def register_ill_article_request_step2(req, periodical_title=None,
article_title=None, author=None,
report_number=None, volume=None,
issue=None, page=None, year=None,
issn=None, period_of_interest_from=None,
period_of_interest_to=None,
additional_comments=None,
key=None, string=None, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/register_ill_article_request_step2
"""
return bal.register_ill_article_request_step2(req, periodical_title,
article_title, author, report_number,
volume, issue, page, year, issn,
period_of_interest_from,
period_of_interest_to,
additional_comments, key, string, ln)
def register_ill_article_request_step3(req, book_info, user_info,
request_details, ln=CFG_SITE_LANG):
book_info = eval(book_info)
request_details = eval(request_details)
user_info = user_info.split(',')
return bal.register_ill_article_request_step3(req, book_info, user_info,
request_details, ln)
def ill_search(req, ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/ill_search
"""
return bal.ill_search(req, ln)
def ill_search_result(req, p=None, f=None, date_from=None, date_to=None,
ln=CFG_SITE_LANG):
"""
http://cds.cern.ch/admin/bibcirculation/bibcirculationadmin.py/ill_search_result
"""
return bal.ill_search_result(req, p, f, date_from, date_to, ln)
|
elishowk/flaskexperiment
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup, find_packages
import os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README')).read()
version = '0.1'
install_requires = [
'flask',
'pymongo',
'silk-deployment',
'simplejson'
# For more details, see:
# http://packages.python.org/distribute/setuptools.html#declaring-dependencies
]
setup(
name='commonecouteserver',
version=version,
description="Core CommOnEcoute Server",
long_description=README,
classifiers=[
"License :: OSI Approved :: GNU Affero General Public License v3",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Server"
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
],
keywords='http web server riak',
author='Elias Showk',
author_email='elishowk@nonutc.fr',
url='http://commonecoute.com',
license='GNU AGPL v3',
packages=find_packages('.'),
#package_dir = {'': '.'},
include_package_data=True,
scripts = ['bin/coeserver.py'],
zip_safe=False,
install_requires=install_requires,
#entry_points={
# 'console_scripts':
# ['commonecouteserver=commonecouteserver:main']
#}
)
|
fintech-circle/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/oauth_dispatch/adapters/__init__.py
|
55
|
"""
Adapters to provide a common interface to django-oauth2-provider (DOP) and
django-oauth-toolkit (DOT).
"""
from .dop import DOPAdapter
from .dot import DOTAdapter
|
zaina/nova
|
refs/heads/master
|
nova/tests/functional/v3/test_attach_interfaces.py
|
28
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.compute import api as compute_api
from nova import exception
from nova.network import api as network_api
from nova.tests.functional.v3 import test_servers
from nova.tests.unit import fake_network_cache_model
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class AttachInterfacesSampleJsonTest(test_servers.ServersSampleBase):
extension_name = 'os-attach-interfaces'
# TODO(gmann): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
extra_extensions_to_load = ["os-access-ips"]
def _get_flags(self):
f = super(AttachInterfacesSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.'
'attach_interfaces.Attach_interfaces')
return f
def setUp(self):
super(AttachInterfacesSampleJsonTest, self).setUp()
def fake_list_ports(self, *args, **kwargs):
uuid = kwargs.get('device_id', None)
if not uuid:
raise exception.InstanceNotFound(instance_id=None)
port_data = {
"id": "ce531f90-199f-48c0-816c-13e38010b442",
"network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "fa:16:3e:4c:2c:30",
"fixed_ips": [
{
"ip_address": "192.168.1.3",
"subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
}
],
"device_id": uuid,
}
ports = {'ports': [port_data]}
return ports
def fake_show_port(self, context, port_id=None):
if not port_id:
raise exception.PortNotFound(port_id=None)
port_data = {
"id": port_id,
"network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "fa:16:3e:4c:2c:30",
"fixed_ips": [
{
"ip_address": "192.168.1.3",
"subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
}
],
"device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
}
port = {'port': port_data}
return port
def fake_attach_interface(self, context, instance,
network_id, port_id,
requested_ip='192.168.1.3'):
if not network_id:
network_id = "fake_net_uuid"
if not port_id:
port_id = "fake_port_uuid"
vif = fake_network_cache_model.new_vif()
vif['id'] = port_id
vif['network']['id'] = network_id
vif['network']['subnets'][0]['ips'][0] = requested_ip
return vif
def fake_detach_interface(self, context, instance, port_id):
pass
self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
self.stubs.Set(network_api.API, 'show_port', fake_show_port)
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface)
self.flags(auth_strategy=None, group='neutron')
self.flags(url='http://anyhost/', group='neutron')
self.flags(timeout=30, group='neutron')
def generalize_subs(self, subs, vanilla_regexes):
subs['subnet_id'] = vanilla_regexes['uuid']
subs['net_id'] = vanilla_regexes['uuid']
subs['port_id'] = vanilla_regexes['uuid']
subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
subs['ip_address'] = vanilla_regexes['ip']
return subs
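# Illustrative note (not original code): swapping the concrete ids in
# 'subs' for the stock uuid/ip regexes lets the stored API samples match
# whatever values the fakes return; e.g. 'fa:16:3e:4c:2c:30' matches
# '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'.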
def test_list_interfaces(self):
instance_uuid = self._post_server()
response = self._do_get('servers/%s/os-interface'
% instance_uuid)
subs = {
'ip_address': '192.168.1.3',
'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
'mac_addr': 'fa:16:3e:4c:2c:30',
'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
'port_state': 'ACTIVE'
}
self._verify_response('attach-interfaces-list-resp', subs,
response, 200)
def _stub_show_for_instance(self, instance_uuid, port_id):
show_port = network_api.API().show_port(None, port_id)
show_port['port']['device_id'] = instance_uuid
self.stubs.Set(network_api.API, 'show_port', lambda *a, **k: show_port)
def test_show_interfaces(self):
instance_uuid = self._post_server()
port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
self._stub_show_for_instance(instance_uuid, port_id)
response = self._do_get('servers/%s/os-interface/%s' %
(instance_uuid, port_id))
subs = {
'ip_address': '192.168.1.3',
'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
'mac_addr': 'fa:16:3e:4c:2c:30',
'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
'port_id': port_id,
'port_state': 'ACTIVE'
}
self._verify_response('attach-interfaces-show-resp', subs,
response, 200)
def test_create_interfaces(self, instance_uuid=None):
if instance_uuid is None:
instance_uuid = self._post_server()
subs = {
'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
'ip_address': '192.168.1.3',
'port_state': 'ACTIVE',
'mac_addr': 'fa:16:3e:4c:2c:30',
}
self._stub_show_for_instance(instance_uuid, subs['port_id'])
response = self._do_post('servers/%s/os-interface'
% instance_uuid,
'attach-interfaces-create-req', subs)
subs.update(self._get_regexes())
self._verify_response('attach-interfaces-create-resp', subs,
response, 200)
def test_delete_interfaces(self):
instance_uuid = self._post_server()
port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
response = self._do_delete('servers/%s/os-interface/%s' %
(instance_uuid, port_id))
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
|